xref: /titanic_44/usr/src/uts/common/io/hxge/hxge_rxdma.c (revision 1195e687f1c03c8d57417b5999578922e20a3554)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <hxge_impl.h>
27 #include <hxge_rxdma.h>
28 #include <hpi.h>
29 #include <hpi_vir.h>
30 
31 /*
32  * Number of blocks to accumulate before re-enabling DMA
33  * when we get RBR empty.
34  */
35 #define	HXGE_RBR_EMPTY_THRESHOLD	64
36 
37 /*
38  * Globals: tunable parameters (/etc/system or adb)
39  *
40  */
41 extern uint32_t hxge_rbr_size;
42 extern uint32_t hxge_rcr_size;
43 extern uint32_t hxge_rbr_spare_size;
44 extern uint32_t hxge_mblks_pending;
45 
46 /*
47  * Tunables to manage the receive buffer blocks.
48  *
49  * hxge_rx_threshold_hi: copy all buffers.
50  * hxge_rx_bcopy_size_type: receive buffer block size type.
51  * hxge_rx_threshold_lo: copy only up to tunable block size type.
52  */
53 extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
54 extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
55 extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;
56 
57 /*
58  * Static local functions.
59  */
60 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
61 static void hxge_unmap_rxdma(p_hxge_t hxgep);
62 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
63 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
64 static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
65 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
66     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
67     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
68     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
69     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
70 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
71 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
72 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
73     uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p,
74     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
75     p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
76 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
77 	p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
78 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
79 	uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
80 	p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
81 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
82 	p_rx_rbr_ring_t rbr_p);
83 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
84 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
85 	int n_init_kick);
86 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
87 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
88 	p_rx_rcr_ring_t	rcr_p, rdc_stat_t cs, int bytes_to_read);
89 static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcr_p,
90     p_rcr_entry_t rcr_desc_rd_head_p, uint32_t num_rcrs);
91 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
92 	p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
93 	mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry);
94 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
95 	uint16_t channel);
96 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
97 static void hxge_freeb(p_rx_msg_t);
98 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
99 	p_hxge_ldv_t ldvp, rdc_stat_t cs);
100 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
101 	p_rx_rbr_ring_t rx_dmap);
102 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
103 	uint16_t channel);
104 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);
105 static void hxge_rbr_empty_restore(p_hxge_t hxgep,
106 	p_rx_rbr_ring_t rx_rbr_p);
107 
108 hxge_status_t
109 hxge_init_rxdma_channels(p_hxge_t hxgep)
110 {
111 	hxge_status_t		status = HXGE_OK;
112 	block_reset_t		reset_reg;
113 	int			i;
114 
115 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));
116 
117 	for (i = 0; i < HXGE_MAX_RDCS; i++)
118 		hxgep->rdc_first_intr[i] = B_TRUE;
119 
120 	/* Reset RDC block from PEU to clear any previous state */
121 	reset_reg.value = 0;
122 	reset_reg.bits.rdc_rst = 1;
123 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
124 	HXGE_DELAY(1000);
125 
126 	status = hxge_map_rxdma(hxgep);
127 	if (status != HXGE_OK) {
128 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
129 		    "<== hxge_init_rxdma: status 0x%x", status));
130 		return (status);
131 	}
132 
133 	status = hxge_rxdma_hw_start_common(hxgep);
134 	if (status != HXGE_OK) {
135 		hxge_unmap_rxdma(hxgep);
136 	}
137 
138 	status = hxge_rxdma_hw_start(hxgep);
139 	if (status != HXGE_OK) {
140 		hxge_unmap_rxdma(hxgep);
141 	}
142 
143 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
144 	    "<== hxge_init_rxdma_channels: status 0x%x", status));
145 	return (status);
146 }
147 
148 void
149 hxge_uninit_rxdma_channels(p_hxge_t hxgep)
150 {
151 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));
152 
153 	hxge_rxdma_hw_stop(hxgep);
154 	hxge_unmap_rxdma(hxgep);
155 
156 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uinit_rxdma_channels"));
157 }
158 
159 hxge_status_t
160 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
161     rdc_stat_t *cs_p)
162 {
163 	hpi_handle_t	handle;
164 	hpi_status_t	rs = HPI_SUCCESS;
165 	hxge_status_t	status = HXGE_OK;
166 
167 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
168 	    "<== hxge_init_rxdma_channel_cntl_stat"));
169 
170 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
171 	rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);
172 
173 	if (rs != HPI_SUCCESS) {
174 		status = HXGE_ERROR | rs;
175 	}
176 	return (status);
177 }
178 
179 
180 hxge_status_t
181 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
182     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
183     int n_init_kick)
184 {
185 	hpi_handle_t		handle;
186 	rdc_desc_cfg_t 		rdc_desc;
187 	rdc_rcr_cfg_b_t		*cfgb_p;
188 	hpi_status_t		rs = HPI_SUCCESS;
189 
190 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
191 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
192 
193 	/*
194 	 * Use configuration data composed at init time. Write to hardware the
195 	 * receive ring configurations.
196 	 */
197 	rdc_desc.mbox_enable = 1;
198 	rdc_desc.mbox_addr = mbox_p->mbox_addr;
199 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
200 	    "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
201 	    mbox_p->mbox_addr, rdc_desc.mbox_addr));
202 
203 	rdc_desc.rbr_len = rbr_p->rbb_max;
204 	rdc_desc.rbr_addr = rbr_p->rbr_addr;
205 
206 	switch (hxgep->rx_bksize_code) {
207 	case RBR_BKSIZE_4K:
208 		rdc_desc.page_size = SIZE_4KB;
209 		break;
210 	case RBR_BKSIZE_8K:
211 		rdc_desc.page_size = SIZE_8KB;
212 		break;
213 	}
214 
215 	rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
216 	rdc_desc.valid0 = 1;
217 
218 	rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
219 	rdc_desc.valid1 = 1;
220 
221 	rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
222 	rdc_desc.valid2 = 1;
223 
224 	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
225 	rdc_desc.offset = rcr_p->sw_priv_hdr_len;
226 
227 	rdc_desc.rcr_len = rcr_p->comp_size;
228 	rdc_desc.rcr_addr = rcr_p->rcr_addr;
229 
230 	cfgb_p = &(rcr_p->rcr_cfgb);
231 	rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
232 	rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
233 	rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;
234 
235 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
236 	    "rbr_len qlen %d pagesize code %d rcr_len %d",
237 	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
238 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
239 	    "size 0 %d size 1 %d size 2 %d",
240 	    rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
241 	    rbr_p->hpi_pkt_buf_size2));
242 
243 	rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
244 	if (rs != HPI_SUCCESS) {
245 		return (HXGE_ERROR | rs);
246 	}
247 
248 	/*
249 	 * Enable the timeout and threshold.
250 	 */
251 	rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
252 	    rdc_desc.rcr_threshold);
253 	if (rs != HPI_SUCCESS) {
254 		return (HXGE_ERROR | rs);
255 	}
256 
257 	rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
258 	    rdc_desc.rcr_timeout);
259 	if (rs != HPI_SUCCESS) {
260 		return (HXGE_ERROR | rs);
261 	}
262 
263 	/* Kick the DMA engine */
264 	hpi_rxdma_rdc_rbr_kick(handle, channel, n_init_kick);
265 
266 	/* Clear the rbr empty bit */
267 	(void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);
268 
269 	/*
270 	 * Enable the DMA
271 	 */
272 	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
273 	if (rs != HPI_SUCCESS) {
274 		return (HXGE_ERROR | rs);
275 	}
276 
277 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));
278 
279 	return (HXGE_OK);
280 }
281 
282 static hxge_status_t
283 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
284 {
285 	hpi_handle_t handle;
286 	hpi_status_t rs = HPI_SUCCESS;
287 
288 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));
289 
290 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
291 
292 	/* disable the DMA */
293 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
294 	if (rs != HPI_SUCCESS) {
295 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
296 		    "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
297 		return (HXGE_ERROR | rs);
298 	}
299 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
300 	return (HXGE_OK);
301 }
302 
303 hxge_status_t
304 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
305 {
306 	hpi_handle_t	handle;
307 	hxge_status_t	status = HXGE_OK;
308 
309 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
310 	    "==> hxge_rxdma_channel_rcrflush"));
311 
312 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
313 	hpi_rxdma_rdc_rcr_flush(handle, channel);
314 
315 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
316 	    "<== hxge_rxdma_channel_rcrflush"));
317 	return (status);
318 
319 }
320 
321 #define	MID_INDEX(l, r) ((r + l + 1) >> 1)
322 
323 #define	TO_LEFT -1
324 #define	TO_RIGHT 1
325 #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
326 #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
327 #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
328 #define	NO_HINT 0xffffffff
329 
330 /*ARGSUSED*/
331 hxge_status_t
332 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
333     uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
334     uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
335 {
336 	int			bufsize;
337 	uint64_t		pktbuf_pp;
338 	uint64_t		dvma_addr;
339 	rxring_info_t		*ring_info;
340 	int			base_side, end_side;
341 	int			r_index, l_index, anchor_index;
342 	int			found, search_done;
343 	uint32_t		offset, chunk_size, block_size, page_size_mask;
344 	uint32_t		chunk_index, block_index, total_index;
345 	int			max_iterations, iteration;
346 	rxbuf_index_info_t	*bufinfo;
347 
348 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));
349 
350 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
351 	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
352 	    pkt_buf_addr_pp, pktbufsz_type));
353 
354 #if defined(__i386)
355 	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
356 #else
357 	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
358 #endif
359 
360 	switch (pktbufsz_type) {
361 	case 0:
362 		bufsize = rbr_p->pkt_buf_size0;
363 		break;
364 	case 1:
365 		bufsize = rbr_p->pkt_buf_size1;
366 		break;
367 	case 2:
368 		bufsize = rbr_p->pkt_buf_size2;
369 		break;
370 	case RCR_SINGLE_BLOCK:
371 		bufsize = 0;
372 		anchor_index = 0;
373 		break;
374 	default:
375 		return (HXGE_ERROR);
376 	}
377 
378 	if (rbr_p->num_blocks == 1) {
379 		anchor_index = 0;
380 		ring_info = rbr_p->ring_info;
381 		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
382 
383 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
384 		    "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
385 		    "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
386 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));
387 
388 		goto found_index;
389 	}
390 
391 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
392 	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
393 	    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
394 
395 	ring_info = rbr_p->ring_info;
396 	found = B_FALSE;
397 	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
398 	iteration = 0;
399 	max_iterations = ring_info->max_iterations;
400 
401 	/*
402 	 * First check if this block have been seen recently. This is indicated
403 	 * by a hint which is initialized when the first buffer of the block is
404 	 * seen. The hint is reset when the last buffer of the block has been
405 	 * processed. As three block sizes are supported, three hints are kept.
406 	 * The idea behind the hints is that once the hardware  uses a block
407 	 * for a buffer  of that size, it will use it exclusively for that size
408 	 * and will use it until it is exhausted. It is assumed that there
409 	 * would a single block being used for the same buffer sizes at any
410 	 * given time.
411 	 */
412 	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
413 		anchor_index = ring_info->hint[pktbufsz_type];
414 		dvma_addr = bufinfo[anchor_index].dvma_addr;
415 		chunk_size = bufinfo[anchor_index].buf_size;
416 		if ((pktbuf_pp >= dvma_addr) &&
417 		    (pktbuf_pp < (dvma_addr + chunk_size))) {
418 			found = B_TRUE;
419 			/*
420 			 * check if this is the last buffer in the block If so,
421 			 * then reset the hint for the size;
422 			 */
423 
424 			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
425 				ring_info->hint[pktbufsz_type] = NO_HINT;
426 		}
427 	}
428 
429 	if (found == B_FALSE) {
430 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
431 		    "==> hxge_rxbuf_pp_to_vp: (!found)"
432 		    "buf_pp $%p btype %d anchor_index %d",
433 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
434 
435 		/*
436 		 * This is the first buffer of the block of this size. Need to
437 		 * search the whole information array. the search algorithm
438 		 * uses a binary tree search algorithm. It assumes that the
439 		 * information is already sorted with increasing order info[0]
440 		 * < info[1] < info[2]  .... < info[n-1] where n is the size of
441 		 * the information array
442 		 */
443 		r_index = rbr_p->num_blocks - 1;
444 		l_index = 0;
445 		search_done = B_FALSE;
446 		anchor_index = MID_INDEX(r_index, l_index);
447 		while (search_done == B_FALSE) {
448 			if ((r_index == l_index) ||
449 			    (iteration >= max_iterations))
450 				search_done = B_TRUE;
451 
452 			end_side = TO_RIGHT;	/* to the right */
453 			base_side = TO_LEFT;	/* to the left */
454 			/* read the DVMA address information and sort it */
455 			dvma_addr = bufinfo[anchor_index].dvma_addr;
456 			chunk_size = bufinfo[anchor_index].buf_size;
457 
458 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
459 			    "==> hxge_rxbuf_pp_to_vp: (searching)"
460 			    "buf_pp $%p btype %d "
461 			    "anchor_index %d chunk_size %d dvmaaddr $%p",
462 			    pkt_buf_addr_pp, pktbufsz_type, anchor_index,
463 			    chunk_size, dvma_addr));
464 
465 			if (pktbuf_pp >= dvma_addr)
466 				base_side = TO_RIGHT;	/* to the right */
467 			if (pktbuf_pp < (dvma_addr + chunk_size))
468 				end_side = TO_LEFT;	/* to the left */
469 
470 			switch (base_side + end_side) {
471 			case IN_MIDDLE:
472 				/* found */
473 				found = B_TRUE;
474 				search_done = B_TRUE;
475 				if ((pktbuf_pp + bufsize) <
476 				    (dvma_addr + chunk_size))
477 					ring_info->hint[pktbufsz_type] =
478 					    bufinfo[anchor_index].buf_index;
479 				break;
480 			case BOTH_RIGHT:
481 				/* not found: go to the right */
482 				l_index = anchor_index + 1;
483 				anchor_index = MID_INDEX(r_index, l_index);
484 				break;
485 
486 			case BOTH_LEFT:
487 				/* not found: go to the left */
488 				r_index = anchor_index - 1;
489 				anchor_index = MID_INDEX(r_index, l_index);
490 				break;
491 			default:	/* should not come here */
492 				return (HXGE_ERROR);
493 			}
494 			iteration++;
495 		}
496 
497 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
498 		    "==> hxge_rxbuf_pp_to_vp: (search done)"
499 		    "buf_pp $%p btype %d anchor_index %d",
500 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
501 	}
502 
503 	if (found == B_FALSE) {
504 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
505 		    "==> hxge_rxbuf_pp_to_vp: (search failed)"
506 		    "buf_pp $%p btype %d anchor_index %d",
507 		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
508 		return (HXGE_ERROR);
509 	}
510 
511 found_index:
512 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
513 	    "==> hxge_rxbuf_pp_to_vp: (FOUND1)"
514 	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
515 	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index));
516 
517 	/* index of the first block in this chunk */
518 	chunk_index = bufinfo[anchor_index].start_index;
519 	dvma_addr = bufinfo[anchor_index].dvma_addr;
520 	page_size_mask = ring_info->block_size_mask;
521 
522 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
523 	    "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
524 	    "buf_pp $%p btype %d bufsize %d "
525 	    "anchor_index %d chunk_index %d dvma $%p",
526 	    pkt_buf_addr_pp, pktbufsz_type, bufsize,
527 	    anchor_index, chunk_index, dvma_addr));
528 
529 	offset = pktbuf_pp - dvma_addr;	/* offset within the chunk */
530 	block_size = rbr_p->block_size;	/* System  block(page) size */
531 
532 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
533 	    "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
534 	    "buf_pp $%p btype %d bufsize %d "
535 	    "anchor_index %d chunk_index %d dvma $%p "
536 	    "offset %d block_size %d",
537 	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index,
538 	    chunk_index, dvma_addr, offset, block_size));
539 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index"));
540 
541 	block_index = (offset / block_size);	/* index within chunk */
542 	total_index = chunk_index + block_index;
543 
544 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
545 	    "==> hxge_rxbuf_pp_to_vp: "
546 	    "total_index %d dvma_addr $%p "
547 	    "offset %d block_size %d "
548 	    "block_index %d ",
549 	    total_index, dvma_addr, offset, block_size, block_index));
550 
551 #if defined(__i386)
552 	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
553 	    (uint32_t)offset);
554 #else
555 	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
556 	    offset);
557 #endif
558 
559 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
560 	    "==> hxge_rxbuf_pp_to_vp: "
561 	    "total_index %d dvma_addr $%p "
562 	    "offset %d block_size %d "
563 	    "block_index %d "
564 	    "*pkt_buf_addr_p $%p",
565 	    total_index, dvma_addr, offset, block_size,
566 	    block_index, *pkt_buf_addr_p));
567 
568 	*msg_index = total_index;
569 	*bufoffset = (offset & page_size_mask);
570 
571 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
572 	    "==> hxge_rxbuf_pp_to_vp: get msg index: "
573 	    "msg_index %d bufoffset_index %d",
574 	    *msg_index, *bufoffset));
575 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));
576 
577 	return (HXGE_OK);
578 }
579 
580 
581 /*
582  * used by quick sort (qsort) function
583  * to perform comparison
584  */
585 static int
586 hxge_sort_compare(const void *p1, const void *p2)
587 {
588 
589 	rxbuf_index_info_t *a, *b;
590 
591 	a = (rxbuf_index_info_t *)p1;
592 	b = (rxbuf_index_info_t *)p2;
593 
594 	if (a->dvma_addr > b->dvma_addr)
595 		return (1);
596 	if (a->dvma_addr < b->dvma_addr)
597 		return (-1);
598 	return (0);
599 }
600 
601 /*
602  * Grabbed this sort implementation from common/syscall/avl.c
603  *
604  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
605  * v = Ptr to array/vector of objs
606  * n = # objs in the array
607  * s = size of each obj (must be multiples of a word size)
608  * f = ptr to function to compare two objs
609  *	returns (-1 = less than, 0 = equal, 1 = greater than
610  */
611 void
612 hxge_ksort(caddr_t v, int n, int s, int (*f) ())
613 {
614 	int		g, i, j, ii;
615 	unsigned int	*p1, *p2;
616 	unsigned int	tmp;
617 
618 	/* No work to do */
619 	if (v == NULL || n <= 1)
620 		return;
621 	/* Sanity check on arguments */
622 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
623 	ASSERT(s > 0);
624 
625 	for (g = n / 2; g > 0; g /= 2) {
626 		for (i = g; i < n; i++) {
627 			for (j = i - g; j >= 0 &&
628 			    (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
629 				p1 = (unsigned *)(v + j * s);
630 				p2 = (unsigned *)(v + (j + g) * s);
631 				for (ii = 0; ii < s / 4; ii++) {
632 					tmp = *p1;
633 					*p1++ = *p2;
634 					*p2++ = tmp;
635 				}
636 			}
637 		}
638 	}
639 }
640 
641 /*
642  * Initialize data structures required for rxdma
643  * buffer dvma->vmem address lookup
644  */
645 /*ARGSUSED*/
646 static hxge_status_t
647 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
648 {
649 	int		index;
650 	rxring_info_t	*ring_info;
651 	int		max_iteration = 0, max_index = 0;
652 
653 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));
654 
655 	ring_info = rbrp->ring_info;
656 	ring_info->hint[0] = NO_HINT;
657 	ring_info->hint[1] = NO_HINT;
658 	ring_info->hint[2] = NO_HINT;
659 	ring_info->hint[3] = NO_HINT;
660 	max_index = rbrp->num_blocks;
661 
662 	/* read the DVMA address information and sort it */
663 	/* do init of the information array */
664 
665 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
666 	    " hxge_rxbuf_index_info_init Sort ptrs"));
667 
668 	/* sort the array */
669 	hxge_ksort((void *) ring_info->buffer, max_index,
670 	    sizeof (rxbuf_index_info_t), hxge_sort_compare);
671 
672 	for (index = 0; index < max_index; index++) {
673 		HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
674 		    " hxge_rxbuf_index_info_init: sorted chunk %d "
675 		    " ioaddr $%p kaddr $%p size %x",
676 		    index, ring_info->buffer[index].dvma_addr,
677 		    ring_info->buffer[index].kaddr,
678 		    ring_info->buffer[index].buf_size));
679 	}
680 
681 	max_iteration = 0;
682 	while (max_index >= (1ULL << max_iteration))
683 		max_iteration++;
684 	ring_info->max_iterations = max_iteration + 1;
685 
686 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
687 	    " hxge_rxbuf_index_info_init Find max iter %d",
688 	    ring_info->max_iterations));
689 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init"));
690 
691 	return (HXGE_OK);
692 }
693 
694 /*ARGSUSED*/
695 void
696 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p)
697 {
698 #ifdef	HXGE_DEBUG
699 
700 	uint32_t bptr;
701 	uint64_t pp;
702 
703 	bptr = entry_p->bits.pkt_buf_addr;
704 
705 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
706 	    "\trcr entry $%p "
707 	    "\trcr entry 0x%0llx "
708 	    "\trcr entry 0x%08x "
709 	    "\trcr entry 0x%08x "
710 	    "\tvalue 0x%0llx\n"
711 	    "\tmulti = %d\n"
712 	    "\tpkt_type = 0x%x\n"
713 	    "\terror = 0x%04x\n"
714 	    "\tl2_len = %d\n"
715 	    "\tpktbufsize = %d\n"
716 	    "\tpkt_buf_addr = $%p\n"
717 	    "\tpkt_buf_addr (<< 6) = $%p\n",
718 	    entry_p,
719 	    *(int64_t *)entry_p,
720 	    *(int32_t *)entry_p,
721 	    *(int32_t *)((char *)entry_p + 32),
722 	    entry_p->value,
723 	    entry_p->bits.multi,
724 	    entry_p->bits.pkt_type,
725 	    entry_p->bits.error,
726 	    entry_p->bits.l2_len,
727 	    entry_p->bits.pktbufsz,
728 	    bptr,
729 	    entry_p->bits.pkt_buf_addr_l));
730 
731 	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
732 	    RCR_PKT_BUF_ADDR_SHIFT;
733 
734 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
735 	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
736 #endif
737 }
738 
739 /*ARGSUSED*/
740 void
741 hxge_rxdma_stop(p_hxge_t hxgep)
742 {
743 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop"));
744 
745 	MUTEX_ENTER(&hxgep->vmac_lock);
746 	(void) hxge_rx_vmac_disable(hxgep);
747 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
748 	MUTEX_EXIT(&hxgep->vmac_lock);
749 
750 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop"));
751 }
752 
753 void
754 hxge_rxdma_stop_reinit(p_hxge_t hxgep)
755 {
756 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit"));
757 
758 	(void) hxge_rxdma_stop(hxgep);
759 	(void) hxge_uninit_rxdma_channels(hxgep);
760 	(void) hxge_init_rxdma_channels(hxgep);
761 
762 	MUTEX_ENTER(&hxgep->vmac_lock);
763 	(void) hxge_rx_vmac_enable(hxgep);
764 	MUTEX_EXIT(&hxgep->vmac_lock);
765 
766 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit"));
767 }
768 
769 hxge_status_t
770 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
771 {
772 	int			i, ndmas;
773 	uint16_t		channel;
774 	p_rx_rbr_rings_t	rx_rbr_rings;
775 	p_rx_rbr_ring_t		*rbr_rings;
776 	hpi_handle_t		handle;
777 	hpi_status_t		rs = HPI_SUCCESS;
778 	hxge_status_t		status = HXGE_OK;
779 
780 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
781 	    "==> hxge_rxdma_hw_mode: mode %d", enable));
782 
783 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
784 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
785 		    "<== hxge_rxdma_mode: not initialized"));
786 		return (HXGE_ERROR);
787 	}
788 
789 	rx_rbr_rings = hxgep->rx_rbr_rings;
790 	if (rx_rbr_rings == NULL) {
791 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
792 		    "<== hxge_rxdma_mode: NULL ring pointer"));
793 		return (HXGE_ERROR);
794 	}
795 
796 	if (rx_rbr_rings->rbr_rings == NULL) {
797 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
798 		    "<== hxge_rxdma_mode: NULL rbr rings pointer"));
799 		return (HXGE_ERROR);
800 	}
801 
802 	ndmas = rx_rbr_rings->ndmas;
803 	if (!ndmas) {
804 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
805 		    "<== hxge_rxdma_mode: no channel"));
806 		return (HXGE_ERROR);
807 	}
808 
809 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
810 	    "==> hxge_rxdma_mode (ndmas %d)", ndmas));
811 
812 	rbr_rings = rx_rbr_rings->rbr_rings;
813 
814 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
815 
816 	for (i = 0; i < ndmas; i++) {
817 		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
818 			continue;
819 		}
820 		channel = rbr_rings[i]->rdc;
821 		if (enable) {
822 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
823 			    "==> hxge_rxdma_hw_mode: channel %d (enable)",
824 			    channel));
825 			rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
826 		} else {
827 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
828 			    "==> hxge_rxdma_hw_mode: channel %d (disable)",
829 			    channel));
830 			rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
831 		}
832 	}
833 
834 	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
835 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
836 	    "<== hxge_rxdma_hw_mode: status 0x%x", status));
837 
838 	return (status);
839 }
840 
841 /*
842  * Static functions start here.
843  */
844 static p_rx_msg_t
845 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p)
846 {
847 	p_rx_msg_t		hxge_mp = NULL;
848 	p_hxge_dma_common_t	dmamsg_p;
849 	uchar_t			*buffer;
850 
851 	hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
852 	if (hxge_mp == NULL) {
853 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
854 		    "Allocation of a rx msg failed."));
855 		goto hxge_allocb_exit;
856 	}
857 
858 	hxge_mp->use_buf_pool = B_FALSE;
859 	if (dmabuf_p) {
860 		hxge_mp->use_buf_pool = B_TRUE;
861 
862 		dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma;
863 		*dmamsg_p = *dmabuf_p;
864 		dmamsg_p->nblocks = 1;
865 		dmamsg_p->block_size = size;
866 		dmamsg_p->alength = size;
867 		buffer = (uchar_t *)dmabuf_p->kaddrp;
868 
869 		dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size);
870 		dmabuf_p->ioaddr_pp = (void *)
871 		    ((char *)dmabuf_p->ioaddr_pp + size);
872 
873 		dmabuf_p->alength -= size;
874 		dmabuf_p->offset += size;
875 		dmabuf_p->dma_cookie.dmac_laddress += size;
876 		dmabuf_p->dma_cookie.dmac_size -= size;
877 	} else {
878 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
879 		if (buffer == NULL) {
880 			HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
881 			    "Allocation of a receive page failed."));
882 			goto hxge_allocb_fail1;
883 		}
884 	}
885 
886 	hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb);
887 	if (hxge_mp->rx_mblk_p == NULL) {
888 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed."));
889 		goto hxge_allocb_fail2;
890 	}
891 	hxge_mp->buffer = buffer;
892 	hxge_mp->block_size = size;
893 	hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb;
894 	hxge_mp->freeb.free_arg = (caddr_t)hxge_mp;
895 	hxge_mp->ref_cnt = 1;
896 	hxge_mp->free = B_TRUE;
897 	hxge_mp->rx_use_bcopy = B_FALSE;
898 
899 	atomic_inc_32(&hxge_mblks_pending);
900 
901 	goto hxge_allocb_exit;
902 
903 hxge_allocb_fail2:
904 	if (!hxge_mp->use_buf_pool) {
905 		KMEM_FREE(buffer, size);
906 	}
907 hxge_allocb_fail1:
908 	KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
909 	hxge_mp = NULL;
910 
911 hxge_allocb_exit:
912 	return (hxge_mp);
913 }
914 
915 p_mblk_t
916 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
917 {
918 	p_mblk_t mp;
919 
920 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
921 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
922 	    "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));
923 
924 	mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
925 	if (mp == NULL) {
926 		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
927 		goto hxge_dupb_exit;
928 	}
929 
930 	atomic_inc_32(&hxge_mp->ref_cnt);
931 
932 hxge_dupb_exit:
933 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
934 	return (mp);
935 }
936 
937 p_mblk_t
938 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
939 {
940 	p_mblk_t	mp;
941 	uchar_t		*dp;
942 
943 	mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
944 	if (mp == NULL) {
945 		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
946 		goto hxge_dupb_bcopy_exit;
947 	}
948 	dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
949 	bcopy((void *) &hxge_mp->buffer[offset], dp, size);
950 	mp->b_wptr = dp + size;
951 
952 hxge_dupb_bcopy_exit:
953 
954 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
955 
956 	return (mp);
957 }
958 
959 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
960     p_rx_msg_t rx_msg_p);
961 
962 void
963 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
964 {
965 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));
966 
967 	/* Reuse this buffer */
968 	rx_msg_p->free = B_FALSE;
969 	rx_msg_p->cur_usage_cnt = 0;
970 	rx_msg_p->max_usage_cnt = 0;
971 	rx_msg_p->pkt_buf_size = 0;
972 
973 	if (rx_rbr_p->rbr_use_bcopy) {
974 		rx_msg_p->rx_use_bcopy = B_FALSE;
975 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
976 	}
977 	atomic_dec_32(&rx_rbr_p->rbr_used);
978 
979 	/*
980 	 * Get the rbr header pointer and its offset index.
981 	 */
982 	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
983 	    rx_rbr_p->rbr_wrap_mask);
984 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
985 
986 	/*
987 	 * Accumulate some buffers in the ring before re-enabling the
988 	 * DMA channel, if rbr empty was signaled.
989 	 */
990 	hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1);
991 	if (rx_rbr_p->rbr_is_empty && (rx_rbr_p->rbb_max -
992 	    rx_rbr_p->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) {
993 		hxge_rbr_empty_restore(hxgep, rx_rbr_p);
994 	}
995 
996 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
997 	    "<== hxge_post_page (channel %d post_next_index %d)",
998 	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
999 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
1000 }
1001 
1002 void
1003 hxge_freeb(p_rx_msg_t rx_msg_p)
1004 {
1005 	size_t		size;
1006 	uchar_t		*buffer = NULL;
1007 	int		ref_cnt;
1008 	boolean_t	free_state = B_FALSE;
1009 	rx_rbr_ring_t	*ring = rx_msg_p->rx_rbr_p;
1010 
1011 	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
1012 	HXGE_DEBUG_MSG((NULL, MEM2_CTL,
1013 	    "hxge_freeb:rx_msg_p = $%p (block pending %d)",
1014 	    rx_msg_p, hxge_mblks_pending));
1015 
1016 	if (ring == NULL)
1017 		return;
1018 
1019 	/*
1020 	 * This is to prevent posting activities while we are recovering
1021 	 * from fatal errors. This should not be a performance drag since
1022 	 * ref_cnt != 0 most times.
1023 	 */
1024 	if (ring->rbr_state == RBR_POSTING)
1025 		MUTEX_ENTER(&ring->post_lock);
1026 
1027 	/*
1028 	 * First we need to get the free state, then
1029 	 * atomic decrement the reference count to prevent
1030 	 * the race condition with the interrupt thread that
1031 	 * is processing a loaned up buffer block.
1032 	 */
1033 	free_state = rx_msg_p->free;
1034 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
1035 	if (!ref_cnt) {
1036 		atomic_dec_32(&hxge_mblks_pending);
1037 
1038 		buffer = rx_msg_p->buffer;
1039 		size = rx_msg_p->block_size;
1040 
1041 		HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: "
1042 		    "will free: rx_msg_p = $%p (block pending %d)",
1043 		    rx_msg_p, hxge_mblks_pending));
1044 
1045 		if (!rx_msg_p->use_buf_pool) {
1046 			KMEM_FREE(buffer, size);
1047 		}
1048 
1049 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
1050 		/*
1051 		 * Decrement the receive buffer ring's reference
1052 		 * count, too.
1053 		 */
1054 		atomic_dec_32(&ring->rbr_ref_cnt);
1055 
1056 		/*
1057 		 * Free the receive buffer ring, iff
1058 		 * 1. all the receive buffers have been freed
1059 		 * 2. and we are in the proper state (that is,
1060 		 *    we are not UNMAPPING).
1061 		 */
1062 		if (ring->rbr_ref_cnt == 0 &&
1063 		    ring->rbr_state == RBR_UNMAPPED) {
1064 			KMEM_FREE(ring, sizeof (*ring));
1065 			/* post_lock has been destroyed already */
1066 			return;
1067 		}
1068 	}
1069 
1070 	/*
1071 	 * Repost buffer.
1072 	 */
1073 	if (free_state && (ref_cnt == 1)) {
1074 		HXGE_DEBUG_MSG((NULL, RX_CTL,
1075 		    "hxge_freeb: post page $%p:", rx_msg_p));
1076 		if (ring->rbr_state == RBR_POSTING)
1077 			hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p);
1078 	}
1079 
1080 	if (ring->rbr_state == RBR_POSTING)
1081 		MUTEX_EXIT(&ring->post_lock);
1082 
1083 	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb"));
1084 }
1085 
1086 uint_t
1087 hxge_rx_intr(caddr_t arg1, caddr_t arg2)
1088 {
1089 	p_hxge_ring_handle_t	rhp;
1090 	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
1091 	p_hxge_t		hxgep = (p_hxge_t)arg2;
1092 	p_hxge_ldg_t		ldgp;
1093 	uint8_t			channel;
1094 	hpi_handle_t		handle;
1095 	rdc_stat_t		cs;
1096 	p_rx_rcr_ring_t		ring;
1097 	p_rx_rbr_ring_t		rbrp;
1098 	mblk_t			*mp = NULL;
1099 
1100 	if (ldvp == NULL) {
1101 		HXGE_DEBUG_MSG((NULL, RX_INT_CTL,
1102 		    "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1103 		return (DDI_INTR_UNCLAIMED);
1104 	}
1105 
1106 	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
1107 		hxgep = ldvp->hxgep;
1108 	}
1109 
1110 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1111 	    "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
1112 
1113 	/*
1114 	 * This interrupt handler is for a specific receive dma channel.
1115 	 */
1116 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1117 
1118 	/*
1119 	 * Get the control and status for this channel.
1120 	 */
1121 	channel = ldvp->vdma_index;
1122 	ring = hxgep->rx_rcr_rings->rcr_rings[channel];
1123 	rhp = &hxgep->rx_ring_handles[channel];
1124 	ldgp = ldvp->ldgp;
1125 
1126 	ASSERT(ring != NULL);
1127 #if defined(DEBUG)
1128 	if (rhp->started) {
1129 		ASSERT(ring->ldgp == ldgp);
1130 		ASSERT(ring->ldvp == ldvp);
1131 	}
1132 #endif
1133 
1134 	MUTEX_ENTER(&ring->lock);
1135 
1136 	if (!ring->poll_flag) {
1137 		RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
1138 		cs.bits.ptrread = 0;
1139 		cs.bits.pktread = 0;
1140 		RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1141 
1142 		/*
1143 		 * Process packets, if we are not in polling mode, the ring is
1144 		 * started and the interface is started. The MAC layer under
1145 		 * load will be operating in polling mode for RX traffic.
1146 		 */
1147 		if ((rhp->started) &&
1148 		    (hxgep->hxge_mac_state == HXGE_MAC_STARTED)) {
1149 			mp = hxge_rx_pkts(hxgep, ldvp->vdma_index,
1150 			    ldvp, ring, cs, -1);
1151 		}
1152 
1153 		/* Process error events. */
1154 		if (cs.value & RDC_STAT_ERROR) {
1155 			MUTEX_EXIT(&ring->lock);
1156 			(void) hxge_rx_err_evnts(hxgep, channel, ldvp, cs);
1157 			MUTEX_ENTER(&ring->lock);
1158 		}
1159 
1160 		/*
1161 		 * Enable the mailbox update interrupt if we want to use
1162 		 * mailbox. We probably don't need to use mailbox as it only
1163 		 * saves us one pio read.  Also write 1 to rcrthres and
1164 		 * rcrto to clear these two edge triggered bits.
1165 		 */
1166 		rbrp = hxgep->rx_rbr_rings->rbr_rings[channel];
1167 		MUTEX_ENTER(&rbrp->post_lock);
1168 		if (!rbrp->rbr_is_empty) {
1169 			cs.value = 0;
1170 			cs.bits.mex = 1;
1171 			cs.bits.ptrread = 0;
1172 			cs.bits.pktread = 0;
1173 			RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
1174 		}
1175 		MUTEX_EXIT(&rbrp->post_lock);
1176 
1177 		if (ldgp->nldvs == 1) {
1178 			/*
1179 			 * Re-arm the group.
1180 			 */
1181 			(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
1182 			    ldgp->ldg_timer);
1183 		}
1184 	} else if ((ldgp->nldvs == 1) && (ring->poll_flag)) {
1185 		/*
1186 		 * Disarm the group, if we are not a shared interrupt.
1187 		 */
1188 		(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_FALSE, 0);
1189 	} else if (ring->poll_flag) {
1190 		/*
1191 		 * Mask-off this device from the group.
1192 		 */
1193 		(void) hpi_intr_mask_set(handle, ldvp->ldv, 1);
1194 	}
1195 
1196 	MUTEX_EXIT(&ring->lock);
1197 
1198 	/*
1199 	 * Send the packets up the stack.
1200 	 */
1201 	if (mp != NULL) {
1202 		mac_rx_ring(hxgep->mach, ring->rcr_mac_handle, mp,
1203 		    ring->rcr_gen_num);
1204 	}
1205 
1206 	HXGE_DEBUG_MSG((NULL, RX_INT_CTL, "<== hxge_rx_intr"));
1207 	return (DDI_INTR_CLAIMED);
1208 }
1209 
1210 /*
1211  * Enable polling for a ring. Interrupt for the ring is disabled when
1212  * the hxge interrupt comes (see hxge_rx_intr).
1213  */
1214 int
1215 hxge_enable_poll(void *arg)
1216 {
1217 	p_hxge_ring_handle_t	ring_handle = (p_hxge_ring_handle_t)arg;
1218 	p_rx_rcr_ring_t		ringp;
1219 	p_hxge_t		hxgep;
1220 	p_hxge_ldg_t		ldgp;
1221 
1222 	if (ring_handle == NULL) {
1223 		ASSERT(ring_handle != NULL);
1224 		return (1);
1225 	}
1226 
1227 
1228 	hxgep = ring_handle->hxgep;
1229 	ringp = hxgep->rx_rcr_rings->rcr_rings[ring_handle->index];
1230 
1231 	MUTEX_ENTER(&ringp->lock);
1232 
1233 	/*
1234 	 * Are we already polling ?
1235 	 */
1236 	if (ringp->poll_flag) {
1237 		MUTEX_EXIT(&ringp->lock);
1238 		return (1);
1239 	}
1240 
1241 	ldgp = ringp->ldgp;
1242 	if (ldgp == NULL) {
1243 		MUTEX_EXIT(&ringp->lock);
1244 		return (1);
1245 	}
1246 
1247 	/*
1248 	 * Enable polling
1249 	 */
1250 	ringp->poll_flag = B_TRUE;
1251 
1252 	MUTEX_EXIT(&ringp->lock);
1253 	return (0);
1254 }
1255 
1256 /*
1257  * Disable polling for a ring and enable its interrupt.
1258  */
1259 int
1260 hxge_disable_poll(void *arg)
1261 {
1262 	p_hxge_ring_handle_t	ring_handle = (p_hxge_ring_handle_t)arg;
1263 	p_rx_rcr_ring_t		ringp;
1264 	p_hxge_t		hxgep;
1265 
1266 	if (ring_handle == NULL) {
1267 		ASSERT(ring_handle != NULL);
1268 		return (0);
1269 	}
1270 
1271 	hxgep = ring_handle->hxgep;
1272 	ringp = hxgep->rx_rcr_rings->rcr_rings[ring_handle->index];
1273 
1274 	MUTEX_ENTER(&ringp->lock);
1275 
1276 	/*
1277 	 * Disable polling: enable interrupt
1278 	 */
1279 	if (ringp->poll_flag) {
1280 		hpi_handle_t		handle;
1281 		rdc_stat_t		cs;
1282 		p_hxge_ldg_t		ldgp;
1283 
1284 		/*
1285 		 * Get the control and status for this channel.
1286 		 */
1287 		handle = HXGE_DEV_HPI_HANDLE(hxgep);
1288 
1289 		/*
1290 		 * Rearm this logical group if this is a single device
1291 		 * group.
1292 		 */
1293 		ldgp = ringp->ldgp;
1294 		if (ldgp == NULL) {
1295 			MUTEX_EXIT(&ringp->lock);
1296 			return (1);
1297 		}
1298 
1299 		ringp->poll_flag = B_FALSE;
1300 
1301 		/*
1302 		 * Enable mailbox update, to start interrupts again.
1303 		 */
1304 		cs.value = 0ULL;
1305 		cs.bits.mex = 1;
1306 		cs.bits.pktread = 0;
1307 		cs.bits.ptrread = 0;
1308 		RXDMA_REG_WRITE64(handle, RDC_STAT, ringp->rdc, cs.value);
1309 
1310 		if (ldgp->nldvs == 1) {
1311 			/*
1312 			 * Re-arm the group, since it is the only member
1313 			 * of the group.
1314 			 */
1315 			(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
1316 			    ldgp->ldg_timer);
1317 		} else {
1318 			/*
1319 			 * Mask-on interrupts for the device and re-arm
1320 			 * the group.
1321 			 */
1322 			(void) hpi_intr_mask_set(handle, ringp->ldvp->ldv, 0);
1323 			(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
1324 			    ldgp->ldg_timer);
1325 		}
1326 	}
1327 	MUTEX_EXIT(&ringp->lock);
1328 	return (0);
1329 }
1330 
1331 /*
1332  * Poll 'bytes_to_pickup' bytes of message from the rx ring.
1333  */
1334 mblk_t *
1335 hxge_rx_poll(void *arg, int bytes_to_pickup)
1336 {
1337 	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)arg;
1338 	p_rx_rcr_ring_t		ring;
1339 	p_hxge_t		hxgep;
1340 	hpi_handle_t		handle;
1341 	rdc_stat_t		cs;
1342 	mblk_t			*mblk;
1343 	p_hxge_ldv_t		ldvp;
1344 
1345 	hxgep = rhp->hxgep;
1346 
1347 	/*
1348 	 * Get the control and status for this channel.
1349 	 */
1350 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1351 	ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
1352 
1353 	MUTEX_ENTER(&ring->lock);
1354 	ASSERT(ring->poll_flag == B_TRUE);
1355 	ASSERT(rhp->started);
1356 
1357 	if (!ring->poll_flag) {
1358 		MUTEX_EXIT(&ring->lock);
1359 		return ((mblk_t *)NULL);
1360 	}
1361 
1362 	/*
1363 	 * Get the control and status bits for the ring.
1364 	 */
1365 	RXDMA_REG_READ64(handle, RDC_STAT, rhp->index, &cs.value);
1366 	cs.bits.ptrread = 0;
1367 	cs.bits.pktread = 0;
1368 	RXDMA_REG_WRITE64(handle, RDC_STAT, rhp->index, cs.value);
1369 
1370 	/*
1371 	 * Process packets.
1372 	 */
1373 	mblk = hxge_rx_pkts(hxgep, ring->ldvp->vdma_index,
1374 	    ring->ldvp, ring, cs, bytes_to_pickup);
1375 	ldvp = ring->ldvp;
1376 
1377 	/*
1378 	 * Process Error Events.
1379 	 */
1380 	if (ldvp && (cs.value & RDC_STAT_ERROR)) {
1381 		/*
1382 		 * Recovery routines will grab the RCR ring lock.
1383 		 */
1384 		MUTEX_EXIT(&ring->lock);
1385 		(void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
1386 		MUTEX_ENTER(&ring->lock);
1387 	}
1388 
1389 	MUTEX_EXIT(&ring->lock);
1390 	return (mblk);
1391 }
1392 
1393 /*ARGSUSED*/
1394 mblk_t *
1395 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
1396     p_rx_rcr_ring_t rcrp, rdc_stat_t cs, int bytes_to_read)
1397 {
1398 	hpi_handle_t		handle;
1399 	uint8_t			channel;
1400 	uint32_t		comp_rd_index;
1401 	p_rcr_entry_t		rcr_desc_rd_head_p;
1402 	p_rcr_entry_t		rcr_desc_rd_head_pp;
1403 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
1404 	uint16_t		qlen, nrcr_read, npkt_read;
1405 	uint32_t		qlen_hw, npkts, num_rcrs;
1406 	uint32_t		invalid_rcr_entry;
1407 	boolean_t		multi;
1408 	rdc_stat_t		pktcs;
1409 	rdc_rcr_cfg_b_t		rcr_cfg_b;
1410 	uint64_t		rcr_head_index, rcr_tail_index;
1411 	uint64_t		rcr_tail;
1412 	rdc_rcr_tail_t		rcr_tail_reg;
1413 	p_hxge_rx_ring_stats_t	rdc_stats;
1414 	int			totallen = 0;
1415 
1416 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
1417 	    "channel %d", vindex, ldvp->channel));
1418 
1419 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1420 	channel = rcrp->rdc;
1421 	if (channel != ldvp->channel) {
1422 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
1423 		    "channel %d, and rcr channel %d not matched.",
1424 		    vindex, ldvp->channel, channel));
1425 		return (NULL);
1426 	}
1427 
1428 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1429 	    "==> hxge_rx_pkts: START: rcr channel %d "
1430 	    "head_p $%p head_pp $%p  index %d ",
1431 	    channel, rcrp->rcr_desc_rd_head_p,
1432 	    rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index));
1433 
1434 	(void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
1435 	RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value);
1436 	rcr_tail = rcr_tail_reg.bits.tail;
1437 
1438 	if (!qlen) {
1439 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1440 		    "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
1441 		    channel, qlen));
1442 		return (NULL);
1443 	}
1444 
1445 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
1446 	    "qlen %d", channel, qlen));
1447 
1448 	comp_rd_index = rcrp->comp_rd_index;
1449 
1450 	rcr_desc_rd_head_p = rcrp->rcr_desc_rd_head_p;
1451 	rcr_desc_rd_head_pp = rcrp->rcr_desc_rd_head_pp;
1452 	nrcr_read = npkt_read = 0;
1453 
1454 	if (hxgep->rdc_first_intr[channel])
1455 		qlen_hw = qlen;
1456 	else
1457 		qlen_hw = qlen - 1;
1458 
1459 	head_mp = NULL;
1460 	tail_mp = &head_mp;
1461 	nmp = mp_cont = NULL;
1462 	multi = B_FALSE;
1463 
1464 	rcr_head_index = rcrp->rcr_desc_rd_head_p - rcrp->rcr_desc_first_p;
1465 	rcr_tail_index = rcr_tail - rcrp->rcr_tail_begin;
1466 
1467 	if (rcr_tail_index >= rcr_head_index) {
1468 		num_rcrs = rcr_tail_index - rcr_head_index;
1469 	} else {
1470 		/* rcr_tail has wrapped around */
1471 		num_rcrs = (rcrp->comp_size - rcr_head_index) + rcr_tail_index;
1472 	}
1473 
1474 	npkts = hxge_scan_for_last_eop(rcrp, rcr_desc_rd_head_p, num_rcrs);
1475 	if (!npkts)
1476 		return (NULL);
1477 
1478 	if (qlen_hw > npkts) {
1479 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1480 		    "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
1481 		    channel, qlen_hw, qlen_sw));
1482 		qlen_hw = npkts;
1483 	}
1484 
1485 	while (qlen_hw) {
1486 #ifdef HXGE_DEBUG
1487 		hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
1488 #endif
1489 		/*
1490 		 * Process one completion ring entry.
1491 		 */
1492 		invalid_rcr_entry = 0;
1493 		hxge_receive_packet(hxgep,
1494 		    rcrp, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont,
1495 		    &invalid_rcr_entry);
1496 		if (invalid_rcr_entry != 0) {
1497 			rdc_stats = rcrp->rdc_stats;
1498 			rdc_stats->rcr_invalids++;
1499 			HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1500 			    "Channel %d could only read 0x%x packets, "
1501 			    "but 0x%x pending\n", channel, npkt_read, qlen_hw));
1502 			break;
1503 		}
1504 
1505 		/*
1506 		 * message chaining modes (nemo msg chaining)
1507 		 */
1508 		if (nmp) {
1509 			nmp->b_next = NULL;
1510 			if (!multi && !mp_cont) { /* frame fits a partition */
1511 				*tail_mp = nmp;
1512 				tail_mp = &nmp->b_next;
1513 				nmp = NULL;
1514 			} else if (multi && !mp_cont) { /* first segment */
1515 				*tail_mp = nmp;
1516 				tail_mp = &nmp->b_cont;
1517 			} else if (multi && mp_cont) {	/* mid of multi segs */
1518 				*tail_mp = mp_cont;
1519 				tail_mp = &mp_cont->b_cont;
1520 			} else if (!multi && mp_cont) { /* last segment */
1521 				*tail_mp = mp_cont;
1522 				tail_mp = &nmp->b_next;
1523 				totallen += MBLKL(mp_cont);
1524 				nmp = NULL;
1525 			}
1526 		}
1527 
1528 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1529 		    "==> hxge_rx_pkts: loop: rcr channel %d "
1530 		    "before updating: multi %d "
1531 		    "nrcr_read %d "
1532 		    "npk read %d "
1533 		    "head_pp $%p  index %d ",
1534 		    channel, multi,
1535 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));
1536 
1537 		if (!multi) {
1538 			qlen_hw--;
1539 			npkt_read++;
1540 		}
1541 
1542 		/*
1543 		 * Update the next read entry.
1544 		 */
1545 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
1546 		    rcrp->comp_wrap_mask);
1547 
1548 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
1549 		    rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p);
1550 
1551 		nrcr_read++;
1552 
1553 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1554 		    "<== hxge_rx_pkts: (SAM, process one packet) "
1555 		    "nrcr_read %d", nrcr_read));
1556 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1557 		    "==> hxge_rx_pkts: loop: rcr channel %d "
1558 		    "multi %d nrcr_read %d npk read %d head_pp $%p  index %d ",
1559 		    channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp,
1560 		    comp_rd_index));
1561 
1562 		if ((bytes_to_read != -1) &&
1563 		    (totallen >= bytes_to_read)) {
1564 			break;
1565 		}
1566 	}
1567 
1568 	rcrp->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
1569 	rcrp->comp_rd_index = comp_rd_index;
1570 	rcrp->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
1571 
1572 	if ((hxgep->intr_timeout != rcrp->intr_timeout) ||
1573 	    (hxgep->intr_threshold != rcrp->intr_threshold)) {
1574 		rcrp->intr_timeout = hxgep->intr_timeout;
1575 		rcrp->intr_threshold = hxgep->intr_threshold;
1576 		rcr_cfg_b.value = 0x0ULL;
1577 		if (rcrp->intr_timeout)
1578 			rcr_cfg_b.bits.entout = 1;
1579 		rcr_cfg_b.bits.timeout = rcrp->intr_timeout;
1580 		rcr_cfg_b.bits.pthres = rcrp->intr_threshold;
1581 		RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B,
1582 		    channel, rcr_cfg_b.value);
1583 	}
1584 
1585 	pktcs.value = 0;
1586 	if (hxgep->rdc_first_intr[channel] && (npkt_read > 0)) {
1587 		hxgep->rdc_first_intr[channel] = B_FALSE;
1588 		pktcs.bits.pktread = npkt_read - 1;
1589 	} else
1590 		pktcs.bits.pktread = npkt_read;
1591 	pktcs.bits.ptrread = nrcr_read;
1592 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, pktcs.value);
1593 
1594 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
1595 	    "==> hxge_rx_pkts: EXIT: rcr channel %d "
1596 	    "head_pp $%p  index %016llx ",
1597 	    channel, rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index));
1598 
1599 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));
1600 	return (head_mp);
1601 }
1602 
1603 #define	RCR_ENTRY_PATTERN	0x5a5a6b6b7c7c8d8dULL
1604 #define	NO_PORT_BIT		0x20
1605 #define	L4_CS_EQ_BIT		0x40
1606 
1607 static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcrp,
1608     p_rcr_entry_t rcr_desc_rd_head_p, uint32_t num_rcrs)
1609 {
1610 	uint64_t	rcr_entry;
1611 	uint32_t	rcrs = 0;
1612 	uint32_t	pkts = 0;
1613 
1614 	while (rcrs < num_rcrs) {
1615 		rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
1616 
1617 		if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN))
1618 			break;
1619 
1620 		if (!(rcr_entry & RCR_MULTI_MASK))
1621 			pkts++;
1622 
1623 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
1624 		    rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p);
1625 
1626 		rcrs++;
1627 	}
1628 
1629 	return (pkts);
1630 }
1631 
1632 /*ARGSUSED*/
1633 void
1634 hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
1635     p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p, mblk_t **mp,
1636     mblk_t **mp_cont, uint32_t *invalid_rcr_entry)
1637 {
1638 	p_mblk_t nmp = NULL;
1639 	uint64_t multi;
1640 	uint8_t channel;
1641 	boolean_t first_entry = B_TRUE;
1642 	boolean_t is_tcp_udp = B_FALSE;
1643 	boolean_t buffer_free = B_FALSE;
1644 	boolean_t error_send_up = B_FALSE;
1645 	uint8_t error_type;
1646 	uint16_t l2_len;
1647 	uint16_t skip_len;
1648 	uint8_t pktbufsz_type;
1649 	uint64_t rcr_entry;
1650 	uint64_t *pkt_buf_addr_pp;
1651 	uint64_t *pkt_buf_addr_p;
1652 	uint32_t buf_offset;
1653 	uint32_t bsize;
1654 	uint32_t msg_index;
1655 	p_rx_rbr_ring_t rx_rbr_p;
1656 	p_rx_msg_t *rx_msg_ring_p;
1657 	p_rx_msg_t rx_msg_p;
1658 	uint16_t sw_offset_bytes = 0, hdr_size = 0;
1659 	hxge_status_t status = HXGE_OK;
1660 	boolean_t is_valid = B_FALSE;
1661 	p_hxge_rx_ring_stats_t rdc_stats;
1662 	uint32_t bytes_read;
1663 	uint8_t header0 = 0;
1664 	uint8_t header1 = 0;
1665 	uint64_t pkt_type;
1666 	uint8_t no_port_bit = 0;
1667 	uint8_t l4_cs_eq_bit = 0;
1668 
1669 	channel = rcr_p->rdc;
1670 
1671 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));
1672 
1673 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
1674 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
1675 
1676 	/* Verify the content of the rcr_entry for a hardware bug workaround */
1677 	if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) {
1678 		*invalid_rcr_entry = 1;
1679 		HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet "
1680 		    "Channel %d invalid RCR entry 0x%llx found, returning\n",
1681 		    channel, (long long) rcr_entry));
1682 		return;
1683 	}
1684 	*((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN;
1685 
1686 	multi = (rcr_entry & RCR_MULTI_MASK);
1687 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
1688 
1689 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
1690 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
1691 
1692 	/*
1693 	 * Hardware does not strip the CRC due bug ID 11451 where
1694 	 * the hardware mis handles minimum size packets.
1695 	 */
1696 	l2_len -= ETHERFCSL;
1697 
1698 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
1699 	    RCR_PKTBUFSZ_SHIFT);
1700 #if defined(__i386)
1701 	pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
1702 	    RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
1703 #else
1704 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
1705 	    RCR_PKT_BUF_ADDR_SHIFT);
1706 #endif
1707 
1708 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1709 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1710 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
1711 	    "error_type 0x%x pktbufsz_type %d ",
1712 	    rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len,
1713 	    multi, error_type, pktbufsz_type));
1714 
1715 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1716 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
1717 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
1718 	    "error_type 0x%x ", rcr_desc_rd_head_p,
1719 	    rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type));
1720 
1721 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1722 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1723 	    "full pkt_buf_addr_pp $%p l2_len %d",
1724 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1725 
1726 	/* get the stats ptr */
1727 	rdc_stats = rcr_p->rdc_stats;
1728 
1729 	if (!l2_len) {
1730 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1731 		    "<== hxge_receive_packet: failed: l2 length is 0."));
1732 		return;
1733 	}
1734 
1735 	/* shift 6 bits to get the full io address */
1736 #if defined(__i386)
1737 	pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
1738 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
1739 #else
1740 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
1741 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
1742 #endif
1743 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1744 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
1745 	    "full pkt_buf_addr_pp $%p l2_len %d",
1746 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1747 
1748 	rx_rbr_p = rcr_p->rx_rbr_p;
1749 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
1750 
1751 	if (first_entry) {
1752 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
1753 		    RXDMA_HDR_SIZE_DEFAULT);
1754 
1755 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1756 		    "==> hxge_receive_packet: first entry 0x%016llx "
1757 		    "pkt_buf_addr_pp $%p l2_len %d hdr %d",
1758 		    rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size));
1759 	}
1760 
1761 	MUTEX_ENTER(&rx_rbr_p->lock);
1762 
1763 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1764 	    "==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
1765 	    "full pkt_buf_addr_pp $%p l2_len %d",
1766 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1767 
1768 	/*
1769 	 * Packet buffer address in the completion entry points to the starting
1770 	 * buffer address (offset 0). Use the starting buffer address to locate
1771 	 * the corresponding kernel address.
1772 	 */
1773 	status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p,
1774 	    pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
1775 	    &buf_offset, &msg_index);
1776 
1777 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1778 	    "==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
1779 	    "full pkt_buf_addr_pp $%p l2_len %d",
1780 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1781 
1782 	if (status != HXGE_OK) {
1783 		MUTEX_EXIT(&rx_rbr_p->lock);
1784 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1785 		    "<== hxge_receive_packet: found vaddr failed %d", status));
1786 		return;
1787 	}
1788 
1789 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1790 	    "==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
1791 	    "full pkt_buf_addr_pp $%p l2_len %d",
1792 	    rcr_entry, pkt_buf_addr_pp, l2_len));
1793 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1794 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1795 	    "full pkt_buf_addr_pp $%p l2_len %d",
1796 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1797 
1798 	if (msg_index >= rx_rbr_p->tnblocks) {
1799 		MUTEX_EXIT(&rx_rbr_p->lock);
1800 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1801 		    "==> hxge_receive_packet: FATAL msg_index (%d) "
1802 		    "should be smaller than tnblocks (%d)\n",
1803 		    msg_index, rx_rbr_p->tnblocks));
1804 		return;
1805 	}
1806 
1807 	rx_msg_p = rx_msg_ring_p[msg_index];
1808 
1809 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1810 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
1811 	    "full pkt_buf_addr_pp $%p l2_len %d",
1812 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
1813 
1814 	switch (pktbufsz_type) {
1815 	case RCR_PKTBUFSZ_0:
1816 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
1817 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1818 		    "==> hxge_receive_packet: 0 buf %d", bsize));
1819 		break;
1820 	case RCR_PKTBUFSZ_1:
1821 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
1822 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1823 		    "==> hxge_receive_packet: 1 buf %d", bsize));
1824 		break;
1825 	case RCR_PKTBUFSZ_2:
1826 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
1827 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1828 		    "==> hxge_receive_packet: 2 buf %d", bsize));
1829 		break;
1830 	case RCR_SINGLE_BLOCK:
1831 		bsize = rx_msg_p->block_size;
1832 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1833 		    "==> hxge_receive_packet: single %d", bsize));
1834 
1835 		break;
1836 	default:
1837 		MUTEX_EXIT(&rx_rbr_p->lock);
1838 		return;
1839 	}
1840 
1841 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
1842 	    (buf_offset + sw_offset_bytes), (hdr_size + l2_len),
1843 	    DDI_DMA_SYNC_FORCPU);
1844 
1845 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1846 	    "==> hxge_receive_packet: after first dump:usage count"));
1847 
1848 	if (rx_msg_p->cur_usage_cnt == 0) {
1849 		atomic_inc_32(&rx_rbr_p->rbr_used);
1850 		if (rx_rbr_p->rbr_use_bcopy) {
1851 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
1852 			if (rx_rbr_p->rbr_consumed <
1853 			    rx_rbr_p->rbr_threshold_hi) {
1854 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
1855 				    ((rx_rbr_p->rbr_consumed >=
1856 				    rx_rbr_p->rbr_threshold_lo) &&
1857 				    (rx_rbr_p->rbr_bufsize_type >=
1858 				    pktbufsz_type))) {
1859 					rx_msg_p->rx_use_bcopy = B_TRUE;
1860 				}
1861 			} else {
1862 				rx_msg_p->rx_use_bcopy = B_TRUE;
1863 			}
1864 		}
1865 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1866 		    "==> hxge_receive_packet: buf %d (new block) ", bsize));
1867 
1868 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
1869 		rx_msg_p->pkt_buf_size = bsize;
1870 		rx_msg_p->cur_usage_cnt = 1;
1871 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
1872 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1873 			    "==> hxge_receive_packet: buf %d (single block) ",
1874 			    bsize));
1875 			/*
1876 			 * Buffer can be reused once the free function is
1877 			 * called.
1878 			 */
1879 			rx_msg_p->max_usage_cnt = 1;
1880 			buffer_free = B_TRUE;
1881 		} else {
1882 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
1883 			if (rx_msg_p->max_usage_cnt == 1) {
1884 				buffer_free = B_TRUE;
1885 			}
1886 		}
1887 	} else {
1888 		rx_msg_p->cur_usage_cnt++;
1889 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
1890 			buffer_free = B_TRUE;
1891 		}
1892 	}
1893 
1894 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
1895 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
1896 	    msg_index, l2_len,
1897 	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
1898 
1899 	if (error_type) {
1900 		rdc_stats->ierrors++;
1901 		/* Update error stats */
1902 		rdc_stats->errlog.compl_err_type = error_type;
1903 		HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR);
1904 
1905 		if (error_type & RCR_CTRL_FIFO_DED) {
1906 			rdc_stats->ctrl_fifo_ecc_err++;
1907 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1908 			    " hxge_receive_packet: "
1909 			    " channel %d RCR ctrl_fifo_ded error", channel));
1910 		} else if (error_type & RCR_DATA_FIFO_DED) {
1911 			rdc_stats->data_fifo_ecc_err++;
1912 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1913 			    " hxge_receive_packet: channel %d"
1914 			    " RCR data_fifo_ded error", channel));
1915 		}
1916 
1917 		/*
1918 		 * Update and repost buffer block if max usage count is
1919 		 * reached.
1920 		 */
1921 		if (error_send_up == B_FALSE) {
1922 			atomic_inc_32(&rx_msg_p->ref_cnt);
1923 			if (buffer_free == B_TRUE) {
1924 				rx_msg_p->free = B_TRUE;
1925 			}
1926 
1927 			MUTEX_EXIT(&rx_rbr_p->lock);
1928 			hxge_freeb(rx_msg_p);
1929 			return;
1930 		}
1931 	}
1932 
1933 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
1934 	    "==> hxge_receive_packet: DMA sync second "));
1935 
1936 	bytes_read = rcr_p->rcvd_pkt_bytes;
1937 	skip_len = sw_offset_bytes + hdr_size;
1938 
1939 	if (first_entry) {
1940 		header0 = rx_msg_p->buffer[buf_offset];
1941 		no_port_bit = header0 & NO_PORT_BIT;
1942 		header1 = rx_msg_p->buffer[buf_offset + 1];
1943 		l4_cs_eq_bit = header1 & L4_CS_EQ_BIT;
1944 	}
1945 
1946 	if (!rx_msg_p->rx_use_bcopy) {
1947 		/*
1948 		 * For loaned up buffers, the driver reference count
1949 		 * will be incremented first and then the free state.
1950 		 */
1951 		if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
1952 			if (first_entry) {
1953 				nmp->b_rptr = &nmp->b_rptr[skip_len];
1954 				if (l2_len < bsize - skip_len) {
1955 					nmp->b_wptr = &nmp->b_rptr[l2_len];
1956 				} else {
1957 					nmp->b_wptr = &nmp->b_rptr[bsize
1958 					    - skip_len];
1959 				}
1960 			} else {
1961 				if (l2_len - bytes_read < bsize) {
1962 					nmp->b_wptr =
1963 					    &nmp->b_rptr[l2_len - bytes_read];
1964 				} else {
1965 					nmp->b_wptr = &nmp->b_rptr[bsize];
1966 				}
1967 			}
1968 		}
1969 	} else {
1970 		if (first_entry) {
1971 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
1972 			    l2_len < bsize - skip_len ?
1973 			    l2_len : bsize - skip_len);
1974 		} else {
1975 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset,
1976 			    l2_len - bytes_read < bsize ?
1977 			    l2_len - bytes_read : bsize);
1978 		}
1979 	}
1980 
1981 	if (nmp != NULL) {
1982 		if (first_entry)
1983 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
1984 		else
1985 			bytes_read += nmp->b_wptr - nmp->b_rptr;
1986 
1987 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
1988 		    "==> hxge_receive_packet after dupb: "
1989 		    "rbr consumed %d "
1990 		    "pktbufsz_type %d "
1991 		    "nmp $%p rptr $%p wptr $%p "
1992 		    "buf_offset %d bzise %d l2_len %d skip_len %d",
1993 		    rx_rbr_p->rbr_consumed,
1994 		    pktbufsz_type,
1995 		    nmp, nmp->b_rptr, nmp->b_wptr,
1996 		    buf_offset, bsize, l2_len, skip_len));
1997 	} else {
1998 		cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");
1999 
2000 		atomic_inc_32(&rx_msg_p->ref_cnt);
2001 		if (buffer_free == B_TRUE) {
2002 			rx_msg_p->free = B_TRUE;
2003 		}
2004 
2005 		MUTEX_EXIT(&rx_rbr_p->lock);
2006 		hxge_freeb(rx_msg_p);
2007 		return;
2008 	}
2009 
2010 	if (buffer_free == B_TRUE) {
2011 		rx_msg_p->free = B_TRUE;
2012 	}
2013 
2014 	/*
2015 	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
2016 	 * packet is not fragmented and no error bit is set, then L4 checksum
2017 	 * is OK.
2018 	 */
2019 	is_valid = (nmp != NULL);
2020 	if (first_entry) {
2021 		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
2022 		if (l2_len > (STD_FRAME_SIZE - ETHERFCSL))
2023 			rdc_stats->jumbo_pkts++;
2024 		rdc_stats->ibytes += skip_len + l2_len < bsize ?
2025 		    l2_len : bsize;
2026 	} else {
2027 		/*
2028 		 * Add the current portion of the packet to the kstats.
2029 		 * The current portion of the packet is calculated by using
2030 		 * length of the packet and the previously received portion.
2031 		 */
2032 		rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ?
2033 		    l2_len - rcr_p->rcvd_pkt_bytes : bsize;
2034 	}
2035 
2036 	rcr_p->rcvd_pkt_bytes = bytes_read;
2037 
2038 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
2039 		atomic_inc_32(&rx_msg_p->ref_cnt);
2040 		MUTEX_EXIT(&rx_rbr_p->lock);
2041 		hxge_freeb(rx_msg_p);
2042 	} else
2043 		MUTEX_EXIT(&rx_rbr_p->lock);
2044 
2045 	if (is_valid) {
2046 		nmp->b_cont = NULL;
2047 		if (first_entry) {
2048 			*mp = nmp;
2049 			*mp_cont = NULL;
2050 		} else {
2051 			*mp_cont = nmp;
2052 		}
2053 	}
2054 
2055 	/*
2056 	 * Update stats and hardware checksuming.
2057 	 */
2058 	if (is_valid && !multi) {
2059 		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
2060 		    pkt_type == RCR_PKT_IS_UDP) ? B_TRUE : B_FALSE);
2061 
2062 		if (!no_port_bit && l4_cs_eq_bit && is_tcp_udp && !error_type) {
2063 			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
2064 			    HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
2065 
2066 			HXGE_DEBUG_MSG((hxgep, RX_CTL,
2067 			    "==> hxge_receive_packet: Full tcp/udp cksum "
2068 			    "is_valid 0x%x multi %d error %d",
2069 			    is_valid, multi, error_type));
2070 		}
2071 	}
2072 
2073 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
2074 	    "==> hxge_receive_packet: *mp 0x%016llx", *mp));
2075 
2076 	*multi_p = (multi == RCR_MULTI_MASK);
2077 
2078 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: "
2079 	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
2080 	    *multi_p, nmp, *mp, *mp_cont));
2081 }
2082 
2083 static void
2084 hxge_rx_rbr_empty_recover(p_hxge_t hxgep, uint8_t channel)
2085 {
2086 	hpi_handle_t	handle;
2087 	p_rx_rcr_ring_t	rcrp;
2088 	p_rx_rbr_ring_t	rbrp;
2089 
2090 	rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
2091 	rbrp = rcrp->rx_rbr_p;
2092 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
2093 
2094 	/*
2095 	 * Wait for the channel to be quiet
2096 	 */
2097 	(void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel);
2098 
2099 	/*
2100 	 * Post page will accumulate some buffers before re-enabling
2101 	 * the DMA channel.
2102 	 */
2103 
2104 	MUTEX_ENTER(&rbrp->post_lock);
2105 	if ((rbrp->rbb_max - rbrp->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) {
2106 		hxge_rbr_empty_restore(hxgep, rbrp);
2107 	} else {
2108 		rbrp->rbr_is_empty = B_TRUE;
2109 	}
2110 	MUTEX_EXIT(&rbrp->post_lock);
2111 }
2112 
2113 
2114 /*ARGSUSED*/
2115 static hxge_status_t
2116 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
2117     rdc_stat_t cs)
2118 {
2119 	p_hxge_rx_ring_stats_t	rdc_stats;
2120 	hpi_handle_t		handle;
2121 	boolean_t		rxchan_fatal = B_FALSE;
2122 	uint8_t			channel;
2123 	hxge_status_t		status = HXGE_OK;
2124 
2125 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts"));
2126 
2127 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
2128 	channel = ldvp->channel;
2129 
2130 	rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index];
2131 
2132 	if (cs.bits.rbr_cpl_to) {
2133 		rdc_stats->rbr_tmout++;
2134 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2135 		    HXGE_FM_EREPORT_RDMC_RBR_CPL_TO);
2136 		rxchan_fatal = B_TRUE;
2137 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2138 		    "==> hxge_rx_err_evnts(channel %d): "
2139 		    "fatal error: rx_rbr_timeout", channel));
2140 	}
2141 
2142 	if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) {
2143 		(void) hpi_rxdma_ring_perr_stat_get(handle,
2144 		    &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par);
2145 	}
2146 
2147 	if (cs.bits.rcr_shadow_par_err) {
2148 		rdc_stats->rcr_sha_par++;
2149 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2150 		    HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
2151 		rxchan_fatal = B_TRUE;
2152 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2153 		    "==> hxge_rx_err_evnts(channel %d): "
2154 		    "fatal error: rcr_shadow_par_err", channel));
2155 	}
2156 
2157 	if (cs.bits.rbr_prefetch_par_err) {
2158 		rdc_stats->rbr_pre_par++;
2159 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2160 		    HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
2161 		rxchan_fatal = B_TRUE;
2162 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2163 		    "==> hxge_rx_err_evnts(channel %d): "
2164 		    "fatal error: rbr_prefetch_par_err", channel));
2165 	}
2166 
2167 	if (cs.bits.rbr_pre_empty) {
2168 		rdc_stats->rbr_pre_empty++;
2169 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2170 		    HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY);
2171 		rxchan_fatal = B_TRUE;
2172 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2173 		    "==> hxge_rx_err_evnts(channel %d): "
2174 		    "fatal error: rbr_pre_empty", channel));
2175 	}
2176 
2177 	if (cs.bits.peu_resp_err) {
2178 		rdc_stats->peu_resp_err++;
2179 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2180 		    HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR);
2181 		rxchan_fatal = B_TRUE;
2182 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2183 		    "==> hxge_rx_err_evnts(channel %d): "
2184 		    "fatal error: peu_resp_err", channel));
2185 	}
2186 
2187 	if (cs.bits.rcr_thres) {
2188 		rdc_stats->rcr_thres++;
2189 	}
2190 
2191 	if (cs.bits.rcr_to) {
2192 		rdc_stats->rcr_to++;
2193 	}
2194 
2195 	if (cs.bits.rcr_shadow_full) {
2196 		rdc_stats->rcr_shadow_full++;
2197 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2198 		    HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL);
2199 		rxchan_fatal = B_TRUE;
2200 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2201 		    "==> hxge_rx_err_evnts(channel %d): "
2202 		    "fatal error: rcr_shadow_full", channel));
2203 	}
2204 
2205 	if (cs.bits.rcr_full) {
2206 		rdc_stats->rcrfull++;
2207 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2208 		    HXGE_FM_EREPORT_RDMC_RCRFULL);
2209 		rxchan_fatal = B_TRUE;
2210 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2211 		    "==> hxge_rx_err_evnts(channel %d): "
2212 		    "fatal error: rcrfull error", channel));
2213 	}
2214 
2215 	if (cs.bits.rbr_empty) {
2216 		rdc_stats->rbr_empty++;
2217 		hxge_rx_rbr_empty_recover(hxgep, channel);
2218 	}
2219 
2220 	if (cs.bits.rbr_full) {
2221 		rdc_stats->rbrfull++;
2222 		HXGE_FM_REPORT_ERROR(hxgep, channel,
2223 		    HXGE_FM_EREPORT_RDMC_RBRFULL);
2224 		rxchan_fatal = B_TRUE;
2225 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2226 		    "==> hxge_rx_err_evnts(channel %d): "
2227 		    "fatal error: rbr_full error", channel));
2228 	}
2229 
2230 	if (rxchan_fatal) {
2231 		p_rx_rcr_ring_t	rcrp;
2232 		p_rx_rbr_ring_t rbrp;
2233 
2234 		rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
2235 		rbrp = rcrp->rx_rbr_p;
2236 
2237 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2238 		    " hxge_rx_err_evnts: fatal error on Channel #%d\n",
2239 		    channel));
2240 
2241 		MUTEX_ENTER(&rbrp->post_lock);
2242 		/* This function needs to be inside the post_lock */
2243 		status = hxge_rxdma_fatal_err_recover(hxgep, channel);
2244 		MUTEX_EXIT(&rbrp->post_lock);
2245 		if (status == HXGE_OK) {
2246 			FM_SERVICE_RESTORED(hxgep);
2247 		}
2248 	}
2249 
2250 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts"));
2251 	return (status);
2252 }
2253 
2254 static hxge_status_t
2255 hxge_map_rxdma(p_hxge_t hxgep)
2256 {
2257 	int			i, ndmas;
2258 	uint16_t		channel;
2259 	p_rx_rbr_rings_t	rx_rbr_rings;
2260 	p_rx_rbr_ring_t		*rbr_rings;
2261 	p_rx_rcr_rings_t	rx_rcr_rings;
2262 	p_rx_rcr_ring_t		*rcr_rings;
2263 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2264 	p_rx_mbox_t		*rx_mbox_p;
2265 	p_hxge_dma_pool_t	dma_buf_poolp;
2266 	p_hxge_dma_common_t	*dma_buf_p;
2267 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
2268 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
2269 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
2270 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
2271 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
2272 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
2273 	uint32_t		*num_chunks;
2274 	hxge_status_t		status = HXGE_OK;
2275 
2276 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma"));
2277 
2278 	dma_buf_poolp = hxgep->rx_buf_pool_p;
2279 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
2280 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
2281 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
2282 
2283 	if (!dma_buf_poolp->buf_allocated ||
2284 	    !dma_rbr_cntl_poolp->buf_allocated ||
2285 	    !dma_rcr_cntl_poolp->buf_allocated ||
2286 	    !dma_mbox_cntl_poolp->buf_allocated) {
2287 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2288 		    "<== hxge_map_rxdma: buf not allocated"));
2289 		return (HXGE_ERROR);
2290 	}
2291 
2292 	ndmas = dma_buf_poolp->ndmas;
2293 	if (!ndmas) {
2294 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2295 		    "<== hxge_map_rxdma: no dma allocated"));
2296 		return (HXGE_ERROR);
2297 	}
2298 
2299 	num_chunks = dma_buf_poolp->num_chunks;
2300 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2301 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
2302 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
2303 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
2304 
2305 	rx_rbr_rings = (p_rx_rbr_rings_t)
2306 	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
2307 	rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC(
2308 	    sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
2309 
2310 	rx_rcr_rings = (p_rx_rcr_rings_t)
2311 	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
2312 	rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC(
2313 	    sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
2314 
2315 	rx_mbox_areas_p = (p_rx_mbox_areas_t)
2316 	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
2317 	rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC(
2318 	    sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
2319 
2320 	/*
2321 	 * Timeout should be set based on the system clock divider.
2322 	 * The following timeout value of 1 assumes that the
2323 	 * granularity (1000) is 3 microseconds running at 300MHz.
2324 	 */
2325 
2326 	hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
2327 	hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
2328 
2329 	/*
2330 	 * Map descriptors from the buffer polls for each dam channel.
2331 	 */
2332 	for (i = 0; i < ndmas; i++) {
2333 		if (((p_hxge_dma_common_t)dma_buf_p[i]) == NULL) {
2334 			status = HXGE_ERROR;
2335 			goto hxge_map_rxdma_fail1;
2336 		}
2337 
2338 		/*
2339 		 * Set up and prepare buffer blocks, descriptors and mailbox.
2340 		 */
2341 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2342 		status = hxge_map_rxdma_channel(hxgep, channel,
2343 		    (p_hxge_dma_common_t *)&dma_buf_p[i],
2344 		    (p_rx_rbr_ring_t *)&rbr_rings[i],
2345 		    num_chunks[i],
2346 		    (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i],
2347 		    (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i],
2348 		    (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i],
2349 		    (p_rx_rcr_ring_t *)&rcr_rings[i],
2350 		    (p_rx_mbox_t *)&rx_mbox_p[i]);
2351 		if (status != HXGE_OK) {
2352 			goto hxge_map_rxdma_fail1;
2353 		}
2354 		rbr_rings[i]->index = (uint16_t)i;
2355 		rcr_rings[i]->index = (uint16_t)i;
2356 		rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i];
2357 	}
2358 
2359 	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
2360 	rx_rbr_rings->rbr_rings = rbr_rings;
2361 	hxgep->rx_rbr_rings = rx_rbr_rings;
2362 	rx_rcr_rings->rcr_rings = rcr_rings;
2363 	hxgep->rx_rcr_rings = rx_rcr_rings;
2364 
2365 	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
2366 	hxgep->rx_mbox_areas_p = rx_mbox_areas_p;
2367 
2368 	goto hxge_map_rxdma_exit;
2369 
2370 hxge_map_rxdma_fail1:
2371 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2372 	    "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)",
2373 	    status, channel, i));
2374 	i--;
2375 	for (; i >= 0; i--) {
2376 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2377 		hxge_unmap_rxdma_channel(hxgep, channel,
2378 		    rbr_rings[i], rcr_rings[i], rx_mbox_p[i]);
2379 	}
2380 
2381 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2382 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2383 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2384 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2385 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2386 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2387 
2388 hxge_map_rxdma_exit:
2389 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2390 	    "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel));
2391 
2392 	return (status);
2393 }
2394 
2395 static void
2396 hxge_unmap_rxdma(p_hxge_t hxgep)
2397 {
2398 	int			i, ndmas;
2399 	uint16_t		channel;
2400 	p_rx_rbr_rings_t	rx_rbr_rings;
2401 	p_rx_rbr_ring_t		*rbr_rings;
2402 	p_rx_rcr_rings_t	rx_rcr_rings;
2403 	p_rx_rcr_ring_t		*rcr_rings;
2404 	p_rx_mbox_areas_t	rx_mbox_areas_p;
2405 	p_rx_mbox_t		*rx_mbox_p;
2406 	p_hxge_dma_pool_t	dma_buf_poolp;
2407 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
2408 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
2409 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
2410 	p_hxge_dma_common_t	*dma_buf_p;
2411 
2412 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma"));
2413 
2414 	dma_buf_poolp = hxgep->rx_buf_pool_p;
2415 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
2416 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
2417 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
2418 
2419 	if (!dma_buf_poolp->buf_allocated ||
2420 	    !dma_rbr_cntl_poolp->buf_allocated ||
2421 	    !dma_rcr_cntl_poolp->buf_allocated ||
2422 	    !dma_mbox_cntl_poolp->buf_allocated) {
2423 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2424 		    "<== hxge_unmap_rxdma: NULL buf pointers"));
2425 		return;
2426 	}
2427 
2428 	rx_rbr_rings = hxgep->rx_rbr_rings;
2429 	rx_rcr_rings = hxgep->rx_rcr_rings;
2430 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
2431 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2432 		    "<== hxge_unmap_rxdma: NULL pointers"));
2433 		return;
2434 	}
2435 
2436 	ndmas = rx_rbr_rings->ndmas;
2437 	if (!ndmas) {
2438 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2439 		    "<== hxge_unmap_rxdma: no channel"));
2440 		return;
2441 	}
2442 
2443 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2444 	    "==> hxge_unmap_rxdma (ndmas %d)", ndmas));
2445 
2446 	rbr_rings = rx_rbr_rings->rbr_rings;
2447 	rcr_rings = rx_rcr_rings->rcr_rings;
2448 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
2449 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
2450 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
2451 
2452 	for (i = 0; i < ndmas; i++) {
2453 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
2454 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2455 		    "==> hxge_unmap_rxdma (ndmas %d) channel %d",
2456 		    ndmas, channel));
2457 		(void) hxge_unmap_rxdma_channel(hxgep, channel,
2458 		    (p_rx_rbr_ring_t)rbr_rings[i],
2459 		    (p_rx_rcr_ring_t)rcr_rings[i],
2460 		    (p_rx_mbox_t)rx_mbox_p[i]);
2461 	}
2462 
2463 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
2464 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
2465 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
2466 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
2467 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
2468 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
2469 
2470 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma"));
2471 }
2472 
2473 hxge_status_t
2474 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2475     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
2476     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
2477     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
2478     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2479 {
2480 	int status = HXGE_OK;
2481 
2482 	/*
2483 	 * Set up and prepare buffer blocks, descriptors and mailbox.
2484 	 */
2485 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2486 	    "==> hxge_map_rxdma_channel (channel %d)", channel));
2487 
2488 	/*
2489 	 * Receive buffer blocks
2490 	 */
2491 	status = hxge_map_rxdma_channel_buf_ring(hxgep, channel,
2492 	    dma_buf_p, rbr_p, num_chunks);
2493 	if (status != HXGE_OK) {
2494 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2495 		    "==> hxge_map_rxdma_channel (channel %d): "
2496 		    "map buffer failed 0x%x", channel, status));
2497 		goto hxge_map_rxdma_channel_exit;
2498 	}
2499 
2500 	/*
2501 	 * Receive block ring, completion ring and mailbox.
2502 	 */
2503 	status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel,
2504 	    dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p,
2505 	    rbr_p, rcr_p, rx_mbox_p);
2506 	if (status != HXGE_OK) {
2507 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2508 		    "==> hxge_map_rxdma_channel (channel %d): "
2509 		    "map config failed 0x%x", channel, status));
2510 		goto hxge_map_rxdma_channel_fail2;
2511 	}
2512 	goto hxge_map_rxdma_channel_exit;
2513 
2514 hxge_map_rxdma_channel_fail3:
2515 	/* Free rbr, rcr */
2516 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2517 	    "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)",
2518 	    status, channel));
2519 	hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p);
2520 
2521 hxge_map_rxdma_channel_fail2:
2522 	/* Free buffer blocks */
2523 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2524 	    "==> hxge_map_rxdma_channel: free rx buffers"
2525 	    "(hxgep 0x%x status 0x%x channel %d)",
2526 	    hxgep, status, channel));
2527 	hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p);
2528 
2529 	status = HXGE_ERROR;
2530 
2531 hxge_map_rxdma_channel_exit:
2532 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2533 	    "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)",
2534 	    hxgep, status, channel));
2535 
2536 	return (status);
2537 }
2538 
2539 /*ARGSUSED*/
2540 static void
2541 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
2542     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2543 {
2544 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2545 	    "==> hxge_unmap_rxdma_channel (channel %d)", channel));
2546 
2547 	/*
2548 	 * unmap receive block ring, completion ring and mailbox.
2549 	 */
2550 	(void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p);
2551 
2552 	/* unmap buffer blocks */
2553 	(void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p);
2554 
2555 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel"));
2556 }
2557 
2558 /*ARGSUSED*/
2559 static hxge_status_t
2560 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
2561     p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p,
2562     p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p,
2563     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
2564 {
2565 	p_rx_rbr_ring_t 	rbrp;
2566 	p_rx_rcr_ring_t 	rcrp;
2567 	p_rx_mbox_t 		mboxp;
2568 	p_hxge_dma_common_t 	cntl_dmap;
2569 	p_hxge_dma_common_t 	dmap;
2570 	p_rx_msg_t 		*rx_msg_ring;
2571 	p_rx_msg_t 		rx_msg_p;
2572 	rdc_rbr_cfg_a_t		*rcfga_p;
2573 	rdc_rbr_cfg_b_t		*rcfgb_p;
2574 	rdc_rcr_cfg_a_t		*cfga_p;
2575 	rdc_rcr_cfg_b_t		*cfgb_p;
2576 	rdc_rx_cfg1_t		*cfig1_p;
2577 	rdc_rx_cfg2_t		*cfig2_p;
2578 	rdc_rbr_kick_t		*kick_p;
2579 	uint32_t		dmaaddrp;
2580 	uint32_t		*rbr_vaddrp;
2581 	uint32_t		bkaddr;
2582 	hxge_status_t		status = HXGE_OK;
2583 	int			i;
2584 	uint32_t 		hxge_port_rcr_size;
2585 
2586 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2587 	    "==> hxge_map_rxdma_channel_cfg_ring"));
2588 
2589 	cntl_dmap = *dma_rbr_cntl_p;
2590 
2591 	/*
2592 	 * Map in the receive block ring
2593 	 */
2594 	rbrp = *rbr_p;
2595 	dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc;
2596 	hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
2597 
2598 	/*
2599 	 * Zero out buffer block ring descriptors.
2600 	 */
2601 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2602 
2603 	rcfga_p = &(rbrp->rbr_cfga);
2604 	rcfgb_p = &(rbrp->rbr_cfgb);
2605 	kick_p = &(rbrp->rbr_kick);
2606 	rcfga_p->value = 0;
2607 	rcfgb_p->value = 0;
2608 	kick_p->value = 0;
2609 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
2610 	rcfga_p->value = (rbrp->rbr_addr &
2611 	    (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));
2612 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
2613 
2614 	/* XXXX: how to choose packet buffer sizes */
2615 	rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0;
2616 	rcfgb_p->bits.vld0 = 1;
2617 	rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1;
2618 	rcfgb_p->bits.vld1 = 1;
2619 	rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2;
2620 	rcfgb_p->bits.vld2 = 1;
2621 	rcfgb_p->bits.bksize = hxgep->rx_bksize_code;
2622 
2623 	/*
2624 	 * For each buffer block, enter receive block address to the ring.
2625 	 */
2626 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
2627 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
2628 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2629 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2630 	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
2631 
2632 	rx_msg_ring = rbrp->rx_msg_ring;
2633 	for (i = 0; i < rbrp->tnblocks; i++) {
2634 		rx_msg_p = rx_msg_ring[i];
2635 		rx_msg_p->hxgep = hxgep;
2636 		rx_msg_p->rx_rbr_p = rbrp;
2637 		bkaddr = (uint32_t)
2638 		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2639 		    RBR_BKADDR_SHIFT));
2640 		rx_msg_p->free = B_FALSE;
2641 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
2642 
2643 		*rbr_vaddrp++ = bkaddr;
2644 	}
2645 
2646 	kick_p->bits.bkadd = rbrp->rbb_max;
2647 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
2648 
2649 	rbrp->rbr_rd_index = 0;
2650 
2651 	rbrp->rbr_consumed = 0;
2652 	rbrp->rbr_used = 0;
2653 	rbrp->rbr_use_bcopy = B_TRUE;
2654 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
2655 
2656 	/*
2657 	 * Do bcopy on packets greater than bcopy size once the lo threshold is
2658 	 * reached. This lo threshold should be less than the hi threshold.
2659 	 *
2660 	 * Do bcopy on every packet once the hi threshold is reached.
2661 	 */
2662 	if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) {
2663 		/* default it to use hi */
2664 		hxge_rx_threshold_lo = hxge_rx_threshold_hi;
2665 	}
2666 	if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) {
2667 		hxge_rx_buf_size_type = HXGE_RBR_TYPE2;
2668 	}
2669 	rbrp->rbr_bufsize_type = hxge_rx_buf_size_type;
2670 
2671 	switch (hxge_rx_threshold_hi) {
2672 	default:
2673 	case HXGE_RX_COPY_NONE:
2674 		/* Do not do bcopy at all */
2675 		rbrp->rbr_use_bcopy = B_FALSE;
2676 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
2677 		break;
2678 
2679 	case HXGE_RX_COPY_1:
2680 	case HXGE_RX_COPY_2:
2681 	case HXGE_RX_COPY_3:
2682 	case HXGE_RX_COPY_4:
2683 	case HXGE_RX_COPY_5:
2684 	case HXGE_RX_COPY_6:
2685 	case HXGE_RX_COPY_7:
2686 		rbrp->rbr_threshold_hi =
2687 		    rbrp->rbb_max * (hxge_rx_threshold_hi) /
2688 		    HXGE_RX_BCOPY_SCALE;
2689 		break;
2690 
2691 	case HXGE_RX_COPY_ALL:
2692 		rbrp->rbr_threshold_hi = 0;
2693 		break;
2694 	}
2695 
2696 	switch (hxge_rx_threshold_lo) {
2697 	default:
2698 	case HXGE_RX_COPY_NONE:
2699 		/* Do not do bcopy at all */
2700 		if (rbrp->rbr_use_bcopy) {
2701 			rbrp->rbr_use_bcopy = B_FALSE;
2702 		}
2703 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
2704 		break;
2705 
2706 	case HXGE_RX_COPY_1:
2707 	case HXGE_RX_COPY_2:
2708 	case HXGE_RX_COPY_3:
2709 	case HXGE_RX_COPY_4:
2710 	case HXGE_RX_COPY_5:
2711 	case HXGE_RX_COPY_6:
2712 	case HXGE_RX_COPY_7:
2713 		rbrp->rbr_threshold_lo =
2714 		    rbrp->rbb_max * (hxge_rx_threshold_lo) /
2715 		    HXGE_RX_BCOPY_SCALE;
2716 		break;
2717 
2718 	case HXGE_RX_COPY_ALL:
2719 		rbrp->rbr_threshold_lo = 0;
2720 		break;
2721 	}
2722 
2723 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
2724 	    "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d "
2725 	    "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d "
2726 	    "rbb_threshold_lo %d",
2727 	    dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type,
2728 	    rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo));
2729 
2730 	/* Map in the receive completion ring */
2731 	rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
2732 	MUTEX_INIT(&rcrp->lock, NULL, MUTEX_DRIVER,
2733 	    (void *) hxgep->interrupt_cookie);
2734 	rcrp->rdc = dma_channel;
2735 	rcrp->hxgep = hxgep;
2736 
2737 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
2738 	rcrp->comp_size = hxge_port_rcr_size;
2739 	rcrp->comp_wrap_mask = hxge_port_rcr_size - 1;
2740 
2741 	cntl_dmap = *dma_rcr_cntl_p;
2742 
2743 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
2744 	hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
2745 	    sizeof (rcr_entry_t));
2746 	rcrp->comp_rd_index = 0;
2747 	rcrp->comp_wt_index = 0;
2748 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
2749 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
2750 #if defined(__i386)
2751 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2752 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2753 #else
2754 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
2755 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
2756 #endif
2757 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
2758 	    (hxge_port_rcr_size - 1);
2759 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
2760 	    (hxge_port_rcr_size - 1);
2761 
2762 	rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
2763 	rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
2764 
2765 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2766 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
2767 	    "rbr_vaddrp $%p rcr_desc_rd_head_p $%p "
2768 	    "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p "
2769 	    "rcr_desc_rd_last_pp $%p ",
2770 	    dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p,
2771 	    rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p,
2772 	    rcrp->rcr_desc_last_pp));
2773 
2774 	/*
2775 	 * Zero out buffer block ring descriptors.
2776 	 */
2777 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2778 	rcrp->intr_timeout = hxgep->intr_timeout;
2779 	rcrp->intr_threshold = hxgep->intr_threshold;
2780 	rcrp->full_hdr_flag = B_FALSE;
2781 	rcrp->sw_priv_hdr_len = 0;
2782 
2783 	cfga_p = &(rcrp->rcr_cfga);
2784 	cfgb_p = &(rcrp->rcr_cfgb);
2785 	cfga_p->value = 0;
2786 	cfgb_p->value = 0;
2787 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
2788 
2789 	cfga_p->value = (rcrp->rcr_addr &
2790 	    (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));
2791 
2792 	cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF);
2793 
2794 	/*
2795 	 * Timeout should be set based on the system clock divider. The
2796 	 * following timeout value of 1 assumes that the granularity (1000) is
2797 	 * 3 microseconds running at 300MHz.
2798 	 */
2799 	cfgb_p->bits.pthres = rcrp->intr_threshold;
2800 	cfgb_p->bits.timeout = rcrp->intr_timeout;
2801 	cfgb_p->bits.entout = 1;
2802 
2803 	/* Map in the mailbox */
2804 	cntl_dmap = *dma_mbox_cntl_p;
2805 	mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
2806 	dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox;
2807 	hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
2808 	cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1;
2809 	cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2;
2810 	cfig1_p->value = cfig2_p->value = 0;
2811 
2812 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
2813 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2814 	    "==> hxge_map_rxdma_channel_cfg_ring: "
2815 	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
2816 	    dma_channel, cfig1_p->value, cfig2_p->value,
2817 	    mboxp->mbox_addr));
2818 
2819 	dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff);
2820 	cfig1_p->bits.mbaddr_h = dmaaddrp;
2821 
2822 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
2823 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
2824 	    RXDMA_CFIG2_MBADDR_L_MASK);
2825 
2826 	cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
2827 
2828 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2829 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p "
2830 	    "cfg1 0x%016llx cfig2 0x%016llx",
2831 	    dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value));
2832 
2833 	cfig2_p->bits.full_hdr = rcrp->full_hdr_flag;
2834 	cfig2_p->bits.offset = rcrp->sw_priv_hdr_len;
2835 
2836 	rbrp->rx_rcr_p = rcrp;
2837 	rcrp->rx_rbr_p = rbrp;
2838 	*rcr_p = rcrp;
2839 	*rx_mbox_p = mboxp;
2840 
2841 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2842 	    "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
2843 	return (status);
2844 }
2845 
2846 /*ARGSUSED*/
2847 static void
2848 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
2849     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
2850 {
2851 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2852 	    "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc));
2853 
2854 	MUTEX_DESTROY(&rcr_p->lock);
2855 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
2856 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
2857 
2858 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2859 	    "<== hxge_unmap_rxdma_channel_cfg_ring"));
2860 }
2861 
2862 static hxge_status_t
2863 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
2864     p_hxge_dma_common_t *dma_buf_p,
2865     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
2866 {
2867 	p_rx_rbr_ring_t		rbrp;
2868 	p_hxge_dma_common_t	dma_bufp, tmp_bufp;
2869 	p_rx_msg_t		*rx_msg_ring;
2870 	p_rx_msg_t		rx_msg_p;
2871 	p_mblk_t		mblk_p;
2872 
2873 	rxring_info_t *ring_info;
2874 	hxge_status_t status = HXGE_OK;
2875 	int i, j, index;
2876 	uint32_t size, bsize, nblocks, nmsgs;
2877 
2878 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2879 	    "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel));
2880 
2881 	dma_bufp = tmp_bufp = *dma_buf_p;
2882 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2883 	    " hxge_map_rxdma_channel_buf_ring: channel %d to map %d "
2884 	    "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp));
2885 
2886 	nmsgs = 0;
2887 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2888 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2889 		    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2890 		    "bufp 0x%016llx nblocks %d nmsgs %d",
2891 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2892 		nmsgs += tmp_bufp->nblocks;
2893 	}
2894 	if (!nmsgs) {
2895 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2896 		    "<== hxge_map_rxdma_channel_buf_ring: channel %d "
2897 		    "no msg blocks", channel));
2898 		status = HXGE_ERROR;
2899 		goto hxge_map_rxdma_channel_buf_ring_exit;
2900 	}
2901 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);
2902 
2903 	size = nmsgs * sizeof (p_rx_msg_t);
2904 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2905 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
2906 	    KM_SLEEP);
2907 
2908 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
2909 	    (void *) hxgep->interrupt_cookie);
2910 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
2911 	    (void *) hxgep->interrupt_cookie);
2912 
2913 	rbrp->rdc = channel;
2914 	rbrp->num_blocks = num_chunks;
2915 	rbrp->tnblocks = nmsgs;
2916 	rbrp->rbb_max = nmsgs;
2917 	rbrp->rbr_max_size = nmsgs;
2918 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
2919 
2920 	/*
2921 	 * Buffer sizes: 256, 1K, and 2K.
2922 	 *
2923 	 * Blk 0 size.
2924 	 */
2925 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
2926 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
2927 	rbrp->hpi_pkt_buf_size0 = SIZE_256B;
2928 
2929 	/*
2930 	 * Blk 1 size.
2931 	 */
2932 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
2933 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
2934 	rbrp->hpi_pkt_buf_size1 = SIZE_1KB;
2935 
2936 	/*
2937 	 * Blk 2 size.
2938 	 */
2939 	rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
2940 	rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
2941 	rbrp->hpi_pkt_buf_size2 = SIZE_2KB;
2942 
2943 	rbrp->block_size = hxgep->rx_default_block_size;
2944 
2945 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2946 	    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
2947 	    "actual rbr max %d rbb_max %d nmsgs %d "
2948 	    "rbrp->block_size %d default_block_size %d "
2949 	    "(config hxge_rbr_size %d hxge_rbr_spare_size %d)",
2950 	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
2951 	    rbrp->block_size, hxgep->rx_default_block_size,
2952 	    hxge_rbr_size, hxge_rbr_spare_size));
2953 
2954 	/*
2955 	 * Map in buffers from the buffer pool.
2956 	 * Note that num_blocks is the num_chunks. For Sparc, there is likely
2957 	 * only one chunk. For x86, there will be many chunks.
2958 	 * Loop over chunks.
2959 	 */
2960 	index = 0;
2961 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
2962 		bsize = dma_bufp->block_size;
2963 		nblocks = dma_bufp->nblocks;
2964 #if defined(__i386)
2965 		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
2966 #else
2967 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
2968 #endif
2969 		ring_info->buffer[i].buf_index = i;
2970 		ring_info->buffer[i].buf_size = dma_bufp->alength;
2971 		ring_info->buffer[i].start_index = index;
2972 #if defined(__i386)
2973 		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
2974 #else
2975 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
2976 #endif
2977 
2978 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
2979 		    " hxge_map_rxdma_channel_buf_ring: map channel %d "
2980 		    "chunk %d nblocks %d chunk_size %x block_size 0x%x "
2981 		    "dma_bufp $%p dvma_addr $%p", channel, i,
2982 		    dma_bufp->nblocks,
2983 		    ring_info->buffer[i].buf_size, bsize, dma_bufp,
2984 		    ring_info->buffer[i].dvma_addr));
2985 
2986 		/* loop over blocks within a chunk */
2987 		for (j = 0; j < nblocks; j++) {
2988 			if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO,
2989 			    dma_bufp)) == NULL) {
2990 				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2991 				    "allocb failed (index %d i %d j %d)",
2992 				    index, i, j));
2993 				goto hxge_map_rxdma_channel_buf_ring_fail1;
2994 			}
2995 			rx_msg_ring[index] = rx_msg_p;
2996 			rx_msg_p->block_index = index;
2997 			rx_msg_p->shifted_addr = (uint32_t)
2998 			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
2999 			    RBR_BKADDR_SHIFT));
3000 			/*
3001 			 * Too much output
3002 			 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3003 			 *	"index %d j %d rx_msg_p $%p mblk %p",
3004 			 *	index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
3005 			 */
3006 			mblk_p = rx_msg_p->rx_mblk_p;
3007 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
3008 
3009 			rbrp->rbr_ref_cnt++;
3010 			index++;
3011 			rx_msg_p->buf_dma.dma_channel = channel;
3012 		}
3013 	}
3014 	if (i < rbrp->num_blocks) {
3015 		goto hxge_map_rxdma_channel_buf_ring_fail1;
3016 	}
3017 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3018 	    "hxge_map_rxdma_channel_buf_ring: done buf init "
3019 	    "channel %d msg block entries %d", channel, index));
3020 	ring_info->block_size_mask = bsize - 1;
3021 	rbrp->rx_msg_ring = rx_msg_ring;
3022 	rbrp->dma_bufp = dma_buf_p;
3023 	rbrp->ring_info = ring_info;
3024 
3025 	status = hxge_rxbuf_index_info_init(hxgep, rbrp);
3026 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: "
3027 	    "channel %d done buf info init", channel));
3028 
3029 	/*
3030 	 * Finally, permit hxge_freeb() to call hxge_post_page().
3031 	 */
3032 	rbrp->rbr_state = RBR_POSTING;
3033 
3034 	*rbr_p = rbrp;
3035 
3036 	goto hxge_map_rxdma_channel_buf_ring_exit;
3037 
3038 hxge_map_rxdma_channel_buf_ring_fail1:
3039 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3040 	    " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
3041 	    channel, status));
3042 
3043 	index--;
3044 	for (; index >= 0; index--) {
3045 		rx_msg_p = rx_msg_ring[index];
3046 		if (rx_msg_p != NULL) {
3047 			freeb(rx_msg_p->rx_mblk_p);
3048 			rx_msg_ring[index] = NULL;
3049 		}
3050 	}
3051 
3052 hxge_map_rxdma_channel_buf_ring_fail:
3053 	MUTEX_DESTROY(&rbrp->post_lock);
3054 	MUTEX_DESTROY(&rbrp->lock);
3055 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
3056 	KMEM_FREE(rx_msg_ring, size);
3057 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
3058 
3059 	status = HXGE_ERROR;
3060 
3061 hxge_map_rxdma_channel_buf_ring_exit:
3062 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3063 	    "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status));
3064 
3065 	return (status);
3066 }
3067 
3068 /*ARGSUSED*/
3069 static void
3070 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
3071     p_rx_rbr_ring_t rbr_p)
3072 {
3073 	p_rx_msg_t	*rx_msg_ring;
3074 	p_rx_msg_t	rx_msg_p;
3075 	rxring_info_t	*ring_info;
3076 	int		i;
3077 	uint32_t	size;
3078 
3079 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3080 	    "==> hxge_unmap_rxdma_channel_buf_ring"));
3081 	if (rbr_p == NULL) {
3082 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3083 		    "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
3084 		return;
3085 	}
3086 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3087 	    "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc));
3088 
3089 	rx_msg_ring = rbr_p->rx_msg_ring;
3090 	ring_info = rbr_p->ring_info;
3091 
3092 	if (rx_msg_ring == NULL || ring_info == NULL) {
3093 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3094 		    "<== hxge_unmap_rxdma_channel_buf_ring: "
3095 		    "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info));
3096 		return;
3097 	}
3098 
3099 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
3100 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3101 	    " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
3102 	    "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks,
3103 	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));
3104 
3105 	for (i = 0; i < rbr_p->tnblocks; i++) {
3106 		rx_msg_p = rx_msg_ring[i];
3107 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3108 		    " hxge_unmap_rxdma_channel_buf_ring: "
3109 		    "rx_msg_p $%p", rx_msg_p));
3110 		if (rx_msg_p != NULL) {
3111 			freeb(rx_msg_p->rx_mblk_p);
3112 			rx_msg_ring[i] = NULL;
3113 		}
3114 	}
3115 
3116 	/*
3117 	 * We no longer may use the mutex <post_lock>. By setting
3118 	 * <rbr_state> to anything but POSTING, we prevent
3119 	 * hxge_post_page() from accessing a dead mutex.
3120 	 */
3121 	rbr_p->rbr_state = RBR_UNMAPPING;
3122 	MUTEX_DESTROY(&rbr_p->post_lock);
3123 
3124 	MUTEX_DESTROY(&rbr_p->lock);
3125 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
3126 	KMEM_FREE(rx_msg_ring, size);
3127 
3128 	if (rbr_p->rbr_ref_cnt == 0) {
3129 		/* This is the normal state of affairs. */
3130 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
3131 	} else {
3132 		/*
3133 		 * Some of our buffers are still being used.
3134 		 * Therefore, tell hxge_freeb() this ring is
3135 		 * unmapped, so it may free <rbr_p> for us.
3136 		 */
3137 		rbr_p->rbr_state = RBR_UNMAPPED;
3138 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3139 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
3140 		    rbr_p->rbr_ref_cnt,
3141 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
3142 	}
3143 
3144 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3145 	    "<== hxge_unmap_rxdma_channel_buf_ring"));
3146 }
3147 
3148 static hxge_status_t
3149 hxge_rxdma_hw_start_common(p_hxge_t hxgep)
3150 {
3151 	hxge_status_t status = HXGE_OK;
3152 
3153 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
3154 
3155 	/*
3156 	 * Load the sharable parameters by writing to the function zero control
3157 	 * registers. These FZC registers should be initialized only once for
3158 	 * the entire chip.
3159 	 */
3160 	(void) hxge_init_fzc_rx_common(hxgep);
3161 
3162 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
3163 
3164 	return (status);
3165 }
3166 
3167 static hxge_status_t
3168 hxge_rxdma_hw_start(p_hxge_t hxgep)
3169 {
3170 	int			i, ndmas;
3171 	uint16_t		channel;
3172 	p_rx_rbr_rings_t	rx_rbr_rings;
3173 	p_rx_rbr_ring_t		*rbr_rings;
3174 	p_rx_rcr_rings_t	rx_rcr_rings;
3175 	p_rx_rcr_ring_t		*rcr_rings;
3176 	p_rx_mbox_areas_t	rx_mbox_areas_p;
3177 	p_rx_mbox_t		*rx_mbox_p;
3178 	hxge_status_t		status = HXGE_OK;
3179 
3180 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start"));
3181 
3182 	rx_rbr_rings = hxgep->rx_rbr_rings;
3183 	rx_rcr_rings = hxgep->rx_rcr_rings;
3184 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3185 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3186 		    "<== hxge_rxdma_hw_start: NULL ring pointers"));
3187 		return (HXGE_ERROR);
3188 	}
3189 
3190 	ndmas = rx_rbr_rings->ndmas;
3191 	if (ndmas == 0) {
3192 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3193 		    "<== hxge_rxdma_hw_start: no dma channel allocated"));
3194 		return (HXGE_ERROR);
3195 	}
3196 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3197 	    "==> hxge_rxdma_hw_start (ndmas %d)", ndmas));
3198 
3199 	/*
3200 	 * Scrub the RDC Rx DMA Prefetch Buffer Command.
3201 	 */
3202 	for (i = 0; i < 128; i++) {
3203 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i);
3204 	}
3205 
3206 	/*
3207 	 * Scrub Rx DMA Shadow Tail Command.
3208 	 */
3209 	for (i = 0; i < 64; i++) {
3210 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i);
3211 	}
3212 
3213 	/*
3214 	 * Scrub Rx DMA Control Fifo Command.
3215 	 */
3216 	for (i = 0; i < 512; i++) {
3217 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i);
3218 	}
3219 
3220 	/*
3221 	 * Scrub Rx DMA Data Fifo Command.
3222 	 */
3223 	for (i = 0; i < 1536; i++) {
3224 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i);
3225 	}
3226 
3227 	/*
3228 	 * Reset the FIFO Error Stat.
3229 	 */
3230 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF);
3231 
3232 	/* Set the error mask to receive interrupts */
3233 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3234 
3235 	rbr_rings = rx_rbr_rings->rbr_rings;
3236 	rcr_rings = rx_rcr_rings->rcr_rings;
3237 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
3238 	if (rx_mbox_areas_p) {
3239 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
3240 	}
3241 
3242 	for (i = 0; i < ndmas; i++) {
3243 		channel = rbr_rings[i]->rdc;
3244 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3245 		    "==> hxge_rxdma_hw_start (ndmas %d) channel %d",
3246 		    ndmas, channel));
3247 		status = hxge_rxdma_start_channel(hxgep, channel,
3248 		    (p_rx_rbr_ring_t)rbr_rings[i],
3249 		    (p_rx_rcr_ring_t)rcr_rings[i],
3250 		    (p_rx_mbox_t)rx_mbox_p[i], rbr_rings[i]->rbb_max);
3251 		if (status != HXGE_OK) {
3252 			goto hxge_rxdma_hw_start_fail1;
3253 		}
3254 	}
3255 
3256 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: "
3257 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
3258 	    rx_rbr_rings, rx_rcr_rings));
3259 	goto hxge_rxdma_hw_start_exit;
3260 
3261 hxge_rxdma_hw_start_fail1:
3262 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3263 	    "==> hxge_rxdma_hw_start: disable "
3264 	    "(status 0x%x channel %d i %d)", status, channel, i));
3265 	for (; i >= 0; i--) {
3266 		channel = rbr_rings[i]->rdc;
3267 		(void) hxge_rxdma_stop_channel(hxgep, channel);
3268 	}
3269 
3270 hxge_rxdma_hw_start_exit:
3271 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3272 	    "==> hxge_rxdma_hw_start: (status 0x%x)", status));
3273 	return (status);
3274 }
3275 
3276 static void
3277 hxge_rxdma_hw_stop(p_hxge_t hxgep)
3278 {
3279 	int			i, ndmas;
3280 	uint16_t		channel;
3281 	p_rx_rbr_rings_t	rx_rbr_rings;
3282 	p_rx_rbr_ring_t		*rbr_rings;
3283 	p_rx_rcr_rings_t	rx_rcr_rings;
3284 
3285 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop"));
3286 
3287 	rx_rbr_rings = hxgep->rx_rbr_rings;
3288 	rx_rcr_rings = hxgep->rx_rcr_rings;
3289 
3290 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
3291 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3292 		    "<== hxge_rxdma_hw_stop: NULL ring pointers"));
3293 		return;
3294 	}
3295 
3296 	ndmas = rx_rbr_rings->ndmas;
3297 	if (!ndmas) {
3298 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
3299 		    "<== hxge_rxdma_hw_stop: no dma channel allocated"));
3300 		return;
3301 	}
3302 
3303 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3304 	    "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas));
3305 
3306 	rbr_rings = rx_rbr_rings->rbr_rings;
3307 	for (i = 0; i < ndmas; i++) {
3308 		channel = rbr_rings[i]->rdc;
3309 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3310 		    "==> hxge_rxdma_hw_stop (ndmas %d) channel %d",
3311 		    ndmas, channel));
3312 		(void) hxge_rxdma_stop_channel(hxgep, channel);
3313 	}
3314 
3315 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: "
3316 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
3317 	    rx_rbr_rings, rx_rcr_rings));
3318 
3319 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop"));
3320 }
3321 
3322 static hxge_status_t
3323 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
3324     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
3325     int n_init_kick)
3326 {
3327 	hpi_handle_t		handle;
3328 	hpi_status_t		rs = HPI_SUCCESS;
3329 	rdc_stat_t		cs;
3330 	rdc_int_mask_t		ent_mask;
3331 	hxge_status_t		status = HXGE_OK;
3332 
3333 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel"));
3334 
3335 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3336 
3337 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: "
3338 	    "hpi handle addr $%p acc $%p",
3339 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3340 
3341 	/* Reset RXDMA channel */
3342 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3343 	if (rs != HPI_SUCCESS) {
3344 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3345 		    "==> hxge_rxdma_start_channel: "
3346 		    "reset rxdma failed (0x%08x channel %d)",
3347 		    status, channel));
3348 		return (HXGE_ERROR | rs);
3349 	}
3350 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3351 	    "==> hxge_rxdma_start_channel: reset done: channel %d", channel));
3352 
3353 	/*
3354 	 * Initialize the RXDMA channel specific FZC control configurations.
3355 	 * These FZC registers are pertaining to each RX channel (logical
3356 	 * pages).
3357 	 */
3358 	status = hxge_init_fzc_rxdma_channel(hxgep,
3359 	    channel, rbr_p, rcr_p, mbox_p);
3360 	if (status != HXGE_OK) {
3361 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3362 		    "==> hxge_rxdma_start_channel: "
3363 		    "init fzc rxdma failed (0x%08x channel %d)",
3364 		    status, channel));
3365 		return (status);
3366 	}
3367 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3368 	    "==> hxge_rxdma_start_channel: fzc done"));
3369 
3370 	/*
3371 	 * Zero out the shadow  and prefetch ram.
3372 	 */
3373 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3374 	    "==> hxge_rxdma_start_channel: ram done"));
3375 
3376 	/* Set up the interrupt event masks. */
3377 	ent_mask.value = 0;
3378 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3379 	if (rs != HPI_SUCCESS) {
3380 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3381 		    "==> hxge_rxdma_start_channel: "
3382 		    "init rxdma event masks failed (0x%08x channel %d)",
3383 		    status, channel));
3384 		return (HXGE_ERROR | rs);
3385 	}
3386 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3387 	    "event done: channel %d (mask 0x%016llx)",
3388 	    channel, ent_mask.value));
3389 
3390 	/*
3391 	 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA
3392 	 * channels and enable each DMA channel.
3393 	 */
3394 	status = hxge_enable_rxdma_channel(hxgep,
3395 	    channel, rbr_p, rcr_p, mbox_p, n_init_kick);
3396 	if (status != HXGE_OK) {
3397 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3398 		    " hxge_rxdma_start_channel: "
3399 		    " init enable rxdma failed (0x%08x channel %d)",
3400 		    status, channel));
3401 		return (status);
3402 	}
3403 
3404 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3405 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
3406 
3407 	/*
3408 	 * Initialize the receive DMA control and status register
3409 	 * Note that rdc_stat HAS to be set after RBR and RCR rings are set
3410 	 */
3411 	cs.value = 0;
3412 	cs.bits.mex = 1;
3413 	cs.bits.rcr_thres = 1;
3414 	cs.bits.rcr_to = 1;
3415 	cs.bits.rbr_empty = 1;
3416 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3417 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3418 	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
3419 	if (status != HXGE_OK) {
3420 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3421 		    "==> hxge_rxdma_start_channel: "
3422 		    "init rxdma control register failed (0x%08x channel %d",
3423 		    status, channel));
3424 		return (status);
3425 	}
3426 
3427 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
3428 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
3429 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
3430 	    "==> hxge_rxdma_start_channel: enable done"));
3431 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel"));
3432 	return (HXGE_OK);
3433 }
3434 
3435 static hxge_status_t
3436 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel)
3437 {
3438 	hpi_handle_t		handle;
3439 	hpi_status_t		rs = HPI_SUCCESS;
3440 	rdc_stat_t		cs;
3441 	rdc_int_mask_t		ent_mask;
3442 	hxge_status_t		status = HXGE_OK;
3443 
3444 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel"));
3445 
3446 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3447 
3448 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: "
3449 	    "hpi handle addr $%p acc $%p",
3450 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
3451 
3452 	/* Reset RXDMA channel */
3453 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3454 	if (rs != HPI_SUCCESS) {
3455 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3456 		    " hxge_rxdma_stop_channel: "
3457 		    " reset rxdma failed (0x%08x channel %d)",
3458 		    rs, channel));
3459 		return (HXGE_ERROR | rs);
3460 	}
3461 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3462 	    "==> hxge_rxdma_stop_channel: reset done"));
3463 
3464 	/* Set up the interrupt event masks. */
3465 	ent_mask.value = RDC_INT_MASK_ALL;
3466 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3467 	if (rs != HPI_SUCCESS) {
3468 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3469 		    "==> hxge_rxdma_stop_channel: "
3470 		    "set rxdma event masks failed (0x%08x channel %d)",
3471 		    rs, channel));
3472 		return (HXGE_ERROR | rs);
3473 	}
3474 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3475 	    "==> hxge_rxdma_stop_channel: event done"));
3476 
3477 	/* Initialize the receive DMA control and status register */
3478 	cs.value = 0;
3479 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
3480 
3481 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control "
3482 	    " to default (all 0s) 0x%08x", cs.value));
3483 
3484 	if (status != HXGE_OK) {
3485 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3486 		    " hxge_rxdma_stop_channel: init rxdma"
3487 		    " control register failed (0x%08x channel %d",
3488 		    status, channel));
3489 		return (status);
3490 	}
3491 
3492 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3493 	    "==> hxge_rxdma_stop_channel: control done"));
3494 
3495 	/* disable dma channel */
3496 	status = hxge_disable_rxdma_channel(hxgep, channel);
3497 
3498 	if (status != HXGE_OK) {
3499 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3500 		    " hxge_rxdma_stop_channel: "
3501 		    " init enable rxdma failed (0x%08x channel %d)",
3502 		    status, channel));
3503 		return (status);
3504 	}
3505 
3506 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
3507 	    "==> hxge_rxdma_stop_channel: disable done"));
3508 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel"));
3509 
3510 	return (HXGE_OK);
3511 }
3512 
3513 hxge_status_t
3514 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)
3515 {
3516 	hpi_handle_t		handle;
3517 	p_hxge_rdc_sys_stats_t	statsp;
3518 	rdc_fifo_err_stat_t	stat;
3519 	hxge_status_t		status = HXGE_OK;
3520 
3521 	handle = hxgep->hpi_handle;
3522 	statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
3523 
3524 	/* Get the error status and clear the register */
3525 	HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
3526 	HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
3527 
3528 	if (stat.bits.rx_ctrl_fifo_sec) {
3529 		statsp->ctrl_fifo_sec++;
3530 		if (statsp->ctrl_fifo_sec == 1)
3531 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3532 			    "==> hxge_rxdma_handle_sys_errors: "
3533 			    "rx_ctrl_fifo_sec"));
3534 	}
3535 
3536 	if (stat.bits.rx_ctrl_fifo_ded) {
3537 		/* Global fatal error encountered */
3538 		statsp->ctrl_fifo_ded++;
3539 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
3540 		    HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED);
3541 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3542 		    "==> hxge_rxdma_handle_sys_errors: "
3543 		    "fatal error: rx_ctrl_fifo_ded error"));
3544 	}
3545 
3546 	if (stat.bits.rx_data_fifo_sec) {
3547 		statsp->data_fifo_sec++;
3548 		if (statsp->data_fifo_sec == 1)
3549 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3550 			    "==> hxge_rxdma_handle_sys_errors: "
3551 			    "rx_data_fifo_sec"));
3552 	}
3553 
3554 	if (stat.bits.rx_data_fifo_ded) {
3555 		/* Global fatal error encountered */
3556 		statsp->data_fifo_ded++;
3557 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
3558 		    HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED);
3559 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3560 		    "==> hxge_rxdma_handle_sys_errors: "
3561 		    "fatal error: rx_data_fifo_ded error"));
3562 	}
3563 
3564 	if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) {
3565 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3566 		    " hxge_rxdma_handle_sys_errors: fatal error\n"));
3567 		status = hxge_rx_port_fatal_err_recover(hxgep);
3568 		if (status == HXGE_OK) {
3569 			FM_SERVICE_RESTORED(hxgep);
3570 		}
3571 	}
3572 
3573 	return (HXGE_OK);
3574 }
3575 
3576 static hxge_status_t
3577 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
3578 {
3579 	hpi_handle_t		handle;
3580 	hpi_status_t 		rs = HPI_SUCCESS;
3581 	p_rx_rbr_ring_t		rbrp;
3582 	p_rx_rcr_ring_t		rcrp;
3583 	p_rx_mbox_t		mboxp;
3584 	rdc_int_mask_t		ent_mask;
3585 	p_hxge_dma_common_t	dmap;
3586 	p_rx_msg_t		rx_msg_p;
3587 	int			i;
3588 	uint32_t		hxge_port_rcr_size;
3589 	uint64_t		tmp;
3590 	int			n_init_kick = 0;
3591 
3592 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover"));
3593 
3594 	/*
3595 	 * Stop the dma channel waits for the stop done. If the stop done bit
3596 	 * is not set, then create an error.
3597 	 */
3598 
3599 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3600 
3601 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop..."));
3602 
3603 	rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[channel];
3604 	rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[channel];
3605 
3606 	MUTEX_ENTER(&rcrp->lock);
3607 	MUTEX_ENTER(&rbrp->lock);
3608 
3609 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel..."));
3610 
3611 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
3612 	if (rs != HPI_SUCCESS) {
3613 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3614 		    "hxge_disable_rxdma_channel:failed"));
3615 		goto fail;
3616 	}
3617 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt..."));
3618 
3619 	/* Disable interrupt */
3620 	ent_mask.value = RDC_INT_MASK_ALL;
3621 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
3622 	if (rs != HPI_SUCCESS) {
3623 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3624 		    "Set rxdma event masks failed (channel %d)", channel));
3625 	}
3626 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset..."));
3627 
3628 	/* Reset RXDMA channel */
3629 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
3630 	if (rs != HPI_SUCCESS) {
3631 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3632 		    "Reset rxdma failed (channel %d)", channel));
3633 		goto fail;
3634 	}
3635 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
3636 	mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3637 
3638 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
3639 	rbrp->rbr_rd_index = 0;
3640 
3641 	rcrp->comp_rd_index = 0;
3642 	rcrp->comp_wt_index = 0;
3643 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
3644 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
3645 #if defined(__i386)
3646 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3647 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3648 #else
3649 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
3650 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
3651 #endif
3652 
3653 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
3654 	    (hxge_port_rcr_size - 1);
3655 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
3656 	    (hxge_port_rcr_size - 1);
3657 
3658 	rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
3659 	rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
3660 
3661 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
3662 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
3663 
3664 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n",
3665 	    rbrp->rbr_max_size));
3666 
3667 	/* Count the number of buffers owned by the hardware at this moment */
3668 	for (i = 0; i < rbrp->rbr_max_size; i++) {
3669 		rx_msg_p = rbrp->rx_msg_ring[i];
3670 		if (rx_msg_p->ref_cnt == 1) {
3671 			n_init_kick++;
3672 		}
3673 	}
3674 
3675 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start..."));
3676 
3677 	/*
3678 	 * This is error recover! Some buffers are owned by the hardware and
3679 	 * the rest are owned by the apps. We should only kick in those
3680 	 * owned by the hardware initially. The apps will post theirs
3681 	 * eventually.
3682 	 */
3683 	(void) hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp,
3684 	    n_init_kick);
3685 
3686 	/*
3687 	 * The DMA channel may disable itself automatically.
3688 	 * The following is a work-around.
3689 	 */
3690 	HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp);
3691 	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
3692 	if (rs != HPI_SUCCESS) {
3693 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3694 		    "hpi_rxdma_cfg_rdc_enable (channel %d)", channel));
3695 	}
3696 
3697 	/*
3698 	 * Delay a bit of time by doing reads.
3699 	 */
3700 	for (i = 0; i < 1024; i++) {
3701 		uint64_t value;
3702 		RXDMA_REG_READ64(HXGE_DEV_HPI_HANDLE(hxgep),
3703 		    RDC_INT_MASK, i & 3, &value);
3704 	}
3705 
3706 	MUTEX_EXIT(&rbrp->lock);
3707 	MUTEX_EXIT(&rcrp->lock);
3708 
3709 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover"));
3710 	return (HXGE_OK);
3711 
3712 fail:
3713 	MUTEX_EXIT(&rbrp->lock);
3714 	MUTEX_EXIT(&rcrp->lock);
3715 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3716 	    "Error Recovery failed for channel(%d)", channel));
3717 	return (HXGE_ERROR | rs);
3718 }
3719 
3720 static hxge_status_t
3721 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
3722 {
3723 	hxge_status_t		status = HXGE_OK;
3724 	p_hxge_dma_common_t	*dma_buf_p;
3725 	uint16_t		channel;
3726 	int			ndmas;
3727 	int			i;
3728 	block_reset_t		reset_reg;
3729 	p_rx_rcr_ring_t	rcrp;
3730 	p_rx_rbr_ring_t rbrp;
3731 
3732 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover"));
3733 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ..."));
3734 
3735 	/* Disable RxMAC */
3736 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n"));
3737 	MUTEX_ENTER(&hxgep->vmac_lock);
3738 	if (hxge_rx_vmac_disable(hxgep) != HXGE_OK)
3739 		goto fail;
3740 
3741 	HXGE_DELAY(1000);
3742 
3743 	/*
3744 	 * Reset RDC block from PEU for this fatal error
3745 	 */
3746 	reset_reg.value = 0;
3747 	reset_reg.bits.rdc_rst = 1;
3748 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
3749 
3750 	HXGE_DELAY(1000);
3751 
3752 	/* Restore any common settings after PEU reset */
3753 	if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK)
3754 		goto fail;
3755 
3756 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels..."));
3757 
3758 	ndmas = hxgep->rx_buf_pool_p->ndmas;
3759 	dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p;
3760 
3761 	for (i = 0; i < ndmas; i++) {
3762 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
3763 		rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
3764 		rbrp = rcrp->rx_rbr_p;
3765 
3766 		MUTEX_ENTER(&rbrp->post_lock);
3767 
3768 		/*
3769 		 * This function needs to be inside the post_lock
3770 		 */
3771 		if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) {
3772 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3773 			    "Could not recover channel %d", channel));
3774 		}
3775 		MUTEX_EXIT(&rbrp->post_lock);
3776 	}
3777 
3778 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC..."));
3779 
3780 	/* Reset RxMAC */
3781 	if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) {
3782 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3783 		    "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
3784 		goto fail;
3785 	}
3786 
3787 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC..."));
3788 
3789 	/* Re-Initialize RxMAC */
3790 	if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) {
3791 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3792 		    "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
3793 		goto fail;
3794 	}
3795 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC..."));
3796 
3797 	/* Re-enable RxMAC */
3798 	if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) {
3799 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3800 		    "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
3801 		goto fail;
3802 	}
3803 	MUTEX_EXIT(&hxgep->vmac_lock);
3804 
3805 	/* Reset the error mask since PEU reset cleared it */
3806 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
3807 
3808 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3809 	    "Recovery Successful, RxPort Restored"));
3810 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover"));
3811 	return (HXGE_OK);
3812 
3813 fail:
3814 	MUTEX_EXIT(&hxgep->vmac_lock);
3815 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3816 	    "Error Recovery failed for hxge(%d)", hxgep->instance));
3817 	return (status);
3818 }
3819 
3820 static void
3821 hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p)
3822 {
3823 	hpi_status_t		hpi_status;
3824 	hxge_status_t		status;
3825 	rdc_stat_t		cs;
3826 	p_hxge_rx_ring_stats_t	rdc_stats;
3827 
3828 	rdc_stats = &hxgep->statsp->rdc_stats[rx_rbr_p->rdc];
3829 
3830 	/*
3831 	 * Complete the processing for the RBR Empty by:
3832 	 *	0) kicking back HXGE_RBR_EMPTY_THRESHOLD
3833 	 *	   packets.
3834 	 *	1) Disable the RX vmac.
3835 	 *	2) Re-enable the affected DMA channel.
3836 	 *	3) Re-enable the RX vmac.
3837 	 */
3838 
3839 	/*
3840 	 * Disable the RX VMAC, but setting the framelength
3841 	 * to 0, since there is a hardware bug when disabling
3842 	 * the vmac.
3843 	 */
3844 	MUTEX_ENTER(&hxgep->vmac_lock);
3845 	(void) hxge_rx_vmac_disable(hxgep);
3846 
3847 	/*
3848 	 * Re-arm the mex bit for interrupts to be enabled.
3849 	 */
3850 	cs.value = 0;
3851 	cs.bits.mex = 1;
3852 	RXDMA_REG_WRITE64(HXGE_DEV_HPI_HANDLE(hxgep), RDC_STAT,
3853 	    rx_rbr_p->rdc, cs.value);
3854 
3855 	hpi_status = hpi_rxdma_cfg_rdc_enable(
3856 	    HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc);
3857 	if (hpi_status != HPI_SUCCESS) {
3858 		rdc_stats->rbr_empty_fail++;
3859 
3860 		/* Assume we are already inside the post_lock */
3861 		status = hxge_rxdma_fatal_err_recover(hxgep, rx_rbr_p->rdc);
3862 		if (status != HXGE_OK) {
3863 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3864 			    "hxge(%d): channel(%d) is empty.",
3865 			    hxgep->instance, rx_rbr_p->rdc));
3866 		}
3867 	}
3868 
3869 	/*
3870 	 * Re-enable the RX VMAC.
3871 	 */
3872 	(void) hxge_rx_vmac_enable(hxgep);
3873 	MUTEX_EXIT(&hxgep->vmac_lock);
3874 
3875 	rdc_stats->rbr_empty_restore++;
3876 	rx_rbr_p->rbr_is_empty = B_FALSE;
3877 }
3878