xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_txdma.c (revision 628e3cbed6489fa1db545d8524a06cd6535af456)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/nxge/nxge_impl.h>
27 #include <sys/nxge/nxge_txdma.h>
28 #include <sys/nxge/nxge_hio.h>
29 #include <npi_tx_rd64.h>
30 #include <npi_tx_wr64.h>
31 #include <sys/llc1.h>
32 
33 uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
34 uint32_t	nxge_tx_minfree = 32;
35 uint32_t	nxge_tx_intr_thres = 0;
36 uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
37 uint32_t	nxge_tx_tiny_pack = 1;
38 uint32_t	nxge_tx_use_bcopy = 1;
39 
40 extern uint32_t 	nxge_tx_ring_size;
41 extern uint32_t 	nxge_bcopy_thresh;
42 extern uint32_t 	nxge_dvma_thresh;
43 extern uint32_t 	nxge_dma_stream_thresh;
44 extern dma_method_t 	nxge_force_dma;
45 extern uint32_t		nxge_cksum_offload;
46 
47 /* Device register access attributes for PIO.  */
48 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
49 /* Device descriptor access attributes for DMA.  */
50 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
51 /* Device buffer access attributes for DMA.  */
52 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
53 extern ddi_dma_attr_t nxge_desc_dma_attr;
54 extern ddi_dma_attr_t nxge_tx_dma_attr;
55 
56 extern int nxge_serial_tx(mblk_t *mp, void *arg);
57 
58 void nxge_txdma_freemsg_task(p_tx_ring_t tx_ring_p);
59 
60 static nxge_status_t nxge_map_txdma(p_nxge_t, int);
61 
62 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
63 
64 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
65 	p_nxge_dma_common_t *, p_tx_ring_t *,
66 	uint32_t, p_nxge_dma_common_t *,
67 	p_tx_mbox_t *);
68 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
69 
70 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
71 	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
72 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
73 
74 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
75 	p_nxge_dma_common_t *, p_tx_ring_t,
76 	p_tx_mbox_t *);
77 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
78 	p_tx_ring_t, p_tx_mbox_t);
79 
80 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
81     p_tx_ring_t, p_tx_mbox_t);
82 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
83 
84 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
85 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
86 	p_nxge_ldv_t, tx_cs_t);
87 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
88 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
89 	uint16_t, p_tx_ring_t);
90 
91 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
92     p_tx_ring_t ring_p, uint16_t channel);
93 
94 nxge_status_t
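/*
 * nxge_init_txdma_channels
 *
 *	Walk the transmit groups owned by this nxge instance and add
 *	each TDC to its group via nxge_grp_dc_add().
 *
 * Arguments:
 * 	nxgep
 */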
95 nxge_init_txdma_channels(p_nxge_t nxgep)
96 {
97 	nxge_grp_set_t *set = &nxgep->tx_set;
98 	int i, count;
99 
100 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
101 
102 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
103 		if ((1 << i) & set->lg.map) {
104 			int tdc;
105 			nxge_grp_t *group = set->group[i];
106 			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
107 				if ((1 << tdc) & group->map) {
108 					if ((nxge_grp_dc_add(nxgep,
109 					    group, VP_BOUND_TX, tdc)))
110 						return (NXGE_ERROR);
111 				}
112 			}
113 		}
114 		if (++count == set->lg.count)
115 			break;
116 	}
117 
118 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
119 
120 	return (NXGE_OK);
121 }
122 
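/*
 * nxge_init_txdma_channel
 *
 *	Map a single TDC, start it, and set up its kstats if they do
 *	not already exist.
 *
 * Arguments:
 * 	nxge
 * 	channel		The channel to initialize.
 */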
123 nxge_status_t
124 nxge_init_txdma_channel(
125 	p_nxge_t nxge,
126 	int channel)
127 {
128 	nxge_status_t status;
129 
130 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
131 
132 	status = nxge_map_txdma(nxge, channel);
133 	if (status != NXGE_OK) {
134 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
135 		    "<== nxge_init_txdma_channel: status 0x%x", status));
136 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
137 		return (status);
138 	}
139 
140 	status = nxge_txdma_hw_start(nxge, channel);
141 	if (status != NXGE_OK) {
142 		(void) nxge_unmap_txdma_channel(nxge, channel);
143 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
144 		return (status);
145 	}
146 
147 	if (!nxge->statsp->tdc_ksp[channel])
148 		nxge_setup_tdc_kstats(nxge, channel);
149 
150 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
151 
152 	return (status);
153 }
154 
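/*
 * nxge_uninit_txdma_channels
 *
 *	Remove every TDC owned by this nxge instance from its group.
 */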
155 void
156 nxge_uninit_txdma_channels(p_nxge_t nxgep)
157 {
158 	nxge_grp_set_t *set = &nxgep->tx_set;
159 	int tdc;
160 
161 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
162 
163 	if (set->owned.map == 0) {
164 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
165 		    "nxge_uninit_txdma_channels: no channels"));
166 		return;
167 	}
168 
169 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
170 		if ((1 << tdc) & set->owned.map) {
171 			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
172 		}
173 	}
174 
175 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
176 }
177 
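/*
 * nxge_uninit_txdma_channel
 *
 *	Delete the channel's kstats, stop the TDC, and unmap its
 *	resources.
 */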
178 void
179 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
180 {
181 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
182 
183 	if (nxgep->statsp->tdc_ksp[channel]) {
184 		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
185 		nxgep->statsp->tdc_ksp[channel] = 0;
186 	}
187 
188 	(void) nxge_txdma_stop_channel(nxgep, channel);
189 	nxge_unmap_txdma_channel(nxgep, channel);
190 
191 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
192 	    "<== nxge_uninit_txdma_channel"));
193 }
194 
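/*
 * nxge_setup_dma_common
 *
 *	Carve <entries> blocks of <size> bytes each out of the DMA area
 *	described by <src_p> and describe the new sub-area in <dest_p>.
 *	The source area's kernel address, length and DMA cookie are
 *	advanced past the space just handed out.
 */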
195 void
196 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
197 	uint32_t entries, uint32_t size)
198 {
199 	size_t		tsize;
200 	*dest_p = *src_p;
201 	tsize = size * entries;
202 	dest_p->alength = tsize;
203 	dest_p->nblocks = entries;
204 	dest_p->block_size = size;
205 	dest_p->offset += tsize;
206 
207 	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
208 	src_p->alength -= tsize;
209 	src_p->dma_cookie.dmac_laddress += tsize;
210 	src_p->dma_cookie.dmac_size -= tsize;
211 }
212 
213 /*
214  * nxge_reset_txdma_channel
215  *
216  *	Reset a TDC.
217  *
218  * Arguments:
219  * 	nxgep
220  * 	channel		The channel to reset.
221  * 	reg_data	The current TX_CS.
222  *
223  * Notes:
224  *
225  * NPI/NXGE function calls:
226  *	npi_txdma_channel_reset()
227  *	npi_txdma_channel_control()
228  *
229  * Registers accessed:
230  *	TX_CS		DMC+0x40028 Transmit Control And Status
231  *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
232  *
233  * Context:
234  *	Any domain
235  */
236 nxge_status_t
237 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
238 {
239 	npi_status_t		rs = NPI_SUCCESS;
240 	nxge_status_t		status = NXGE_OK;
241 	npi_handle_t		handle;
242 
243 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
244 
245 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
246 	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
247 		rs = npi_txdma_channel_reset(handle, channel);
248 	} else {
249 		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
250 		    channel);
251 	}
252 
253 	if (rs != NPI_SUCCESS) {
254 		status = NXGE_ERROR | rs;
255 	}
256 
257 	/*
258 	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it; a Tx overflow is a fatal
	 * error if the tail is not set to 0 after reset.)
261 	 */
262 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
263 
264 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
265 	return (status);
266 }
267 
268 /*
269  * nxge_init_txdma_channel_event_mask
270  *
271  *	Enable interrupts for a set of events.
272  *
273  * Arguments:
274  * 	nxgep
 * 	channel	The channel whose event mask to set.
276  * 	mask_p	The events to enable.
277  *
278  * Notes:
279  *
280  * NPI/NXGE function calls:
281  *	npi_txdma_event_mask()
282  *
283  * Registers accessed:
284  *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
285  *
286  * Context:
287  *	Any domain
288  */
289 nxge_status_t
290 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
291 		p_tx_dma_ent_msk_t mask_p)
292 {
293 	npi_handle_t		handle;
294 	npi_status_t		rs = NPI_SUCCESS;
295 	nxge_status_t		status = NXGE_OK;
296 
297 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_init_txdma_channel_event_mask"));
299 
300 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
301 	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
302 	if (rs != NPI_SUCCESS) {
303 		status = NXGE_ERROR | rs;
304 	}
305 
306 	return (status);
307 }
308 
309 /*
310  * nxge_init_txdma_channel_cntl_stat
311  *
 *	Initialize a TDC's control and status (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to initialize.
 *	reg_data	The value to write to TX_CS.
317  *
318  * Notes:
319  *
320  * NPI/NXGE function calls:
321  *	npi_txdma_control_status()
322  *
323  * Registers accessed:
324  *	TX_CS		DMC+0x40028 Transmit Control And Status
325  *
326  * Context:
327  *	Any domain
328  */
329 nxge_status_t
330 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
331 	uint64_t reg_data)
332 {
333 	npi_handle_t		handle;
334 	npi_status_t		rs = NPI_SUCCESS;
335 	nxge_status_t		status = NXGE_OK;
336 
337 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_init_txdma_channel_cntl_stat"));
339 
340 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
341 	rs = npi_txdma_control_status(handle, OP_SET, channel,
342 	    (p_tx_cs_t)&reg_data);
343 
344 	if (rs != NPI_SUCCESS) {
345 		status = NXGE_ERROR | rs;
346 	}
347 
348 	return (status);
349 }
350 
351 /*
352  * nxge_enable_txdma_channel
353  *
354  *	Enable a TDC.
355  *
356  * Arguments:
357  * 	nxgep
358  * 	channel		The channel to enable.
 * 	tx_desc_p	The channel's transmit descriptor ring.
 * 	mbox_p		The channel's mailbox.
361  *
362  * Notes:
363  *
364  * NPI/NXGE function calls:
365  *	npi_txdma_ring_config()
366  *	npi_txdma_mbox_config()
367  *	npi_txdma_channel_init_enable()
368  *
369  * Registers accessed:
370  *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
371  *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
372  *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
373  *	TX_CS		DMC+0x40028 Transmit Control And Status
374  *
375  * Context:
376  *	Any domain
377  */
378 nxge_status_t
379 nxge_enable_txdma_channel(p_nxge_t nxgep,
380 	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
381 {
382 	npi_handle_t		handle;
383 	npi_status_t		rs = NPI_SUCCESS;
384 	nxge_status_t		status = NXGE_OK;
385 
386 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
387 
388 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
389 	/*
390 	 * Use configuration data composed at init time.
391 	 * Write to hardware the transmit ring configurations.
392 	 */
393 	rs = npi_txdma_ring_config(handle, OP_SET, channel,
394 	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
395 
396 	if (rs != NPI_SUCCESS) {
397 		return (NXGE_ERROR | rs);
398 	}
399 
400 	if (isLDOMguest(nxgep)) {
401 		/* Add interrupt handler for this channel. */
402 		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
403 			return (NXGE_ERROR);
404 	}
405 
406 	/* Write to hardware the mailbox */
407 	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
408 	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
409 
410 	if (rs != NPI_SUCCESS) {
411 		return (NXGE_ERROR | rs);
412 	}
413 
414 	/* Start the DMA engine. */
415 	rs = npi_txdma_channel_init_enable(handle, channel);
416 
417 	if (rs != NPI_SUCCESS) {
418 		return (NXGE_ERROR | rs);
419 	}
420 
421 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
422 
423 	return (status);
424 }
425 
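/*
 * nxge_fill_tx_hdr
 *
 *	Fill in the Neptune transmit packet header for <mp>: total
 *	transfer length or pad count, L3/L4 start offsets, and checksum
 *	enable bits, based on the Ethernet, IP and TCP/UDP headers found
 *	in the message.  For UDP with software checksumming, the
 *	computed checksum is written back into the packet instead.
 */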
426 void
427 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
428 		boolean_t l4_cksum, int pkt_len, uint8_t npads,
429 		p_tx_pkt_hdr_all_t pkthdrp,
430 		t_uscalar_t start_offset,
431 		t_uscalar_t stuff_offset)
432 {
433 	p_tx_pkt_header_t	hdrp;
434 	p_mblk_t 		nmp;
435 	uint64_t		tmp;
436 	size_t 			mblk_len;
437 	size_t 			iph_len;
438 	size_t 			hdrs_size;
439 	uint8_t			hdrs_buf[sizeof (struct ether_header) +
440 	    64 + sizeof (uint32_t)];
441 	uint8_t			*cursor;
442 	uint8_t 		*ip_buf;
443 	uint16_t		eth_type;
444 	uint8_t			ipproto;
445 	boolean_t		is_vlan = B_FALSE;
446 	size_t			eth_hdr_size;
447 
448 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
449 
450 	/*
451 	 * Caller should zero out the headers first.
452 	 */
453 	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
454 
455 	if (fill_len) {
456 		NXGE_DEBUG_MSG((NULL, TX_CTL,
457 		    "==> nxge_fill_tx_hdr: pkt_len %d "
458 		    "npads %d", pkt_len, npads));
459 		tmp = (uint64_t)pkt_len;
460 		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
461 		goto fill_tx_header_done;
462 	}
463 
464 	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);
465 
466 	/*
467 	 * mp is the original data packet (does not include the
468 	 * Neptune transmit header).
469 	 */
470 	nmp = mp;
471 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
472 	    "mp $%p b_rptr $%p len %d",
473 	    mp, nmp->b_rptr, MBLKL(nmp)));
474 	/* copy ether_header from mblk to hdrs_buf */
475 	cursor = &hdrs_buf[0];
476 	tmp = sizeof (struct ether_vlan_header);
477 	while ((nmp != NULL) && (tmp > 0)) {
478 		size_t buflen;
479 		mblk_len = MBLKL(nmp);
480 		buflen = min((size_t)tmp, mblk_len);
481 		bcopy(nmp->b_rptr, cursor, buflen);
482 		cursor += buflen;
483 		tmp -= buflen;
484 		nmp = nmp->b_cont;
485 	}
486 
487 	nmp = mp;
488 	mblk_len = MBLKL(nmp);
489 	ip_buf = NULL;
490 	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: (value 0x%llx) "
	    "ether type 0x%x", hdrp->value, eth_type));
493 
494 	if (eth_type < ETHERMTU) {
495 		tmp = 1ull;
496 		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
497 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
498 		    "value 0x%llx", hdrp->value));
499 		if (*(hdrs_buf + sizeof (struct ether_header))
500 		    == LLC_SNAP_SAP) {
501 			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
502 			    sizeof (struct ether_header) + 6)));
503 			NXGE_DEBUG_MSG((NULL, TX_CTL,
504 			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
505 			    eth_type));
506 		} else {
507 			goto fill_tx_header_done;
508 		}
509 	} else if (eth_type == VLAN_ETHERTYPE) {
510 		tmp = 1ull;
511 		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
512 
513 		eth_type = ntohs(((struct ether_vlan_header *)
514 		    hdrs_buf)->ether_type);
515 		is_vlan = B_TRUE;
516 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
517 		    "value 0x%llx", hdrp->value));
518 	}
519 
520 	if (!is_vlan) {
521 		eth_hdr_size = sizeof (struct ether_header);
522 	} else {
523 		eth_hdr_size = sizeof (struct ether_vlan_header);
524 	}
525 
526 	switch (eth_type) {
527 	case ETHERTYPE_IP:
528 		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
529 			ip_buf = nmp->b_rptr + eth_hdr_size;
530 			mblk_len -= eth_hdr_size;
531 			iph_len = ((*ip_buf) & 0x0f);
532 			if (mblk_len > (iph_len + sizeof (uint32_t))) {
533 				ip_buf = nmp->b_rptr;
534 				ip_buf += eth_hdr_size;
535 			} else {
536 				ip_buf = NULL;
537 			}
538 
539 		}
540 		if (ip_buf == NULL) {
541 			hdrs_size = 0;
542 			((p_ether_header_t)hdrs_buf)->ether_type = 0;
543 			while ((nmp) && (hdrs_size <
544 			    sizeof (hdrs_buf))) {
545 				mblk_len = (size_t)nmp->b_wptr -
546 				    (size_t)nmp->b_rptr;
547 				if (mblk_len >=
548 				    (sizeof (hdrs_buf) - hdrs_size))
549 					mblk_len = sizeof (hdrs_buf) -
550 					    hdrs_size;
551 				bcopy(nmp->b_rptr,
552 				    &hdrs_buf[hdrs_size], mblk_len);
553 				hdrs_size += mblk_len;
554 				nmp = nmp->b_cont;
555 			}
556 			ip_buf = hdrs_buf;
557 			ip_buf += eth_hdr_size;
558 			iph_len = ((*ip_buf) & 0x0f);
559 		}
560 
561 		ipproto = ip_buf[9];
562 
563 		tmp = (uint64_t)iph_len;
564 		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
565 		tmp = (uint64_t)(eth_hdr_size >> 1);
566 		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
567 
568 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
	    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x "
570 		    "tmp 0x%x",
571 		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
572 		    ipproto, tmp));
573 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
574 		    "value 0x%llx", hdrp->value));
575 
576 		break;
577 
578 	case ETHERTYPE_IPV6:
579 		hdrs_size = 0;
580 		((p_ether_header_t)hdrs_buf)->ether_type = 0;
581 		while ((nmp) && (hdrs_size <
582 		    sizeof (hdrs_buf))) {
583 			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
584 			if (mblk_len >=
585 			    (sizeof (hdrs_buf) - hdrs_size))
586 				mblk_len = sizeof (hdrs_buf) -
587 				    hdrs_size;
588 			bcopy(nmp->b_rptr,
589 			    &hdrs_buf[hdrs_size], mblk_len);
590 			hdrs_size += mblk_len;
591 			nmp = nmp->b_cont;
592 		}
593 		ip_buf = hdrs_buf;
594 		ip_buf += eth_hdr_size;
595 
596 		tmp = 1ull;
597 		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
598 
599 		tmp = (eth_hdr_size >> 1);
600 		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
601 
602 		/* byte 6 is the next header protocol */
603 		ipproto = ip_buf[6];
604 
605 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
606 		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
607 		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
608 		    ipproto));
609 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
610 		    "value 0x%llx", hdrp->value));
611 
612 		break;
613 
614 	default:
615 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
616 		goto fill_tx_header_done;
617 	}
618 
619 	switch (ipproto) {
620 	case IPPROTO_TCP:
621 		NXGE_DEBUG_MSG((NULL, TX_CTL,
622 		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
623 		if (l4_cksum) {
624 			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
625 			hdrp->value |=
626 			    (((uint64_t)(start_offset >> 1)) <<
627 			    TX_PKT_HEADER_L4START_SHIFT);
628 			hdrp->value |=
629 			    (((uint64_t)(stuff_offset >> 1)) <<
630 			    TX_PKT_HEADER_L4STUFF_SHIFT);
631 
632 			NXGE_DEBUG_MSG((NULL, TX_CTL,
633 			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
634 			    "value 0x%llx", hdrp->value));
635 		}
636 
637 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
638 		    "value 0x%llx", hdrp->value));
639 		break;
640 
641 	case IPPROTO_UDP:
642 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
643 		if (l4_cksum) {
644 			if (!nxge_cksum_offload) {
645 				uint16_t	*up;
646 				uint16_t	cksum;
647 				t_uscalar_t	stuff_len;
648 
649 				/*
650 				 * The checksum field has the
651 				 * partial checksum.
652 				 * IP_CSUM() macro calls ip_cksum() which
653 				 * can add in the partial checksum.
654 				 */
655 				cksum = IP_CSUM(mp, start_offset, 0);
656 				stuff_len = stuff_offset;
657 				nmp = mp;
658 				mblk_len = MBLKL(nmp);
659 				while ((nmp != NULL) &&
660 				    (mblk_len < stuff_len)) {
661 					stuff_len -= mblk_len;
662 					nmp = nmp->b_cont;
663 				}
664 				ASSERT(nmp);
665 				up = (uint16_t *)(nmp->b_rptr + stuff_len);
666 
667 				*up = cksum;
668 				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
669 				NXGE_DEBUG_MSG((NULL, TX_CTL,
670 				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
671 				    "use sw cksum "
672 				    "write to $%p cksum 0x%x content up 0x%x",
673 				    stuff_len,
674 				    up,
675 				    cksum,
676 				    *up));
677 			} else {
678 				/* Hardware will compute the full checksum */
679 				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
680 				hdrp->value |=
681 				    (((uint64_t)(start_offset >> 1)) <<
682 				    TX_PKT_HEADER_L4START_SHIFT);
683 				hdrp->value |=
684 				    (((uint64_t)(stuff_offset >> 1)) <<
685 				    TX_PKT_HEADER_L4STUFF_SHIFT);
686 
687 				NXGE_DEBUG_MSG((NULL, TX_CTL,
688 				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
689 				    " use partial checksum "
				    "cksum 0x%x "
691 				    "value 0x%llx",
692 				    stuff_offset,
693 				    IP_CSUM(mp, start_offset, 0),
694 				    hdrp->value));
695 			}
696 		}
697 
698 		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP "
700 		    "value 0x%llx", hdrp->value));
701 		break;
702 
703 	default:
704 		goto fill_tx_header_done;
705 	}
706 
707 fill_tx_header_done:
708 	NXGE_DEBUG_MSG((NULL, TX_CTL,
709 	    "==> nxge_fill_tx_hdr: pkt_len %d  "
710 	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));
711 
712 	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
713 }
714 
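/*
 * nxge_tx_pkt_header_reserve
 *
 *	Allocate a new message block, link the original message <mp>
 *	behind it, and reserve TX_PKT_HEADER_SIZE bytes at the front for
 *	the Neptune transmit packet header.  Returns the new head of the
 *	chain, or NULL if allocb() fails.
 */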
715 /*ARGSUSED*/
716 p_mblk_t
717 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
718 {
719 	p_mblk_t 		newmp = NULL;
720 
721 	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
722 		NXGE_DEBUG_MSG((NULL, TX_CTL,
723 		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
724 		return (NULL);
725 	}
726 
727 	NXGE_DEBUG_MSG((NULL, TX_CTL,
728 	    "==> nxge_tx_pkt_header_reserve: get new mp"));
729 	DB_TYPE(newmp) = M_DATA;
730 	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
731 	linkb(newmp, mp);
732 	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
733 
734 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
735 	    "b_rptr $%p b_wptr $%p",
736 	    newmp->b_rptr, newmp->b_wptr));
737 
738 	NXGE_DEBUG_MSG((NULL, TX_CTL,
739 	    "<== nxge_tx_pkt_header_reserve: use new mp"));
740 
741 	return (newmp);
742 }
743 
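/*
 * nxge_tx_pkt_nmblocks
 *
 *	Walk the message chain <mp> to estimate how many descriptors
 *	(gather pointers) the packet will need and to compute its total
 *	transfer length.  Blocks longer than TX_MAX_TRANSFER_LENGTH are
 *	split with dupb(), and the remainder of the chain is pulled up
 *	with msgpullup() if the gather-pointer limit would be exceeded.
 */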
744 int
745 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
746 {
747 	uint_t 			nmblks;
748 	ssize_t			len;
749 	uint_t 			pkt_len;
750 	p_mblk_t 		nmp, bmp, tmp;
751 	uint8_t 		*b_wptr;
752 
753 	NXGE_DEBUG_MSG((NULL, TX_CTL,
754 	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
755 	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
756 
757 	nmp = mp;
758 	bmp = mp;
759 	nmblks = 0;
760 	pkt_len = 0;
761 	*tot_xfer_len_p = 0;
762 
763 	while (nmp) {
764 		len = MBLKL(nmp);
765 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
766 		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
767 		    len, pkt_len, nmblks,
768 		    *tot_xfer_len_p));
769 
770 		if (len <= 0) {
771 			bmp = nmp;
772 			nmp = nmp->b_cont;
773 			NXGE_DEBUG_MSG((NULL, TX_CTL,
774 			    "==> nxge_tx_pkt_nmblocks: "
775 			    "len (0) pkt_len %d nmblks %d",
776 			    pkt_len, nmblks));
777 			continue;
778 		}
779 
780 		*tot_xfer_len_p += len;
781 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
782 		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
783 		    len, pkt_len, nmblks,
784 		    *tot_xfer_len_p));
785 
786 		if (len < nxge_bcopy_thresh) {
787 			NXGE_DEBUG_MSG((NULL, TX_CTL,
788 			    "==> nxge_tx_pkt_nmblocks: "
789 			    "len %d (< thresh) pkt_len %d nmblks %d",
790 			    len, pkt_len, nmblks));
791 			if (pkt_len == 0)
792 				nmblks++;
793 			pkt_len += len;
794 			if (pkt_len >= nxge_bcopy_thresh) {
795 				pkt_len = 0;
796 				len = 0;
797 				nmp = bmp;
798 			}
799 		} else {
800 			NXGE_DEBUG_MSG((NULL, TX_CTL,
801 			    "==> nxge_tx_pkt_nmblocks: "
802 			    "len %d (> thresh) pkt_len %d nmblks %d",
803 			    len, pkt_len, nmblks));
804 			pkt_len = 0;
805 			nmblks++;
806 			/*
807 			 * Hardware limits the transfer length to 4K.
808 			 * If len is more than 4K, we need to break
809 			 * it up to at most 2 more blocks.
810 			 */
811 			if (len > TX_MAX_TRANSFER_LENGTH) {
812 				uint32_t	nsegs;
813 
814 				nsegs = 1;
815 				NXGE_DEBUG_MSG((NULL, TX_CTL,
816 				    "==> nxge_tx_pkt_nmblocks: "
817 				    "len %d pkt_len %d nmblks %d nsegs %d",
818 				    len, pkt_len, nmblks, nsegs));
819 				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
820 					++nsegs;
821 				}
822 				do {
823 					b_wptr = nmp->b_rptr +
824 					    TX_MAX_TRANSFER_LENGTH;
825 					nmp->b_wptr = b_wptr;
826 					if ((tmp = dupb(nmp)) == NULL) {
827 						return (0);
828 					}
829 					tmp->b_rptr = b_wptr;
830 					tmp->b_wptr = nmp->b_wptr;
831 					tmp->b_cont = nmp->b_cont;
832 					nmp->b_cont = tmp;
833 					nmblks++;
834 					if (--nsegs) {
835 						nmp = tmp;
836 					}
837 				} while (nsegs);
838 				nmp = tmp;
839 			}
840 		}
841 
842 		/*
843 		 * Hardware limits the transmit gather pointers to 15.
844 		 */
845 		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
846 		    TX_MAX_GATHER_POINTERS) {
847 			NXGE_DEBUG_MSG((NULL, TX_CTL,
848 			    "==> nxge_tx_pkt_nmblocks: pull msg - "
849 			    "len %d pkt_len %d nmblks %d",
850 			    len, pkt_len, nmblks));
851 			/* Pull all message blocks from b_cont */
852 			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
853 				return (0);
854 			}
855 			freemsg(nmp->b_cont);
856 			nmp->b_cont = tmp;
857 			pkt_len = 0;
858 		}
859 		bmp = nmp;
860 		nmp = nmp->b_cont;
861 	}
862 
863 	NXGE_DEBUG_MSG((NULL, TX_CTL,
864 	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
865 	    "nmblks %d len %d tot_xfer_len %d",
866 	    mp->b_rptr, mp->b_wptr, nmblks,
867 	    MBLKL(mp), *tot_xfer_len_p));
868 
869 	return (nmblks);
870 }
871 
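/*
 * nxge_txdma_freemsg_list_add
 *
 *	Add <msgp> to the ring's list of transmit messages waiting to be
 *	freed by nxge_txdma_freemsg_task().
 */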
872 static void
873 nxge_txdma_freemsg_list_add(p_tx_ring_t tx_ring_p, p_tx_msg_t msgp)
874 {
875 	MUTEX_ENTER(&tx_ring_p->freelock);
876 	if (tx_ring_p->tx_free_list_p != NULL)
877 		msgp->nextp = tx_ring_p->tx_free_list_p;
878 	tx_ring_p->tx_free_list_p = msgp;
879 	MUTEX_EXIT(&tx_ring_p->freelock);
880 }
881 
882 /*
883  * void
884  * nxge_txdma_freemsg_task() -- walk the list of messages to be
885  *	freed and free the messages.
886  */
887 void
888 nxge_txdma_freemsg_task(p_tx_ring_t tx_ring_p)
889 {
890 	p_tx_msg_t	msgp, nextp;
891 
892 	if (tx_ring_p->tx_free_list_p != NULL) {
893 		MUTEX_ENTER(&tx_ring_p->freelock);
894 		msgp = tx_ring_p->tx_free_list_p;
895 		tx_ring_p->tx_free_list_p = (p_tx_msg_t)NULL;
896 		MUTEX_EXIT(&tx_ring_p->freelock);
897 
898 		while (msgp != NULL) {
899 			nextp = msgp->nextp;
900 			if (msgp->tx_message != NULL) {
901 				freemsg(msgp->tx_message);
902 				msgp->tx_message = NULL;
903 			}
904 			msgp->nextp = NULL;
905 			msgp = nextp;
906 		}
907 	}
908 }
909 
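/*
 * nxge_txdma_reclaim
 *
 *	Reclaim transmit descriptors that the hardware has completed, up
 *	to the hardware-maintained head pointer, unbinding their DMA
 *	resources and queueing their messages for
 *	nxge_txdma_freemsg_task().  Reclaiming is skipped while fewer
 *	than nxge_reclaim_pending descriptors are outstanding (unless
 *	nmblks is 0).  Returns B_TRUE if the ring can accept <nmblks>
 *	more descriptors, B_FALSE otherwise.  Called with the ring lock
 *	held.
 */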
910 boolean_t
911 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
912 {
913 	boolean_t 		status = B_TRUE;
914 	p_nxge_dma_common_t	tx_desc_dma_p;
915 	nxge_dma_common_t	desc_area;
916 	p_tx_desc_t 		tx_desc_ring_vp;
917 	p_tx_desc_t 		tx_desc_p;
918 	p_tx_desc_t 		tx_desc_pp;
919 	tx_desc_t 		r_tx_desc;
920 	p_tx_msg_t 		tx_msg_ring;
921 	p_tx_msg_t 		tx_msg_p;
922 	npi_handle_t		handle;
923 	tx_ring_hdl_t		tx_head;
924 	uint32_t 		pkt_len;
925 	uint_t			tx_rd_index;
926 	uint16_t		head_index, tail_index;
927 	uint8_t			tdc;
928 	boolean_t		head_wrap, tail_wrap;
929 	p_nxge_tx_ring_stats_t	tdc_stats;
930 	int			rc;
931 
932 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
933 
934 	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
935 	    (nmblks != 0));
936 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
937 	    "==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
938 	    tx_ring_p->descs_pending, nxge_reclaim_pending,
939 	    nmblks));
940 	if (!status) {
941 		tx_desc_dma_p = &tx_ring_p->tdc_desc;
942 		desc_area = tx_ring_p->tdc_desc;
943 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
944 		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
945 		tx_desc_ring_vp =
946 		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
947 		tx_rd_index = tx_ring_p->rd_index;
948 		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
949 		tx_msg_ring = tx_ring_p->tx_msg_ring;
950 		tx_msg_p = &tx_msg_ring[tx_rd_index];
951 		tdc = tx_ring_p->tdc;
952 		tdc_stats = tx_ring_p->tdc_stats;
953 		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
954 			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
955 		}
956 
957 		tail_index = tx_ring_p->wr_index;
958 		tail_wrap = tx_ring_p->wr_index_wrap;
959 
960 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
961 		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
962 		    "tail_index %d tail_wrap %d "
963 		    "tx_desc_p $%p ($%p) ",
964 		    tdc, tx_rd_index, tail_index, tail_wrap,
965 		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
966 		/*
967 		 * Read the hardware maintained transmit head
968 		 * and wrap around bit.
969 		 */
970 		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
971 		head_index =  tx_head.bits.ldw.head;
972 		head_wrap = tx_head.bits.ldw.wrap;
973 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
974 		    "==> nxge_txdma_reclaim: "
975 		    "tx_rd_index %d tail %d tail_wrap %d "
976 		    "head %d wrap %d",
977 		    tx_rd_index, tail_index, tail_wrap,
978 		    head_index, head_wrap));
979 
980 		if (head_index == tail_index) {
981 			if (TXDMA_RING_EMPTY(head_index, head_wrap,
982 			    tail_index, tail_wrap) &&
983 			    (head_index == tx_rd_index)) {
984 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
985 				    "==> nxge_txdma_reclaim: EMPTY"));
986 				return (B_TRUE);
987 			}
988 
989 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
990 			    "==> nxge_txdma_reclaim: Checking "
991 			    "if ring full"));
992 			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
993 			    tail_wrap)) {
994 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
995 				    "==> nxge_txdma_reclaim: full"));
996 				return (B_FALSE);
997 			}
998 		}
999 
1000 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1001 		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));
1002 
1003 		tx_desc_pp = &r_tx_desc;
1004 		while ((tx_rd_index != head_index) &&
1005 		    (tx_ring_p->descs_pending != 0)) {
1006 
1007 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1008 			    "==> nxge_txdma_reclaim: Checking if pending"));
1009 
1010 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1011 			    "==> nxge_txdma_reclaim: "
1012 			    "descs_pending %d ",
1013 			    tx_ring_p->descs_pending));
1014 
1015 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1016 			    "==> nxge_txdma_reclaim: "
1017 			    "(tx_rd_index %d head_index %d "
1018 			    "(tx_desc_p $%p)",
1019 			    tx_rd_index, head_index,
1020 			    tx_desc_p));
1021 
1022 			tx_desc_pp->value = tx_desc_p->value;
1023 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1024 			    "==> nxge_txdma_reclaim: "
1025 			    "(tx_rd_index %d head_index %d "
1026 			    "tx_desc_p $%p (desc value 0x%llx) ",
1027 			    tx_rd_index, head_index,
1028 			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
1029 
1030 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1031 			    "==> nxge_txdma_reclaim: dump desc:"));
1032 
1033 			pkt_len = tx_desc_pp->bits.hdw.tr_len;
1034 			tdc_stats->obytes += pkt_len;
1035 			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
1036 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1037 			    "==> nxge_txdma_reclaim: pkt_len %d "
1038 			    "tdc channel %d opackets %d",
1039 			    pkt_len,
1040 			    tdc,
1041 			    tdc_stats->opackets));
1042 
1043 			if (tx_msg_p->flags.dma_type == USE_DVMA) {
1044 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1045 				    "tx_desc_p = $%p "
1046 				    "tx_desc_pp = $%p "
1047 				    "index = %d",
1048 				    tx_desc_p,
1049 				    tx_desc_pp,
1050 				    tx_ring_p->rd_index));
1051 				(void) dvma_unload(tx_msg_p->dvma_handle,
1052 				    0, -1);
1053 				tx_msg_p->dvma_handle = NULL;
1054 				if (tx_ring_p->dvma_wr_index ==
1055 				    tx_ring_p->dvma_wrap_mask) {
1056 					tx_ring_p->dvma_wr_index = 0;
1057 				} else {
1058 					tx_ring_p->dvma_wr_index++;
1059 				}
1060 				tx_ring_p->dvma_pending--;
1061 			} else if (tx_msg_p->flags.dma_type ==
1062 			    USE_DMA) {
1063 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1064 				    "==> nxge_txdma_reclaim: "
1065 				    "USE DMA"));
1066 				if (rc = ddi_dma_unbind_handle
1067 				    (tx_msg_p->dma_handle)) {
1068 					cmn_err(CE_WARN, "!nxge_reclaim: "
1069 					    "ddi_dma_unbind_handle "
1070 					    "failed. status %d", rc);
1071 				}
1072 			}
1073 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1074 			    "==> nxge_txdma_reclaim: count packets"));
1075 
1076 			/*
1077 			 * count a chained packet only once.
1078 			 */
1079 			if (tx_msg_p->tx_message != NULL) {
1080 				nxge_txdma_freemsg_list_add(tx_ring_p,
1081 				    tx_msg_p);
1082 			}
1083 
1084 			tx_msg_p->flags.dma_type = USE_NONE;
1085 			tx_rd_index = tx_ring_p->rd_index;
1086 			tx_rd_index = (tx_rd_index + 1) &
1087 			    tx_ring_p->tx_wrap_mask;
1088 			tx_ring_p->rd_index = tx_rd_index;
1089 			tx_ring_p->descs_pending--;
1090 			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
1091 			tx_msg_p = &tx_msg_ring[tx_rd_index];
1092 		}
1093 
1094 		status = (nmblks <= (tx_ring_p->tx_ring_size -
1095 		    tx_ring_p->descs_pending -
1096 		    TX_FULL_MARK));
1097 		if (status) {
1098 			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
1099 		}
1100 	} else {
1101 		status = (nmblks <=
1102 		    (tx_ring_p->tx_ring_size -
1103 		    tx_ring_p->descs_pending -
1104 		    TX_FULL_MARK));
1105 	}
1106 
1107 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1108 	    "<== nxge_txdma_reclaim status = 0x%08x", status));
1109 
1110 	return (status);
1111 }
1112 
1113 /*
1114  * nxge_tx_intr
1115  *
1116  *	Process a TDC interrupt
1117  *
1118  * Arguments:
 * 	arg1	A logical device vector (LDV) data structure.
1120  * 	arg2	nxge_t *
1121  *
1122  * Notes:
1123  *
1124  * NPI/NXGE function calls:
1125  *	npi_txdma_control_status()
1126  *	npi_intr_ldg_mgmt_set()
1127  *
1128  *	nxge_tx_err_evnts()
1129  *	nxge_txdma_reclaim()
1130  *
1131  * Registers accessed:
1132  *	TX_CS		DMC+0x40028 Transmit Control And Status
1133  *	PIO_LDSV
1134  *
1135  * Context:
1136  *	Any domain
1137  */
1138 uint_t
1139 nxge_tx_intr(void *arg1, void *arg2)
1140 {
1141 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
1142 	p_nxge_t		nxgep = (p_nxge_t)arg2;
1143 	p_nxge_ldg_t		ldgp;
1144 	uint8_t			channel;
1145 	uint32_t		vindex;
1146 	npi_handle_t		handle;
1147 	tx_cs_t			cs;
1148 	p_tx_ring_t 		*tx_rings;
1149 	p_tx_ring_t 		tx_ring_p;
1150 	npi_status_t		rs = NPI_SUCCESS;
1151 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
1152 	nxge_status_t 		status = NXGE_OK;
1153 
1154 	if (ldvp == NULL) {
1155 		NXGE_DEBUG_MSG((NULL, INT_CTL,
1156 		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
1157 		    nxgep, ldvp));
1158 		return (DDI_INTR_UNCLAIMED);
1159 	}
1160 
1161 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1162 		nxgep = ldvp->nxgep;
1163 	}
1164 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1165 	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1166 	    nxgep, ldvp));
1167 
1168 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1169 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1170 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_tx_intr: interface not started or initialized"));
1172 		return (DDI_INTR_CLAIMED);
1173 	}
1174 
1175 	/*
1176 	 * This interrupt handler is for a specific
1177 	 * transmit dma channel.
1178 	 */
1179 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1180 	/* Get the control and status for this channel. */
1181 	channel = ldvp->channel;
1182 	ldgp = ldvp->ldgp;
1183 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1184 	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1185 	    "channel %d",
1186 	    nxgep, ldvp, channel));
1187 
1188 	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1189 	vindex = ldvp->vdma_index;
1190 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1191 	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1192 	    channel, vindex, rs));
1193 	if (!rs && cs.bits.ldw.mk) {
1194 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1195 		    "==> nxge_tx_intr:channel %d ring index %d "
1196 		    "status 0x%08x (mk bit set)",
1197 		    channel, vindex, rs));
1198 		tx_rings = nxgep->tx_rings->rings;
1199 		tx_ring_p = tx_rings[vindex];
1200 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1201 		    "==> nxge_tx_intr:channel %d ring index %d "
1202 		    "status 0x%08x (mk bit set, calling reclaim)",
1203 		    channel, vindex, rs));
1204 
1205 		MUTEX_ENTER(&tx_ring_p->lock);
1206 		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
1207 		MUTEX_EXIT(&tx_ring_p->lock);
1208 
1209 		nxge_txdma_freemsg_task(tx_ring_p);
1210 
1211 		mac_tx_update(nxgep->mach);
1212 	}
1213 
1214 	/*
1215 	 * Process other transmit control and status.
1216 	 * Check the ldv state.
1217 	 */
1218 	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1219 	/*
1220 	 * Rearm this logical group if this is a single device
1221 	 * group.
1222 	 */
1223 	if (ldgp->nldvs == 1) {
1224 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1225 		    "==> nxge_tx_intr: rearm"));
1226 		if (status == NXGE_OK) {
1227 			if (isLDOMguest(nxgep)) {
1228 				nxge_hio_ldgimgn(nxgep, ldgp);
1229 			} else {
1230 				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1231 				    B_TRUE, ldgp->ldg_timer);
1232 			}
1233 		}
1234 	}
1235 
1236 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1237 	serviced = DDI_INTR_CLAIMED;
1238 	return (serviced);
1239 }
1240 
1241 void
1242 nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
1243 {
1244 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
1245 
1246 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1247 
1248 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
1249 }
1250 
1251 void
1252 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
1253 {
1254 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
1255 
1256 	(void) nxge_txdma_stop(nxgep);
1257 
1258 	(void) nxge_fixup_txdma_rings(nxgep);
1259 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1260 	(void) nxge_tx_mac_enable(nxgep);
1261 	(void) nxge_txdma_hw_kick(nxgep);
1262 
1263 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
1264 }
1265 
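/*
 * nxge_txdma_channel_disable
 *
 *	Disable a TDC.  If the first attempt does not report stop-done,
 *	inject a NACK_PREF error and try the disable again.
 */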
1266 npi_status_t
1267 nxge_txdma_channel_disable(
1268 	nxge_t *nxge,
1269 	int channel)
1270 {
1271 	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxge);
1272 	npi_status_t	rs;
1273 	tdmc_intr_dbg_t	intr_dbg;
1274 
1275 	/*
1276 	 * Stop the dma channel and wait for the stop-done.
	 * If the stop-done bit is not set, then force
1278 	 * an error so TXC will stop.
1279 	 * All channels bound to this port need to be stopped
1280 	 * and reset after injecting an interrupt error.
1281 	 */
1282 	rs = npi_txdma_channel_disable(handle, channel);
1283 	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1284 	    "==> nxge_txdma_channel_disable(%d) "
1285 	    "rs 0x%x", channel, rs));
1286 	if (rs != NPI_SUCCESS) {
1287 		/* Inject any error */
1288 		intr_dbg.value = 0;
1289 		intr_dbg.bits.ldw.nack_pref = 1;
1290 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_channel_disable: "
		    "channel %d (stop failed 0x%x) "
		    "(inject err)", channel, rs));
1294 		(void) npi_txdma_inj_int_error_set(
1295 		    handle, channel, &intr_dbg);
1296 		rs = npi_txdma_channel_disable(handle, channel);
1297 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_channel_disable: "
		    "channel %d (stop again 0x%x) "
		    "(after inject err)",
		    channel, rs));
1302 	}
1303 
1304 	return (rs);
1305 }
1306 
1307 /*
1308  * nxge_txdma_hw_mode
1309  *
1310  *	Toggle all TDCs on (enable) or off (disable).
1311  *
1312  * Arguments:
1313  * 	nxgep
1314  * 	enable	Enable or disable a TDC.
1315  *
1316  * Notes:
1317  *
1318  * NPI/NXGE function calls:
1319  *	npi_txdma_channel_enable(TX_CS)
1320  *	npi_txdma_channel_disable(TX_CS)
1321  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1322  *
1323  * Registers accessed:
1324  *	TX_CS		DMC+0x40028 Transmit Control And Status
1325  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1326  *
1327  * Context:
1328  *	Any domain
1329  */
1330 nxge_status_t
1331 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1332 {
1333 	nxge_grp_set_t *set = &nxgep->tx_set;
1334 
1335 	npi_handle_t	handle;
1336 	nxge_status_t	status;
1337 	npi_status_t	rs;
1338 	int		tdc;
1339 
1340 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1341 	    "==> nxge_txdma_hw_mode: enable mode %d", enable));
1342 
1343 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1344 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1345 		    "<== nxge_txdma_mode: not initialized"));
1346 		return (NXGE_ERROR);
1347 	}
1348 
1349 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1350 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1351 		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1352 		return (NXGE_ERROR);
1353 	}
1354 
1355 	/* Enable or disable all of the TDCs owned by us. */
1356 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1357 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1358 		if ((1 << tdc) & set->owned.map) {
1359 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1360 			if (ring) {
1361 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1362 				    "==> nxge_txdma_hw_mode: channel %d", tdc));
1363 				if (enable) {
1364 					rs = npi_txdma_channel_enable
1365 					    (handle, tdc);
1366 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1367 					    "==> nxge_txdma_hw_mode: "
1368 					    "channel %d (enable) rs 0x%x",
1369 					    tdc, rs));
1370 				} else {
1371 					rs = nxge_txdma_channel_disable
1372 					    (nxgep, tdc);
1373 				}
1374 			}
1375 		}
1376 	}
1377 
1378 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1379 
1380 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1381 	    "<== nxge_txdma_hw_mode: status 0x%x", status));
1382 
1383 	return (status);
1384 }
1385 
1386 void
1387 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1388 {
1389 	npi_handle_t		handle;
1390 
1391 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1392 	    "==> nxge_txdma_enable_channel: channel %d", channel));
1393 
1394 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1395 	/* enable the transmit dma channels */
1396 	(void) npi_txdma_channel_enable(handle, channel);
1397 
1398 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1399 }
1400 
1401 void
1402 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1403 {
1404 	npi_handle_t		handle;
1405 
1406 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1407 	    "==> nxge_txdma_disable_channel: channel %d", channel));
1408 
1409 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1410 	/* stop the transmit dma channels */
1411 	(void) npi_txdma_channel_disable(handle, channel);
1412 
1413 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1414 }
1415 
1416 /*
1417  * nxge_txdma_stop_inj_err
1418  *
1419  *	Stop a TDC.  If at first we don't succeed, inject an error.
1420  *
1421  * Arguments:
1422  * 	nxgep
1423  * 	channel		The channel to stop.
1424  *
1425  * Notes:
1426  *
1427  * NPI/NXGE function calls:
1428  *	npi_txdma_channel_disable()
1429  *	npi_txdma_inj_int_error_set()
1430  * #if defined(NXGE_DEBUG)
1431  *	nxge_txdma_regs_dump_channels(nxgep);
1432  * #endif
1433  *
1434  * Registers accessed:
1435  *	TX_CS		DMC+0x40028 Transmit Control And Status
1436  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1437  *
1438  * Context:
1439  *	Any domain
1440  */
1441 int
1442 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1443 {
1444 	npi_handle_t		handle;
1445 	tdmc_intr_dbg_t		intr_dbg;
1446 	int			status;
1447 	npi_status_t		rs = NPI_SUCCESS;
1448 
1449 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1450 	/*
	 * Stop the dma channel and wait for the stop-done bit.
	 * If the stop-done bit is not set, then inject
	 * an error.
1454 	 */
1455 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1456 	rs = npi_txdma_channel_disable(handle, channel);
1457 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1458 	if (status == NXGE_OK) {
1459 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1460 		    "<== nxge_txdma_stop_inj_err (channel %d): "
1461 		    "stopped OK", channel));
1462 		return (status);
1463 	}
1464 
1465 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1466 	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1467 	    "injecting error", channel, rs));
1468 	/* Inject any error */
1469 	intr_dbg.value = 0;
1470 	intr_dbg.bits.ldw.nack_pref = 1;
1471 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1472 
1473 	/* Stop done bit will be set as a result of error injection */
1474 	rs = npi_txdma_channel_disable(handle, channel);
1475 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1476 	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1477 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1478 		    "<== nxge_txdma_stop_inj_err (channel %d): "
1479 		    "stopped OK ", channel));
1480 		return (status);
1481 	}
1482 
1483 #if	defined(NXGE_DEBUG)
1484 	nxge_txdma_regs_dump_channels(nxgep);
1485 #endif
1486 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1488 	    " (injected error but still not stopped)", channel, rs));
1489 
1490 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1491 	return (status);
1492 }
1493 
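/*
 * nxge_fixup_txdma_rings
 *
 *	Reset the software state of every TDC ring owned by this nxge
 *	instance via nxge_txdma_fixup_channel().
 */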
1494 /*ARGSUSED*/
1495 void
1496 nxge_fixup_txdma_rings(p_nxge_t nxgep)
1497 {
1498 	nxge_grp_set_t *set = &nxgep->tx_set;
1499 	int tdc;
1500 
1501 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1502 
1503 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1504 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1505 		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1506 		return;
1507 	}
1508 
1509 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1510 		if ((1 << tdc) & set->owned.map) {
1511 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1512 			if (ring) {
1513 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1514 				    "==> nxge_fixup_txdma_rings: channel %d",
1515 				    tdc));
1516 				nxge_txdma_fixup_channel(nxgep, ring, tdc);
1517 			}
1518 		}
1519 	}
1520 
1521 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1522 }
1523 
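/*
 * nxge_txdma_fix_channel
 *
 *	Look up the ring for <channel> and, if it matches, reset it via
 *	nxge_txdma_fixup_channel().
 */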
1524 /*ARGSUSED*/
1525 void
1526 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1527 {
1528 	p_tx_ring_t	ring_p;
1529 
1530 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1531 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1532 	if (ring_p == NULL) {
1533 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1534 		return;
1535 	}
1536 
1537 	if (ring_p->tdc != channel) {
1538 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1539 		    "<== nxge_txdma_fix_channel: channel not matched "
		    "ring tdc %d passed channel %d",
1541 		    ring_p->tdc, channel));
1542 		return;
1543 	}
1544 
1545 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1546 
1547 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1548 }
1549 
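/*
 * nxge_txdma_fixup_channel
 *
 *	Reclaim any outstanding descriptors on <ring_p> and reset the
 *	ring's software state (read/write indexes, head and kick values,
 *	pending count) to empty.
 */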
1550 /*ARGSUSED*/
1551 void
1552 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1553 {
1554 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1555 
1556 	if (ring_p == NULL) {
1557 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1558 		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1559 		return;
1560 	}
1561 
1562 	if (ring_p->tdc != channel) {
1563 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1564 		    "<== nxge_txdma_fixup_channel: channel not matched "
		    "ring tdc %d passed channel %d",
1566 		    ring_p->tdc, channel));
1567 		return;
1568 	}
1569 
1570 	MUTEX_ENTER(&ring_p->lock);
1571 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1572 	ring_p->rd_index = 0;
1573 	ring_p->wr_index = 0;
1574 	ring_p->ring_head.value = 0;
1575 	ring_p->ring_kick_tail.value = 0;
1576 	ring_p->descs_pending = 0;
1577 	MUTEX_EXIT(&ring_p->lock);
1578 	nxge_txdma_freemsg_task(ring_p);
1579 
1580 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1581 }
1582 
1583 /*ARGSUSED*/
1584 void
1585 nxge_txdma_hw_kick(p_nxge_t nxgep)
1586 {
1587 	nxge_grp_set_t *set = &nxgep->tx_set;
1588 	int tdc;
1589 
1590 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1591 
1592 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1593 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1594 		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1595 		return;
1596 	}
1597 
1598 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1599 		if ((1 << tdc) & set->owned.map) {
1600 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1601 			if (ring) {
1602 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1603 				    "==> nxge_txdma_hw_kick: channel %d", tdc));
1604 				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1605 			}
1606 		}
1607 	}
1608 
1609 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1610 }
1611 
1612 /*ARGSUSED*/
1613 void
1614 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1615 {
1616 	p_tx_ring_t	ring_p;
1617 
1618 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1619 
1620 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1621 	if (ring_p == NULL) {
1622 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_kick_channel: NULL ring pointer"));
1624 		return;
1625 	}
1626 
1627 	if (ring_p->tdc != channel) {
1628 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1629 		    "<== nxge_txdma_kick_channel: channel not matched "
		    "ring tdc %d passed channel %d",
1631 		    ring_p->tdc, channel));
1632 		return;
1633 	}
1634 
1635 	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1636 
1637 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1638 }
1639 
1640 /*ARGSUSED*/
1641 void
1642 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1643 {
1644 
1645 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1646 
1647 	if (ring_p == NULL) {
1648 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1649 		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1650 		return;
1651 	}
1652 
1653 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1654 }
1655 
1656 /*
1657  * nxge_check_tx_hang
1658  *
1659  *	Check the state of all TDCs belonging to nxgep.
1660  *
1661  * Arguments:
1662  * 	nxgep
1663  *
1664  * Notes:
1665  *	Called by nxge_hw.c:nxge_check_hw_state().
1666  *
1667  * NPI/NXGE function calls:
1668  *
1669  * Registers accessed:
1670  *
1671  * Context:
1672  *	Any domain
1673  */
1674 /*ARGSUSED*/
1675 void
1676 nxge_check_tx_hang(p_nxge_t nxgep)
1677 {
1678 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1679 
1680 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1681 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1682 		goto nxge_check_tx_hang_exit;
1683 	}
1684 
1685 	/*
	 * Needs inputs from hardware registers:
	 *	whether the head index has moved since the last timeout;
	 *	the packets-not-transmitted or stuffed counters.
1689 	 */
1690 	if (nxge_txdma_hung(nxgep)) {
1691 		nxge_fixup_hung_txdma_rings(nxgep);
1692 	}
1693 
1694 nxge_check_tx_hang_exit:
1695 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1696 }
1697 
1698 /*
1699  * nxge_txdma_hung
1700  *
 *	Determine whether any TDC owned by this nxge instance is hung.
 *
 * Arguments:
 * 	nxgep
1707  *
1708  * Notes:
1709  *	Called by nxge_check_tx_hang()
1710  *
1711  * NPI/NXGE function calls:
1712  *	nxge_txdma_channel_hung()
1713  *
1714  * Registers accessed:
1715  *
1716  * Context:
1717  *	Any domain
1718  */
1719 int
1720 nxge_txdma_hung(p_nxge_t nxgep)
1721 {
1722 	nxge_grp_set_t	*set = &nxgep->tx_set;
1723 	int		tdc;
1724 	boolean_t	shared;
1725 
1726 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1727 
1728 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1729 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1730 		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
1731 		return (B_FALSE);
1732 	}
1733 
1734 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1735 		/*
1736 		 * Grab the shared state of the TDC.
1737 		 */
1738 		if (isLDOMservice(nxgep)) {
1739 			nxge_hio_data_t *nhd =
1740 			    (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1741 
1742 			MUTEX_ENTER(&nhd->lock);
1743 			shared = nxgep->tdc_is_shared[tdc];
1744 			MUTEX_EXIT(&nhd->lock);
1745 		} else {
1746 			shared = B_FALSE;
1747 		}
1748 
1749 		/*
		 * Only check channels that we own and that are not shared.
1751 		 */
1752 		if (((1 << tdc) & set->owned.map) && !shared) {
1753 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1754 			if (ring) {
1755 				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1756 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
1757 					    "==> nxge_txdma_hung: TDC %d hung",
1758 					    tdc));
1759 					return (B_TRUE);
1760 				}
1761 			}
1762 		}
1763 	}
1764 
1765 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1766 
1767 	return (B_FALSE);
1768 }
1769 
1770 /*
1771  * nxge_txdma_channel_hung
1772  *
 *	Determine whether the given TDC appears to be hung.
 *
 * Arguments:
 * 	nxgep
 * 	ring		<channel>'s ring.
 * 	channel		The channel to check.
1779  *
1780  * Notes:
1781  *	Called by nxge_txdma.c:nxge_txdma_hung()
1782  *
1783  * NPI/NXGE function calls:
1784  *	npi_txdma_ring_head_get()
1785  *
1786  * Registers accessed:
1787  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
1788  *
1789  * Context:
1790  *	Any domain
1791  */
1792 int
1793 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1794 {
1795 	uint16_t		head_index, tail_index;
1796 	boolean_t		head_wrap, tail_wrap;
1797 	npi_handle_t		handle;
1798 	tx_ring_hdl_t		tx_head;
1799 	uint_t			tx_rd_index;
1800 
1801 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1802 
1803 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1804 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1805 	    "==> nxge_txdma_channel_hung: channel %d", channel));
1806 	MUTEX_ENTER(&tx_ring_p->lock);
1807 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1808 
1809 	tail_index = tx_ring_p->wr_index;
1810 	tail_wrap = tx_ring_p->wr_index_wrap;
1811 	tx_rd_index = tx_ring_p->rd_index;
1812 	MUTEX_EXIT(&tx_ring_p->lock);
1813 	nxge_txdma_freemsg_task(tx_ring_p);
1814 
1815 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1816 	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1817 	    "tail_index %d tail_wrap %d ",
1818 	    channel, tx_rd_index, tail_index, tail_wrap));
1819 	/*
1820 	 * Read the hardware maintained transmit head
1821 	 * and wrap around bit.
1822 	 */
1823 	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1824 	head_index =  tx_head.bits.ldw.head;
1825 	head_wrap = tx_head.bits.ldw.wrap;
1826 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1827 	    "==> nxge_txdma_channel_hung: "
1828 	    "tx_rd_index %d tail %d tail_wrap %d "
1829 	    "head %d wrap %d",
1830 	    tx_rd_index, tail_index, tail_wrap,
1831 	    head_index, head_wrap));
1832 
1833 	if (TXDMA_RING_EMPTY(head_index, head_wrap,
1834 	    tail_index, tail_wrap) &&
1835 	    (head_index == tx_rd_index)) {
1836 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1837 		    "==> nxge_txdma_channel_hung: EMPTY"));
1838 		return (B_FALSE);
1839 	}
1840 
1841 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1842 	    "==> nxge_txdma_channel_hung: Checking if ring full"));
1843 	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1844 	    tail_wrap)) {
1845 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1846 		    "==> nxge_txdma_channel_hung: full"));
1847 		return (B_TRUE);
1848 	}
1849 
1850 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1851 
1852 	return (B_FALSE);
1853 }
1854 
1855 /*
1856  * nxge_fixup_hung_txdma_rings
1857  *
 *	Reclaim and disable the TDCs owned by this nxge instance
 *	(called when a hung TDC has been detected).
 *
 * Arguments:
 * 	nxgep
1864  *
1865  * Notes:
1866  *	Called by nxge_check_tx_hang()
1867  *
1868  * NPI/NXGE function calls:
1869  *	npi_txdma_ring_head_get()
1870  *
1871  * Registers accessed:
1872  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
1873  *
1874  * Context:
1875  *	Any domain
1876  */
1877 /*ARGSUSED*/
1878 void
1879 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1880 {
1881 	nxge_grp_set_t *set = &nxgep->tx_set;
1882 	int tdc;
1883 
1884 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1885 
1886 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1887 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1888 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1889 		return;
1890 	}
1891 
1892 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1893 		if ((1 << tdc) & set->owned.map) {
1894 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1895 			if (ring) {
1896 				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1897 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1898 				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
1899 				    tdc));
1900 			}
1901 		}
1902 	}
1903 
1904 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1905 }
1906 
1907 /*
 * nxge_txdma_fix_hung_channel / nxge_txdma_fixup_hung_channel
1909  *
1910  *	'Fix' a hung TDC.
1911  *
1912  * Arguments:
1913  * 	nxgep
1914  * 	channel		The channel to fix.
1915  *
1916  * Notes:
1917  *	Called by nxge_fixup_hung_txdma_rings()
1918  *
1919  *	1. Reclaim the TDC.
1920  *	2. Disable the TDC.
1921  *
1922  * NPI/NXGE function calls:
1923  *	nxge_txdma_reclaim()
1924  *	npi_txdma_channel_disable(TX_CS)
1925  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1926  *
1927  * Registers accessed:
1928  *	TX_CS		DMC+0x40028 Transmit Control And Status
1929  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1930  *
1931  * Context:
1932  *	Any domain
1933  */
1934 /*ARGSUSED*/
1935 void
1936 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1937 {
1938 	p_tx_ring_t	ring_p;
1939 
1940 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1941 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1942 	if (ring_p == NULL) {
1943 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1944 		    "<== nxge_txdma_fix_hung_channel"));
1945 		return;
1946 	}
1947 
1948 	if (ring_p->tdc != channel) {
1949 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1950 		    "<== nxge_txdma_fix_hung_channel: channel not matched "
1951 		    "ring tdc %d passed channel %d",
1952 		    ring_p->tdc, channel));
1953 		return;
1954 	}
1955 
1956 	nxge_txdma_fixup_hung_channel(nxgep, ring_p, channel);
1957 
1958 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1959 }
1960 
1961 /*ARGSUSED*/
1962 void
1963 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1964 	uint16_t channel)
1965 {
1966 	npi_handle_t		handle;
1967 	tdmc_intr_dbg_t		intr_dbg;
1968 	int			status = NXGE_OK;
1969 
1970 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1971 
1972 	if (ring_p == NULL) {
1973 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1974 		    "<== nxge_txdma_fixup_hung_channel: NULL ring pointer"));
1975 		return;
1976 	}
1977 
1978 	if (ring_p->tdc != channel) {
1979 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1980 		    "<== nxge_txdma_fixup_hung_channel: channel "
1981 		    "not matched "
1982 		    "ring tdc %d passed channel %d",
1983 		    ring_p->tdc, channel));
1984 		return;
1985 	}
1986 
1987 	/* Reclaim descriptors */
1988 	MUTEX_ENTER(&ring_p->lock);
1989 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1990 	MUTEX_EXIT(&ring_p->lock);
1991 
1992 	nxge_txdma_freemsg_task(ring_p);
1993 
1994 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1995 	/*
1996 	 * Stop the DMA channel and wait for the stop-done
1997 	 * indication.  If the stop-done bit is not set,
1998 	 * force an error so that the stop-done bit gets set.
1999 	 */
2000 	status = npi_txdma_channel_disable(handle, channel);
2001 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
2002 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2003 		    "<== nxge_txdma_fixup_hung_channel: stopped OK "
2004 		    "ring tdc %d passed channel %d",
2005 		    ring_p->tdc, channel));
2006 		return;
2007 	}
2008 
2009 	/* Inject an error to force the stop-done bit to be set */
2010 	intr_dbg.value = 0;
2011 	intr_dbg.bits.ldw.nack_pref = 1;
2012 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
2013 
2014 	/* Stop done bit will be set as a result of error injection */
2015 	status = npi_txdma_channel_disable(handle, channel);
2016 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
2017 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2018 		    "<== nxge_txdma_fixup_hung_channel: stopped again "
2019 		    "ring tdc %d passed channel %d",
2020 		    ring_p->tdc, channel));
2021 		return;
2022 	}
2023 
2024 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
2025 	    "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
2026 	    "ring tdc %d passed channel %d",
2027 	    ring_p->tdc, channel));
2028 
2029 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
2030 }
2031 
2032 /*ARGSUSED*/
2033 void
2034 nxge_reclaim_rings(p_nxge_t nxgep)
2035 {
2036 	nxge_grp_set_t *set = &nxgep->tx_set;
2037 	int tdc;
2038 
2039 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
2040 
2041 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2042 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2043 		    "<== nxge_reclaim_rings: NULL ring pointer(s)"));
2044 		return;
2045 	}
2046 
2047 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2048 		if ((1 << tdc) & set->owned.map) {
2049 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2050 			if (ring) {
2051 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
2052 				    "==> nxge_reclaim_rings: TDC %d", tdc));
2053 				MUTEX_ENTER(&ring->lock);
2054 				(void) nxge_txdma_reclaim(nxgep, ring, tdc);
2055 				MUTEX_EXIT(&ring->lock);
2056 
2057 				nxge_txdma_freemsg_task(ring);
2058 			}
2059 		}
2060 	}
2061 
2062 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
2063 }
2064 
2065 void
2066 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2067 {
2068 	nxge_grp_set_t *set = &nxgep->tx_set;
2069 	npi_handle_t handle;
2070 	int tdc;
2071 
2072 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2073 
2074 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2075 
2076 	if (!isLDOMguest(nxgep)) {
2077 		(void) npi_txdma_dump_fzc_regs(handle);
2078 
2079 		/* Dump TXC registers. */
2080 		(void) npi_txc_dump_fzc_regs(handle);
2081 		(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2082 	}
2083 
2084 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2085 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2086 		    "<== nxge_txdma_regs_dump_channels: NULL ring pointer(s)"));
2087 		return;
2088 	}
2089 
2090 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2091 		if ((1 << tdc) & set->owned.map) {
2092 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2093 			if (ring) {
2094 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
2095 				    "==> nxge_txdma_regs_dump_channels: "
2096 				    "TDC %d", tdc));
2097 				(void) npi_txdma_dump_tdc_regs(handle, tdc);
2098 
2099 				/* Dump TXC registers, if able to. */
2100 				if (!isLDOMguest(nxgep)) {
2101 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
2102 					    "==> nxge_txdma_regs_dump_channels:"
2103 					    " FZC TDC %d", tdc));
2104 					(void) npi_txc_dump_tdc_fzc_regs
2105 					    (handle, tdc);
2106 				}
2107 				nxge_txdma_regs_dump(nxgep, tdc);
2108 			}
2109 		}
2110 	}
2111 
2112 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
2113 }
2114 
2115 void
2116 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2117 {
2118 	npi_handle_t		handle;
2119 	tx_ring_hdl_t 		hdl;
2120 	tx_ring_kick_t 		kick;
2121 	tx_cs_t 		cs;
2122 	txc_control_t		control;
2123 	uint32_t		bitmap = 0;
2124 	uint32_t		burst = 0;
2125 	uint32_t		bytes = 0;
2126 	dma_log_page_t		cfg;
2127 
2128 	printf("\n\tfunc # %d tdc %d ",
2129 	    nxgep->function_num, channel);
2130 	cfg.page_num = 0;
2131 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2132 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
2133 	printf("\n\tlog page func %d valid page 0 %d",
2134 	    cfg.func_num, cfg.valid);
2135 	cfg.page_num = 1;
2136 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
2137 	printf("\n\tlog page func %d valid page 1 %d",
2138 	    cfg.func_num, cfg.valid);
2139 
2140 	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
2141 	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2142 	printf("\n\thead value is 0x%0llx",
2143 	    (long long)hdl.value);
2144 	printf("\n\thead index %d", hdl.bits.ldw.head);
2145 	printf("\n\tkick value is 0x%0llx",
2146 	    (long long)kick.value);
2147 	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2148 
2149 	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2150 	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2151 	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2152 
2153 	(void) npi_txc_control(handle, OP_GET, &control);
2154 	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2155 	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2156 	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2157 
2158 	printf("\n\tTXC port control 0x%0llx",
2159 	    (long long)control.value);
2160 	printf("\n\tTXC port bitmap 0x%x", bitmap);
2161 	printf("\n\tTXC max burst %d", burst);
2162 	printf("\n\tTXC bytes xmt %d\n", bytes);
2163 
2164 	{
2165 		ipp_status_t status;
2166 
2167 		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2168 #if defined(__i386)
2169 		printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
2170 #else
2171 		printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
2172 #endif
2173 	}
2174 }
2175 
2176 /*
2177  * nxge_tdc_hvio_setup
2178  *
2179  *	Record the channel's HV buffer and control I/O addresses and sizes.
2180  *
2181  * Arguments:
2182  * 	nxgep
2183  * 	channel	The channel to map.
2184  *
2185  * Notes:
2186  *
2187  * NPI/NXGE function calls:
2188  *	na
2189  *
2190  * Context:
2191  *	Service domain?
2192  */
2193 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2194 static void
2195 nxge_tdc_hvio_setup(
2196 	nxge_t *nxgep, int channel)
2197 {
2198 	nxge_dma_common_t	*data;
2199 	nxge_dma_common_t	*control;
2200 	tx_ring_t 		*ring;
2201 
2202 	ring = nxgep->tx_rings->rings[channel];
2203 	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2204 
2205 	ring->hv_set = B_FALSE;
2206 
2207 	ring->hv_tx_buf_base_ioaddr_pp =
2208 	    (uint64_t)data->orig_ioaddr_pp;
2209 	ring->hv_tx_buf_ioaddr_size =
2210 	    (uint64_t)data->orig_alength;
2211 
2212 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2213 	    "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
2214 	    "orig vatopa base io $%p orig_len 0x%llx (%d)",
2215 	    ring->hv_tx_buf_base_ioaddr_pp,
2216 	    ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
2217 	    data->ioaddr_pp, data->orig_vatopa,
2218 	    data->orig_alength, data->orig_alength));
2219 
2220 	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2221 
2222 	ring->hv_tx_cntl_base_ioaddr_pp =
2223 	    (uint64_t)control->orig_ioaddr_pp;
2224 	ring->hv_tx_cntl_ioaddr_size =
2225 	    (uint64_t)control->orig_alength;
2226 
2227 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2228 	    "hv cntl base io $%p orig ioaddr_pp ($%p) "
2229 	    "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
2230 	    ring->hv_tx_cntl_base_ioaddr_pp,
2231 	    control->orig_ioaddr_pp, control->orig_vatopa,
2232 	    ring->hv_tx_cntl_ioaddr_size,
2233 	    control->orig_alength, control->orig_alength));
2234 }
2235 #endif
2236 
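/*
 * Hedged sketch of how the fields recorded by nxge_tdc_hvio_setup()
 * are expected to be consumed when the channel is brought up (the
 * actual consumer lives elsewhere in the driver; hv_logical_page_conf()
 * below is a hypothetical placeholder, not a real interface):
 *
 *	hv_logical_page_conf(channel, 0,
 *	    ring->hv_tx_buf_base_ioaddr_pp, ring->hv_tx_buf_ioaddr_size);
 *	hv_logical_page_conf(channel, 1,
 *	    ring->hv_tx_cntl_base_ioaddr_pp, ring->hv_tx_cntl_ioaddr_size);
 *	ring->hv_set = B_TRUE;
 *
 * i.e. one logical page for the transmit buffers and one for the
 * control (descriptor and mailbox) area, with hv_set recording that
 * the hypervisor mapping has been established.
 */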
2237 static nxge_status_t
2238 nxge_map_txdma(p_nxge_t nxgep, int channel)
2239 {
2240 	nxge_dma_common_t	**pData;
2241 	nxge_dma_common_t	**pControl;
2242 	tx_ring_t 		**pRing, *ring;
2243 	tx_mbox_t		**mailbox;
2244 	uint32_t		num_chunks;
2245 
2246 	nxge_status_t		status = NXGE_OK;
2247 
2248 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2249 
2250 	if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2251 		if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2252 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2253 			    "<== nxge_map_txdma: buf not allocated"));
2254 			return (NXGE_ERROR);
2255 		}
2256 	}
2257 
2258 	if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2259 		return (NXGE_ERROR);
2260 
2261 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2262 	pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2263 	pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2264 	pRing = &nxgep->tx_rings->rings[channel];
2265 	mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2266 
2267 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2268 	    "tx_rings $%p tx_desc_rings $%p",
2269 	    nxgep->tx_rings, nxgep->tx_rings->rings));
2270 
2271 	/*
2272 	 * Map descriptors from the buffer pools for <channel>.
2273 	 */
2274 
2275 	/*
2276 	 * Set up and prepare buffer blocks, descriptors
2277 	 * and mailbox.
2278 	 */
2279 	status = nxge_map_txdma_channel(nxgep, channel,
2280 	    pData, pRing, num_chunks, pControl, mailbox);
2281 	if (status != NXGE_OK) {
2282 		NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2283 		    "==> nxge_map_txdma($%p, %d): nxge_map_txdma_channel() "
2284 		    "returned 0x%x",
2285 		    nxgep, channel, status));
2286 		return (status);
2287 	}
2288 
2289 	ring = *pRing;
2290 
2291 	ring->index = (uint16_t)channel;
2292 	ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2293 
2294 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2295 	if (isLDOMguest(nxgep)) {
2296 		(void) nxge_tdc_lp_conf(nxgep, channel);
2297 	} else {
2298 		nxge_tdc_hvio_setup(nxgep, channel);
2299 	}
2300 #endif
2301 
2302 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2303 	    "(status 0x%x channel %d)", status, channel));
2304 
2305 	return (status);
2306 }
2307 
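/*
 * Summary of the per-channel mapping sequence implemented by
 * nxge_map_txdma() above (a reading aid, not additional logic):
 *
 *	1. Allocate the TX memory pools on first use
 *	   (nxge_alloc_tx_mem_pool()).
 *	2. Allocate this channel's transmit buffers (nxge_alloc_txb()).
 *	3. nxge_map_txdma_channel():
 *	   a. map the buffer chunks and build the tx_msg ring, then
 *	   b. set up the descriptor ring, shadow registers and mailbox.
 *	4. On sun4v, record or configure the hypervisor mappings
 *	   (nxge_tdc_hvio_setup() or nxge_tdc_lp_conf()).
 */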
2308 static nxge_status_t
2309 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2310 	p_nxge_dma_common_t *dma_buf_p,
2311 	p_tx_ring_t *tx_desc_p,
2312 	uint32_t num_chunks,
2313 	p_nxge_dma_common_t *dma_cntl_p,
2314 	p_tx_mbox_t *tx_mbox_p)
2315 {
2316 	int	status = NXGE_OK;
2317 
2318 	/*
2319 	 * Set up and prepare buffer blocks, descriptors
2320 	 * and mailbox.
2321 	 */
2322 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2323 	    "==> nxge_map_txdma_channel (channel %d)", channel));
2324 	/*
2325 	 * Transmit buffer blocks
2326 	 */
2327 	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2328 	    dma_buf_p, tx_desc_p, num_chunks);
2329 	if (status != NXGE_OK) {
2330 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2331 		    "==> nxge_map_txdma_channel (channel %d): "
2332 		    "map buffer failed 0x%x", channel, status));
2333 		goto nxge_map_txdma_channel_exit;
2334 	}
2335 
2336 	/*
2337 	 * Transmit block ring, and mailbox.
2338 	 */
2339 	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2340 	    tx_mbox_p);
2341 
2342 	goto nxge_map_txdma_channel_exit;
2343 
2344 nxge_map_txdma_channel_fail1:
2345 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2346 	    "==> nxge_map_txdma_channel: unmap buf"
2347 	    "(status 0x%x channel %d)",
2348 	    status, channel));
2349 	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2350 
2351 nxge_map_txdma_channel_exit:
2352 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2353 	    "<== nxge_map_txdma_channel: "
2354 	    "(status 0x%x channel %d)",
2355 	    status, channel));
2356 
2357 	return (status);
2358 }
2359 
2360 /*ARGSUSED*/
2361 static void
2362 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
2363 {
2364 	tx_ring_t *ring;
2365 	tx_mbox_t *mailbox;
2366 
2367 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2368 	    "==> nxge_unmap_txdma_channel (channel %d)", channel));
2369 	/*
2370 	 * unmap tx block ring, and mailbox.
2371 	 */
2372 	ring = nxgep->tx_rings->rings[channel];
2373 	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2374 
2375 	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
2376 
2377 	/* unmap buffer blocks */
2378 	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2379 
2380 	nxge_free_txb(nxgep, channel);
2381 
2382 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
2383 }
2384 
2385 /*
2386  * nxge_map_txdma_channel_cfg_ring
2387  *
2388  *	Map a TDC into our kernel space.
2389  *	Set up a TDC's descriptor ring and mailbox: initialize the
2390  *	shadow register values and allocate the mailbox structure.
2391  * Arguments:
2392  * 	nxgep
2393  * 	dma_channel	The channel to map.
2394  *	dma_cntl_p
2395  *	tx_ring_p	dma_channel's transmit ring
2396  *	tx_mbox_p	dma_channel's mailbox
2397  *
2398  * Notes:
2399  *
2400  * NPI/NXGE function calls:
2401  *	nxge_setup_dma_common()
2402  *
2403  * Registers accessed:
2404  *	none.
2405  *
2406  * Context:
2407  *	Any domain
2408  */
2409 /*ARGSUSED*/
2410 static void
2411 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
2412 	p_nxge_dma_common_t *dma_cntl_p,
2413 	p_tx_ring_t tx_ring_p,
2414 	p_tx_mbox_t *tx_mbox_p)
2415 {
2416 	p_tx_mbox_t 		mboxp;
2417 	p_nxge_dma_common_t 	cntl_dmap;
2418 	p_nxge_dma_common_t 	dmap;
2419 	p_tx_rng_cfig_t		tx_ring_cfig_p;
2420 	p_tx_ring_kick_t	tx_ring_kick_p;
2421 	p_tx_cs_t		tx_cs_p;
2422 	p_tx_dma_ent_msk_t	tx_evmask_p;
2423 	p_txdma_mbh_t		mboxh_p;
2424 	p_txdma_mbl_t		mboxl_p;
2425 	uint64_t		tx_desc_len;
2426 
2427 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2428 	    "==> nxge_map_txdma_channel_cfg_ring"));
2429 
2430 	cntl_dmap = *dma_cntl_p;
2431 
2432 	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
2433 	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
2434 	    sizeof (tx_desc_t));
2435 	/*
2436 	 * Zero out transmit ring descriptors.
2437 	 */
2438 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2439 	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
2440 	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
2441 	tx_cs_p = &(tx_ring_p->tx_cs);
2442 	tx_evmask_p = &(tx_ring_p->tx_evmask);
2443 	tx_ring_cfig_p->value = 0;
2444 	tx_ring_kick_p->value = 0;
2445 	tx_cs_p->value = 0;
2446 	tx_evmask_p->value = 0;
2447 
2448 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2449 	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
2450 	    dma_channel,
2451 	    dmap->dma_cookie.dmac_laddress));
2452 
2453 	tx_ring_cfig_p->value = 0;
2454 	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
2455 	tx_ring_cfig_p->value =
2456 	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
2457 	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
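	/*
	 * Worked example (ring size assumed): with 8-byte transmit
	 * descriptors, tx_ring_size >> 3 expresses the ring length in
	 * 64-byte units.  A 1024-descriptor ring therefore programs a
	 * LEN field of 1024 >> 3 = 128, OR'ed into the register along
	 * with the 64-byte-aligned descriptor base address.
	 */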
2458 
2459 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2460 	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
2461 	    dma_channel,
2462 	    tx_ring_cfig_p->value));
2463 
2464 	tx_cs_p->bits.ldw.rst = 1;
2465 
2466 	/* Map in mailbox */
2467 	mboxp = (p_tx_mbox_t)
2468 	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
2469 	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
2470 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
2471 	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
2472 	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
2473 	mboxh_p->value = mboxl_p->value = 0;
2474 
2475 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2476 	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2477 	    dmap->dma_cookie.dmac_laddress));
2478 
2479 	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
2480 	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
2481 
2482 	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
2483 	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
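	/*
	 * Sketch of the split above (the exact field widths come from
	 * the TXDMA_MBH_ and TXDMA_MBL_ definitions and are not
	 * restated here): the 64-bit mailbox DMA address is divided
	 * into a high fragment written to the MBH register and a low
	 * fragment written to the MBL register, with the low-order
	 * alignment bits dropped.  The hardware reassembles the two
	 * fragments to locate the mailbox when it posts status.
	 */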
2484 
2485 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2486 	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2487 	    dmap->dma_cookie.dmac_laddress));
2488 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2489 	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
2490 	    "mbox $%p",
2491 	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
2492 	tx_ring_p->page_valid.value = 0;
2493 	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
2494 	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
2495 	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
2496 	tx_ring_p->page_hdl.value = 0;
2497 
2498 	tx_ring_p->page_valid.bits.ldw.page0 = 1;
2499 	tx_ring_p->page_valid.bits.ldw.page1 = 1;
2500 
2501 	tx_ring_p->max_burst.value = 0;
2502 	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
2503 
2504 	*tx_mbox_p = mboxp;
2505 
2506 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2507 	    "<== nxge_map_txdma_channel_cfg_ring"));
2508 }
2509 
2510 /*ARGSUSED*/
2511 static void
2512 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
2513 	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2514 {
2515 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2516 	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
2517 	    tx_ring_p->tdc));
2518 
2519 	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
2520 
2521 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2522 	    "<== nxge_unmap_txdma_channel_cfg_ring"));
2523 }
2524 
2525 /*
2526  * nxge_map_txdma_channel_buf_ring
2527  *
2528  *	Map a TDC's transmit buffer chunks and build its tx_msg ring.
2529  * Arguments:
2530  * 	nxgep
2531  * 	channel		The channel to map.
2532  *	dma_buf_p
2533  *	tx_desc_p	channel's descriptor ring
2534  *	num_chunks
2535  *
2536  * Notes:
2537  *
2538  * NPI/NXGE function calls:
2539  *	nxge_setup_dma_common()
2540  *
2541  * Registers accessed:
2542  *	none.
2543  *
2544  * Context:
2545  *	Any domain
2546  */
2547 static nxge_status_t
2548 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2549 	p_nxge_dma_common_t *dma_buf_p,
2550 	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2551 {
2552 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
2553 	p_nxge_dma_common_t 	dmap;
2554 	nxge_os_dma_handle_t	tx_buf_dma_handle;
2555 	p_tx_ring_t 		tx_ring_p;
2556 	p_tx_msg_t 		tx_msg_ring;
2557 	nxge_status_t		status = NXGE_OK;
2558 	int			ddi_status = DDI_SUCCESS;
2559 	int			i, j, index;
2560 	uint32_t		size, bsize;
2561 	uint32_t 		nblocks, nmsgs;
2562 
2563 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2564 	    "==> nxge_map_txdma_channel_buf_ring"));
2565 
2566 	dma_bufp = tmp_bufp = *dma_buf_p;
2567 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2568 	    " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2569 	    "chunks bufp $%p",
2570 	    channel, num_chunks, dma_bufp));
2571 
2572 	nmsgs = 0;
2573 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2574 		nmsgs += tmp_bufp->nblocks;
2575 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2576 		    "==> nxge_map_txdma_channel_buf_ring: channel %d "
2577 		    "bufp $%p nblocks %d nmsgs %d",
2578 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2579 	}
2580 	if (!nmsgs) {
2581 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2582 		    "<== nxge_map_txdma_channel_buf_ring: channel %d "
2583 		    "no msg blocks",
2584 		    channel));
2585 		status = NXGE_ERROR;
2586 		goto nxge_map_txdma_channel_buf_ring_exit;
2587 	}
2588 
2589 	tx_ring_p = (p_tx_ring_t)
2590 	    KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2591 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2592 	    (void *)nxgep->interrupt_cookie);
2593 	MUTEX_INIT(&tx_ring_p->freelock, NULL, MUTEX_DRIVER,
2594 	    (void *)nxgep->interrupt_cookie);
2595 
2596 	(void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2597 	tx_ring_p->tx_ring_busy = B_FALSE;
2598 	tx_ring_p->nxgep = nxgep;
2599 	tx_ring_p->serial = nxge_serialize_create(nmsgs,
2600 	    nxge_serial_tx, tx_ring_p);
2601 	/*
2602 	 * Allocate transmit message rings and handles for packets
2603 	 * not to be copied to premapped buffers.
2604 	 */
2605 	size = nmsgs * sizeof (tx_msg_t);
2606 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2607 	for (i = 0; i < nmsgs; i++) {
2608 		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2609 		    DDI_DMA_DONTWAIT, 0,
2610 		    &tx_msg_ring[i].dma_handle);
2611 		if (ddi_status != DDI_SUCCESS) {
2612 			status |= NXGE_DDI_FAILED;
2613 			break;
2614 		}
2615 	}
2616 	if (i < nmsgs) {
2617 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2618 		    "Allocate handles failed."));
2619 		goto nxge_map_txdma_channel_buf_ring_fail1;
2620 	}
2621 
2622 	tx_ring_p->tdc = channel;
2623 	tx_ring_p->tx_msg_ring = tx_msg_ring;
2624 	tx_ring_p->tx_ring_size = nmsgs;
2625 	tx_ring_p->num_chunks = num_chunks;
2626 	if (!nxge_tx_intr_thres) {
2627 		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2628 	}
2629 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
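	/*
	 * Note: tx_wrap_mask is only a valid wrap mask when
	 * tx_ring_size is a power of two (e.g. a 1024-entry ring gives
	 * a mask of 0x3FF), and the default nxge_tx_intr_thres chosen
	 * above is one quarter of the ring (256 descriptors for that
	 * same example).
	 */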
2630 	tx_ring_p->rd_index = 0;
2631 	tx_ring_p->wr_index = 0;
2632 	tx_ring_p->ring_head.value = 0;
2633 	tx_ring_p->ring_kick_tail.value = 0;
2634 	tx_ring_p->descs_pending = 0;
2635 
2636 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2637 	    "==> nxge_map_txdma_channel_buf_ring: channel %d "
2638 	    "actual tx desc max %d nmsgs %d "
2639 	    "(config nxge_tx_ring_size %d)",
2640 	    channel, tx_ring_p->tx_ring_size, nmsgs,
2641 	    nxge_tx_ring_size));
2642 
2643 	/*
2644 	 * Map in buffers from the buffer pool.
2645 	 */
2646 	index = 0;
2647 	bsize = dma_bufp->block_size;
2648 
2649 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2650 	    "dma_bufp $%p tx_rng_p $%p "
2651 	    "tx_msg_rng_p $%p bsize %d",
2652 	    dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2653 
2654 	tx_buf_dma_handle = dma_bufp->dma_handle;
2655 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
2656 		bsize = dma_bufp->block_size;
2657 		nblocks = dma_bufp->nblocks;
2658 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2659 		    "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2660 		    "size %d dma_bufp $%p",
2661 		    i, sizeof (nxge_dma_common_t), dma_bufp));
2662 
2663 		for (j = 0; j < nblocks; j++) {
2664 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2665 			tx_msg_ring[index].nextp = NULL;
2666 			dmap = &tx_msg_ring[index++].buf_dma;
2667 #ifdef TX_MEM_DEBUG
2668 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2669 			    "==> nxge_map_txdma_channel_buf_ring: j %d "
2670 			    "dmap $%p", j, dmap));
2671 #endif
2672 			nxge_setup_dma_common(dmap, dma_bufp, 1,
2673 			    bsize);
2674 		}
2675 	}
2676 
2677 	if (i < num_chunks) {
2678 		status = NXGE_ERROR;
2679 		goto nxge_map_txdma_channel_buf_ring_fail1;
2680 	}
2681 
2682 	*tx_desc_p = tx_ring_p;
2683 
2684 	goto nxge_map_txdma_channel_buf_ring_exit;
2685 
2686 nxge_map_txdma_channel_buf_ring_fail1:
2687 	if (tx_ring_p->serial) {
2688 		nxge_serialize_destroy(tx_ring_p->serial);
2689 		tx_ring_p->serial = NULL;
2690 	}
2691 
2692 	/* Free any DMA handles that were successfully allocated. */
2693 	for (index = 0; index < nmsgs; index++) {
2694 		if (tx_msg_ring[index].dma_handle != NULL) {
2695 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2696 		}
2697 	}
2698 
2699 	MUTEX_DESTROY(&tx_ring_p->freelock);
2700 	MUTEX_DESTROY(&tx_ring_p->lock);
2701 	KMEM_FREE(tx_msg_ring, size);
2702 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2703 
2704 	status = NXGE_ERROR;
2705 
2706 nxge_map_txdma_channel_buf_ring_exit:
2707 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2708 	    "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2709 
2710 	return (status);
2711 }
2712 
2713 /*ARGSUSED*/
2714 static void
2715 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2716 {
2717 	p_tx_msg_t 		tx_msg_ring;
2718 	p_tx_msg_t 		tx_msg_p;
2719 	int			i;
2720 
2721 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2722 	    "==> nxge_unmap_txdma_channel_buf_ring"));
2723 	if (tx_ring_p == NULL) {
2724 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2725 		    "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2726 		return;
2727 	}
2728 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2729 	    "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2730 	    tx_ring_p->tdc));
2731 
2732 	tx_msg_ring = tx_ring_p->tx_msg_ring;
2733 
2734 	/*
2735 	 * Since the serialization thread, timer thread and
2736 	 * interrupt thread can all call the transmit reclaim,
2737 	 * the unmapping function needs to acquire the lock
2738 	 * to free those buffers which were transmitted
2739 	 * by the hardware already.
2740 	 */
2741 	MUTEX_ENTER(&tx_ring_p->lock);
2742 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
2743 	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2744 	    "channel %d",
2745 	    tx_ring_p->tdc));
2746 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2747 
2748 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2749 		tx_msg_p = &tx_msg_ring[i];
2750 		if (tx_msg_p->tx_message != NULL) {
2751 			freemsg(tx_msg_p->tx_message);
2752 			tx_msg_p->tx_message = NULL;
2753 		}
2754 	}
2755 
2756 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2757 		if (tx_msg_ring[i].dma_handle != NULL) {
2758 			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2759 		}
2760 		tx_msg_ring[i].dma_handle = NULL;
2761 	}
2762 
2763 	MUTEX_EXIT(&tx_ring_p->lock);
2764 
2765 	if (tx_ring_p->serial) {
2766 		nxge_serialize_destroy(tx_ring_p->serial);
2767 		tx_ring_p->serial = NULL;
2768 	}
2769 
2770 	MUTEX_DESTROY(&tx_ring_p->freelock);
2771 	MUTEX_DESTROY(&tx_ring_p->lock);
2772 	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2773 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2774 
2775 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2776 	    "<== nxge_unmap_txdma_channel_buf_ring"));
2777 }
2778 
2779 static nxge_status_t
2780 nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
2781 {
2782 	p_tx_rings_t 		tx_rings;
2783 	p_tx_ring_t 		*tx_desc_rings;
2784 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
2785 	p_tx_mbox_t		*tx_mbox_p;
2786 	nxge_status_t		status = NXGE_OK;
2787 
2788 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
2789 
2790 	tx_rings = nxgep->tx_rings;
2791 	if (tx_rings == NULL) {
2792 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2793 		    "<== nxge_txdma_hw_start: NULL ring pointer"));
2794 		return (NXGE_ERROR);
2795 	}
2796 	tx_desc_rings = tx_rings->rings;
2797 	if (tx_desc_rings == NULL) {
2798 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2799 		    "<== nxge_txdma_hw_start: NULL ring pointers"));
2800 		return (NXGE_ERROR);
2801 	}
2802 
2803 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2804 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2805 
2806 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
2807 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2808 
2809 	status = nxge_txdma_start_channel(nxgep, channel,
2810 	    (p_tx_ring_t)tx_desc_rings[channel],
2811 	    (p_tx_mbox_t)tx_mbox_p[channel]);
2812 	if (status != NXGE_OK) {
2813 		goto nxge_txdma_hw_start_fail1;
2814 	}
2815 
2816 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2817 	    "tx_rings $%p rings $%p",
2818 	    nxgep->tx_rings, nxgep->tx_rings->rings));
2819 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2820 	    "tx_rings $%p tx_desc_rings $%p",
2821 	    nxgep->tx_rings, tx_desc_rings));
2822 
2823 	goto nxge_txdma_hw_start_exit;
2824 
2825 nxge_txdma_hw_start_fail1:
2826 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2827 	    "==> nxge_txdma_hw_start: disable "
2828 	    "(status 0x%x channel %d)", status, channel));
2829 
2830 nxge_txdma_hw_start_exit:
2831 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2832 	    "==> nxge_txdma_hw_start: (status 0x%x)", status));
2833 
2834 	return (status);
2835 }
2836 
2837 /*
2838  * nxge_txdma_start_channel
2839  *
2840  *	Start a TDC.
2841  *
2842  * Arguments:
2843  * 	nxgep
2844  * 	channel		The channel to start.
2845  * 	tx_ring_p	channel's transmit descriptor ring.
2846  * 	tx_mbox_p	channel's mailbox.
2847  *
2848  * Notes:
2849  *
2850  * NPI/NXGE function calls:
2851  *	nxge_reset_txdma_channel()
2852  *	nxge_init_txdma_channel_event_mask()
2853  *	nxge_enable_txdma_channel()
2854  *
2855  * Registers accessed:
2856  *	none directly (see functions above).
2857  *
2858  * Context:
2859  *	Any domain
2860  */
2861 static nxge_status_t
2862 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2863     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2864 
2865 {
2866 	nxge_status_t		status = NXGE_OK;
2867 
2868 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2869 	    "==> nxge_txdma_start_channel (channel %d)", channel));
2870 	/*
2871 	 * TXDMA/TXC must be in stopped state.
2872 	 */
2873 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
2874 
2875 	/*
2876 	 * Reset TXDMA channel
2877 	 */
2878 	tx_ring_p->tx_cs.value = 0;
2879 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
2880 	status = nxge_reset_txdma_channel(nxgep, channel,
2881 	    tx_ring_p->tx_cs.value);
2882 	if (status != NXGE_OK) {
2883 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2884 		    "==> nxge_txdma_start_channel (channel %d)"
2885 		    " reset channel failed 0x%x", channel, status));
2886 		goto nxge_txdma_start_channel_exit;
2887 	}
2888 
2889 	/*
2890 	 * Initialize the TXDMA channel specific FZC control
2891 	 * configurations. These FZC registers are pertaining
2892 	 * to each TX channel (i.e. logical pages).
2893 	 */
2894 	if (!isLDOMguest(nxgep)) {
2895 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
2896 		    tx_ring_p, tx_mbox_p);
2897 		if (status != NXGE_OK) {
2898 			goto nxge_txdma_start_channel_exit;
2899 		}
2900 	}
2901 
2902 	/*
2903 	 * Initialize the event masks.
2904 	 */
2905 	tx_ring_p->tx_evmask.value = 0;
2906 	status = nxge_init_txdma_channel_event_mask(nxgep,
2907 	    channel, &tx_ring_p->tx_evmask);
2908 	if (status != NXGE_OK) {
2909 		goto nxge_txdma_start_channel_exit;
2910 	}
2911 
2912 	/*
2913 	 * Load TXDMA descriptors, buffers, mailbox,
2914 	 * initialise the DMA channels and
2915 	 * enable each DMA channel.
2916 	 */
2917 	status = nxge_enable_txdma_channel(nxgep, channel,
2918 	    tx_ring_p, tx_mbox_p);
2919 	if (status != NXGE_OK) {
2920 		goto nxge_txdma_start_channel_exit;
2921 	}
2922 
2923 nxge_txdma_start_channel_exit:
2924 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2925 
2926 	return (status);
2927 }
2928 
2929 /*
2930  * nxge_txdma_stop_channel
2931  *
2932  *	Stop a TDC.
2933  *
2934  * Arguments:
2935  * 	nxgep
2936  * 	channel		The channel to stop.
2937  * 	tx_ring_p	channel's transmit descriptor ring.
2938  * 	tx_mbox_p	channel's mailbox.
2939  *
2940  * Notes:
2941  *
2942  * NPI/NXGE function calls:
2943  *	nxge_txdma_stop_inj_err()
2944  *	nxge_reset_txdma_channel()
2945  *	nxge_init_txdma_channel_event_mask()
2946  *	nxge_init_txdma_channel_cntl_stat()
2947  *	nxge_disable_txdma_channel()
2948  *
2949  * Registers accessed:
2950  *	none directly (see functions above).
2951  *
2952  * Context:
2953  *	Any domain
2954  */
2955 /*ARGSUSED*/
2956 static nxge_status_t
2957 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2958 {
2959 	p_tx_ring_t tx_ring_p;
2960 	int status = NXGE_OK;
2961 
2962 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2963 	    "==> nxge_txdma_stop_channel: channel %d", channel));
2964 
2965 	/*
2966 	 * Stop (disable) the TXDMA channel and TXC.  If the stop bit
2967 	 * is set but the STOP_N_GO bit is not, the TXDMA reset state
2968 	 * will not be set when the channel is later reset.
2969 	 */
2970 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
2971 
2972 	tx_ring_p = nxgep->tx_rings->rings[channel];
2973 
2974 	/*
2975 	 * Reset TXDMA channel
2976 	 */
2977 	tx_ring_p->tx_cs.value = 0;
2978 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
2979 	status = nxge_reset_txdma_channel(nxgep, channel,
2980 	    tx_ring_p->tx_cs.value);
2981 	if (status != NXGE_OK) {
2982 		goto nxge_txdma_stop_channel_exit;
2983 	}
2984 
2985 #ifdef HARDWARE_REQUIRED
2986 	/* Set up the interrupt event masks. */
2987 	tx_ring_p->tx_evmask.value = 0;
2988 	status = nxge_init_txdma_channel_event_mask(nxgep,
2989 	    channel, &tx_ring_p->tx_evmask);
2990 	if (status != NXGE_OK) {
2991 		goto nxge_txdma_stop_channel_exit;
2992 	}
2993 
2994 	/* Initialize the DMA control and status register */
2995 	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2996 	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2997 	    tx_ring_p->tx_cs.value);
2998 	if (status != NXGE_OK) {
2999 		goto nxge_txdma_stop_channel_exit;
3000 	}
3001 
3002 	tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
3003 
3004 	/* Disable channel */
3005 	status = nxge_disable_txdma_channel(nxgep, channel,
3006 	    tx_ring_p, tx_mbox_p);
3007 	if (status != NXGE_OK) {
3008 		goto nxge_txdma_stop_channel_exit;
3009 	}
3010 
3011 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3012 	    "==> nxge_txdma_stop_channel: event done"));
3013 
3014 #endif
3015 
3016 nxge_txdma_stop_channel_exit:
3017 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
3018 	return (status);
3019 }
3020 
3021 /*
3022  * nxge_txdma_get_ring
3023  *
3024  *	Get the ring for a TDC.
3025  *
3026  * Arguments:
3027  * 	nxgep
3028  * 	channel
3029  *
3030  * Notes:
3031  *
3032  * NPI/NXGE function calls:
3033  *
3034  * Registers accessed:
3035  *
3036  * Context:
3037  *	Any domain
3038  */
3039 static p_tx_ring_t
3040 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
3041 {
3042 	nxge_grp_set_t *set = &nxgep->tx_set;
3043 	int tdc;
3044 
3045 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
3046 
3047 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3048 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3049 		    "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
3050 		goto return_null;
3051 	}
3052 
3053 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3054 		if ((1 << tdc) & set->owned.map) {
3055 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3056 			if (ring) {
3057 				if (channel == ring->tdc) {
3058 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
3059 					    "<== nxge_txdma_get_ring: "
3060 					    "tdc %d ring $%p", tdc, ring));
3061 					return (ring);
3062 				}
3063 			}
3064 		}
3065 	}
3066 
3067 return_null:
3068 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
3069 	    "ring not found"));
3070 
3071 	return (NULL);
3072 }
3073 
3074 /*
3075  * nxge_txdma_get_mbox
3076  *
3077  *	Get the mailbox for a TDC.
3078  *
3079  * Arguments:
3080  * 	nxgep
3081  * 	channel
3082  *
3083  * Notes:
3084  *
3085  * NPI/NXGE function calls:
3086  *
3087  * Registers accessed:
3088  *
3089  * Context:
3090  *	Any domain
3091  */
3092 static p_tx_mbox_t
3093 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3094 {
3095 	nxge_grp_set_t *set = &nxgep->tx_set;
3096 	int tdc;
3097 
3098 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3099 
3100 	if (nxgep->tx_mbox_areas_p == 0 ||
3101 	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3102 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3103 		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3104 		goto return_null;
3105 	}
3106 
3107 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3108 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3109 		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3110 		goto return_null;
3111 	}
3112 
3113 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3114 		if ((1 << tdc) & set->owned.map) {
3115 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3116 			if (ring) {
3117 				if (channel == ring->tdc) {
3118 					tx_mbox_t *mailbox = nxgep->
3119 					    tx_mbox_areas_p->
3120 					    txmbox_areas_p[tdc];
3121 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
3122 					    "<== nxge_txdma_get_mbox: tdc %d "
3123 					    "mailbox $%p", tdc, mailbox));
3124 					return (mailbox);
3125 				}
3126 			}
3127 		}
3128 	}
3129 
3130 return_null:
3131 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3132 	    "mailbox not found"));
3133 
3134 	return (NULL);
3135 }
3136 
3137 /*
3138  * nxge_tx_err_evnts
3139  *
3140  *	Process a TDC's error events and recover from fatal errors.
3141  *
3142  * Arguments:
3143  * 	nxgep
3144  * 	index	The index to the TDC ring.
3145  * 	ldvp	Used to get the channel number ONLY.
3146  * 	cs	A copy of the bits from TX_CS.
3147  *
3148  * Notes:
3149  *	Calling tree:
3150  *	 nxge_tx_intr()
3151  *
3152  * NPI/NXGE function calls:
3153  *	npi_txdma_ring_error_get()
3154  *	npi_txdma_inj_par_error_get()
3155  *	nxge_txdma_fatal_err_recover()
3156  *
3157  * Registers accessed:
3158  *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
3159  *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3160  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3161  *
3162  * Context:
3163  *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3164  */
3165 /*ARGSUSED*/
3166 static nxge_status_t
3167 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3168 {
3169 	npi_handle_t		handle;
3170 	npi_status_t		rs;
3171 	uint8_t			channel;
3172 	p_tx_ring_t 		*tx_rings;
3173 	p_tx_ring_t 		tx_ring_p;
3174 	p_nxge_tx_ring_stats_t	tdc_stats;
3175 	boolean_t		txchan_fatal = B_FALSE;
3176 	nxge_status_t		status = NXGE_OK;
3177 	tdmc_inj_par_err_t	par_err;
3178 	uint32_t		value;
3179 
3180 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3181 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3182 	channel = ldvp->channel;
3183 
3184 	tx_rings = nxgep->tx_rings->rings;
3185 	tx_ring_p = tx_rings[index];
3186 	tdc_stats = tx_ring_p->tdc_stats;
3187 	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3188 	    (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3189 	    (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3190 		if ((rs = npi_txdma_ring_error_get(handle, channel,
3191 		    &tdc_stats->errlog)) != NPI_SUCCESS)
3192 			return (NXGE_ERROR | rs);
3193 	}
3194 
3195 	if (cs.bits.ldw.mbox_err) {
3196 		tdc_stats->mbox_err++;
3197 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3198 		    NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3199 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3200 		    "==> nxge_tx_err_evnts(channel %d): "
3201 		    "fatal error: mailbox", channel));
3202 		txchan_fatal = B_TRUE;
3203 	}
3204 	if (cs.bits.ldw.pkt_size_err) {
3205 		tdc_stats->pkt_size_err++;
3206 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3207 		    NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3208 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3209 		    "==> nxge_tx_err_evnts(channel %d): "
3210 		    "fatal error: pkt_size_err", channel));
3211 		txchan_fatal = B_TRUE;
3212 	}
3213 	if (cs.bits.ldw.tx_ring_oflow) {
3214 		tdc_stats->tx_ring_oflow++;
3215 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3216 		    NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3217 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3218 		    "==> nxge_tx_err_evnts(channel %d): "
3219 		    "fatal error: tx_ring_oflow", channel));
3220 		txchan_fatal = B_TRUE;
3221 	}
3222 	if (cs.bits.ldw.pref_buf_par_err) {
3223 		tdc_stats->pre_buf_par_err++;
3224 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3225 		    NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3226 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3227 		    "==> nxge_tx_err_evnts(channel %d): "
3228 		    "fatal error: pre_buf_par_err", channel));
3229 		/* Clear error injection source for parity error */
3230 		(void) npi_txdma_inj_par_error_get(handle, &value);
3231 		par_err.value = value;
3232 		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3233 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3234 		txchan_fatal = B_TRUE;
3235 	}
3236 	if (cs.bits.ldw.nack_pref) {
3237 		tdc_stats->nack_pref++;
3238 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3239 		    NXGE_FM_EREPORT_TDMC_NACK_PREF);
3240 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3241 		    "==> nxge_tx_err_evnts(channel %d): "
3242 		    "fatal error: nack_pref", channel));
3243 		txchan_fatal = B_TRUE;
3244 	}
3245 	if (cs.bits.ldw.nack_pkt_rd) {
3246 		tdc_stats->nack_pkt_rd++;
3247 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3248 		    NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3249 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3250 		    "==> nxge_tx_err_evnts(channel %d): "
3251 		    "fatal error: nack_pkt_rd", channel));
3252 		txchan_fatal = B_TRUE;
3253 	}
3254 	if (cs.bits.ldw.conf_part_err) {
3255 		tdc_stats->conf_part_err++;
3256 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3257 		    NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3258 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3259 		    "==> nxge_tx_err_evnts(channel %d): "
3260 		    "fatal error: config_partition_err", channel));
3261 		txchan_fatal = B_TRUE;
3262 	}
3263 	if (cs.bits.ldw.pkt_prt_err) {
3264 		tdc_stats->pkt_part_err++;
3265 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3266 		    NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3267 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3268 		    "==> nxge_tx_err_evnts(channel %d): "
3269 		    "fatal error: pkt_prt_err", channel));
3270 		txchan_fatal = B_TRUE;
3271 	}
3272 
3273 	/* Clear error injection source in case this is an injected error */
3274 	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3275 
3276 	if (txchan_fatal) {
3277 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3278 		    " nxge_tx_err_evnts: "
3279 		    " fatal error on channel %d cs 0x%llx\n",
3280 		    channel, cs.value));
3281 		status = nxge_txdma_fatal_err_recover(nxgep, channel,
3282 		    tx_ring_p);
3283 		if (status == NXGE_OK) {
3284 			FM_SERVICE_RESTORED(nxgep);
3285 		}
3286 	}
3287 
3288 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3289 
3290 	return (status);
3291 }
3292 
3293 static nxge_status_t
3294 nxge_txdma_fatal_err_recover(
3295 	p_nxge_t nxgep,
3296 	uint16_t channel,
3297 	p_tx_ring_t tx_ring_p)
3298 {
3299 	npi_handle_t	handle;
3300 	npi_status_t	rs = NPI_SUCCESS;
3301 	p_tx_mbox_t	tx_mbox_p = NULL;
3302 	nxge_status_t	status = NXGE_OK;
3303 
3304 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3305 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3306 	    "Recovering from TxDMAChannel#%d error...", channel));
3307 
3308 	/*
3309 	 * Stop the DMA channel and wait for the stop-done
3310 	 * indication.  If the stop-done bit is not set,
3311 	 * force an error.
3312 	 */
3313 
3314 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3315 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3316 	MUTEX_ENTER(&tx_ring_p->lock);
3317 	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3318 	if (rs != NPI_SUCCESS) {
3319 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3320 		    "==> nxge_txdma_fatal_err_recover (channel %d): "
3321 		    "stop failed ", channel));
3322 		goto fail;
3323 	}
3324 
3325 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3326 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3327 
3328 	/*
3329 	 * Reset TXDMA channel
3330 	 */
3331 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3332 	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3333 	    NPI_SUCCESS) {
3334 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3335 		    "==> nxge_txdma_fatal_err_recover (channel %d)"
3336 		    " reset channel failed 0x%x", channel, rs));
3337 		goto fail;
3338 	}
3339 
3340 	/*
3341 	 * Reset the tail (kick) register to 0.
3342 	 * (Hardware will not reset it.  A Tx overflow fatal
3343 	 * error results if the tail is not set to 0 after reset.)
3344 	 */
3345 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3346 
3347 	/* Restart TXDMA channel */
3348 
3349 	if (!isLDOMguest(nxgep)) {
3350 		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3351 
3352 		/* XXX This is a problem in HIO! */
3353 		/*
3354 		 * Initialize the TXDMA channel specific FZC control
3355 		 * configurations. These FZC registers are pertaining
3356 		 * to each TX channel (i.e. logical pages).
3357 		 */
3358 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3359 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
3360 		    tx_ring_p, tx_mbox_p);
3361 		if (status != NXGE_OK)
3362 			goto fail;
3363 	}
3364 
3365 	/*
3366 	 * Initialize the event masks.
3367 	 */
3368 	tx_ring_p->tx_evmask.value = 0;
3369 	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3370 	    &tx_ring_p->tx_evmask);
3371 	if (status != NXGE_OK)
3372 		goto fail;
3373 
3374 	tx_ring_p->wr_index_wrap = B_FALSE;
3375 	tx_ring_p->wr_index = 0;
3376 	tx_ring_p->rd_index = 0;
3377 
3378 	/*
3379 	 * Load TXDMA descriptors, buffers, mailbox,
3380 	 * initialise the DMA channels and
3381 	 * enable each DMA channel.
3382 	 */
3383 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3384 	status = nxge_enable_txdma_channel(nxgep, channel,
3385 	    tx_ring_p, tx_mbox_p);
3386 	MUTEX_EXIT(&tx_ring_p->lock);
3387 	if (status != NXGE_OK)
3388 		goto fail;
3389 
3390 	nxge_txdma_freemsg_task(tx_ring_p);
3391 
3392 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3393 	    "Recovery Successful, TxDMAChannel#%d Restored",
3394 	    channel));
3395 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3396 
3397 	return (NXGE_OK);
3398 
3399 fail:
3400 	MUTEX_EXIT(&tx_ring_p->lock);
3401 
3402 	nxge_txdma_freemsg_task(tx_ring_p);
3403 
3404 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
3405 	    "nxge_txdma_fatal_err_recover (channel %d): "
3406 	    "failed to recover this txdma channel", channel));
3407 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3408 
3409 	return (status);
3410 }
3411 
3412 /*
3413  * nxge_tx_port_fatal_err_recover
3414  *
3415  *	Attempt to recover from a fatal port error.
3416  *
3417  * Arguments:
3418  * 	nxgep
3419  *
3420  * Notes:
3421  *	How would a guest do this?
3422  *
3423  * NPI/NXGE function calls:
3424  *
3425  * Registers accessed:
3426  *
3427  * Context:
3428  *	Service domain
3429  */
3430 nxge_status_t
3431 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3432 {
3433 	nxge_grp_set_t *set = &nxgep->tx_set;
3434 	nxge_channel_t tdc;
3435 
3436 	tx_ring_t	*ring;
3437 	tx_mbox_t	*mailbox;
3438 
3439 	npi_handle_t	handle;
3440 	nxge_status_t	status;
3441 	npi_status_t	rs;
3442 
3443 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3444 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3445 	    "Recovering from TxPort error..."));
3446 
3447 	if (isLDOMguest(nxgep)) {
3448 		return (NXGE_OK);
3449 	}
3450 
3451 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3452 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3453 		    "<== nxge_tx_port_fatal_err_recover: not initialized"));
3454 		return (NXGE_ERROR);
3455 	}
3456 
3457 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3458 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3459 		    "<== nxge_tx_port_fatal_err_recover: "
3460 		    "NULL ring pointer(s)"));
3461 		return (NXGE_ERROR);
3462 	}
3463 
3464 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3465 		if ((1 << tdc) & set->owned.map) {
3466 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3467 			if (ring)
3468 				MUTEX_ENTER(&ring->lock);
3469 		}
3470 	}
3471 
3472 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3473 
3474 	/*
3475 	 * Stop all the TDCs owned by us.
3476 	 * (The shared TDCs will have been stopped by their owners.)
3477 	 */
3478 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3479 		if ((1 << tdc) & set->owned.map) {
3480 			ring = nxgep->tx_rings->rings[tdc];
3481 			if (ring) {
3482 				rs = npi_txdma_channel_control
3483 				    (handle, TXDMA_STOP, tdc);
3484 				if (rs != NPI_SUCCESS) {
3485 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3486 					    "nxge_tx_port_fatal_err_recover "
3487 					    "(channel %d): stop failed ", tdc));
3488 					goto fail;
3489 				}
3490 			}
3491 		}
3492 	}
3493 
3494 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3495 
3496 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3497 		if ((1 << tdc) & set->owned.map) {
3498 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3499 			if (ring) {
3500 				/* The ring lock is already held (above). */
3501 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
3503 
3504 				nxge_txdma_freemsg_task(ring);
3505 			}
3506 		}
3507 	}
3508 
3509 	/*
3510 	 * Reset all the TDCs.
3511 	 */
3512 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3513 
3514 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3515 		if ((1 << tdc) & set->owned.map) {
3516 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3517 			if (ring) {
3518 				if ((rs = npi_txdma_channel_control
3519 				    (handle, TXDMA_RESET, tdc))
3520 				    != NPI_SUCCESS) {
3521 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3522 					    "nxge_tx_port_fatal_err_recover "
3523 					    "(channel %d) reset channel "
3524 					    "failed 0x%x", tdc, rs));
3525 					goto fail;
3526 				}
3527 			}
3528 			/*
3529 			 * Reset the tail (kick) register to 0.
3530 			 * (Hardware will not reset it.  A Tx overflow fatal
3531 			 * error results if the tail is not set to 0 after reset.)
3532 			 */
3533 			TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3534 		}
3535 	}
3536 
3537 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3538 
3539 	/* Restart all the TDCs */
3540 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3541 		if ((1 << tdc) & set->owned.map) {
3542 			ring = nxgep->tx_rings->rings[tdc];
3543 			if (ring) {
3544 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3545 				status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3546 				    ring, mailbox);
3547 				if (status != NXGE_OK)
3548 					goto fail;
3549 
3550 				/*
3551 				 * Initialize the event masks.
3552 				 */
3553 				ring->tx_evmask.value = 0;
3554 				status = nxge_init_txdma_channel_event_mask
3555 				    (nxgep, tdc, &ring->tx_evmask);
3556 				if (status != NXGE_OK)
3557 					goto fail;
3558 
3559 				ring->wr_index_wrap = B_FALSE;
3560 				ring->wr_index = 0;
3561 				ring->rd_index = 0;
3562 			}
3563 		}
3564 	}
3565 
3566 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3567 
3568 	/* Re-enable all the TDCs */
3569 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3570 		if ((1 << tdc) & set->owned.map) {
3571 			ring = nxgep->tx_rings->rings[tdc];
3572 			if (ring) {
3573 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3574 				status = nxge_enable_txdma_channel(nxgep, tdc,
3575 				    ring, mailbox);
3576 				if (status != NXGE_OK)
3577 					goto fail;
3578 			}
3579 		}
3580 	}
3581 
3582 	/*
3583 	 * Unlock all the TDCs.
3584 	 */
3585 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3586 		if ((1 << tdc) & set->owned.map) {
3587 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3588 			if (ring)
3589 				MUTEX_EXIT(&ring->lock);
3590 		}
3591 	}
3592 
3593 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
3594 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3595 
3596 	return (NXGE_OK);
3597 
3598 fail:
3599 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3600 		if ((1 << tdc) & set->owned.map) {
3601 			ring = nxgep->tx_rings->rings[tdc];
3602 			if (ring)
3603 				MUTEX_EXIT(&ring->lock);
3604 		}
3605 	}
3606 
3607 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3608 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3609 
3610 	return (status);
3611 }
3612 
3613 /*
3614  * nxge_txdma_inject_err
3615  *
3616  *	Inject an error into a TDC.
3617  *
3618  * Arguments:
3619  * 	nxgep
3620  * 	err_id	The error to inject.
3621  * 	chan	The channel to inject into.
3622  *
3623  * Notes:
3624  *	This is called from nxge_main.c:nxge_err_inject()
3625  *	Has this ioctl ever been used?
3626  *
3627  * NPI/NXGE function calls:
3628  *	npi_txdma_inj_par_error_get()
3629  *	npi_txdma_inj_par_error_set()
3630  *
3631  * Registers accessed:
3632  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3633  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
3635  *
3636  * Context:
3637  *	Service domain
3638  */
3639 void
3640 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3641 {
3642 	tdmc_intr_dbg_t		tdi;
3643 	tdmc_inj_par_err_t	par_err;
3644 	uint32_t		value;
3645 	npi_handle_t		handle;
3646 
3647 	switch (err_id) {
3648 
3649 	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3650 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
3651 		/* Clear error injection source for parity error */
3652 		(void) npi_txdma_inj_par_error_get(handle, &value);
3653 		par_err.value = value;
3654 		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3655 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3656 
3657 		/* Set the error injection bit for this channel. */
3658 		(void) npi_txdma_inj_par_error_get(handle, &value);
3659 		par_err.value = value;
3660 		par_err.bits.ldw.inject_parity_error |= (1 << chan);
3661 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3662 		    (unsigned long long)par_err.value);
3663 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3664 		break;
3665 
3666 	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3667 	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3668 	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3669 	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3670 	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3671 	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3672 	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3673 		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3674 		    chan, &tdi.value);
3675 		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3676 			tdi.bits.ldw.pref_buf_par_err = 1;
3677 		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3678 			tdi.bits.ldw.mbox_err = 1;
3679 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3680 			tdi.bits.ldw.nack_pref = 1;
3681 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3682 			tdi.bits.ldw.nack_pkt_rd = 1;
3683 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3684 			tdi.bits.ldw.pkt_size_err = 1;
3685 		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3686 			tdi.bits.ldw.tx_ring_oflow = 1;
3687 		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3688 			tdi.bits.ldw.conf_part_err = 1;
3689 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3690 			tdi.bits.ldw.pkt_part_err = 1;
3691 #if defined(__i386)
3692 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3693 		    tdi.value);
3694 #else
3695 		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3696 		    tdi.value);
3697 #endif
3698 		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3699 		    chan, tdi.value);
3700 
3701 		break;
3702 	}
3703 }
3704