xref: /titanic_41/usr/src/uts/common/io/nxge/nxge_txdma.c (revision d4660949aa62dd6a963f4913b7120b383cf473c4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/nxge/nxge_impl.h>
29 #include <sys/nxge/nxge_txdma.h>
30 #include <sys/nxge/nxge_hio.h>
31 #include <npi_tx_rd64.h>
32 #include <npi_tx_wr64.h>
33 #include <sys/llc1.h>
34 
35 uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
36 uint32_t	nxge_tx_minfree = 32;
37 uint32_t	nxge_tx_intr_thres = 0;
38 uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
39 uint32_t	nxge_tx_tiny_pack = 1;
40 uint32_t	nxge_tx_use_bcopy = 1;
41 
42 extern uint32_t 	nxge_tx_ring_size;
43 extern uint32_t 	nxge_bcopy_thresh;
44 extern uint32_t 	nxge_dvma_thresh;
45 extern uint32_t 	nxge_dma_stream_thresh;
46 extern dma_method_t 	nxge_force_dma;
47 
48 /* Device register access attributes for PIO.  */
49 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
50 /* Device descriptor access attributes for DMA.  */
51 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
52 /* Device buffer access attributes for DMA.  */
53 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
54 extern ddi_dma_attr_t nxge_desc_dma_attr;
55 extern ddi_dma_attr_t nxge_tx_dma_attr;
56 
57 extern int nxge_serial_tx(mblk_t *mp, void *arg);
58 
59 static nxge_status_t nxge_map_txdma(p_nxge_t, int);
60 
61 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
62 
63 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
64 	p_nxge_dma_common_t *, p_tx_ring_t *,
65 	uint32_t, p_nxge_dma_common_t *,
66 	p_tx_mbox_t *);
67 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
68 
69 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
70 	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
71 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
72 
73 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
74 	p_nxge_dma_common_t *, p_tx_ring_t,
75 	p_tx_mbox_t *);
76 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
77 	p_tx_ring_t, p_tx_mbox_t);
78 
79 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
80     p_tx_ring_t, p_tx_mbox_t);
81 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
82 
83 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
84 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
85 	p_nxge_ldv_t, tx_cs_t);
86 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
87 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
88 	uint16_t, p_tx_ring_t);
89 
90 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
91     p_tx_ring_t ring_p, uint16_t channel);
92 
93 nxge_status_t
94 nxge_init_txdma_channels(p_nxge_t nxgep)
95 {
96 	nxge_grp_set_t *set = &nxgep->tx_set;
97 	int i, count;
98 
99 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
100 
101 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
102 		if ((1 << i) & set->lg.map) {
103 			int tdc;
104 			nxge_grp_t *group = set->group[i];
105 			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
106 				if ((1 << tdc) & group->map) {
107 					if ((nxge_grp_dc_add(nxgep,
108 						(vr_handle_t)group,
109 						VP_BOUND_TX, tdc)))
110 						return (NXGE_ERROR);
111 				}
112 			}
113 		}
114 		if (++count == set->lg.count)
115 			break;
116 	}
117 
118 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
119 
120 	return (NXGE_OK);
121 }
122 
123 nxge_status_t
124 nxge_init_txdma_channel(
125 	p_nxge_t nxge,
126 	int channel)
127 {
128 	nxge_status_t status;
129 
130 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
131 
132 	status = nxge_map_txdma(nxge, channel);
133 	if (status != NXGE_OK) {
134 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
135 		    "<== nxge_init_txdma_channel: status 0x%x", status));
136 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
137 		return (status);
138 	}
139 
140 	status = nxge_txdma_hw_start(nxge, channel);
141 	if (status != NXGE_OK) {
142 		(void) nxge_unmap_txdma_channel(nxge, channel);
143 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
144 		return (status);
145 	}
146 
147 	if (!nxge->statsp->tdc_ksp[channel])
148 		nxge_setup_tdc_kstats(nxge, channel);
149 
150 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
151 
152 	return (status);
153 }
154 
155 void
156 nxge_uninit_txdma_channels(p_nxge_t nxgep)
157 {
158 	nxge_grp_set_t *set = &nxgep->tx_set;
159 	int tdc;
160 
161 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
162 
163 	if (set->owned.map == 0) {
164 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
165 		    "nxge_uninit_txdma_channels: no channels"));
166 		return;
167 	}
168 
169 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
170 		if ((1 << tdc) & set->owned.map) {
171 			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
172 		}
173 	}
174 
175 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
176 }
177 
178 void
179 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
180 {
181 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
182 
183 	if (nxgep->statsp->tdc_ksp[channel]) {
184 		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
185 		nxgep->statsp->tdc_ksp[channel] = 0;
186 	}
187 
188 	(void) nxge_txdma_stop_channel(nxgep, channel);
189 	nxge_unmap_txdma_channel(nxgep, channel);
190 
191 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
192 		"<== nxge_uninit_txdma_channel"));
193 }
194 
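/*
 * nxge_setup_dma_common
 *
 *	Carve a sub-area out of a larger, pre-allocated DMA area.
 *
 *	*dest_p is initialized from *src_p and then sized to describe
 *	<entries> blocks of <size> bytes each.  *src_p is advanced
 *	(kernel address, length, and DMA cookie) past the carved-out
 *	region so that a subsequent caller continues where this one
 *	left off.
 *
 * Context:
 *	Any domain
 */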
195 void
196 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
197 	uint32_t entries, uint32_t size)
198 {
199 	size_t		tsize;
200 	*dest_p = *src_p;
201 	tsize = size * entries;
202 	dest_p->alength = tsize;
203 	dest_p->nblocks = entries;
204 	dest_p->block_size = size;
205 	dest_p->offset += tsize;
206 
207 	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
208 	src_p->alength -= tsize;
209 	src_p->dma_cookie.dmac_laddress += tsize;
210 	src_p->dma_cookie.dmac_size -= tsize;
211 }
212 
213 /*
214  * nxge_reset_txdma_channel
215  *
216  *	Reset a TDC.
217  *
218  * Arguments:
219  * 	nxgep
220  * 	channel		The channel to reset.
221  * 	reg_data	The current TX_CS.
222  *
223  * Notes:
224  *
225  * NPI/NXGE function calls:
226  *	npi_txdma_channel_reset()
227  *	npi_txdma_channel_control()
228  *
229  * Registers accessed:
230  *	TX_CS		DMC+0x40028 Transmit Control And Status
231  *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
232  *
233  * Context:
234  *	Any domain
235  */
236 nxge_status_t
237 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
238 {
239 	npi_status_t		rs = NPI_SUCCESS;
240 	nxge_status_t		status = NXGE_OK;
241 	npi_handle_t		handle;
242 
243 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
244 
245 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
246 	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
247 		rs = npi_txdma_channel_reset(handle, channel);
248 	} else {
249 		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
250 				channel);
251 	}
252 
253 	if (rs != NPI_SUCCESS) {
254 		status = NXGE_ERROR | rs;
255 	}
256 
257 	/*
258 	 * Reset the tail (kick) register to 0.
259 	 * (Hardware will not reset it; a fatal Tx overflow
260 	 * error results if the tail is not 0 after a reset.)
261 	 */
262 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
263 
264 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
265 	return (status);
266 }
267 
268 /*
269  * nxge_init_txdma_channel_event_mask
270  *
271  *	Enable interrupts for a set of events.
272  *
273  * Arguments:
274  * 	nxgep
275  * 	channel	The channel whose event mask to set.
276  * 	mask_p	The events to enable.
277  *
278  * Notes:
279  *
280  * NPI/NXGE function calls:
281  *	npi_txdma_event_mask()
282  *
283  * Registers accessed:
284  *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
285  *
286  * Context:
287  *	Any domain
288  */
289 nxge_status_t
290 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
291 		p_tx_dma_ent_msk_t mask_p)
292 {
293 	npi_handle_t		handle;
294 	npi_status_t		rs = NPI_SUCCESS;
295 	nxge_status_t		status = NXGE_OK;
296 
297 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
298 		"<== nxge_init_txdma_channel_event_mask"));
299 
300 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
301 	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
302 	if (rs != NPI_SUCCESS) {
303 		status = NXGE_ERROR | rs;
304 	}
305 
306 	return (status);
307 }
308 
309 /*
310  * nxge_init_txdma_channel_cntl_stat
311  *
312  *	Write the given value to a TDC's transmit control and status (TX_CS) register.
313  *
314  * Arguments:
315  * 	nxgep
316  * 	channel		The channel to initialize.
 * 	reg_data	The value to write to TX_CS.
317  *
318  * Notes:
319  *
320  * NPI/NXGE function calls:
321  *	npi_txdma_control_status()
322  *
323  * Registers accessed:
324  *	TX_CS		DMC+0x40028 Transmit Control And Status
325  *
326  * Context:
327  *	Any domain
328  */
329 nxge_status_t
330 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
331 	uint64_t reg_data)
332 {
333 	npi_handle_t		handle;
334 	npi_status_t		rs = NPI_SUCCESS;
335 	nxge_status_t		status = NXGE_OK;
336 
337 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
338 		"<== nxge_init_txdma_channel_cntl_stat"));
339 
340 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
341 	rs = npi_txdma_control_status(handle, OP_SET, channel,
342 			(p_tx_cs_t)&reg_data);
343 
344 	if (rs != NPI_SUCCESS) {
345 		status = NXGE_ERROR | rs;
346 	}
347 
348 	return (status);
349 }
350 
351 /*
352  * nxge_enable_txdma_channel
353  *
354  *	Enable a TDC.
355  *
356  * Arguments:
357  * 	nxgep
358  * 	channel		The channel to enable.
359  * 	tx_desc_p	The channel's transmit descriptor ring.
360  * 	mbox_p		The channel's mailbox.
361  *
362  * Notes:
363  *
364  * NPI/NXGE function calls:
365  *	npi_txdma_ring_config()
366  *	npi_txdma_mbox_config()
367  *	npi_txdma_channel_init_enable()
368  *
369  * Registers accessed:
370  *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
371  *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
372  *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
373  *	TX_CS		DMC+0x40028 Transmit Control And Status
374  *
375  * Context:
376  *	Any domain
377  */
378 nxge_status_t
379 nxge_enable_txdma_channel(p_nxge_t nxgep,
380 	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
381 {
382 	npi_handle_t		handle;
383 	npi_status_t		rs = NPI_SUCCESS;
384 	nxge_status_t		status = NXGE_OK;
385 
386 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
387 
388 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
389 	/*
390 	 * Use configuration data composed at init time.
391 	 * Write to hardware the transmit ring configurations.
392 	 */
393 	rs = npi_txdma_ring_config(handle, OP_SET, channel,
394 	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
395 
396 	if (rs != NPI_SUCCESS) {
397 		return (NXGE_ERROR | rs);
398 	}
399 
400 	if (isLDOMguest(nxgep)) {
401 		/* Add interrupt handler for this channel. */
402 		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
403 			return (NXGE_ERROR);
404 	}
405 
406 	/* Write to hardware the mailbox */
407 	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
408 		(uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
409 
410 	if (rs != NPI_SUCCESS) {
411 		return (NXGE_ERROR | rs);
412 	}
413 
414 	/* Start the DMA engine. */
415 	rs = npi_txdma_channel_init_enable(handle, channel);
416 
417 	if (rs != NPI_SUCCESS) {
418 		return (NXGE_ERROR | rs);
419 	}
420 
421 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
422 
423 	return (status);
424 }
425 
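/*
 * nxge_fill_tx_hdr
 *
 *	Fill in the Neptune internal transmit packet header.
 *
 * Arguments:
 * 	mp		The frame (not including the transmit header).
 * 	fill_len	If B_TRUE, only the total transfer length is set.
 * 	l4_cksum	Request hardware L4 checksumming.
 * 	pkt_len		Total transfer length of the packet.
 * 	npads		Number of pad bytes.
 * 	pkthdrp		The transmit packet header to fill in.
 *
 * Notes:
 *	The L2/L3 headers may span several mblks, so they are first
 *	assembled into a local buffer (hdrs_buf) before being parsed.
 *	The caller is expected to have zeroed out the header.
 *
 * Context:
 *	Any domain
 */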
426 void
427 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
428 		boolean_t l4_cksum, int pkt_len, uint8_t npads,
429 		p_tx_pkt_hdr_all_t pkthdrp)
430 {
431 	p_tx_pkt_header_t	hdrp;
432 	p_mblk_t 		nmp;
433 	uint64_t		tmp;
434 	size_t 			mblk_len;
435 	size_t 			iph_len;
436 	size_t 			hdrs_size;
437 	uint8_t			hdrs_buf[sizeof (struct ether_header) +
438 					64 + sizeof (uint32_t)];
439 	uint8_t			*cursor;
440 	uint8_t 		*ip_buf;
441 	uint16_t		eth_type;
442 	uint8_t			ipproto;
443 	boolean_t		is_vlan = B_FALSE;
444 	size_t			eth_hdr_size;
445 
446 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
447 
448 	/*
449 	 * Caller should zero out the headers first.
450 	 */
451 	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
452 
453 	if (fill_len) {
454 		NXGE_DEBUG_MSG((NULL, TX_CTL,
455 			"==> nxge_fill_tx_hdr: pkt_len %d "
456 			"npads %d", pkt_len, npads));
457 		tmp = (uint64_t)pkt_len;
458 		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
459 		goto fill_tx_header_done;
460 	}
461 
462 	tmp = (uint64_t)npads;
463 	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);
464 
465 	/*
466 	 * mp is the original data packet (does not include the
467 	 * Neptune transmit header).
468 	 */
469 	nmp = mp;
470 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
471 		"mp $%p b_rptr $%p len %d",
472 		mp, nmp->b_rptr, MBLKL(nmp)));
473 	/* copy ether_header from mblk to hdrs_buf */
474 	cursor = &hdrs_buf[0];
475 	tmp = sizeof (struct ether_vlan_header);
476 	while ((nmp != NULL) && (tmp > 0)) {
477 		size_t buflen;
478 		mblk_len = MBLKL(nmp);
479 		buflen = min((size_t)tmp, mblk_len);
480 		bcopy(nmp->b_rptr, cursor, buflen);
481 		cursor += buflen;
482 		tmp -= buflen;
483 		nmp = nmp->b_cont;
484 	}
485 
486 	nmp = mp;
487 	mblk_len = MBLKL(nmp);
488 	ip_buf = NULL;
489 	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
490 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: (value 0x%llx) "
491 		"ether type 0x%x", hdrp->value, eth_type));
492 
493 	if (eth_type < ETHERMTU) {
494 		tmp = 1ull;
495 		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
496 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
497 			"value 0x%llx", hdrp->value));
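		/*
		 * An EtherType below the MTU is an 802.3 length field.
		 * If the LLC DSAP is SNAP (0xAA), the real EtherType
		 * follows the 3-byte LLC header and 3-byte OUI, i.e.
		 * 6 bytes past the Ethernet header.
		 */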
498 		if (*(hdrs_buf + sizeof (struct ether_header))
499 				== LLC_SNAP_SAP) {
500 			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
501 					sizeof (struct ether_header) + 6)));
502 			NXGE_DEBUG_MSG((NULL, TX_CTL,
503 				"==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
504 				eth_type));
505 		} else {
506 			goto fill_tx_header_done;
507 		}
508 	} else if (eth_type == VLAN_ETHERTYPE) {
509 		tmp = 1ull;
510 		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
511 
512 		eth_type = ntohs(((struct ether_vlan_header *)
513 			hdrs_buf)->ether_type);
514 		is_vlan = B_TRUE;
515 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
516 			"value 0x%llx", hdrp->value));
517 	}
518 
519 	if (!is_vlan) {
520 		eth_hdr_size = sizeof (struct ether_header);
521 	} else {
522 		eth_hdr_size = sizeof (struct ether_vlan_header);
523 	}
524 
525 	switch (eth_type) {
526 	case ETHERTYPE_IP:
527 		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
528 			ip_buf = nmp->b_rptr + eth_hdr_size;
529 			mblk_len -= eth_hdr_size;
530 			iph_len = ((*ip_buf) & 0x0f);
531 			if (mblk_len > (iph_len + sizeof (uint32_t))) {
532 				ip_buf = nmp->b_rptr;
533 				ip_buf += eth_hdr_size;
534 			} else {
535 				ip_buf = NULL;
536 			}
537 
538 		}
539 		if (ip_buf == NULL) {
540 			hdrs_size = 0;
541 			((p_ether_header_t)hdrs_buf)->ether_type = 0;
542 			while ((nmp) && (hdrs_size <
543 					sizeof (hdrs_buf))) {
544 				mblk_len = (size_t)nmp->b_wptr -
545 					(size_t)nmp->b_rptr;
546 				if (mblk_len >=
547 					(sizeof (hdrs_buf) - hdrs_size))
548 					mblk_len = sizeof (hdrs_buf) -
549 						hdrs_size;
550 				bcopy(nmp->b_rptr,
551 					&hdrs_buf[hdrs_size], mblk_len);
552 				hdrs_size += mblk_len;
553 				nmp = nmp->b_cont;
554 			}
555 			ip_buf = hdrs_buf;
556 			ip_buf += eth_hdr_size;
557 			iph_len = ((*ip_buf) & 0x0f);
558 		}
559 
560 		ipproto = ip_buf[9];
561 
562 		tmp = (uint64_t)iph_len;
563 		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
564 		tmp = (uint64_t)(eth_hdr_size >> 1);
565 		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
566 
567 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
568 			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
569 			" tmp 0x%x",
570 			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
571 			ipproto, tmp));
572 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
573 			"value 0x%llx", hdrp->value));
574 
575 		break;
576 
577 	case ETHERTYPE_IPV6:
578 		hdrs_size = 0;
579 		((p_ether_header_t)hdrs_buf)->ether_type = 0;
580 		while ((nmp) && (hdrs_size <
581 				sizeof (hdrs_buf))) {
582 			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
583 			if (mblk_len >=
584 				(sizeof (hdrs_buf) - hdrs_size))
585 				mblk_len = sizeof (hdrs_buf) -
586 					hdrs_size;
587 			bcopy(nmp->b_rptr,
588 				&hdrs_buf[hdrs_size], mblk_len);
589 			hdrs_size += mblk_len;
590 			nmp = nmp->b_cont;
591 		}
592 		ip_buf = hdrs_buf;
593 		ip_buf += eth_hdr_size;
594 
595 		tmp = 1ull;
596 		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
597 
598 		tmp = (eth_hdr_size >> 1);
599 		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
600 
601 		/* byte 6 is the next header protocol */
602 		ipproto = ip_buf[6];
603 
604 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
605 			" iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
606 			iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
607 			ipproto));
608 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
609 			"value 0x%llx", hdrp->value));
610 
611 		break;
612 
613 	default:
614 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
615 		goto fill_tx_header_done;
616 	}
617 
618 	switch (ipproto) {
619 	case IPPROTO_TCP:
620 		NXGE_DEBUG_MSG((NULL, TX_CTL,
621 			"==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
622 		if (l4_cksum) {
623 			tmp = 1ull;
624 			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
625 			NXGE_DEBUG_MSG((NULL, TX_CTL,
626 				"==> nxge_tx_pkt_hdr_init: TCP CKSUM "
627 				"value 0x%llx", hdrp->value));
628 		}
629 
630 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
631 			"value 0x%llx", hdrp->value));
632 		break;
633 
634 	case IPPROTO_UDP:
635 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
636 		if (l4_cksum) {
637 			tmp = 0x2ull;
638 			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
639 		}
640 		NXGE_DEBUG_MSG((NULL, TX_CTL,
641 			"==> nxge_tx_pkt_hdr_init: UDP "
642 			"value 0x%llx", hdrp->value));
643 		break;
644 
645 	default:
646 		goto fill_tx_header_done;
647 	}
648 
649 fill_tx_header_done:
650 	NXGE_DEBUG_MSG((NULL, TX_CTL,
651 		"==> nxge_fill_tx_hdr: pkt_len %d  "
652 		"npads %d value 0x%llx", pkt_len, npads, hdrp->value));
653 
654 	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
655 }
656 
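/*
 * nxge_tx_pkt_header_reserve
 *
 *	Prepend a new mblk with room for the Neptune transmit packet
 *	header (TX_PKT_HEADER_SIZE bytes) to the frame <mp>.
 *
 * Returns:
 *	The new message chain, or NULL if allocb() fails.
 *
 * Context:
 *	Any domain
 */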
657 /*ARGSUSED*/
658 p_mblk_t
659 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
660 {
661 	p_mblk_t 		newmp = NULL;
662 
663 	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
664 		NXGE_DEBUG_MSG((NULL, TX_CTL,
665 			"<== nxge_tx_pkt_header_reserve: allocb failed"));
666 		return (NULL);
667 	}
668 
669 	NXGE_DEBUG_MSG((NULL, TX_CTL,
670 		"==> nxge_tx_pkt_header_reserve: get new mp"));
671 	DB_TYPE(newmp) = M_DATA;
672 	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
673 	linkb(newmp, mp);
674 	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
675 
676 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
677 		"b_rptr $%p b_wptr $%p",
678 		newmp->b_rptr, newmp->b_wptr));
679 
680 	NXGE_DEBUG_MSG((NULL, TX_CTL,
681 		"<== nxge_tx_pkt_header_reserve: use new mp"));
682 
683 	return (newmp);
684 }
685 
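/*
 * nxge_tx_pkt_nmblocks
 *
 *	Count the number of transmit descriptor blocks (gather pointers)
 *	needed to send the message <mp>.
 *
 *	Blocks longer than the 4K hardware transfer limit are split with
 *	dupb(), and the chain is pulled up with msgpullup() when it would
 *	otherwise exceed the 15 gather pointers the hardware supports.
 *
 * Returns:
 *	The number of blocks, or 0 on allocation failure.
 *	*tot_xfer_len_p is set to the total number of bytes in the message.
 *
 * Context:
 *	Any domain
 */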
686 int
687 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
688 {
689 	uint_t 			nmblks;
690 	ssize_t			len;
691 	uint_t 			pkt_len;
692 	p_mblk_t 		nmp, bmp, tmp;
693 	uint8_t 		*b_wptr;
694 
695 	NXGE_DEBUG_MSG((NULL, TX_CTL,
696 		"==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
697 		"len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
698 
699 	nmp = mp;
700 	bmp = mp;
701 	nmblks = 0;
702 	pkt_len = 0;
703 	*tot_xfer_len_p = 0;
704 
705 	while (nmp) {
706 		len = MBLKL(nmp);
707 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
708 			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
709 			len, pkt_len, nmblks,
710 			*tot_xfer_len_p));
711 
712 		if (len <= 0) {
713 			bmp = nmp;
714 			nmp = nmp->b_cont;
715 			NXGE_DEBUG_MSG((NULL, TX_CTL,
716 				"==> nxge_tx_pkt_nmblocks: "
717 				"len (0) pkt_len %d nmblks %d",
718 				pkt_len, nmblks));
719 			continue;
720 		}
721 
722 		*tot_xfer_len_p += len;
723 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
724 			"len %d pkt_len %d nmblks %d tot_xfer_len %d",
725 			len, pkt_len, nmblks,
726 			*tot_xfer_len_p));
727 
728 		if (len < nxge_bcopy_thresh) {
729 			NXGE_DEBUG_MSG((NULL, TX_CTL,
730 				"==> nxge_tx_pkt_nmblocks: "
731 				"len %d (< thresh) pkt_len %d nmblks %d",
732 				len, pkt_len, nmblks));
733 			if (pkt_len == 0)
734 				nmblks++;
735 			pkt_len += len;
736 			if (pkt_len >= nxge_bcopy_thresh) {
737 				pkt_len = 0;
738 				len = 0;
739 				nmp = bmp;
740 			}
741 		} else {
742 			NXGE_DEBUG_MSG((NULL, TX_CTL,
743 				"==> nxge_tx_pkt_nmblocks: "
744 				"len %d (> thresh) pkt_len %d nmblks %d",
745 				len, pkt_len, nmblks));
746 			pkt_len = 0;
747 			nmblks++;
748 			/*
749 			 * Hardware limits the transfer length to 4K.
750 			 * If len is more than 4K, we need to break
751 			 * it up to at most 2 more blocks.
752 			 */
753 			if (len > TX_MAX_TRANSFER_LENGTH) {
754 				uint32_t	nsegs;
755 
756 				nsegs = 1;
757 				NXGE_DEBUG_MSG((NULL, TX_CTL,
758 					"==> nxge_tx_pkt_nmblocks: "
759 					"len %d pkt_len %d nmblks %d nsegs %d",
760 					len, pkt_len, nmblks, nsegs));
761 				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
762 					++nsegs;
763 				}
764 				do {
765 					b_wptr = nmp->b_rptr +
766 						TX_MAX_TRANSFER_LENGTH;
767 					nmp->b_wptr = b_wptr;
768 					if ((tmp = dupb(nmp)) == NULL) {
769 						return (0);
770 					}
771 					tmp->b_rptr = b_wptr;
772 					tmp->b_wptr = nmp->b_wptr;
773 					tmp->b_cont = nmp->b_cont;
774 					nmp->b_cont = tmp;
775 					nmblks++;
776 					if (--nsegs) {
777 						nmp = tmp;
778 					}
779 				} while (nsegs);
780 				nmp = tmp;
781 			}
782 		}
783 
784 		/*
785 		 * Hardware limits the transmit gather pointers to 15.
786 		 */
787 		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
788 				TX_MAX_GATHER_POINTERS) {
789 			NXGE_DEBUG_MSG((NULL, TX_CTL,
790 				"==> nxge_tx_pkt_nmblocks: pull msg - "
791 				"len %d pkt_len %d nmblks %d",
792 				len, pkt_len, nmblks));
793 			/* Pull all message blocks from b_cont */
794 			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
795 				return (0);
796 			}
797 			freemsg(nmp->b_cont);
798 			nmp->b_cont = tmp;
799 			pkt_len = 0;
800 		}
801 		bmp = nmp;
802 		nmp = nmp->b_cont;
803 	}
804 
805 	NXGE_DEBUG_MSG((NULL, TX_CTL,
806 		"<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
807 		"nmblks %d len %d tot_xfer_len %d",
808 		mp->b_rptr, mp->b_wptr, nmblks,
809 		MBLKL(mp), *tot_xfer_len_p));
810 
811 	return (nmblks);
812 }
813 
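/*
 * nxge_txdma_reclaim
 *
 *	Reclaim transmit descriptors that the hardware has completed,
 *	freeing or unbinding the associated message blocks, and decide
 *	whether the ring has room for <nmblks> more descriptors.
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	The ring to reclaim.
 * 	nmblks		Number of descriptors the caller wants to queue
 * 			(0 when called only to reclaim, e.g. from
 * 			nxge_tx_intr()).
 *
 * Notes:
 *	The caller is expected to hold tx_ring_p->lock.
 *
 * Returns:
 *	B_TRUE if at least <nmblks> descriptors (plus the TX_FULL_MARK
 *	reserve) are available, B_FALSE otherwise.
 *
 * Context:
 *	Any domain
 */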
814 boolean_t
815 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
816 {
817 	boolean_t 		status = B_TRUE;
818 	p_nxge_dma_common_t	tx_desc_dma_p;
819 	nxge_dma_common_t	desc_area;
820 	p_tx_desc_t 		tx_desc_ring_vp;
821 	p_tx_desc_t 		tx_desc_p;
822 	p_tx_desc_t 		tx_desc_pp;
823 	tx_desc_t 		r_tx_desc;
824 	p_tx_msg_t 		tx_msg_ring;
825 	p_tx_msg_t 		tx_msg_p;
826 	npi_handle_t		handle;
827 	tx_ring_hdl_t		tx_head;
828 	uint32_t 		pkt_len;
829 	uint_t			tx_rd_index;
830 	uint16_t		head_index, tail_index;
831 	uint8_t			tdc;
832 	boolean_t		head_wrap, tail_wrap;
833 	p_nxge_tx_ring_stats_t tdc_stats;
834 	int			rc;
835 
836 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
837 
838 	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
839 			(nmblks != 0));
840 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
841 		"==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
842 			tx_ring_p->descs_pending, nxge_reclaim_pending,
843 			nmblks));
844 	if (!status) {
845 		tx_desc_dma_p = &tx_ring_p->tdc_desc;
846 		desc_area = tx_ring_p->tdc_desc;
847 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
848 		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
849 		tx_desc_ring_vp =
850 			(p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
851 		tx_rd_index = tx_ring_p->rd_index;
852 		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
853 		tx_msg_ring = tx_ring_p->tx_msg_ring;
854 		tx_msg_p = &tx_msg_ring[tx_rd_index];
855 		tdc = tx_ring_p->tdc;
856 		tdc_stats = tx_ring_p->tdc_stats;
857 		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
858 			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
859 		}
860 
861 		tail_index = tx_ring_p->wr_index;
862 		tail_wrap = tx_ring_p->wr_index_wrap;
863 
864 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
865 			"==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
866 			"tail_index %d tail_wrap %d "
867 			"tx_desc_p $%p ($%p) ",
868 			tdc, tx_rd_index, tail_index, tail_wrap,
869 			tx_desc_p, (*(uint64_t *)tx_desc_p)));
870 		/*
871 		 * Read the hardware maintained transmit head
872 		 * and wrap around bit.
873 		 */
874 		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
875 		head_index =  tx_head.bits.ldw.head;
876 		head_wrap = tx_head.bits.ldw.wrap;
877 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
878 			"==> nxge_txdma_reclaim: "
879 			"tx_rd_index %d tail %d tail_wrap %d "
880 			"head %d wrap %d",
881 			tx_rd_index, tail_index, tail_wrap,
882 			head_index, head_wrap));
883 
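		/*
		 * When the head and tail indices are equal, the ring is
		 * either completely empty or completely full; the
		 * hardware-maintained wrap bits disambiguate the two.
		 */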
884 		if (head_index == tail_index) {
885 			if (TXDMA_RING_EMPTY(head_index, head_wrap,
886 					tail_index, tail_wrap) &&
887 					(head_index == tx_rd_index)) {
888 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
889 					"==> nxge_txdma_reclaim: EMPTY"));
890 				return (B_TRUE);
891 			}
892 
893 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
894 				"==> nxge_txdma_reclaim: Checking "
895 					"if ring full"));
896 			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
897 					tail_wrap)) {
898 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
899 					"==> nxge_txdma_reclaim: full"));
900 				return (B_FALSE);
901 			}
902 		}
903 
904 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
905 			"==> nxge_txdma_reclaim: tx_rd_index and head_index"));
906 
907 		tx_desc_pp = &r_tx_desc;
908 		while ((tx_rd_index != head_index) &&
909 			(tx_ring_p->descs_pending != 0)) {
910 
911 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
912 				"==> nxge_txdma_reclaim: Checking if pending"));
913 
914 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
915 				"==> nxge_txdma_reclaim: "
916 				"descs_pending %d ",
917 				tx_ring_p->descs_pending));
918 
919 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
920 				"==> nxge_txdma_reclaim: "
921 				"(tx_rd_index %d head_index %d "
922 				"(tx_desc_p $%p)",
923 				tx_rd_index, head_index,
924 				tx_desc_p));
925 
926 			tx_desc_pp->value = tx_desc_p->value;
927 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
928 				"==> nxge_txdma_reclaim: "
929 				"(tx_rd_index %d head_index %d "
930 				"tx_desc_p $%p (desc value 0x%llx) ",
931 				tx_rd_index, head_index,
932 				tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
933 
934 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
935 				"==> nxge_txdma_reclaim: dump desc:"));
936 
937 			pkt_len = tx_desc_pp->bits.hdw.tr_len;
938 			tdc_stats->obytes += pkt_len;
939 			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
940 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
941 				"==> nxge_txdma_reclaim: pkt_len %d "
942 				"tdc channel %d opackets %d",
943 				pkt_len,
944 				tdc,
945 				tdc_stats->opackets));
946 
947 			if (tx_msg_p->flags.dma_type == USE_DVMA) {
948 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
949 					"tx_desc_p = $%p "
950 					"tx_desc_pp = $%p "
951 					"index = %d",
952 					tx_desc_p,
953 					tx_desc_pp,
954 					tx_ring_p->rd_index));
955 				(void) dvma_unload(tx_msg_p->dvma_handle,
956 					0, -1);
957 				tx_msg_p->dvma_handle = NULL;
958 				if (tx_ring_p->dvma_wr_index ==
959 					tx_ring_p->dvma_wrap_mask) {
960 					tx_ring_p->dvma_wr_index = 0;
961 				} else {
962 					tx_ring_p->dvma_wr_index++;
963 				}
964 				tx_ring_p->dvma_pending--;
965 			} else if (tx_msg_p->flags.dma_type ==
966 					USE_DMA) {
967 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
968 					"==> nxge_txdma_reclaim: "
969 					"USE DMA"));
970 				if (rc = ddi_dma_unbind_handle
971 					(tx_msg_p->dma_handle)) {
972 					cmn_err(CE_WARN, "!nxge_reclaim: "
973 						"ddi_dma_unbind_handle "
974 						"failed. status %d", rc);
975 				}
976 			}
977 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
978 				"==> nxge_txdma_reclaim: count packets"));
979 			/*
980 			 * count a chained packet only once.
981 			 */
982 			if (tx_msg_p->tx_message != NULL) {
983 				freemsg(tx_msg_p->tx_message);
984 				tx_msg_p->tx_message = NULL;
985 			}
986 
987 			tx_msg_p->flags.dma_type = USE_NONE;
988 			tx_rd_index = tx_ring_p->rd_index;
989 			tx_rd_index = (tx_rd_index + 1) &
990 					tx_ring_p->tx_wrap_mask;
991 			tx_ring_p->rd_index = tx_rd_index;
992 			tx_ring_p->descs_pending--;
993 			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
994 			tx_msg_p = &tx_msg_ring[tx_rd_index];
995 		}
996 
997 		status = (nmblks <= (tx_ring_p->tx_ring_size -
998 				tx_ring_p->descs_pending -
999 				TX_FULL_MARK));
1000 		if (status) {
1001 			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
1002 		}
1003 	} else {
1004 		status = (nmblks <=
1005 			(tx_ring_p->tx_ring_size -
1006 				tx_ring_p->descs_pending -
1007 				TX_FULL_MARK));
1008 	}
1009 
1010 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1011 		"<== nxge_txdma_reclaim status = 0x%08x", status));
1012 
1013 	return (status);
1014 }
1015 
1016 /*
1017  * nxge_tx_intr
1018  *
1019  *	Process a TDC interrupt
1020  *
1021  * Arguments:
1022  * 	arg1	The logical device (p_nxge_ldv_t) for this channel.
1023  * 	arg2	nxge_t *
1024  *
1025  * Notes:
1026  *
1027  * NPI/NXGE function calls:
1028  *	npi_txdma_control_status()
1029  *	npi_intr_ldg_mgmt_set()
1030  *
1031  *	nxge_tx_err_evnts()
1032  *	nxge_txdma_reclaim()
1033  *
1034  * Registers accessed:
1035  *	TX_CS		DMC+0x40028 Transmit Control And Status
1036  *	PIO_LDSV
1037  *
1038  * Context:
1039  *	Any domain
1040  */
1041 uint_t
1042 nxge_tx_intr(void *arg1, void *arg2)
1043 {
1044 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
1045 	p_nxge_t		nxgep = (p_nxge_t)arg2;
1046 	p_nxge_ldg_t		ldgp;
1047 	uint8_t			channel;
1048 	uint32_t		vindex;
1049 	npi_handle_t		handle;
1050 	tx_cs_t			cs;
1051 	p_tx_ring_t 		*tx_rings;
1052 	p_tx_ring_t 		tx_ring_p;
1053 	npi_status_t		rs = NPI_SUCCESS;
1054 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
1055 	nxge_status_t 		status = NXGE_OK;
1056 
1057 	if (ldvp == NULL) {
1058 		NXGE_DEBUG_MSG((NULL, INT_CTL,
1059 			"<== nxge_tx_intr: nxgep $%p ldvp $%p",
1060 			nxgep, ldvp));
1061 		return (DDI_INTR_UNCLAIMED);
1062 	}
1063 
1064 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1065 		nxgep = ldvp->nxgep;
1066 	}
1067 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1068 		"==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1069 		nxgep, ldvp));
1070 	/*
1071 	 * This interrupt handler is for a specific
1072 	 * transmit dma channel.
1073 	 */
1074 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1075 	/* Get the control and status for this channel. */
1076 	channel = ldvp->channel;
1077 	ldgp = ldvp->ldgp;
1078 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1079 		"==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1080 		"channel %d",
1081 		nxgep, ldvp, channel));
1082 
1083 	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1084 	vindex = ldvp->vdma_index;
1085 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1086 		"==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1087 		channel, vindex, rs));
1088 	if (!rs && cs.bits.ldw.mk) {
1089 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1090 			"==> nxge_tx_intr:channel %d ring index %d "
1091 			"status 0x%08x (mk bit set)",
1092 			channel, vindex, rs));
1093 		tx_rings = nxgep->tx_rings->rings;
1094 		tx_ring_p = tx_rings[vindex];
1095 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1096 			"==> nxge_tx_intr:channel %d ring index %d "
1097 			"status 0x%08x (mk bit set, calling reclaim)",
1098 			channel, vindex, rs));
1099 
1100 		MUTEX_ENTER(&tx_ring_p->lock);
1101 		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
1102 		MUTEX_EXIT(&tx_ring_p->lock);
1103 		mac_tx_update(nxgep->mach);
1104 	}
1105 
1106 	/*
1107 	 * Process other transmit control and status.
1108 	 * Check the ldv state.
1109 	 */
1110 	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1111 	/*
1112 	 * Rearm this logical group if this is a single device
1113 	 * group.
1114 	 */
1115 	if (ldgp->nldvs == 1) {
1116 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1117 			"==> nxge_tx_intr: rearm"));
1118 		if (status == NXGE_OK) {
1119 			if (isLDOMguest(nxgep)) {
1120 				nxge_hio_ldgimgn(nxgep, ldgp);
1121 			} else {
1122 				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1123 				    B_TRUE, ldgp->ldg_timer);
1124 			}
1125 		}
1126 	}
1127 
1128 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1129 	serviced = DDI_INTR_CLAIMED;
1130 	return (serviced);
1131 }
1132 
1133 void
1134 nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
1135 {
1136 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
1137 
1138 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1139 
1140 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
1141 }
1142 
1143 void
1144 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
1145 {
1146 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
1147 
1148 	(void) nxge_txdma_stop(nxgep);
1149 
1150 	(void) nxge_fixup_txdma_rings(nxgep);
1151 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1152 	(void) nxge_tx_mac_enable(nxgep);
1153 	(void) nxge_txdma_hw_kick(nxgep);
1154 
1155 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
1156 }
1157 
1158 npi_status_t
1159 nxge_txdma_channel_disable(
1160 	nxge_t *nxge,
1161 	int channel)
1162 {
1163 	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxge);
1164 	npi_status_t	rs;
1165 	tdmc_intr_dbg_t	intr_dbg;
1166 
1167 	/*
1168 	 * Stop the dma channel and wait for the stop-done.
1169 	 * If the stop-done bit is not set, then force
1170 	 * an error so TXC will stop.
1171 	 * All channels bound to this port need to be stopped
1172 	 * and reset after injecting an interrupt error.
1173 	 */
1174 	rs = npi_txdma_channel_disable(handle, channel);
1175 	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1176 		"==> nxge_txdma_channel_disable(%d) "
1177 		"rs 0x%x", channel, rs));
1178 	if (rs != NPI_SUCCESS) {
1179 		/* Inject any error */
1180 		intr_dbg.value = 0;
1181 		intr_dbg.bits.ldw.nack_pref = 1;
1182 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1183 			"==> nxge_txdma_channel_disable: "
1184 			"channel %d (stop failed 0x%x) "
1185 			"(inject err)", channel, rs));
1186 		(void) npi_txdma_inj_int_error_set(
1187 			handle, channel, &intr_dbg);
1188 		rs = npi_txdma_channel_disable(handle, channel);
1189 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1190 			"==> nxge_txdma_channel_disable: "
1191 			"channel %d (stop again 0x%x) "
1192 			"(after inject err)",
1193 			channel, rs));
1194 	}
1195 
1196 	return (rs);
1197 }
1198 
1199 /*
1200  * nxge_txdma_hw_mode
1201  *
1202  *	Toggle all TDCs on (enable) or off (disable).
1203  *
1204  * Arguments:
1205  * 	nxgep
1206  * 	enable	Enable or disable a TDC.
1207  *
1208  * Notes:
1209  *
1210  * NPI/NXGE function calls:
1211  *	npi_txdma_channel_enable(TX_CS)
1212  *	npi_txdma_channel_disable(TX_CS)
1213  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1214  *
1215  * Registers accessed:
1216  *	TX_CS		DMC+0x40028 Transmit Control And Status
1217  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1218  *
1219  * Context:
1220  *	Any domain
1221  */
1222 nxge_status_t
1223 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1224 {
1225 	nxge_grp_set_t *set = &nxgep->tx_set;
1226 
1227 	npi_handle_t	handle;
1228 	nxge_status_t	status;
1229 	npi_status_t	rs = NPI_SUCCESS;
1230 	int		tdc;
1231 
1232 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1233 		"==> nxge_txdma_hw_mode: enable mode %d", enable));
1234 
1235 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1236 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1237 			"<== nxge_txdma_hw_mode: not initialized"));
1238 		return (NXGE_ERROR);
1239 	}
1240 
1241 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1242 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1243 		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1244 		return (NXGE_ERROR);
1245 	}
1246 
1247 	/* Enable or disable all of the TDCs owned by us. */
1248 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1249 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1250 		if ((1 << tdc) & set->owned.map) {
1251 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1252 			if (ring) {
1253 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1254 				    "==> nxge_txdma_hw_mode: channel %d", tdc));
1255 				if (enable) {
1256 					rs = npi_txdma_channel_enable
1257 					    (handle, tdc);
1258 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1259 					    "==> nxge_txdma_hw_mode: "
1260 					    "channel %d (enable) rs 0x%x",
1261 					    tdc, rs));
1262 				} else {
1263 					rs = nxge_txdma_channel_disable
1264 					    (nxgep, tdc);
1265 				}
1266 			}
1267 		}
1268 	}
1269 
1270 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1271 
1272 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1273 		"<== nxge_txdma_hw_mode: status 0x%x", status));
1274 
1275 	return (status);
1276 }
1277 
1278 void
1279 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1280 {
1281 	npi_handle_t		handle;
1282 
1283 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1284 		"==> nxge_txdma_enable_channel: channel %d", channel));
1285 
1286 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1287 	/* enable the transmit dma channels */
1288 	(void) npi_txdma_channel_enable(handle, channel);
1289 
1290 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1291 }
1292 
1293 void
1294 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1295 {
1296 	npi_handle_t		handle;
1297 
1298 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1299 		"==> nxge_txdma_disable_channel: channel %d", channel));
1300 
1301 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1302 	/* stop the transmit dma channels */
1303 	(void) npi_txdma_channel_disable(handle, channel);
1304 
1305 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1306 }
1307 
1308 /*
1309  * nxge_txdma_stop_inj_err
1310  *
1311  *	Stop a TDC.  If at first we don't succeed, inject an error.
1312  *
1313  * Arguments:
1314  * 	nxgep
1315  * 	channel		The channel to stop.
1316  *
1317  * Notes:
1318  *
1319  * NPI/NXGE function calls:
1320  *	npi_txdma_channel_disable()
1321  *	npi_txdma_inj_int_error_set()
1322  * #if defined(NXGE_DEBUG)
1323  *	nxge_txdma_regs_dump_channels(nxgep);
1324  * #endif
1325  *
1326  * Registers accessed:
1327  *	TX_CS		DMC+0x40028 Transmit Control And Status
1328  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1329  *
1330  * Context:
1331  *	Any domain
1332  */
1333 int
1334 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1335 {
1336 	npi_handle_t		handle;
1337 	tdmc_intr_dbg_t		intr_dbg;
1338 	int			status;
1339 	npi_status_t		rs = NPI_SUCCESS;
1340 
1341 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1342 	/*
1343 	 * Stop the dma channel and wait for the stop-done bit.
1344 	 * If the stop-done bit is not set, then inject
1345 	 * an error.
1346 	 */
1347 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1348 	rs = npi_txdma_channel_disable(handle, channel);
1349 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1350 	if (status == NXGE_OK) {
1351 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1352 			"<== nxge_txdma_stop_inj_err (channel %d): "
1353 			"stopped OK", channel));
1354 		return (status);
1355 	}
1356 
1357 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1358 		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1359 		"injecting error", channel, rs));
1360 	/* Inject any error */
1361 	intr_dbg.value = 0;
1362 	intr_dbg.bits.ldw.nack_pref = 1;
1363 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1364 
1365 	/* Stop done bit will be set as a result of error injection */
1366 	rs = npi_txdma_channel_disable(handle, channel);
1367 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1368 	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1369 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1370 			"<== nxge_txdma_stop_inj_err (channel %d): "
1371 			"stopped OK ", channel));
1372 		return (status);
1373 	}
1374 
1375 #if	defined(NXGE_DEBUG)
1376 	nxge_txdma_regs_dump_channels(nxgep);
1377 #endif
1378 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1379 		"==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1380 		" (injected error but still not stopped)", channel, rs));
1381 
1382 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1383 	return (status);
1384 }
1385 
1386 /*ARGSUSED*/
1387 void
1388 nxge_fixup_txdma_rings(p_nxge_t nxgep)
1389 {
1390 	nxge_grp_set_t *set = &nxgep->tx_set;
1391 	int tdc;
1392 
1393 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1394 
1395 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1396 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1397 		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1398 		return;
1399 	}
1400 
1401 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1402 		if ((1 << tdc) & set->owned.map) {
1403 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1404 			if (ring) {
1405 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1406 				    "==> nxge_fixup_txdma_rings: channel %d",
1407 				    tdc));
1408 				nxge_txdma_fixup_channel(nxgep, ring, tdc);
1409 			}
1410 		}
1411 	}
1412 
1413 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1414 }
1415 
1416 /*ARGSUSED*/
1417 void
1418 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1419 {
1420 	p_tx_ring_t	ring_p;
1421 
1422 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1423 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1424 	if (ring_p == NULL) {
1425 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1426 		return;
1427 	}
1428 
1429 	if (ring_p->tdc != channel) {
1430 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1431 			"<== nxge_txdma_fix_channel: channel not matched "
1432 			"ring tdc %d passed channel %d",
1433 			ring_p->tdc, channel));
1434 		return;
1435 	}
1436 
1437 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1438 
1439 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1440 }
1441 
1442 /*ARGSUSED*/
1443 void
1444 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1445 {
1446 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1447 
1448 	if (ring_p == NULL) {
1449 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1450 			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
1451 		return;
1452 	}
1453 
1454 	if (ring_p->tdc != channel) {
1455 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1456 			"<== nxge_txdma_fixup_channel: channel not matched "
1457 			"ring tdc %d passed channel %d",
1458 			ring_p->tdc, channel));
1459 		return;
1460 	}
1461 
1462 	MUTEX_ENTER(&ring_p->lock);
1463 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1464 	ring_p->rd_index = 0;
1465 	ring_p->wr_index = 0;
1466 	ring_p->ring_head.value = 0;
1467 	ring_p->ring_kick_tail.value = 0;
1468 	ring_p->descs_pending = 0;
1469 	MUTEX_EXIT(&ring_p->lock);
1470 
1471 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1472 }
1473 
1474 /*ARGSUSED*/
1475 void
1476 nxge_txdma_hw_kick(p_nxge_t nxgep)
1477 {
1478 	nxge_grp_set_t *set = &nxgep->tx_set;
1479 	int tdc;
1480 
1481 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1482 
1483 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1484 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1485 		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1486 		return;
1487 	}
1488 
1489 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1490 		if ((1 << tdc) & set->owned.map) {
1491 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1492 			if (ring) {
1493 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1494 				    "==> nxge_txdma_hw_kick: channel %d", tdc));
1495 				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1496 			}
1497 		}
1498 	}
1499 
1500 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1501 }
1502 
1503 /*ARGSUSED*/
1504 void
1505 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1506 {
1507 	p_tx_ring_t	ring_p;
1508 
1509 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1510 
1511 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1512 	if (ring_p == NULL) {
1513 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1514 			    "<== nxge_txdma_kick_channel: NULL ring pointer"));
1515 		return;
1516 	}
1517 
1518 	if (ring_p->tdc != channel) {
1519 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1520 			"<== nxge_txdma_kick_channel: channel not matched "
1521 			"ring tdc %d passed channel %d",
1522 			ring_p->tdc, channel));
1523 		return;
1524 	}
1525 
1526 	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1527 
1528 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1529 }
1530 
1531 /*ARGSUSED*/
1532 void
1533 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1534 {
1535 
1536 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1537 
1538 	if (ring_p == NULL) {
1539 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1540 			"<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1541 		return;
1542 	}
1543 
1544 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1545 }
1546 
1547 /*
1548  * nxge_check_tx_hang
1549  *
1550  *	Check the state of all TDCs belonging to nxgep.
1551  *
1552  * Arguments:
1553  * 	nxgep
1554  *
1555  * Notes:
1556  *	Called by nxge_hw.c:nxge_check_hw_state().
1557  *
1558  * NPI/NXGE function calls:
1559  *
1560  * Registers accessed:
1561  *
1562  * Context:
1563  *	Any domain
1564  */
1565 /*ARGSUSED*/
1566 void
1567 nxge_check_tx_hang(p_nxge_t nxgep)
1568 {
1569 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1570 
1571 	/*
1572 	 * Needs inputs from hardware for regs:
1573 	 *	head index had not moved since last timeout.
1574 	 *	packets not transmitted or stuffed registers.
1575 	 */
1576 	if (nxge_txdma_hung(nxgep)) {
1577 		nxge_fixup_hung_txdma_rings(nxgep);
1578 	}
1579 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1580 }
1581 
1582 /*
1583  * nxge_txdma_hung
1584  *
1585  *	Determine whether any TDC owned by this nxge instance is hung.
1586  *
1587  * Arguments:
1588  * 	nxgep
1591  *
1592  * Notes:
1593  *	Called by nxge_check_tx_hang()
1594  *
1595  * NPI/NXGE function calls:
1596  *	nxge_txdma_channel_hung()
1597  *
1598  * Registers accessed:
1599  *
1600  * Context:
1601  *	Any domain
1602  */
1603 int
1604 nxge_txdma_hung(p_nxge_t nxgep)
1605 {
1606 	nxge_grp_set_t *set = &nxgep->tx_set;
1607 	int tdc;
1608 
1609 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1610 
1611 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1612 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1613 		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
1614 		return (B_FALSE);
1615 	}
1616 
1617 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1618 		if ((1 << tdc) & set->owned.map) {
1619 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1620 			if (ring) {
1621 				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1622 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
1623 					    "==> nxge_txdma_hung: TDC %d hung",
1624 					    tdc));
1625 					return (B_TRUE);
1626 				}
1627 			}
1628 		}
1629 	}
1630 
1631 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1632 
1633 	return (B_FALSE);
1634 }
1635 
1636 /*
1637  * nxge_txdma_channel_hung
1638  *
1639  *	Determine whether the given TDC appears to be hung.
1640  *
1641  * Arguments:
1642  * 	nxgep
1643  * 	ring		<channel>'s ring.
1644  * 	channel		The channel to check.
1645  *
1646  * Notes:
1647  *	Called by nxge_txdma.c:nxge_txdma_hung()
1648  *
1649  * NPI/NXGE function calls:
1650  *	npi_txdma_ring_head_get()
1651  *
1652  * Registers accessed:
1653  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
1654  *
1655  * Context:
1656  *	Any domain
1657  */
1658 int
1659 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1660 {
1661 	uint16_t		head_index, tail_index;
1662 	boolean_t		head_wrap, tail_wrap;
1663 	npi_handle_t		handle;
1664 	tx_ring_hdl_t		tx_head;
1665 	uint_t			tx_rd_index;
1666 
1667 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1668 
1669 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1670 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1671 		"==> nxge_txdma_channel_hung: channel %d", channel));
1672 	MUTEX_ENTER(&tx_ring_p->lock);
1673 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1674 
1675 	tail_index = tx_ring_p->wr_index;
1676 	tail_wrap = tx_ring_p->wr_index_wrap;
1677 	tx_rd_index = tx_ring_p->rd_index;
1678 	MUTEX_EXIT(&tx_ring_p->lock);
1679 
1680 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1681 		"==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1682 		"tail_index %d tail_wrap %d ",
1683 		channel, tx_rd_index, tail_index, tail_wrap));
1684 	/*
1685 	 * Read the hardware maintained transmit head
1686 	 * and wrap around bit.
1687 	 */
1688 	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1689 	head_index =  tx_head.bits.ldw.head;
1690 	head_wrap = tx_head.bits.ldw.wrap;
1691 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1692 		"==> nxge_txdma_channel_hung: "
1693 		"tx_rd_index %d tail %d tail_wrap %d "
1694 		"head %d wrap %d",
1695 		tx_rd_index, tail_index, tail_wrap,
1696 		head_index, head_wrap));
1697 
1698 	if (TXDMA_RING_EMPTY(head_index, head_wrap,
1699 			tail_index, tail_wrap) &&
1700 			(head_index == tx_rd_index)) {
1701 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1702 			"==> nxge_txdma_channel_hung: EMPTY"));
1703 		return (B_FALSE);
1704 	}
1705 
1706 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1707 		"==> nxge_txdma_channel_hung: Checking if ring full"));
1708 	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1709 			tail_wrap)) {
1710 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1711 			"==> nxge_txdma_channel_hung: full"));
1712 		return (B_TRUE);
1713 	}
1714 
1715 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1716 
1717 	return (B_FALSE);
1718 }
1719 
1720 /*
1721  * nxge_fixup_hung_txdma_rings
1722  *
1723  *	Walk all TDCs owned by this nxge instance and fix up any that are hung.
1724  *
1725  * Arguments:
1726  * 	nxgep
1729  *
1730  * Notes:
1731  *	Called by nxge_check_tx_hang()
1732  *
1733  * NPI/NXGE function calls:
1734  *	npi_txdma_ring_head_get()
1735  *
1736  * Registers accessed:
1737  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
1738  *
1739  * Context:
1740  *	Any domain
1741  */
1742 /*ARGSUSED*/
1743 void
1744 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1745 {
1746 	nxge_grp_set_t *set = &nxgep->tx_set;
1747 	int tdc;
1748 
1749 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1750 
1751 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1752 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1753 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1754 		return;
1755 	}
1756 
1757 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1758 		if ((1 << tdc) & set->owned.map) {
1759 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1760 			if (ring) {
1761 				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1762 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1763 				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
1764 				    tdc));
1765 			}
1766 		}
1767 	}
1768 
1769 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1770 }
1771 
1772 /*
1773  * nxge_txdma_fixup_hung_channel
1774  *
1775  *	'Fix' a hung TDC.
1776  *
1777  * Arguments:
1778  * 	nxgep
1779  * 	channel		The channel to fix.
1780  *
1781  * Notes:
1782  *	Called by nxge_fixup_hung_txdma_rings()
1783  *
1784  *	1. Reclaim the TDC.
1785  *	2. Disable the TDC.
1786  *
1787  * NPI/NXGE function calls:
1788  *	nxge_txdma_reclaim()
1789  *	npi_txdma_channel_disable(TX_CS)
1790  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1791  *
1792  * Registers accessed:
1793  *	TX_CS		DMC+0x40028 Transmit Control And Status
1794  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1795  *
1796  * Context:
1797  *	Any domain
1798  */
1799 /*ARGSUSED*/
1800 void
1801 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1802 {
1803 	p_tx_ring_t	ring_p;
1804 
1805 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1806 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1807 	if (ring_p == NULL) {
1808 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1809 			"<== nxge_txdma_fix_hung_channel"));
1810 		return;
1811 	}
1812 
1813 	if (ring_p->tdc != channel) {
1814 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1815 			"<== nxge_txdma_fix_hung_channel: channel not matched "
1816 			"ring tdc %d passed channel %d",
1817 			ring_p->tdc, channel));
1818 		return;
1819 	}
1820 
1821 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1822 
1823 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1824 }
1825 
1826 /*ARGSUSED*/
1827 void
1828 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1829 	uint16_t channel)
1830 {
1831 	npi_handle_t		handle;
1832 	tdmc_intr_dbg_t		intr_dbg;
1833 	int			status = NXGE_OK;
1834 
1835 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1836 
1837 	if (ring_p == NULL) {
1838 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1839 			"<== nxge_txdma_fixup_channel: NULL ring pointer"));
1840 		return;
1841 	}
1842 
1843 	if (ring_p->tdc != channel) {
1844 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1845 			"<== nxge_txdma_fixup_hung_channel: channel "
1846 			"not matched "
1847 			"ring tdc %d passed channel %d",
1848 			ring_p->tdc, channel));
1849 		return;
1850 	}
1851 
1852 	/* Reclaim descriptors */
1853 	MUTEX_ENTER(&ring_p->lock);
1854 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1855 	MUTEX_EXIT(&ring_p->lock);
1856 
1857 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1858 	/*
1859 	 * Stop the dma channel and wait for the stop-done bit.
1860 	 * If the stop done bit is not set, then force
1861 	 * an error.
1862 	 */
1863 	status = npi_txdma_channel_disable(handle, channel);
1864 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
1865 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1866 			"<== nxge_txdma_fixup_hung_channel: stopped OK "
1867 			"ring tdc %d passed channel %d",
1868 			ring_p->tdc, channel));
1869 		return;
1870 	}
1871 
1872 	/* Inject any error */
1873 	intr_dbg.value = 0;
1874 	intr_dbg.bits.ldw.nack_pref = 1;
1875 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1876 
1877 	/* Stop done bit will be set as a result of error injection */
1878 	status = npi_txdma_channel_disable(handle, channel);
1879 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
1880 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1881 			"<== nxge_txdma_fixup_hung_channel: stopped again "
1882 			"ring tdc %d passed channel %d",
1883 			ring_p->tdc, channel));
1884 		return;
1885 	}
1886 
1887 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1888 		"<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
1889 		"ring tdc %d passed channel %d",
1890 		ring_p->tdc, channel));
1891 
1892 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
1893 }
1894 
1895 /*ARGSUSED*/
1896 void
1897 nxge_reclaim_rings(p_nxge_t nxgep)
1898 {
1899 	nxge_grp_set_t *set = &nxgep->tx_set;
1900 	int tdc;
1901 
1902 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
1903 
1904 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1905 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1906 		    "<== nxge_reclaim_rings: NULL ring pointer(s)"));
1907 		return;
1908 	}
1909 
1910 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1911 		if ((1 << tdc) & set->owned.map) {
1912 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1913 			if (ring) {
1914 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1915 				    "==> nxge_reclaim_rings: TDC %d", tdc));
1916 				MUTEX_ENTER(&ring->lock);
1917 				(void) nxge_txdma_reclaim(nxgep, ring, tdc);
1918 				MUTEX_EXIT(&ring->lock);
1919 			}
1920 		}
1921 	}
1922 
1923 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
1924 }
1925 
1926 void
1927 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
1928 {
1929 	nxge_grp_set_t *set = &nxgep->tx_set;
1930 	npi_handle_t handle;
1931 	int tdc;
1932 
1933 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
1934 
1935 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1936 
1937 	if (!isLDOMguest(nxgep)) {
1938 		(void) npi_txdma_dump_fzc_regs(handle);
1939 
1940 		/* Dump TXC registers. */
1941 		(void) npi_txc_dump_fzc_regs(handle);
1942 		(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
1943 	}
1944 
1945 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1946 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1947 		    "<== nxge_txdma_regs_dump_channels: NULL ring pointer(s)"));
1948 		return;
1949 	}
1950 
1951 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1952 		if ((1 << tdc) & set->owned.map) {
1953 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1954 			if (ring) {
1955 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1956 				    "==> nxge_txdma_regs_dump_channels: "
1957 				    "TDC %d", tdc));
1958 				(void) npi_txdma_dump_tdc_regs(handle, tdc);
1959 
1960 				/* Dump TXC registers, if able to. */
1961 				if (!isLDOMguest(nxgep)) {
1962 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
1963 					    "==> nxge_txdma_regs_dump_channels:"
1964 					    " FZC TDC %d", tdc));
1965 					(void) npi_txc_dump_tdc_fzc_regs
1966 					    (handle, tdc);
1967 				}
1968 				nxge_txdma_regs_dump(nxgep, tdc);
1969 			}
1970 		}
1971 	}
1972 
1973 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
1974 }
1975 
1976 void
1977 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
1978 {
1979 	npi_handle_t		handle;
1980 	tx_ring_hdl_t 		hdl;
1981 	tx_ring_kick_t 		kick;
1982 	tx_cs_t 		cs;
1983 	txc_control_t		control;
1984 	uint32_t		bitmap = 0;
1985 	uint32_t		burst = 0;
1986 	uint32_t		bytes = 0;
1987 	dma_log_page_t		cfg;
1988 
1989 	printf("\n\tfunc # %d tdc %d ",
1990 		nxgep->function_num, channel);
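	/* Dump the logical page (page 0 and page 1) configuration. */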
1991 	cfg.page_num = 0;
1992 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1993 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
1994 	printf("\n\tlog page func %d valid page 0 %d",
1995 		cfg.func_num, cfg.valid);
1996 	cfg.page_num = 1;
1997 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
1998 	printf("\n\tlog page func %d valid page 1 %d",
1999 		cfg.func_num, cfg.valid);
2000 
2001 	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
2002 	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2003 	printf("\n\thead value is 0x%0llx",
2004 		(long long)hdl.value);
2005 	printf("\n\thead index %d", hdl.bits.ldw.head);
2006 	printf("\n\tkick value is 0x%0llx",
2007 		(long long)kick.value);
2008 	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2009 
2010 	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2011 	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2012 	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2013 
2014 	(void) npi_txc_control(handle, OP_GET, &control);
2015 	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2016 	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2017 	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2018 
2019 	printf("\n\tTXC port control 0x%0llx",
2020 		(long long)control.value);
2021 	printf("\n\tTXC port bitmap 0x%x", bitmap);
2022 	printf("\n\tTXC max burst %d", burst);
2023 	printf("\n\tTXC bytes xmt %d\n", bytes);
2024 
2025 	{
2026 		ipp_status_t status;
2027 
2028 		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2029 #if defined(__i386)
2030 		printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
2031 #else
2032 		printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
2033 #endif
2034 	}
2035 }
2036 
2037 /*
2038  * nxge_tdc_hvio_setup
2039  *
2040  *	I'm not exactly sure what this code does.
2041  *	Save the channel's buffer and control I/O addresses for HV use.
2042  * Arguments:
2043  * 	nxgep
2044  * 	channel	The channel to map.
2045  *
2046  * Notes:
2047  *
2048  * NPI/NXGE function calls:
2049  *	na
2050  *
2051  * Context:
2052  *	Service domain (this function is only called when not a guest).
2053  */
2054 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2055 static void
2056 nxge_tdc_hvio_setup(
2057 	nxge_t *nxgep, int channel)
2058 {
2059 	nxge_dma_common_t	*data;
2060 	nxge_dma_common_t	*control;
2061 	tx_ring_t 		*ring;
2062 
2063 	ring = nxgep->tx_rings->rings[channel];
2064 	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2065 
2066 	ring->hv_set = B_FALSE;
2067 
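	/*
	 * Save the original (unrelocated) I/O address and size of the
	 * transmit buffer area; these hv_* fields are presumably used
	 * later when the channel's logical pages are configured through
	 * the sun4v NIU hypervisor interface.
	 */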
2068 	ring->hv_tx_buf_base_ioaddr_pp =
2069 	    (uint64_t)data->orig_ioaddr_pp;
2070 	ring->hv_tx_buf_ioaddr_size =
2071 	    (uint64_t)data->orig_alength;
2072 
2073 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2074 		"hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
2075 		"orig vatopa base io $%p orig_len 0x%llx (%d)",
2076 		ring->hv_tx_buf_base_ioaddr_pp,
2077 		ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
2078 		data->ioaddr_pp, data->orig_vatopa,
2079 		data->orig_alength, data->orig_alength));
2080 
2081 	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2082 
2083 	ring->hv_tx_cntl_base_ioaddr_pp =
2084 	    (uint64_t)control->orig_ioaddr_pp;
2085 	ring->hv_tx_cntl_ioaddr_size =
2086 	    (uint64_t)control->orig_alength;
2087 
2088 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2089 		"hv cntl base io $%p orig ioaddr_pp ($%p) "
2090 		"orig vatopa ($%p) size 0x%llx (%d 0x%x)",
2091 		ring->hv_tx_cntl_base_ioaddr_pp,
2092 		control->orig_ioaddr_pp, control->orig_vatopa,
2093 		ring->hv_tx_cntl_ioaddr_size,
2094 		control->orig_alength, control->orig_alength));
2095 }
2096 #endif
2097 
2098 static nxge_status_t
2099 nxge_map_txdma(p_nxge_t nxgep, int channel)
2100 {
2101 	nxge_dma_common_t	**pData;
2102 	nxge_dma_common_t	**pControl;
2103 	tx_ring_t 		**pRing, *ring;
2104 	tx_mbox_t		**mailbox;
2105 	uint32_t		num_chunks;
2106 
2107 	nxge_status_t		status = NXGE_OK;
2108 
2109 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2110 
2111 	if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2112 		if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2113 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2114 			    "<== nxge_map_txdma: buf not allocated"));
2115 			return (NXGE_ERROR);
2116 		}
2117 	}
2118 
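	/* Allocate the transmit buffers for this channel, if necessary. */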
2119 	if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2120 		return (NXGE_ERROR);
2121 
2122 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2123 	pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2124 	pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2125 	pRing = &nxgep->tx_rings->rings[channel];
2126 	mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2127 
2128 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2129 		"tx_rings $%p tx_desc_rings $%p",
2130 		nxgep->tx_rings, nxgep->tx_rings->rings));
2131 
2132 	/*
2133 	 * Map descriptors from the buffer pools for <channel>.
2134 	 */
2135 
2136 	/*
2137 	 * Set up and prepare buffer blocks, descriptors
2138 	 * and mailbox.
2139 	 */
2140 	status = nxge_map_txdma_channel(nxgep, channel,
2141 	    pData, pRing, num_chunks, pControl, mailbox);
2142 	if (status != NXGE_OK) {
2143 		NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2144 			"==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
2145 			"returned 0x%x",
2146 			channel, status));
2147 		return (status);
2148 	}
2149 
2150 	ring = *pRing;
2151 
2152 	ring->index = (uint16_t)channel;
2153 	ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2154 
2155 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2156 	if (isLDOMguest(nxgep)) {
2157 		(void) nxge_tdc_lp_conf(nxgep, channel);
2158 	} else {
2159 		nxge_tdc_hvio_setup(nxgep, channel);
2160 	}
2161 #endif
2162 
2163 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2164 	    "(status 0x%x channel %d)", status, channel));
2165 
2166 	return (status);
2167 }
2168 
2169 static nxge_status_t
2170 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2171 	p_nxge_dma_common_t *dma_buf_p,
2172 	p_tx_ring_t *tx_desc_p,
2173 	uint32_t num_chunks,
2174 	p_nxge_dma_common_t *dma_cntl_p,
2175 	p_tx_mbox_t *tx_mbox_p)
2176 {
2177 	int	status = NXGE_OK;
2178 
2179 	/*
2180 	 * Set up and prepare buffer blocks, descriptors
2181 	 * and mailbox.
2182 	 */
2183 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2184 		"==> nxge_map_txdma_channel (channel %d)", channel));
2185 	/*
2186 	 * Transmit buffer blocks
2187 	 */
2188 	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2189 			dma_buf_p, tx_desc_p, num_chunks);
2190 	if (status != NXGE_OK) {
2191 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2192 			"==> nxge_map_txdma_channel (channel %d): "
2193 			"map buffer failed 0x%x", channel, status));
2194 		goto nxge_map_txdma_channel_exit;
2195 	}
2196 
2197 	/*
2198 	 * Transmit block ring, and mailbox.
2199 	 */
2200 	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2201 					tx_mbox_p);
2202 
2203 	goto nxge_map_txdma_channel_exit;
2204 
2205 nxge_map_txdma_channel_fail1:
2206 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2207 		"==> nxge_map_txdma_channel: unmap buf"
2208 		"(status 0x%x channel %d)",
2209 		status, channel));
2210 	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2211 
2212 nxge_map_txdma_channel_exit:
2213 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2214 		"<== nxge_map_txdma_channel: "
2215 		"(status 0x%x channel %d)",
2216 		status, channel));
2217 
2218 	return (status);
2219 }
2220 
2221 /*ARGSUSED*/
2222 static void
2223 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
2224 {
2225 	tx_ring_t *ring;
2226 	tx_mbox_t *mailbox;
2227 
2228 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2229 		"==> nxge_unmap_txdma_channel (channel %d)", channel));
2230 	/*
2231 	 * unmap tx block ring, and mailbox.
2232 	 */
2233 	ring = nxgep->tx_rings->rings[channel];
2234 	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2235 
2236 	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
2237 
2238 	/* unmap buffer blocks */
2239 	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2240 
2241 	nxge_free_txb(nxgep, channel);
2242 
2243 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
2244 }
2245 
2246 /*
2247  * nxge_map_txdma_channel_cfg_ring
2248  *
2249  *	Map a TDC into our kernel space.
2250  *	This function allocates all of the per-channel data structures.
2251  *
2252  * Arguments:
2253  * 	nxgep
2254  * 	dma_channel	The channel to map.
2255  *	dma_cntl_p
2256  *	tx_ring_p	dma_channel's transmit ring
2257  *	tx_mbox_p	dma_channel's mailbox
2258  *
2259  * Notes:
2260  *
2261  * NPI/NXGE function calls:
2262  *	nxge_setup_dma_common()
2263  *
2264  * Registers accessed:
2265  *	none.
2266  *
2267  * Context:
2268  *	Any domain
2269  */
2270 /*ARGSUSED*/
2271 static void
2272 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
2273 	p_nxge_dma_common_t *dma_cntl_p,
2274 	p_tx_ring_t tx_ring_p,
2275 	p_tx_mbox_t *tx_mbox_p)
2276 {
2277 	p_tx_mbox_t 		mboxp;
2278 	p_nxge_dma_common_t 	cntl_dmap;
2279 	p_nxge_dma_common_t 	dmap;
2280 	p_tx_rng_cfig_t		tx_ring_cfig_p;
2281 	p_tx_ring_kick_t	tx_ring_kick_p;
2282 	p_tx_cs_t		tx_cs_p;
2283 	p_tx_dma_ent_msk_t	tx_evmask_p;
2284 	p_txdma_mbh_t		mboxh_p;
2285 	p_txdma_mbl_t		mboxl_p;
2286 	uint64_t		tx_desc_len;
2287 
2288 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2289 		"==> nxge_map_txdma_channel_cfg_ring"));
2290 
2291 	cntl_dmap = *dma_cntl_p;
2292 
2293 	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
2294 	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
2295 			sizeof (tx_desc_t));
2296 	/*
2297 	 * Zero out transmit ring descriptors.
2298 	 */
2299 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2300 	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
2301 	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
2302 	tx_cs_p = &(tx_ring_p->tx_cs);
2303 	tx_evmask_p = &(tx_ring_p->tx_evmask);
2304 	tx_ring_cfig_p->value = 0;
2305 	tx_ring_kick_p->value = 0;
2306 	tx_cs_p->value = 0;
2307 	tx_evmask_p->value = 0;
2308 
2309 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2310 		"==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
2311 		dma_channel,
2312 		dmap->dma_cookie.dmac_laddress));
2313 
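	/*
	 * Build the ring configuration value: the descriptor ring's DMA
	 * base address plus its length.  The length field is the ring
	 * size divided by 8, i.e. counted in groups of 8 descriptors.
	 */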
2314 	tx_ring_cfig_p->value = 0;
2315 	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
2316 	tx_ring_cfig_p->value =
2317 		(dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
2318 		(tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
2319 
2320 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2321 		"==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
2322 		dma_channel,
2323 		tx_ring_cfig_p->value));
2324 
2325 	tx_cs_p->bits.ldw.rst = 1;
2326 
2327 	/* Map in mailbox */
2328 	mboxp = (p_tx_mbox_t)
2329 		KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
2330 	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
2331 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
2332 	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
2333 	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
2334 	mboxh_p->value = mboxl_p->value = 0;
2335 
2336 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2337 		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2338 		dmap->dma_cookie.dmac_laddress));
2339 
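	/*
	 * Split the 64-bit mailbox DMA address into the high and low
	 * fields expected by the mailbox registers.
	 */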
2340 	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
2341 				TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
2342 
2343 	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
2344 				TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
2345 
2346 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2347 		"==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2348 		dmap->dma_cookie.dmac_laddress));
2349 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2350 		"==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
2351 		"mbox $%p",
2352 		mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
2353 	tx_ring_p->page_valid.value = 0;
2354 	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
2355 	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
2356 	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
2357 	tx_ring_p->page_hdl.value = 0;
2358 
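	/* Mark both logical pages valid for this channel. */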
2359 	tx_ring_p->page_valid.bits.ldw.page0 = 1;
2360 	tx_ring_p->page_valid.bits.ldw.page1 = 1;
2361 
2362 	tx_ring_p->max_burst.value = 0;
2363 	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
2364 
2365 	*tx_mbox_p = mboxp;
2366 
2367 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2368 				"<== nxge_map_txdma_channel_cfg_ring"));
2369 }
2370 
2371 /*ARGSUSED*/
2372 static void
2373 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
2374 	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2375 {
2376 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2377 		"==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
2378 		tx_ring_p->tdc));
2379 
2380 	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
2381 
2382 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2383 		"<== nxge_unmap_txdma_channel_cfg_ring"));
2384 }
2385 
2386 /*
2387  * nxge_map_txdma_channel_buf_ring
2388  *
2389  *	Allocate a TDC's transmit ring and map its buffer chunks into it.
2390  * Arguments:
2391  * 	nxgep
2392  * 	channel		The channel to map.
2393  *	dma_buf_p
2394  *	tx_desc_p	channel's descriptor ring
2395  *	num_chunks
2396  *
2397  * Notes:
2398  *
2399  * NPI/NXGE function calls:
2400  *	nxge_setup_dma_common()
2401  *
2402  * Registers accessed:
2403  *	none.
2404  *
2405  * Context:
2406  *	Any domain
2407  */
2408 static nxge_status_t
2409 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2410 	p_nxge_dma_common_t *dma_buf_p,
2411 	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2412 {
2413 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
2414 	p_nxge_dma_common_t 	dmap;
2415 	nxge_os_dma_handle_t	tx_buf_dma_handle;
2416 	p_tx_ring_t 		tx_ring_p;
2417 	p_tx_msg_t 		tx_msg_ring;
2418 	nxge_status_t		status = NXGE_OK;
2419 	int			ddi_status = DDI_SUCCESS;
2420 	int			i, j, index;
2421 	uint32_t		size, bsize;
2422 	uint32_t 		nblocks, nmsgs;
2423 
2424 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2425 		"==> nxge_map_txdma_channel_buf_ring"));
2426 
2427 	dma_bufp = tmp_bufp = *dma_buf_p;
2428 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2429 		" nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2430 		"chunks bufp $%p",
2431 		channel, num_chunks, dma_bufp));
2432 
2433 	nmsgs = 0;
2434 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2435 		nmsgs += tmp_bufp->nblocks;
2436 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2437 			"==> nxge_map_txdma_channel_buf_ring: channel %d "
2438 			"bufp $%p nblocks %d nmsgs %d",
2439 			channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2440 	}
2441 	if (!nmsgs) {
2442 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2443 			"<== nxge_map_txdma_channel_buf_ring: channel %d "
2444 			"no msg blocks",
2445 			channel));
2446 		status = NXGE_ERROR;
2447 		goto nxge_map_txdma_channel_buf_ring_exit;
2448 	}
2449 
2450 	tx_ring_p = (p_tx_ring_t)
2451 		KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2452 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2453 		(void *)nxgep->interrupt_cookie);
2454 
2455 	tx_ring_p->nxgep = nxgep;
2456 	tx_ring_p->serial = nxge_serialize_create(nmsgs,
2457 				nxge_serial_tx, tx_ring_p);
2458 	/*
2459 	 * Allocate transmit message rings and handles for packets
2460 	 * not to be copied to premapped buffers.
2461 	 */
2462 	size = nmsgs * sizeof (tx_msg_t);
2463 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2464 	for (i = 0; i < nmsgs; i++) {
2465 		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2466 				DDI_DMA_DONTWAIT, 0,
2467 				&tx_msg_ring[i].dma_handle);
2468 		if (ddi_status != DDI_SUCCESS) {
2469 			status |= NXGE_DDI_FAILED;
2470 			break;
2471 		}
2472 	}
2473 	if (i < nmsgs) {
2474 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2475 		    "Failed to allocate DMA handles."));
2476 		goto nxge_map_txdma_channel_buf_ring_fail1;
2477 	}
2478 
2479 	tx_ring_p->tdc = channel;
2480 	tx_ring_p->tx_msg_ring = tx_msg_ring;
2481 	tx_ring_p->tx_ring_size = nmsgs;
2482 	tx_ring_p->num_chunks = num_chunks;
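	/*
	 * If no transmit interrupt threshold was configured, default it
	 * to one quarter of the ring size.
	 */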
2483 	if (!nxge_tx_intr_thres) {
2484 		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2485 	}
2486 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2487 	tx_ring_p->rd_index = 0;
2488 	tx_ring_p->wr_index = 0;
2489 	tx_ring_p->ring_head.value = 0;
2490 	tx_ring_p->ring_kick_tail.value = 0;
2491 	tx_ring_p->descs_pending = 0;
2492 
2493 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2494 		"==> nxge_map_txdma_channel_buf_ring: channel %d "
2495 		"actual tx desc max %d nmsgs %d "
2496 		"(config nxge_tx_ring_size %d)",
2497 		channel, tx_ring_p->tx_ring_size, nmsgs,
2498 		nxge_tx_ring_size));
2499 
2500 	/*
2501 	 * Map in buffers from the buffer pool.
2502 	 */
2503 	index = 0;
2504 	bsize = dma_bufp->block_size;
2505 
2506 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2507 		"dma_bufp $%p tx_rng_p $%p "
2508 		"tx_msg_rng_p $%p bsize %d",
2509 		dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2510 
2511 	tx_buf_dma_handle = dma_bufp->dma_handle;
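	/*
	 * Walk each DMA chunk and carve it into block-sized pieces,
	 * one per transmit message (tx_msg) entry.
	 */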
2512 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
2513 		bsize = dma_bufp->block_size;
2514 		nblocks = dma_bufp->nblocks;
2515 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2516 			"==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2517 			"size %d dma_bufp $%p",
2518 			i, sizeof (nxge_dma_common_t), dma_bufp));
2519 
2520 		for (j = 0; j < nblocks; j++) {
2521 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2522 			dmap = &tx_msg_ring[index++].buf_dma;
2523 #ifdef TX_MEM_DEBUG
2524 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2525 				"==> nxge_map_txdma_channel_buf_ring: j %d "
2526 				"dmap $%p", j, dmap));
2527 #endif
2528 			nxge_setup_dma_common(dmap, dma_bufp, 1,
2529 				bsize);
2530 		}
2531 	}
2532 
2533 	if (i < num_chunks) {
2534 		status = NXGE_ERROR;
2535 		goto nxge_map_txdma_channel_buf_ring_fail1;
2536 	}
2537 
2538 	*tx_desc_p = tx_ring_p;
2539 
2540 	goto nxge_map_txdma_channel_buf_ring_exit;
2541 
2542 nxge_map_txdma_channel_buf_ring_fail1:
2543 	if (tx_ring_p->serial) {
2544 		nxge_serialize_destroy(tx_ring_p->serial);
2545 		tx_ring_p->serial = NULL;
2546 	}
2547 
2548 	index--;
2549 	for (; index >= 0; index--) {
2550 		if (tx_msg_ring[index].dma_handle != NULL) {
2551 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2552 		}
2553 	}
2554 	MUTEX_DESTROY(&tx_ring_p->lock);
2555 	KMEM_FREE(tx_msg_ring, size);
2556 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2557 
2558 	status = NXGE_ERROR;
2559 
2560 nxge_map_txdma_channel_buf_ring_exit:
2561 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2562 		"<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2563 
2564 	return (status);
2565 }
2566 
2567 /*ARGSUSED*/
2568 static void
2569 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2570 {
2571 	p_tx_msg_t 		tx_msg_ring;
2572 	p_tx_msg_t 		tx_msg_p;
2573 	int			i;
2574 
2575 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2576 		"==> nxge_unmap_txdma_channel_buf_ring"));
2577 	if (tx_ring_p == NULL) {
2578 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2579 			"<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2580 		return;
2581 	}
2582 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2583 		"==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2584 		tx_ring_p->tdc));
2585 
2586 	tx_msg_ring = tx_ring_p->tx_msg_ring;
2587 
2588 	/*
2589 	 * Since the serialization thread, timer thread and
2590 	 * interrupt thread can all call the transmit reclaim,
2591 	 * the unmapping function needs to acquire the lock
2592 	 * to free those buffers which were transmitted
2593 	 * by the hardware already.
2594 	 */
2595 	MUTEX_ENTER(&tx_ring_p->lock);
2596 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
2597 	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2598 	    "channel %d",
2599 	    tx_ring_p->tdc));
2600 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2601 
2602 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2603 		tx_msg_p = &tx_msg_ring[i];
2604 		if (tx_msg_p->tx_message != NULL) {
2605 			freemsg(tx_msg_p->tx_message);
2606 			tx_msg_p->tx_message = NULL;
2607 		}
2608 	}
2609 
2610 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2611 		if (tx_msg_ring[i].dma_handle != NULL) {
2612 			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2613 		}
2614 		tx_msg_ring[i].dma_handle = NULL;
2615 	}
2616 
2617 	MUTEX_EXIT(&tx_ring_p->lock);
2618 
2619 	if (tx_ring_p->serial) {
2620 		nxge_serialize_destroy(tx_ring_p->serial);
2621 		tx_ring_p->serial = NULL;
2622 	}
2623 
2624 	MUTEX_DESTROY(&tx_ring_p->lock);
2625 	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2626 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2627 
2628 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2629 		"<== nxge_unmap_txdma_channel_buf_ring"));
2630 }
2631 
2632 static nxge_status_t
2633 nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
2634 {
2635 	p_tx_rings_t 		tx_rings;
2636 	p_tx_ring_t 		*tx_desc_rings;
2637 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
2638 	p_tx_mbox_t		*tx_mbox_p;
2639 	nxge_status_t		status = NXGE_OK;
2640 
2641 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
2642 
2643 	tx_rings = nxgep->tx_rings;
2644 	if (tx_rings == NULL) {
2645 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2646 			"<== nxge_txdma_hw_start: NULL ring pointer"));
2647 		return (NXGE_ERROR);
2648 	}
2649 	tx_desc_rings = tx_rings->rings;
2650 	if (tx_desc_rings == NULL) {
2651 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2652 			"<== nxge_txdma_hw_start: NULL ring pointers"));
2653 		return (NXGE_ERROR);
2654 	}
2655 
2656 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2657 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2658 
2659 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
2660 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2661 
2662 	status = nxge_txdma_start_channel(nxgep, channel,
2663 	    (p_tx_ring_t)tx_desc_rings[channel],
2664 	    (p_tx_mbox_t)tx_mbox_p[channel]);
2665 	if (status != NXGE_OK) {
2666 		goto nxge_txdma_hw_start_fail1;
2667 	}
2668 
2669 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2670 		"tx_rings $%p rings $%p",
2671 		nxgep->tx_rings, nxgep->tx_rings->rings));
2672 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2673 		"tx_rings $%p tx_desc_rings $%p",
2674 		nxgep->tx_rings, tx_desc_rings));
2675 
2676 	goto nxge_txdma_hw_start_exit;
2677 
2678 nxge_txdma_hw_start_fail1:
2679 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2680 		"==> nxge_txdma_hw_start: disable "
2681 		"(status 0x%x channel %d)", status, channel));
2682 
2683 nxge_txdma_hw_start_exit:
2684 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2685 		"<== nxge_txdma_hw_start: (status 0x%x)", status));
2686 
2687 	return (status);
2688 }
2689 
2690 /*
2691  * nxge_txdma_start_channel
2692  *
2693  *	Start a TDC.
2694  *
2695  * Arguments:
2696  * 	nxgep
2697  * 	channel		The channel to start.
2698  * 	tx_ring_p	channel's transmit descriptor ring.
2699  * 	tx_mbox_p	channel's mailbox.
2700  *
2701  * Notes:
2702  *
2703  * NPI/NXGE function calls:
2704  *	nxge_reset_txdma_channel()
2705  *	nxge_init_txdma_channel_event_mask()
2706  *	nxge_enable_txdma_channel()
2707  *
2708  * Registers accessed:
2709  *	none directly (see functions above).
2710  *
2711  * Context:
2712  *	Any domain
2713  */
2714 static nxge_status_t
2715 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2716     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2717 
2718 {
2719 	nxge_status_t		status = NXGE_OK;
2720 
2721 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2722 		"==> nxge_txdma_start_channel (channel %d)", channel));
2723 	/*
2724 	 * TXDMA/TXC must be in stopped state.
2725 	 */
2726 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
2727 
2728 	/*
2729 	 * Reset TXDMA channel
2730 	 */
2731 	tx_ring_p->tx_cs.value = 0;
2732 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
2733 	status = nxge_reset_txdma_channel(nxgep, channel,
2734 			tx_ring_p->tx_cs.value);
2735 	if (status != NXGE_OK) {
2736 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2737 			"==> nxge_txdma_start_channel (channel %d)"
2738 			" reset channel failed 0x%x", channel, status));
2739 		goto nxge_txdma_start_channel_exit;
2740 	}
2741 
2742 	/*
2743 	 * Initialize the TXDMA channel specific FZC control
2744 	 * configurations. These FZC registers are pertaining
2745 	 * to each TX channel (i.e. logical pages).
2746 	 */
2747 	if (!isLDOMguest(nxgep)) {
2748 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
2749 		    tx_ring_p, tx_mbox_p);
2750 		if (status != NXGE_OK) {
2751 			goto nxge_txdma_start_channel_exit;
2752 		}
2753 	}
2754 
2755 	/*
2756 	 * Initialize the event masks.
2757 	 */
2758 	tx_ring_p->tx_evmask.value = 0;
2759 	status = nxge_init_txdma_channel_event_mask(nxgep,
2760 	    channel, &tx_ring_p->tx_evmask);
2761 	if (status != NXGE_OK) {
2762 		goto nxge_txdma_start_channel_exit;
2763 	}
2764 
2765 	/*
2766 	 * Load TXDMA descriptors, buffers, mailbox,
2767 	 * initialise the DMA channels and
2768 	 * enable each DMA channel.
2769 	 */
2770 	status = nxge_enable_txdma_channel(nxgep, channel,
2771 			tx_ring_p, tx_mbox_p);
2772 	if (status != NXGE_OK) {
2773 		goto nxge_txdma_start_channel_exit;
2774 	}
2775 
2776 nxge_txdma_start_channel_exit:
2777 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2778 
2779 	return (status);
2780 }
2781 
2782 /*
2783  * nxge_txdma_stop_channel
2784  *
2785  *	Stop a TDC.
2786  *
2787  * Arguments:
2788  * 	nxgep
2789  * 	channel		The channel to stop.
2790  * 	tx_ring_p	channel's transmit descriptor ring.
2791  * 	tx_mbox_p	channel's mailbox.
2792  *
2793  * Notes:
2794  *
2795  * NPI/NXGE function calls:
2796  *	nxge_txdma_stop_inj_err()
2797  *	nxge_reset_txdma_channel()
2798  *	nxge_init_txdma_channel_event_mask()
2799  *	nxge_init_txdma_channel_cntl_stat()
2800  *	nxge_disable_txdma_channel()
2801  *
2802  * Registers accessed:
2803  *	none directly (see functions above).
2804  *
2805  * Context:
2806  *	Any domain
2807  */
2808 /*ARGSUSED*/
2809 static nxge_status_t
2810 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2811 {
2812 	p_tx_ring_t tx_ring_p;
2813 	int status = NXGE_OK;
2814 
2815 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2816 		"==> nxge_txdma_stop_channel: channel %d", channel));
2817 
2818 	/*
2819 	 * Stop (disable) TXDMA and TXC.  If the stop bit is set
2820 	 * and the STOP_N_GO bit is not set, the TXDMA reset state
2821 	 * will not be set when the channel is reset.
2822 	 */
2823 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
2824 
2825 	tx_ring_p = nxgep->tx_rings->rings[channel];
2826 
2827 	/*
2828 	 * Reset TXDMA channel
2829 	 */
2830 	tx_ring_p->tx_cs.value = 0;
2831 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
2832 	status = nxge_reset_txdma_channel(nxgep, channel,
2833 			tx_ring_p->tx_cs.value);
2834 	if (status != NXGE_OK) {
2835 		goto nxge_txdma_stop_channel_exit;
2836 	}
2837 
2838 #ifdef HARDWARE_REQUIRED
2839 	/* Set up the interrupt event masks. */
2840 	tx_ring_p->tx_evmask.value = 0;
2841 	status = nxge_init_txdma_channel_event_mask(nxgep,
2842 			channel, &tx_ring_p->tx_evmask);
2843 	if (status != NXGE_OK) {
2844 		goto nxge_txdma_stop_channel_exit;
2845 	}
2846 
2847 	/* Initialize the DMA control and status register */
2848 	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2849 	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2850 			tx_ring_p->tx_cs.value);
2851 	if (status != NXGE_OK) {
2852 		goto nxge_txdma_stop_channel_exit;
2853 	}
2854 
2855 	tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2856 
2857 	/* Disable channel */
2858 	status = nxge_disable_txdma_channel(nxgep, channel,
2859 	    tx_ring_p, tx_mbox_p);
2860 	if (status != NXGE_OK) {
2861 		goto nxge_txdma_stop_channel_exit;
2862 	}
2863 
2864 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2865 		"==> nxge_txdma_stop_channel: event done"));
2866 
2867 #endif
2868 
2869 nxge_txdma_stop_channel_exit:
2870 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
2871 	return (status);
2872 }
2873 
2874 /*
2875  * nxge_txdma_get_ring
2876  *
2877  *	Get the ring for a TDC.
2878  *
2879  * Arguments:
2880  * 	nxgep
2881  * 	channel
2882  *
2883  * Notes:
2884  *
2885  * NPI/NXGE function calls:
2886  *
2887  * Registers accessed:
2888  *
2889  * Context:
2890  *	Any domain
2891  */
2892 static p_tx_ring_t
2893 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
2894 {
2895 	nxge_grp_set_t *set = &nxgep->tx_set;
2896 	int tdc;
2897 
2898 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
2899 
2900 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2901 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2902 		    "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
2903 		goto return_null;
2904 	}
2905 
2906 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2907 		if ((1 << tdc) & set->owned.map) {
2908 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2909 			if (ring) {
2910 				if (channel == ring->tdc) {
2911 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
2912 					    "<== nxge_txdma_get_ring: "
2913 					    "tdc %d ring $%p", tdc, ring));
2914 					return (ring);
2915 				}
2916 			}
2917 		}
2918 	}
2919 
2920 return_null:
2921 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
2922 		"ring not found"));
2923 
2924 	return (NULL);
2925 }
2926 
2927 /*
2928  * nxge_txdma_get_mbox
2929  *
2930  *	Get the mailbox for a TDC.
2931  *
2932  * Arguments:
2933  * 	nxgep
2934  * 	channel
2935  *
2936  * Notes:
2937  *
2938  * NPI/NXGE function calls:
2939  *
2940  * Registers accessed:
2941  *
2942  * Context:
2943  *	Any domain
2944  */
2945 static p_tx_mbox_t
2946 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
2947 {
2948 	nxge_grp_set_t *set = &nxgep->tx_set;
2949 	int tdc;
2950 
2951 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
2952 
2953 	if (nxgep->tx_mbox_areas_p == 0 ||
2954 	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
2955 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2956 		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
2957 		goto return_null;
2958 	}
2959 
2960 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2961 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2962 		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
2963 		goto return_null;
2964 	}
2965 
2966 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2967 		if ((1 << tdc) & set->owned.map) {
2968 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2969 			if (ring) {
2970 				if (channel == ring->tdc) {
2971 					tx_mbox_t *mailbox = nxgep->
2972 					    tx_mbox_areas_p->
2973 					    txmbox_areas_p[tdc];
2974 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
2975 					    "<== nxge_txdma_get_mbox: tdc %d "
2976 					    "ring $%p", tdc, mailbox));
2977 					return (mailbox);
2978 				}
2979 			}
2980 		}
2981 	}
2982 
2983 return_null:
2984 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
2985 		"mailbox not found"));
2986 
2987 	return (NULL);
2988 }
2989 
2990 /*
2991  * nxge_tx_err_evnts
2992  *
2993  *	Recover a TDC.
2994  *
2995  * Arguments:
2996  * 	nxgep
2997  * 	index	The index to the TDC ring.
2998  * 	ldvp	Used to get the channel number ONLY.
2999  * 	cs	A copy of the bits from TX_CS.
3000  *
3001  * Notes:
3002  *	Calling tree:
3003  *	 nxge_tx_intr()
3004  *
3005  * NPI/NXGE function calls:
3006  *	npi_txdma_ring_error_get()
3007  *	npi_txdma_inj_par_error_get()
3008  *	nxge_txdma_fatal_err_recover()
3009  *
3010  * Registers accessed:
3011  *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
3012  *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3013  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3014  *
3015  * Context:
3016  *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3017  */
3018 /*ARGSUSED*/
3019 static nxge_status_t
3020 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3021 {
3022 	npi_handle_t		handle;
3023 	npi_status_t		rs;
3024 	uint8_t			channel;
3025 	p_tx_ring_t 		*tx_rings;
3026 	p_tx_ring_t 		tx_ring_p;
3027 	p_nxge_tx_ring_stats_t	tdc_stats;
3028 	boolean_t		txchan_fatal = B_FALSE;
3029 	nxge_status_t		status = NXGE_OK;
3030 	tdmc_inj_par_err_t	par_err;
3031 	uint32_t		value;
3032 
3033 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3034 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3035 	channel = ldvp->channel;
3036 
3037 	tx_rings = nxgep->tx_rings->rings;
3038 	tx_ring_p = tx_rings[index];
3039 	tdc_stats = tx_ring_p->tdc_stats;
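	/* For ring errors, capture the error log registers first. */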
3040 	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3041 		(cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3042 		(cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3043 		if ((rs = npi_txdma_ring_error_get(handle, channel,
3044 					&tdc_stats->errlog)) != NPI_SUCCESS)
3045 			return (NXGE_ERROR | rs);
3046 	}
3047 
3048 	if (cs.bits.ldw.mbox_err) {
3049 		tdc_stats->mbox_err++;
3050 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3051 					NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3052 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3053 			"==> nxge_tx_err_evnts(channel %d): "
3054 			"fatal error: mailbox", channel));
3055 		txchan_fatal = B_TRUE;
3056 	}
3057 	if (cs.bits.ldw.pkt_size_err) {
3058 		tdc_stats->pkt_size_err++;
3059 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3060 					NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3061 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3062 			"==> nxge_tx_err_evnts(channel %d): "
3063 			"fatal error: pkt_size_err", channel));
3064 		txchan_fatal = B_TRUE;
3065 	}
3066 	if (cs.bits.ldw.tx_ring_oflow) {
3067 		tdc_stats->tx_ring_oflow++;
3068 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3069 					NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3070 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3071 			"==> nxge_tx_err_evnts(channel %d): "
3072 			"fatal error: tx_ring_oflow", channel));
3073 		txchan_fatal = B_TRUE;
3074 	}
3075 	if (cs.bits.ldw.pref_buf_par_err) {
3076 		tdc_stats->pre_buf_par_err++;
3077 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3078 					NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3079 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3080 			"==> nxge_tx_err_evnts(channel %d): "
3081 			"fatal error: pre_buf_par_err", channel));
3082 		/* Clear error injection source for parity error */
3083 		(void) npi_txdma_inj_par_error_get(handle, &value);
3084 		par_err.value = value;
3085 		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3086 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3087 		txchan_fatal = B_TRUE;
3088 	}
3089 	if (cs.bits.ldw.nack_pref) {
3090 		tdc_stats->nack_pref++;
3091 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3092 					NXGE_FM_EREPORT_TDMC_NACK_PREF);
3093 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3094 			"==> nxge_tx_err_evnts(channel %d): "
3095 			"fatal error: nack_pref", channel));
3096 		txchan_fatal = B_TRUE;
3097 	}
3098 	if (cs.bits.ldw.nack_pkt_rd) {
3099 		tdc_stats->nack_pkt_rd++;
3100 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3101 					NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3102 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3103 			"==> nxge_tx_err_evnts(channel %d): "
3104 			"fatal error: nack_pkt_rd", channel));
3105 		txchan_fatal = B_TRUE;
3106 	}
3107 	if (cs.bits.ldw.conf_part_err) {
3108 		tdc_stats->conf_part_err++;
3109 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3110 					NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3111 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3112 			"==> nxge_tx_err_evnts(channel %d): "
3113 			"fatal error: config_partition_err", channel));
3114 		txchan_fatal = B_TRUE;
3115 	}
3116 	if (cs.bits.ldw.pkt_prt_err) {
3117 		tdc_stats->pkt_part_err++;
3118 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3119 					NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3120 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3121 			"==> nxge_tx_err_evnts(channel %d): "
3122 			"fatal error: pkt_prt_err", channel));
3123 		txchan_fatal = B_TRUE;
3124 	}
3125 
3126 	/* Clear error injection source in case this is an injected error */
3127 	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3128 
3129 	if (txchan_fatal) {
3130 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3131 			" nxge_tx_err_evnts: "
3132 			" fatal error on channel %d cs 0x%llx\n",
3133 			channel, cs.value));
3134 		status = nxge_txdma_fatal_err_recover(nxgep, channel,
3135 								tx_ring_p);
3136 		if (status == NXGE_OK) {
3137 			FM_SERVICE_RESTORED(nxgep);
3138 		}
3139 	}
3140 
3141 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3142 
3143 	return (status);
3144 }
3145 
3146 static nxge_status_t
3147 nxge_txdma_fatal_err_recover(
3148 	p_nxge_t nxgep,
3149 	uint16_t channel,
3150 	p_tx_ring_t tx_ring_p)
3151 {
3152 	npi_handle_t	handle;
3153 	npi_status_t	rs = NPI_SUCCESS;
3154 	p_tx_mbox_t	tx_mbox_p;
3155 	nxge_status_t	status = NXGE_OK;
3156 
3157 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3158 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3159 			"Recovering from TxDMAChannel#%d error...", channel));
3160 
3161 	/*
3162 	 * Stop the DMA channel and wait for the stop-done bit.
3163 	 * If the stop-done bit is not set, then report
3164 	 * an error.
3165 	 */
3166 
3167 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3168 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3169 	MUTEX_ENTER(&tx_ring_p->lock);
3170 	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3171 	if (rs != NPI_SUCCESS) {
3172 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3173 			"==> nxge_txdma_fatal_err_recover (channel %d): "
3174 			"stop failed ", channel));
3175 		goto fail;
3176 	}
3177 
3178 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3179 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3180 
3181 	/*
3182 	 * Reset TXDMA channel
3183 	 */
3184 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3185 	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3186 						NPI_SUCCESS) {
3187 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3188 			"==> nxge_txdma_fatal_err_recover (channel %d)"
3189 			" reset channel failed 0x%x", channel, rs));
3190 		goto fail;
3191 	}
3192 
3193 	/*
3194 	 * Reset the tail (kick) register to 0.
3195 	 * (Hardware will not reset it.  A Tx overflow fatal
3196 	 * error results if the tail is not set to 0 after reset!)
3197 	 */
3198 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3199 
3200 	/* Restart TXDMA channel */
3201 
3202 	tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3203 	if (!isLDOMguest(nxgep)) {
3204 
3205 		/* XXX This is a problem in HIO! */
3206 		/*
3207 		 * Initialize the TXDMA channel specific FZC control
3208 		 * configurations. These FZC registers are pertaining
3209 		 * to each TX channel (i.e. logical pages).
3210 		 */
3211 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3212 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
3213 		    tx_ring_p, tx_mbox_p);
3214 		if (status != NXGE_OK)
3215 			goto fail;
3216 	}
3217 
3218 	/*
3219 	 * Initialize the event masks.
3220 	 */
3221 	tx_ring_p->tx_evmask.value = 0;
3222 	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3223 							&tx_ring_p->tx_evmask);
3224 	if (status != NXGE_OK)
3225 		goto fail;
3226 
3227 	tx_ring_p->wr_index_wrap = B_FALSE;
3228 	tx_ring_p->wr_index = 0;
3229 	tx_ring_p->rd_index = 0;
3230 
3231 	/*
3232 	 * Load TXDMA descriptors, buffers, mailbox,
3233 	 * initialise the DMA channels and
3234 	 * enable each DMA channel.
3235 	 */
3236 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3237 	status = nxge_enable_txdma_channel(nxgep, channel,
3238 						tx_ring_p, tx_mbox_p);
3239 	MUTEX_EXIT(&tx_ring_p->lock);
3240 	if (status != NXGE_OK)
3241 		goto fail;
3242 
3243 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3244 			"Recovery Successful, TxDMAChannel#%d Restored",
3245 			channel));
3246 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3247 
3248 	return (NXGE_OK);
3249 
3250 fail:
3251 	MUTEX_EXIT(&tx_ring_p->lock);
3252 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
3253 		"nxge_txdma_fatal_err_recover (channel %d): "
3254 		"failed to recover this txdma channel", channel));
3255 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3256 
3257 	return (status);
3258 }
3259 
3260 /*
3261  * nxge_tx_port_fatal_err_recover
3262  *
3263  *	Attempt to recover from a fatal port error.
3264  *
3265  * Arguments:
3266  * 	nxgep
3267  *
3268  * Notes:
3269  *	How would a guest do this?
3270  *
3271  * NPI/NXGE function calls:
3272  *
3273  * Registers accessed:
3274  *
3275  * Context:
3276  *	Service domain
3277  */
3278 nxge_status_t
3279 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3280 {
3281 	nxge_grp_set_t *set = &nxgep->tx_set;
3282 	nxge_channel_t tdc;
3283 
3284 	tx_ring_t	*ring;
3285 	tx_mbox_t	*mailbox;
3286 
3287 	npi_handle_t	handle;
3288 	nxge_status_t	status;
3289 	npi_status_t	rs;
3290 
3291 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3292 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3293 	    "Recovering from TxPort error..."));
3294 
3295 	if (isLDOMguest(nxgep)) {
3296 		return (NXGE_OK);
3297 	}
3298 
3299 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3300 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3301 		    "<== nxge_tx_port_fatal_err_recover: not initialized"));
3302 		return (NXGE_ERROR);
3303 	}
3304 
3305 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3306 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3307 		    "<== nxge_tx_port_fatal_err_recover: "
3308 		    "NULL ring pointer(s)"));
3309 		return (NXGE_ERROR);
3310 	}
3311 
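	/*
	 * Lock all of the TDCs we own so that no other thread can
	 * transmit or reclaim while the port is being recovered.
	 */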
3312 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3313 		if ((1 << tdc) & set->owned.map) {
3314 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3315 			if (ring)
3316 				MUTEX_ENTER(&ring->lock);
3317 		}
3318 	}
3319 
3320 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3321 
3322 	/*
3323 	 * Stop all the TDCs owned by us.
3324 	 * (The shared TDCs will have been stopped by their owners.)
3325 	 */
3326 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3327 		if ((1 << tdc) & set->owned.map) {
3328 			ring = nxgep->tx_rings->rings[tdc];
3329 			if (ring) {
3330 				rs = npi_txdma_channel_control
3331 				    (handle, TXDMA_STOP, tdc);
3332 				if (rs != NPI_SUCCESS) {
3333 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3334 					    "nxge_tx_port_fatal_err_recover "
3335 					    "(channel %d): stop failed ", tdc));
3336 					goto fail;
3337 				}
3338 			}
3339 		}
3340 	}
3341 
3342 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3343 
3344 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3345 		if ((1 << tdc) & set->owned.map) {
3346 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3347 			if (ring)
3348 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
3349 		}
3350 	}
3351 
3352 	/*
3353 	 * Reset all the TDCs.
3354 	 */
3355 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3356 
3357 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3358 		if ((1 << tdc) & set->owned.map) {
3359 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3360 			if (ring) {
3361 				if ((rs = npi_txdma_channel_control
3362 					(handle, TXDMA_RESET, tdc))
3363 				    != NPI_SUCCESS) {
3364 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3365 					    "nxge_tx_port_fatal_err_recover "
3366 					    "(channel %d) reset channel "
3367 					    "failed 0x%x", tdc, rs));
3368 					goto fail;
3369 				}
3370 			}
3371 			/*
3372 			 * Reset the tail (kick) register to 0.
3373 			 * (Hardware will not reset it.  A Tx overflow fatal
3374 			 * error results if the tail is not set to 0 after reset!)
3375 			 */
3376 			TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3377 		}
3378 	}
3379 
3380 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3381 
3382 	/* Restart all the TDCs */
3383 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3384 		if ((1 << tdc) & set->owned.map) {
3385 			ring = nxgep->tx_rings->rings[tdc];
3386 			if (ring) {
3387 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3388 				status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3389 				    ring, mailbox);
3390 				if (status != NXGE_OK)
3391 					goto fail;
3392 				/*
3393 				 * Initialize the event masks.
3394 				 */
3395 				ring->tx_evmask.value = 0;
3396 				status = nxge_init_txdma_channel_event_mask
3397 				    (nxgep, tdc, &ring->tx_evmask);
3398 				if (status != NXGE_OK)
3399 					goto fail;
3400 
3401 				ring->wr_index_wrap = B_FALSE;
3402 				ring->wr_index = 0;
3403 				ring->rd_index = 0;
3405 			}
3406 		}
3407 	}
3408 
3409 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3410 
3411 	/* Re-enable all the TDCs */
3412 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3413 		if ((1 << tdc) & set->owned.map) {
3414 			ring = nxgep->tx_rings->rings[tdc];
3415 			if (ring) {
3416 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3417 				status = nxge_enable_txdma_channel(nxgep, tdc,
3418 				    ring, mailbox);
3419 				if (status != NXGE_OK)
3420 					goto fail;
3421 			}
3422 		}
3423 	}
3424 
3425 	/*
3426 	 * Unlock all the TDCs.
3427 	 */
3428 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3429 		if ((1 << tdc) & set->owned.map) {
3430 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3431 			if (ring)
3432 				MUTEX_EXIT(&ring->lock);
3433 		}
3434 	}
3435 
3436 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
3437 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3438 
3439 	return (NXGE_OK);
3440 
3441 fail:
3442 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3443 		if ((1 << tdc) & set->owned.map) {
3444 			ring = nxgep->tx_rings->rings[tdc];
3445 			if (ring)
3446 				MUTEX_EXIT(&ring->lock);
3447 		}
3448 	}
3449 
3450 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3451 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3452 
3453 	return (status);
3454 }
3455 
3456 /*
3457  * nxge_txdma_inject_err
3458  *
3459  *	Inject an error into a TDC.
3460  *
3461  * Arguments:
3462  * 	nxgep
3463  * 	err_id	The error to inject.
3464  * 	chan	The channel to inject into.
3465  *
3466  * Notes:
3467  *	This is called from nxge_main.c:nxge_err_inject()
3468  *	Has this ioctl ever been used?
3469  *
3470  * NPI/NXGE function calls:
3471  *	npi_txdma_inj_par_error_get()
3472  *	npi_txdma_inj_par_error_set()
3473  *
3474  * Registers accessed:
3475  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3476  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
3478  *
3479  * Context:
3480  *	Service domain
3481  */
3482 void
3483 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3484 {
3485 	tdmc_intr_dbg_t		tdi;
3486 	tdmc_inj_par_err_t	par_err;
3487 	uint32_t		value;
3488 	npi_handle_t		handle;
3489 
3490 	switch (err_id) {
3491 
3492 	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3493 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
3494 		/* Clear error injection source for parity error */
3495 		(void) npi_txdma_inj_par_error_get(handle, &value);
3496 		par_err.value = value;
3497 		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3498 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3499 
3500 		/* Now set the parity-error injection bit for this channel. */
3501 		(void) npi_txdma_inj_par_error_get(handle, &value);
3502 		par_err.value = value;
3503 		par_err.bits.ldw.inject_parity_error |= (1 << chan);
3504 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3505 				(unsigned long long)par_err.value);
3506 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3507 		break;
3508 
3509 	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3510 	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3511 	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3512 	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3513 	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3514 	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3515 	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
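		/*
		 * The remaining error types are injected by setting the
		 * corresponding bit in the TDMC interrupt debug register.
		 */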
3516 		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3517 			chan, &tdi.value);
3518 		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3519 			tdi.bits.ldw.pref_buf_par_err = 1;
3520 		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3521 			tdi.bits.ldw.mbox_err = 1;
3522 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3523 			tdi.bits.ldw.nack_pref = 1;
3524 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3525 			tdi.bits.ldw.nack_pkt_rd = 1;
3526 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3527 			tdi.bits.ldw.pkt_size_err = 1;
3528 		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3529 			tdi.bits.ldw.tx_ring_oflow = 1;
3530 		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3531 			tdi.bits.ldw.conf_part_err = 1;
3532 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3533 			tdi.bits.ldw.pkt_part_err = 1;
3534 #if defined(__i386)
3535 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3536 				tdi.value);
3537 #else
3538 		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3539 				tdi.value);
3540 #endif
3541 		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3542 			chan, tdi.value);
3543 
3544 		break;
3545 	}
3546 }
3547