xref: /titanic_44/usr/src/uts/common/io/nxge/nxge_txdma.c (revision bda1f129971950880940a17bab0bf096d5744b0c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_txdma.h>
29 #include <sys/nxge/nxge_hio.h>
30 #include <npi_tx_rd64.h>
31 #include <npi_tx_wr64.h>
32 #include <sys/llc1.h>
33 
34 uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
35 uint32_t	nxge_tx_minfree = 64;
36 uint32_t	nxge_tx_intr_thres = 0;
37 uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
38 uint32_t	nxge_tx_tiny_pack = 1;
39 uint32_t	nxge_tx_use_bcopy = 1;
40 
41 extern uint32_t 	nxge_tx_ring_size;
42 extern uint32_t 	nxge_bcopy_thresh;
43 extern uint32_t 	nxge_dvma_thresh;
44 extern uint32_t 	nxge_dma_stream_thresh;
45 extern dma_method_t 	nxge_force_dma;
46 extern uint32_t		nxge_cksum_offload;
47 
48 /* Device register access attributes for PIO.  */
49 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
50 /* Device descriptor access attributes for DMA.  */
51 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
52 /* Device buffer access attributes for DMA.  */
53 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
54 extern ddi_dma_attr_t nxge_desc_dma_attr;
55 extern ddi_dma_attr_t nxge_tx_dma_attr;
56 
57 extern void nxge_tx_ring_task(void *arg);
58 
59 static nxge_status_t nxge_map_txdma(p_nxge_t, int);
60 
61 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
62 
63 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
64 	p_nxge_dma_common_t *, p_tx_ring_t *,
65 	uint32_t, p_nxge_dma_common_t *,
66 	p_tx_mbox_t *);
67 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
68 
69 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
70 	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
71 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
72 
73 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
74 	p_nxge_dma_common_t *, p_tx_ring_t,
75 	p_tx_mbox_t *);
76 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
77 	p_tx_ring_t, p_tx_mbox_t);
78 
79 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
80     p_tx_ring_t, p_tx_mbox_t);
81 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
82 
83 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
84 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
85 	p_nxge_ldv_t, tx_cs_t);
86 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
87 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
88 	uint16_t, p_tx_ring_t);
89 
90 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
91     p_tx_ring_t ring_p, uint16_t channel);
92 
93 nxge_status_t
94 nxge_init_txdma_channels(p_nxge_t nxgep)
95 {
96 	nxge_grp_set_t	*set = &nxgep->tx_set;
97 	int		i, tdc, count;
98 	nxge_grp_t	*group;
99 	dc_map_t	map;
100 	int		dev_gindex;
101 
102 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
103 
104 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
105 		if ((1 << i) & set->lg.map) {
106 			group = set->group[i];
107 			dev_gindex =
108 			    nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
109 			map = nxgep->pt_config.tdc_grps[dev_gindex].map;
110 			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
111 				if ((1 << tdc) & map) {
112 					if ((nxge_grp_dc_add(nxgep,
113 					    group, VP_BOUND_TX, tdc)))
114 						goto init_txdma_channels_exit;
115 				}
116 			}
117 		}
118 		if (++count == set->lg.count)
119 			break;
120 	}
121 
122 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
123 	return (NXGE_OK);
124 
125 init_txdma_channels_exit:
126 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
127 		if ((1 << i) & set->lg.map) {
128 			group = set->group[i];
129 			dev_gindex =
130 			    nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
131 			map = nxgep->pt_config.tdc_grps[dev_gindex].map;
132 			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
133 				if ((1 << tdc) & map) {
134 					nxge_grp_dc_remove(nxgep,
135 					    VP_BOUND_TX, tdc);
136 				}
137 			}
138 		}
139 		if (++count == set->lg.count)
140 			break;
141 	}
142 
143 	return (NXGE_ERROR);
144 
145 }
146 
147 nxge_status_t
148 nxge_init_txdma_channel(
149 	p_nxge_t nxge,
150 	int channel)
151 {
152 	nxge_status_t status;
153 
154 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
155 
156 	status = nxge_map_txdma(nxge, channel);
157 	if (status != NXGE_OK) {
158 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
159 		    "<== nxge_init_txdma_channel: status 0x%x", status));
160 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
161 		return (status);
162 	}
163 
164 	status = nxge_txdma_hw_start(nxge, channel);
165 	if (status != NXGE_OK) {
166 		(void) nxge_unmap_txdma_channel(nxge, channel);
167 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
168 		return (status);
169 	}
170 
171 	if (!nxge->statsp->tdc_ksp[channel])
172 		nxge_setup_tdc_kstats(nxge, channel);
173 
174 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
175 
176 	return (status);
177 }
178 
179 void
180 nxge_uninit_txdma_channels(p_nxge_t nxgep)
181 {
182 	nxge_grp_set_t *set = &nxgep->tx_set;
183 	int tdc;
184 
185 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
186 
187 	if (set->owned.map == 0) {
188 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
189 		    "nxge_uninit_txdma_channels: no channels"));
190 		return;
191 	}
192 
193 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
194 		if ((1 << tdc) & set->owned.map) {
195 			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
196 		}
197 	}
198 
199 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
200 }
201 
202 void
203 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
204 {
205 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
206 
207 	if (nxgep->statsp->tdc_ksp[channel]) {
208 		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
209 		nxgep->statsp->tdc_ksp[channel] = 0;
210 	}
211 
212 	(void) nxge_txdma_stop_channel(nxgep, channel);
213 	nxge_unmap_txdma_channel(nxgep, channel);
214 
215 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
216 	    "<== nxge_uninit_txdma_channel"));
217 }
218 
219 void
220 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
221 	uint32_t entries, uint32_t size)
222 {
223 	size_t		tsize;
224 	*dest_p = *src_p;
225 	tsize = size * entries;
226 	dest_p->alength = tsize;
227 	dest_p->nblocks = entries;
228 	dest_p->block_size = size;
229 	dest_p->offset += tsize;
230 
231 	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
232 	src_p->alength -= tsize;
233 	src_p->dma_cookie.dmac_laddress += tsize;
234 	src_p->dma_cookie.dmac_size -= tsize;
235 }
236 
237 /*
238  * nxge_reset_txdma_channel
239  *
240  *	Reset a TDC.
241  *
242  * Arguments:
243  * 	nxgep
244  * 	channel		The channel to reset.
245  * 	reg_data	The current TX_CS.
246  *
247  * Notes:
248  *
249  * NPI/NXGE function calls:
250  *	npi_txdma_channel_reset()
251  *	npi_txdma_channel_control()
252  *
253  * Registers accessed:
254  *	TX_CS		DMC+0x40028 Transmit Control And Status
255  *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
256  *
257  * Context:
258  *	Any domain
259  */
260 nxge_status_t
261 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
262 {
263 	npi_status_t		rs = NPI_SUCCESS;
264 	nxge_status_t		status = NXGE_OK;
265 	npi_handle_t		handle;
266 
267 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
268 
269 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
270 	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
271 		rs = npi_txdma_channel_reset(handle, channel);
272 	} else {
273 		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
274 		    channel);
275 	}
276 
277 	if (rs != NPI_SUCCESS) {
278 		status = NXGE_ERROR | rs;
279 	}
280 
281 	/*
282 	 * Reset the tail (kick) register to 0.
283 	 * (Hardware will not reset it. Tx overflow fatal
284 	 * error if tail is not set to 0 after reset!
285 	 */
286 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
287 
288 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
289 	return (status);
290 }
291 
292 /*
293  * nxge_init_txdma_channel_event_mask
294  *
295  *	Enable interrupts for a set of events.
296  *
297  * Arguments:
298  * 	nxgep
299  * 	channel	The channel to map.
300  * 	mask_p	The events to enable.
301  *
302  * Notes:
303  *
304  * NPI/NXGE function calls:
305  *	npi_txdma_event_mask()
306  *
307  * Registers accessed:
308  *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
309  *
310  * Context:
311  *	Any domain
312  */
313 nxge_status_t
314 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
315 		p_tx_dma_ent_msk_t mask_p)
316 {
317 	npi_handle_t		handle;
318 	npi_status_t		rs = NPI_SUCCESS;
319 	nxge_status_t		status = NXGE_OK;
320 
321 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
322 	    "<== nxge_init_txdma_channel_event_mask"));
323 
324 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
325 	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
326 	if (rs != NPI_SUCCESS) {
327 		status = NXGE_ERROR | rs;
328 	}
329 
330 	return (status);
331 }
332 
333 /*
334  * nxge_init_txdma_channel_cntl_stat
335  *
336  *	Stop a TDC.  If at first we don't succeed, inject an error.
337  *
338  * Arguments:
339  * 	nxgep
340  * 	channel		The channel to stop.
341  *
342  * Notes:
343  *
344  * NPI/NXGE function calls:
345  *	npi_txdma_control_status()
346  *
347  * Registers accessed:
348  *	TX_CS		DMC+0x40028 Transmit Control And Status
349  *
350  * Context:
351  *	Any domain
352  */
353 nxge_status_t
354 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
355 	uint64_t reg_data)
356 {
357 	npi_handle_t		handle;
358 	npi_status_t		rs = NPI_SUCCESS;
359 	nxge_status_t		status = NXGE_OK;
360 
361 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
362 	    "<== nxge_init_txdma_channel_cntl_stat"));
363 
364 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
365 	rs = npi_txdma_control_status(handle, OP_SET, channel,
366 	    (p_tx_cs_t)&reg_data);
367 
368 	if (rs != NPI_SUCCESS) {
369 		status = NXGE_ERROR | rs;
370 	}
371 
372 	return (status);
373 }
374 
375 /*
376  * nxge_enable_txdma_channel
377  *
378  *	Enable a TDC.
379  *
380  * Arguments:
381  * 	nxgep
382  * 	channel		The channel to enable.
383  * 	tx_desc_p	channel's transmit descriptor ring.
384  * 	mbox_p		channel's mailbox,
385  *
386  * Notes:
387  *
388  * NPI/NXGE function calls:
389  *	npi_txdma_ring_config()
390  *	npi_txdma_mbox_config()
391  *	npi_txdma_channel_init_enable()
392  *
393  * Registers accessed:
394  *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
395  *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
396  *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
397  *	TX_CS		DMC+0x40028 Transmit Control And Status
398  *
399  * Context:
400  *	Any domain
401  */
402 nxge_status_t
403 nxge_enable_txdma_channel(p_nxge_t nxgep,
404 	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
405 {
406 	npi_handle_t		handle;
407 	npi_status_t		rs = NPI_SUCCESS;
408 	nxge_status_t		status = NXGE_OK;
409 
410 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
411 
412 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
413 	/*
414 	 * Use configuration data composed at init time.
415 	 * Write to hardware the transmit ring configurations.
416 	 */
417 	rs = npi_txdma_ring_config(handle, OP_SET, channel,
418 	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
419 
420 	if (rs != NPI_SUCCESS) {
421 		return (NXGE_ERROR | rs);
422 	}
423 
424 	if (isLDOMguest(nxgep)) {
425 		/* Add interrupt handler for this channel. */
426 		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
427 			return (NXGE_ERROR);
428 	}
429 
430 	/* Write to hardware the mailbox */
431 	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
432 	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
433 
434 	if (rs != NPI_SUCCESS) {
435 		return (NXGE_ERROR | rs);
436 	}
437 
438 	/* Start the DMA engine. */
439 	rs = npi_txdma_channel_init_enable(handle, channel);
440 
441 	if (rs != NPI_SUCCESS) {
442 		return (NXGE_ERROR | rs);
443 	}
444 
445 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
446 
447 	return (status);
448 }
449 
450 void
451 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
452 		boolean_t l4_cksum, int pkt_len, uint8_t npads,
453 		p_tx_pkt_hdr_all_t pkthdrp,
454 		t_uscalar_t start_offset,
455 		t_uscalar_t stuff_offset)
456 {
457 	p_tx_pkt_header_t	hdrp;
458 	p_mblk_t 		nmp;
459 	uint64_t		tmp;
460 	size_t 			mblk_len;
461 	size_t 			iph_len;
462 	size_t 			hdrs_size;
463 	uint8_t			hdrs_buf[sizeof (struct ether_header) +
464 	    64 + sizeof (uint32_t)];
465 	uint8_t			*cursor;
466 	uint8_t 		*ip_buf;
467 	uint16_t		eth_type;
468 	uint8_t			ipproto;
469 	boolean_t		is_vlan = B_FALSE;
470 	size_t			eth_hdr_size;
471 
472 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
473 
474 	/*
475 	 * Caller should zero out the headers first.
476 	 */
477 	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
478 
479 	if (fill_len) {
480 		NXGE_DEBUG_MSG((NULL, TX_CTL,
481 		    "==> nxge_fill_tx_hdr: pkt_len %d "
482 		    "npads %d", pkt_len, npads));
483 		tmp = (uint64_t)pkt_len;
484 		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
485 		goto fill_tx_header_done;
486 	}
487 
488 	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);
489 
490 	/*
491 	 * mp is the original data packet (does not include the
492 	 * Neptune transmit header).
493 	 */
494 	nmp = mp;
495 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
496 	    "mp $%p b_rptr $%p len %d",
497 	    mp, nmp->b_rptr, MBLKL(nmp)));
498 	/* copy ether_header from mblk to hdrs_buf */
499 	cursor = &hdrs_buf[0];
500 	tmp = sizeof (struct ether_vlan_header);
501 	while ((nmp != NULL) && (tmp > 0)) {
502 		size_t buflen;
503 		mblk_len = MBLKL(nmp);
504 		buflen = min((size_t)tmp, mblk_len);
505 		bcopy(nmp->b_rptr, cursor, buflen);
506 		cursor += buflen;
507 		tmp -= buflen;
508 		nmp = nmp->b_cont;
509 	}
510 
511 	nmp = mp;
512 	mblk_len = MBLKL(nmp);
513 	ip_buf = NULL;
514 	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
515 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
516 	    "ether type 0x%x", eth_type, hdrp->value));
517 
518 	if (eth_type < ETHERMTU) {
519 		tmp = 1ull;
520 		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
521 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
522 		    "value 0x%llx", hdrp->value));
523 		if (*(hdrs_buf + sizeof (struct ether_header))
524 		    == LLC_SNAP_SAP) {
525 			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
526 			    sizeof (struct ether_header) + 6)));
527 			NXGE_DEBUG_MSG((NULL, TX_CTL,
528 			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
529 			    eth_type));
530 		} else {
531 			goto fill_tx_header_done;
532 		}
533 	} else if (eth_type == VLAN_ETHERTYPE) {
534 		tmp = 1ull;
535 		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
536 
537 		eth_type = ntohs(((struct ether_vlan_header *)
538 		    hdrs_buf)->ether_type);
539 		is_vlan = B_TRUE;
540 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
541 		    "value 0x%llx", hdrp->value));
542 	}
543 
544 	if (!is_vlan) {
545 		eth_hdr_size = sizeof (struct ether_header);
546 	} else {
547 		eth_hdr_size = sizeof (struct ether_vlan_header);
548 	}
549 
550 	switch (eth_type) {
551 	case ETHERTYPE_IP:
552 		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
553 			ip_buf = nmp->b_rptr + eth_hdr_size;
554 			mblk_len -= eth_hdr_size;
555 			iph_len = ((*ip_buf) & 0x0f);
556 			if (mblk_len > (iph_len + sizeof (uint32_t))) {
557 				ip_buf = nmp->b_rptr;
558 				ip_buf += eth_hdr_size;
559 			} else {
560 				ip_buf = NULL;
561 			}
562 
563 		}
564 		if (ip_buf == NULL) {
565 			hdrs_size = 0;
566 			((p_ether_header_t)hdrs_buf)->ether_type = 0;
567 			while ((nmp) && (hdrs_size <
568 			    sizeof (hdrs_buf))) {
569 				mblk_len = (size_t)nmp->b_wptr -
570 				    (size_t)nmp->b_rptr;
571 				if (mblk_len >=
572 				    (sizeof (hdrs_buf) - hdrs_size))
573 					mblk_len = sizeof (hdrs_buf) -
574 					    hdrs_size;
575 				bcopy(nmp->b_rptr,
576 				    &hdrs_buf[hdrs_size], mblk_len);
577 				hdrs_size += mblk_len;
578 				nmp = nmp->b_cont;
579 			}
580 			ip_buf = hdrs_buf;
581 			ip_buf += eth_hdr_size;
582 			iph_len = ((*ip_buf) & 0x0f);
583 		}
584 
585 		ipproto = ip_buf[9];
586 
587 		tmp = (uint64_t)iph_len;
588 		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
589 		tmp = (uint64_t)(eth_hdr_size >> 1);
590 		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
591 
592 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
593 		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
594 		    "tmp 0x%x",
595 		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
596 		    ipproto, tmp));
597 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
598 		    "value 0x%llx", hdrp->value));
599 
600 		break;
601 
602 	case ETHERTYPE_IPV6:
603 		hdrs_size = 0;
604 		((p_ether_header_t)hdrs_buf)->ether_type = 0;
605 		while ((nmp) && (hdrs_size <
606 		    sizeof (hdrs_buf))) {
607 			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
608 			if (mblk_len >=
609 			    (sizeof (hdrs_buf) - hdrs_size))
610 				mblk_len = sizeof (hdrs_buf) -
611 				    hdrs_size;
612 			bcopy(nmp->b_rptr,
613 			    &hdrs_buf[hdrs_size], mblk_len);
614 			hdrs_size += mblk_len;
615 			nmp = nmp->b_cont;
616 		}
617 		ip_buf = hdrs_buf;
618 		ip_buf += eth_hdr_size;
619 
620 		tmp = 1ull;
621 		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
622 
623 		tmp = (eth_hdr_size >> 1);
624 		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
625 
626 		/* byte 6 is the next header protocol */
627 		ipproto = ip_buf[6];
628 
629 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
630 		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
631 		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
632 		    ipproto));
633 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
634 		    "value 0x%llx", hdrp->value));
635 
636 		break;
637 
638 	default:
639 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
640 		goto fill_tx_header_done;
641 	}
642 
643 	switch (ipproto) {
644 	case IPPROTO_TCP:
645 		NXGE_DEBUG_MSG((NULL, TX_CTL,
646 		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
647 		if (l4_cksum) {
648 			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
649 			hdrp->value |=
650 			    (((uint64_t)(start_offset >> 1)) <<
651 			    TX_PKT_HEADER_L4START_SHIFT);
652 			hdrp->value |=
653 			    (((uint64_t)(stuff_offset >> 1)) <<
654 			    TX_PKT_HEADER_L4STUFF_SHIFT);
655 
656 			NXGE_DEBUG_MSG((NULL, TX_CTL,
657 			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
658 			    "value 0x%llx", hdrp->value));
659 		}
660 
661 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
662 		    "value 0x%llx", hdrp->value));
663 		break;
664 
665 	case IPPROTO_UDP:
666 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
667 		if (l4_cksum) {
668 			if (!nxge_cksum_offload) {
669 				uint16_t	*up;
670 				uint16_t	cksum;
671 				t_uscalar_t	stuff_len;
672 
673 				/*
674 				 * The checksum field has the
675 				 * partial checksum.
676 				 * IP_CSUM() macro calls ip_cksum() which
677 				 * can add in the partial checksum.
678 				 */
679 				cksum = IP_CSUM(mp, start_offset, 0);
680 				stuff_len = stuff_offset;
681 				nmp = mp;
682 				mblk_len = MBLKL(nmp);
683 				while ((nmp != NULL) &&
684 				    (mblk_len < stuff_len)) {
685 					stuff_len -= mblk_len;
686 					nmp = nmp->b_cont;
687 				}
688 				ASSERT(nmp);
689 				up = (uint16_t *)(nmp->b_rptr + stuff_len);
690 
691 				*up = cksum;
692 				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
693 				NXGE_DEBUG_MSG((NULL, TX_CTL,
694 				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
695 				    "use sw cksum "
696 				    "write to $%p cksum 0x%x content up 0x%x",
697 				    stuff_len,
698 				    up,
699 				    cksum,
700 				    *up));
701 			} else {
702 				/* Hardware will compute the full checksum */
703 				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
704 				hdrp->value |=
705 				    (((uint64_t)(start_offset >> 1)) <<
706 				    TX_PKT_HEADER_L4START_SHIFT);
707 				hdrp->value |=
708 				    (((uint64_t)(stuff_offset >> 1)) <<
709 				    TX_PKT_HEADER_L4STUFF_SHIFT);
710 
711 				NXGE_DEBUG_MSG((NULL, TX_CTL,
712 				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
713 				    " use partial checksum "
714 				    "cksum 0x%x ",
715 				    "value 0x%llx",
716 				    stuff_offset,
717 				    IP_CSUM(mp, start_offset, 0),
718 				    hdrp->value));
719 			}
720 		}
721 
722 		NXGE_DEBUG_MSG((NULL, TX_CTL,
723 		    "==> nxge_tx_pkt_hdr_init: UDP"
724 		    "value 0x%llx", hdrp->value));
725 		break;
726 
727 	default:
728 		goto fill_tx_header_done;
729 	}
730 
731 fill_tx_header_done:
732 	NXGE_DEBUG_MSG((NULL, TX_CTL,
733 	    "==> nxge_fill_tx_hdr: pkt_len %d  "
734 	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));
735 
736 	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
737 }
738 
739 /*ARGSUSED*/
740 p_mblk_t
741 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
742 {
743 	p_mblk_t 		newmp = NULL;
744 
745 	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
746 		NXGE_DEBUG_MSG((NULL, TX_CTL,
747 		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
748 		return (NULL);
749 	}
750 
751 	NXGE_DEBUG_MSG((NULL, TX_CTL,
752 	    "==> nxge_tx_pkt_header_reserve: get new mp"));
753 	DB_TYPE(newmp) = M_DATA;
754 	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
755 	linkb(newmp, mp);
756 	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
757 
758 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
759 	    "b_rptr $%p b_wptr $%p",
760 	    newmp->b_rptr, newmp->b_wptr));
761 
762 	NXGE_DEBUG_MSG((NULL, TX_CTL,
763 	    "<== nxge_tx_pkt_header_reserve: use new mp"));
764 
765 	return (newmp);
766 }
767 
/*
 * nxge_tx_pkt_nmblocks
 *
 *	Walk a packet's mblk chain and estimate how many transmit
 *	descriptor (gather) entries it will need.  Small fragments
 *	(below nxge_bcopy_thresh) are counted as one coalesced entry;
 *	fragments larger than the hardware's 4K transfer limit are
 *	split in place with dupb(); if the chain would still exceed
 *	the gather-pointer limit, the remainder is flattened with
 *	msgpullup().
 *
 *	mp		head of the packet's mblk chain (may be
 *			modified as described above)
 *	tot_xfer_len_p	OUT: total byte length of the chain
 *
 *	Returns the estimated descriptor count, or 0 when dupb() or
 *	msgpullup() fails.
 */
int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t 			nmblks;
	ssize_t			len;
	uint_t 			pkt_len;
	p_mblk_t 		nmp, bmp, tmp;
	uint8_t 		*b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	/* bmp trails nmp so the scan can back up after coalescing. */
	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		/* Zero-length mblks contribute nothing; skip them. */
		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			/*
			 * Small fragment: runs of these are bcopy'd
			 * into a single descriptor (pkt_len tracks the
			 * accumulated run).  Once the run reaches the
			 * bcopy threshold, restart accounting from the
			 * previous mblk.
			 */
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= nxge_bcopy_thresh) {
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (> thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.
			 * If len is more than 4K, we need to break
			 * it up to at most 2 more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t	nsegs;

				/*
				 * NOTE(review): this nsegs computation
				 * (modulo of 2*4K) only ever yields 1
				 * or 2 extra segments — confirm intent
				 * for fragments larger than 12K.
				 */
				nsegs = 1;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_nmblocks: "
				    "len %d pkt_len %d nmblks %d nsegs %d",
				    len, pkt_len, nmblks, nsegs));
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				/*
				 * Split the mblk in place: cap this
				 * mblk at 4K and chain a dupb() clone
				 * covering the remainder.
				 */
				do {
					b_wptr = nmp->b_rptr +
					    TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the transmit gather pointers to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks,
	    MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}
895 
/*
 * nxge_txdma_reclaim
 *
 *	Reclaim transmit descriptors the hardware has completed,
 *	releasing each descriptor's DMA binding and message block and
 *	advancing the software read index up to the hardware head.
 *	Then decide whether the ring has room for nmblks more
 *	descriptors.
 *
 *	nxgep		the driver instance
 *	tx_ring_p	the transmit ring to reclaim from
 *	nmblks		descriptors the caller wants to queue; the
 *			reclaim pass is skipped when few descriptors
 *			are pending and nmblks is nonzero
 *
 *	Returns B_TRUE when the ring can accept nmblks more
 *	descriptors (with TX_FULL_MARK slack), B_FALSE otherwise.
 *
 *	NOTE(review): rd_index and descs_pending are read-modify-
 *	written here without locking in this function — presumably the
 *	caller serializes reclaim on this ring; confirm against
 *	callers.
 */
boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t 		status = B_TRUE;
	p_nxge_dma_common_t	tx_desc_dma_p;
	nxge_dma_common_t	desc_area;
	p_tx_desc_t 		tx_desc_ring_vp;
	p_tx_desc_t 		tx_desc_p;
	p_tx_desc_t 		tx_desc_pp;
	tx_desc_t 		r_tx_desc;
	p_tx_msg_t 		tx_msg_ring;
	p_tx_msg_t 		tx_msg_p;
	npi_handle_t		handle;
	tx_ring_hdl_t		tx_head;
	uint32_t 		pkt_len;
	uint_t			tx_rd_index;
	uint16_t		head_index, tail_index;
	uint8_t			tdc;
	boolean_t		head_wrap, tail_wrap;
	p_nxge_tx_ring_stats_t tdc_stats;
	int			rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	/*
	 * Skip the reclaim pass when few descriptors are pending and
	 * the caller actually wants to transmit (nmblks != 0).
	 */
	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		/* Track the high-water mark of pending descriptors. */
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index =  tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		/*
		 * head == tail means the ring is either completely
		 * empty or completely full; the wrap bits distinguish
		 * the two cases.
		 */
		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		/*
		 * Walk the completed descriptors from the software
		 * read index up to the hardware head, releasing each
		 * one.  r_tx_desc is a local snapshot so the DMA'd
		 * descriptor is read only once per iteration.
		 */
		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			/*
			 * Accumulate byte/packet stats; the SOP bit
			 * counts a multi-descriptor packet once.
			 */
			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			/* Release this descriptor's DMA resources. */
			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));
			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}

			/* Advance the read index with wrap-around. */
			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		/*
		 * Room check: keep TX_FULL_MARK descriptors of slack.
		 * When there is room again, clear the queueing flag.
		 */
		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
		if (status) {
			(void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}
1094 
1095 /*
1096  * nxge_tx_intr
1097  *
1098  *	Process a TDC interrupt
1099  *
1100  * Arguments:
1101  * 	arg1	A Logical Device state Vector (LSV) data structure.
1102  * 	arg2	nxge_t *
1103  *
1104  * Notes:
1105  *
1106  * NPI/NXGE function calls:
1107  *	npi_txdma_control_status()
1108  *	npi_intr_ldg_mgmt_set()
1109  *
1110  *	nxge_tx_err_evnts()
1111  *	nxge_txdma_reclaim()
1112  *
1113  * Registers accessed:
1114  *	TX_CS		DMC+0x40028 Transmit Control And Status
1115  *	PIO_LDSV
1116  *
1117  * Context:
1118  *	Any domain
1119  */
1120 uint_t
1121 nxge_tx_intr(void *arg1, void *arg2)
1122 {
1123 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
1124 	p_nxge_t		nxgep = (p_nxge_t)arg2;
1125 	p_nxge_ldg_t		ldgp;
1126 	uint8_t			channel;
1127 	uint32_t		vindex;
1128 	npi_handle_t		handle;
1129 	tx_cs_t			cs;
1130 	p_tx_ring_t 		*tx_rings;
1131 	p_tx_ring_t 		tx_ring_p;
1132 	npi_status_t		rs = NPI_SUCCESS;
1133 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
1134 	nxge_status_t 		status = NXGE_OK;
1135 
1136 	if (ldvp == NULL) {
1137 		NXGE_DEBUG_MSG((NULL, INT_CTL,
1138 		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
1139 		    nxgep, ldvp));
1140 		return (DDI_INTR_UNCLAIMED);
1141 	}
1142 
1143 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1144 		nxgep = ldvp->nxgep;
1145 	}
1146 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1147 	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1148 	    nxgep, ldvp));
1149 
1150 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1151 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1152 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1153 		    "<== nxge_tx_intr: interface not started or intialized"));
1154 		return (DDI_INTR_CLAIMED);
1155 	}
1156 
1157 	/*
1158 	 * This interrupt handler is for a specific
1159 	 * transmit dma channel.
1160 	 */
1161 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1162 	/* Get the control and status for this channel. */
1163 	channel = ldvp->channel;
1164 	ldgp = ldvp->ldgp;
1165 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1166 	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1167 	    "channel %d",
1168 	    nxgep, ldvp, channel));
1169 
1170 	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1171 	vindex = ldvp->vdma_index;
1172 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1173 	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1174 	    channel, vindex, rs));
1175 	if (!rs && cs.bits.ldw.mk) {
1176 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1177 		    "==> nxge_tx_intr:channel %d ring index %d "
1178 		    "status 0x%08x (mk bit set)",
1179 		    channel, vindex, rs));
1180 		tx_rings = nxgep->tx_rings->rings;
1181 		tx_ring_p = tx_rings[vindex];
1182 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1183 		    "==> nxge_tx_intr:channel %d ring index %d "
1184 		    "status 0x%08x (mk bit set, calling reclaim)",
1185 		    channel, vindex, rs));
1186 
1187 		nxge_tx_ring_task((void *)tx_ring_p);
1188 	}
1189 
1190 	/*
1191 	 * Process other transmit control and status.
1192 	 * Check the ldv state.
1193 	 */
1194 	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1195 	/*
1196 	 * Rearm this logical group if this is a single device
1197 	 * group.
1198 	 */
1199 	if (ldgp->nldvs == 1) {
1200 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1201 		    "==> nxge_tx_intr: rearm"));
1202 		if (status == NXGE_OK) {
1203 			if (isLDOMguest(nxgep)) {
1204 				nxge_hio_ldgimgn(nxgep, ldgp);
1205 			} else {
1206 				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1207 				    B_TRUE, ldgp->ldg_timer);
1208 			}
1209 		}
1210 	}
1211 
1212 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1213 	serviced = DDI_INTR_CLAIMED;
1214 	return (serviced);
1215 }
1216 
/*
 * nxge_txdma_stop -- stop the TX side by halting link monitoring.
 * Marked dead: no remaining callers are known; kept for reference.
 */
void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	/* Only link monitoring is stopped; TDC hardware is untouched here. */
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}
1226 
/*
 * nxge_txdma_stop_start -- stop, repair, and restart all TX DMA state.
 * Marked dead: no remaining callers are known; kept for reference.
 */
void
nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	/* Reset ring indices, re-enable the TDCs and MAC, then kick. */
	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}
1241 
1242 npi_status_t
1243 nxge_txdma_channel_disable(
1244 	nxge_t *nxge,
1245 	int channel)
1246 {
1247 	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxge);
1248 	npi_status_t	rs;
1249 	tdmc_intr_dbg_t	intr_dbg;
1250 
1251 	/*
1252 	 * Stop the dma channel and wait for the stop-done.
1253 	 * If the stop-done bit is not present, then force
1254 	 * an error so TXC will stop.
1255 	 * All channels bound to this port need to be stopped
1256 	 * and reset after injecting an interrupt error.
1257 	 */
1258 	rs = npi_txdma_channel_disable(handle, channel);
1259 	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1260 	    "==> nxge_txdma_channel_disable(%d) "
1261 	    "rs 0x%x", channel, rs));
1262 	if (rs != NPI_SUCCESS) {
1263 		/* Inject any error */
1264 		intr_dbg.value = 0;
1265 		intr_dbg.bits.ldw.nack_pref = 1;
1266 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1267 		    "==> nxge_txdma_hw_mode: "
1268 		    "channel %d (stop failed 0x%x) "
1269 		    "(inject err)", rs, channel));
1270 		(void) npi_txdma_inj_int_error_set(
1271 		    handle, channel, &intr_dbg);
1272 		rs = npi_txdma_channel_disable(handle, channel);
1273 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1274 		    "==> nxge_txdma_hw_mode: "
1275 		    "channel %d (stop again 0x%x) "
1276 		    "(after inject err)",
1277 		    rs, channel));
1278 	}
1279 
1280 	return (rs);
1281 }
1282 
1283 /*
1284  * nxge_txdma_hw_mode
1285  *
1286  *	Toggle all TDCs on (enable) or off (disable).
1287  *
1288  * Arguments:
1289  * 	nxgep
1290  * 	enable	Enable or disable a TDC.
1291  *
1292  * Notes:
1293  *
1294  * NPI/NXGE function calls:
1295  *	npi_txdma_channel_enable(TX_CS)
1296  *	npi_txdma_channel_disable(TX_CS)
1297  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1298  *
1299  * Registers accessed:
1300  *	TX_CS		DMC+0x40028 Transmit Control And Status
1301  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1302  *
1303  * Context:
1304  *	Any domain
1305  */
1306 nxge_status_t
1307 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1308 {
1309 	nxge_grp_set_t *set = &nxgep->tx_set;
1310 
1311 	npi_handle_t	handle;
1312 	nxge_status_t	status;
1313 	npi_status_t	rs;
1314 	int		tdc;
1315 
1316 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1317 	    "==> nxge_txdma_hw_mode: enable mode %d", enable));
1318 
1319 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1320 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1321 		    "<== nxge_txdma_mode: not initialized"));
1322 		return (NXGE_ERROR);
1323 	}
1324 
1325 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1326 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1327 		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1328 		return (NXGE_ERROR);
1329 	}
1330 
1331 	/* Enable or disable all of the TDCs owned by us. */
1332 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1333 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1334 		if ((1 << tdc) & set->owned.map) {
1335 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1336 			if (ring) {
1337 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1338 				    "==> nxge_txdma_hw_mode: channel %d", tdc));
1339 				if (enable) {
1340 					rs = npi_txdma_channel_enable
1341 					    (handle, tdc);
1342 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1343 					    "==> nxge_txdma_hw_mode: "
1344 					    "channel %d (enable) rs 0x%x",
1345 					    tdc, rs));
1346 				} else {
1347 					rs = nxge_txdma_channel_disable
1348 					    (nxgep, tdc);
1349 				}
1350 			}
1351 		}
1352 	}
1353 
1354 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1355 
1356 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1357 	    "<== nxge_txdma_hw_mode: status 0x%x", status));
1358 
1359 	return (status);
1360 }
1361 
1362 void
1363 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1364 {
1365 	npi_handle_t		handle;
1366 
1367 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1368 	    "==> nxge_txdma_enable_channel: channel %d", channel));
1369 
1370 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1371 	/* enable the transmit dma channels */
1372 	(void) npi_txdma_channel_enable(handle, channel);
1373 
1374 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1375 }
1376 
1377 void
1378 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1379 {
1380 	npi_handle_t		handle;
1381 
1382 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1383 	    "==> nxge_txdma_disable_channel: channel %d", channel));
1384 
1385 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1386 	/* stop the transmit dma channels */
1387 	(void) npi_txdma_channel_disable(handle, channel);
1388 
1389 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1390 }
1391 
1392 /*
1393  * nxge_txdma_stop_inj_err
1394  *
1395  *	Stop a TDC.  If at first we don't succeed, inject an error.
1396  *
1397  * Arguments:
1398  * 	nxgep
1399  * 	channel		The channel to stop.
1400  *
1401  * Notes:
1402  *
1403  * NPI/NXGE function calls:
1404  *	npi_txdma_channel_disable()
1405  *	npi_txdma_inj_int_error_set()
1406  * #if defined(NXGE_DEBUG)
1407  *	nxge_txdma_regs_dump_channels(nxgep);
1408  * #endif
1409  *
1410  * Registers accessed:
1411  *	TX_CS		DMC+0x40028 Transmit Control And Status
1412  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1413  *
1414  * Context:
1415  *	Any domain
1416  */
1417 int
1418 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1419 {
1420 	npi_handle_t		handle;
1421 	tdmc_intr_dbg_t		intr_dbg;
1422 	int			status;
1423 	npi_status_t		rs = NPI_SUCCESS;
1424 
1425 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1426 	/*
1427 	 * Stop the dma channel waits for the stop done.
1428 	 * If the stop done bit is not set, then create
1429 	 * an error.
1430 	 */
1431 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1432 	rs = npi_txdma_channel_disable(handle, channel);
1433 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1434 	if (status == NXGE_OK) {
1435 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1436 		    "<== nxge_txdma_stop_inj_err (channel %d): "
1437 		    "stopped OK", channel));
1438 		return (status);
1439 	}
1440 
1441 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1442 	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1443 	    "injecting error", channel, rs));
1444 	/* Inject any error */
1445 	intr_dbg.value = 0;
1446 	intr_dbg.bits.ldw.nack_pref = 1;
1447 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1448 
1449 	/* Stop done bit will be set as a result of error injection */
1450 	rs = npi_txdma_channel_disable(handle, channel);
1451 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1452 	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1453 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1454 		    "<== nxge_txdma_stop_inj_err (channel %d): "
1455 		    "stopped OK ", channel));
1456 		return (status);
1457 	}
1458 
1459 #if	defined(NXGE_DEBUG)
1460 	nxge_txdma_regs_dump_channels(nxgep);
1461 #endif
1462 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1463 	    "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
1464 	    " (injected error but still not stopped)", channel, rs));
1465 
1466 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1467 	return (status);
1468 }
1469 
1470 /*ARGSUSED*/
1471 void
1472 nxge_fixup_txdma_rings(p_nxge_t nxgep)
1473 {
1474 	nxge_grp_set_t *set = &nxgep->tx_set;
1475 	int tdc;
1476 
1477 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1478 
1479 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1480 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1481 		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1482 		return;
1483 	}
1484 
1485 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1486 		if ((1 << tdc) & set->owned.map) {
1487 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1488 			if (ring) {
1489 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1490 				    "==> nxge_fixup_txdma_rings: channel %d",
1491 				    tdc));
1492 				nxge_txdma_fixup_channel(nxgep, ring, tdc);
1493 			}
1494 		}
1495 	}
1496 
1497 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1498 }
1499 
1500 /*ARGSUSED*/
1501 void
1502 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1503 {
1504 	p_tx_ring_t	ring_p;
1505 
1506 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1507 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1508 	if (ring_p == NULL) {
1509 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1510 		return;
1511 	}
1512 
1513 	if (ring_p->tdc != channel) {
1514 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1515 		    "<== nxge_txdma_fix_channel: channel not matched "
1516 		    "ring tdc %d passed channel",
1517 		    ring_p->tdc, channel));
1518 		return;
1519 	}
1520 
1521 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1522 
1523 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1524 }
1525 
1526 /*ARGSUSED*/
1527 void
1528 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1529 {
1530 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1531 
1532 	if (ring_p == NULL) {
1533 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1534 		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1535 		return;
1536 	}
1537 
1538 	if (ring_p->tdc != channel) {
1539 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1540 		    "<== nxge_txdma_fixup_channel: channel not matched "
1541 		    "ring tdc %d passed channel",
1542 		    ring_p->tdc, channel));
1543 		return;
1544 	}
1545 
1546 	MUTEX_ENTER(&ring_p->lock);
1547 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1548 	ring_p->rd_index = 0;
1549 	ring_p->wr_index = 0;
1550 	ring_p->ring_head.value = 0;
1551 	ring_p->ring_kick_tail.value = 0;
1552 	ring_p->descs_pending = 0;
1553 	MUTEX_EXIT(&ring_p->lock);
1554 
1555 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1556 }
1557 
/*
 * nxge_txdma_hw_kick -- invoke the per-channel kick routine for every
 * TDC owned by this instance that has a ring.
 */
/*ARGSUSED*/
void
nxge_txdma_hw_kick(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		/* Only channels in the owned map belong to this instance. */
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_txdma_hw_kick: channel %d", tdc));
				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
}
1586 
1587 /*ARGSUSED*/
1588 void
1589 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1590 {
1591 	p_tx_ring_t	ring_p;
1592 
1593 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1594 
1595 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1596 	if (ring_p == NULL) {
1597 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1598 		    " nxge_txdma_kick_channel"));
1599 		return;
1600 	}
1601 
1602 	if (ring_p->tdc != channel) {
1603 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1604 		    "<== nxge_txdma_kick_channel: channel not matched "
1605 		    "ring tdc %d passed channel",
1606 		    ring_p->tdc, channel));
1607 		return;
1608 	}
1609 
1610 	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1611 
1612 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1613 }
1614 
/*
 * nxge_txdma_hw_kick_channel -- per-channel kick hook.
 *
 * NOTE(review): aside from the NULL-ring check this function performs
 * no hardware kick; it appears to be a placeholder.  Verify whether a
 * tail-kick register write was intended here.
 */
/*ARGSUSED*/
void
nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
}
1630 
1631 /*
1632  * nxge_check_tx_hang
1633  *
1634  *	Check the state of all TDCs belonging to nxgep.
1635  *
1636  * Arguments:
1637  * 	nxgep
1638  *
1639  * Notes:
1640  *	Called by nxge_hw.c:nxge_check_hw_state().
1641  *
1642  * NPI/NXGE function calls:
1643  *
1644  * Registers accessed:
1645  *
1646  * Context:
1647  *	Any domain
1648  */
1649 /*ARGSUSED*/
1650 void
1651 nxge_check_tx_hang(p_nxge_t nxgep)
1652 {
1653 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1654 
1655 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1656 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1657 		goto nxge_check_tx_hang_exit;
1658 	}
1659 
1660 	/*
1661 	 * Needs inputs from hardware for regs:
1662 	 *	head index had not moved since last timeout.
1663 	 *	packets not transmitted or stuffed registers.
1664 	 */
1665 	if (nxge_txdma_hung(nxgep)) {
1666 		nxge_fixup_hung_txdma_rings(nxgep);
1667 	}
1668 
1669 nxge_check_tx_hang_exit:
1670 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1671 }
1672 
1673 /*
1674  * nxge_txdma_hung
1675  *
 *	Check all TDCs owned by <nxgep> for a hang.
 *
 * Arguments:
 * 	nxgep
1682  *
1683  * Notes:
1684  *	Called by nxge_check_tx_hang()
1685  *
1686  * NPI/NXGE function calls:
1687  *	nxge_txdma_channel_hung()
1688  *
1689  * Registers accessed:
1690  *
1691  * Context:
1692  *	Any domain
1693  */
int
nxge_txdma_hung(p_nxge_t nxgep)
{
	nxge_grp_set_t	*set = &nxgep->tx_set;
	int		tdc;
	boolean_t	shared;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
		return (B_FALSE);
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		/*
		 * Grab the shared state of the TDC.  In a service domain
		 * a TDC may be loaned to a guest; read the flag under the
		 * HIO lock so it cannot change underneath us.
		 */
		if (isLDOMservice(nxgep)) {
			nxge_hio_data_t *nhd =
			    (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;

			MUTEX_ENTER(&nhd->lock);
			shared = nxgep->tdc_is_shared[tdc];
			MUTEX_EXIT(&nhd->lock);
		} else {
			shared = B_FALSE;
		}

		/*
		 * Only examine channels we own outright; shared (loaned)
		 * channels are skipped.  Report a hang as soon as any
		 * one channel looks hung.
		 */
		if (((1 << tdc) & set->owned.map) && !shared) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_txdma_hung: TDC %d hung",
					    tdc));
					return (B_TRUE);
				}
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

	return (B_FALSE);
}
1744 
1745 /*
1746  * nxge_txdma_channel_hung
1747  *
 *	Determine whether a TDC appears to be hung.
 *
 * Arguments:
 * 	nxgep
 * 	ring		<channel>'s ring.
 * 	channel		The channel to examine.
1754  *
1755  * Notes:
1756  *	Called by nxge_txdma.c:nxge_txdma_hung()
1757  *
1758  * NPI/NXGE function calls:
1759  *	npi_txdma_ring_head_get()
1760  *
1761  * Registers accessed:
1762  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
1763  *
1764  * Context:
1765  *	Any domain
1766  */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t		head_index, tail_index;
	boolean_t		head_wrap, tail_wrap;
	npi_handle_t		handle;
	tx_ring_hdl_t		tx_head;
	uint_t			tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: channel %d", channel));
	/*
	 * Reclaim first so completed descriptors cannot make an active
	 * channel look full; snapshot the software indices under the
	 * ring lock.
	 */
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
	    "tail_index %d tail_wrap %d ",
	    channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index =  tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: "
	    "tx_rd_index %d tail %d tail_wrap %d "
	    "head %d wrap %d",
	    tx_rd_index, tail_index, tail_wrap,
	    head_index, head_wrap));

	/* An empty ring whose head matches our read index is not hung. */
	if (TXDMA_RING_EMPTY(head_index, head_wrap,
	    tail_index, tail_wrap) &&
	    (head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	/*
	 * A ring still full after the reclaim above means the hardware
	 * head has stopped advancing: report the channel as hung.
	 */
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: Checking if ring full"));
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
	    tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}
1828 
1829 /*
1830  * nxge_fixup_hung_txdma_rings
1831  *
 *	Walk all TDCs owned by <nxgep> and 'fix' any that are hung.
 *
 * Arguments:
 * 	nxgep
1838  *
1839  * Notes:
1840  *	Called by nxge_check_tx_hang()
1841  *
1842  * NPI/NXGE function calls:
1843  *	npi_txdma_ring_head_get()
1844  *
1845  * Registers accessed:
1846  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
1847  *
1848  * Context:
1849  *	Any domain
1850  */
1851 /*ARGSUSED*/
1852 void
1853 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1854 {
1855 	nxge_grp_set_t *set = &nxgep->tx_set;
1856 	int tdc;
1857 
1858 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1859 
1860 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1861 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1862 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1863 		return;
1864 	}
1865 
1866 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1867 		if ((1 << tdc) & set->owned.map) {
1868 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1869 			if (ring) {
1870 				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1871 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1872 				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
1873 				    tdc));
1874 			}
1875 		}
1876 	}
1877 
1878 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1879 }
1880 
1881 /*
1882  * nxge_txdma_fixup_hung_channel
1883  *
1884  *	'Fix' a hung TDC.
1885  *
1886  * Arguments:
1887  * 	nxgep
1888  * 	channel		The channel to fix.
1889  *
1890  * Notes:
1891  *	Called by nxge_fixup_hung_txdma_rings()
1892  *
1893  *	1. Reclaim the TDC.
1894  *	2. Disable the TDC.
1895  *
1896  * NPI/NXGE function calls:
1897  *	nxge_txdma_reclaim()
1898  *	npi_txdma_channel_disable(TX_CS)
1899  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1900  *
1901  * Registers accessed:
1902  *	TX_CS		DMC+0x40028 Transmit Control And Status
1903  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1904  *
1905  * Context:
1906  *	Any domain
1907  */
1908 /*ARGSUSED*/
1909 void
1910 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1911 {
1912 	p_tx_ring_t	ring_p;
1913 
1914 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1915 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1916 	if (ring_p == NULL) {
1917 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1918 		    "<== nxge_txdma_fix_hung_channel"));
1919 		return;
1920 	}
1921 
1922 	if (ring_p->tdc != channel) {
1923 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1924 		    "<== nxge_txdma_fix_hung_channel: channel not matched "
1925 		    "ring tdc %d passed channel",
1926 		    ring_p->tdc, channel));
1927 		return;
1928 	}
1929 
1930 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1931 
1932 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1933 }
1934 
1935 /*ARGSUSED*/
1936 void
1937 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1938 	uint16_t channel)
1939 {
1940 	npi_handle_t		handle;
1941 	tdmc_intr_dbg_t		intr_dbg;
1942 	int			status = NXGE_OK;
1943 
1944 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1945 
1946 	if (ring_p == NULL) {
1947 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1948 		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1949 		return;
1950 	}
1951 
1952 	if (ring_p->tdc != channel) {
1953 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1954 		    "<== nxge_txdma_fixup_hung_channel: channel "
1955 		    "not matched "
1956 		    "ring tdc %d passed channel",
1957 		    ring_p->tdc, channel));
1958 		return;
1959 	}
1960 
1961 	/* Reclaim descriptors */
1962 	MUTEX_ENTER(&ring_p->lock);
1963 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1964 	MUTEX_EXIT(&ring_p->lock);
1965 
1966 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1967 	/*
1968 	 * Stop the dma channel waits for the stop done.
1969 	 * If the stop done bit is not set, then force
1970 	 * an error.
1971 	 */
1972 	status = npi_txdma_channel_disable(handle, channel);
1973 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
1974 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1975 		    "<== nxge_txdma_fixup_hung_channel: stopped OK "
1976 		    "ring tdc %d passed channel %d",
1977 		    ring_p->tdc, channel));
1978 		return;
1979 	}
1980 
1981 	/* Inject any error */
1982 	intr_dbg.value = 0;
1983 	intr_dbg.bits.ldw.nack_pref = 1;
1984 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1985 
1986 	/* Stop done bit will be set as a result of error injection */
1987 	status = npi_txdma_channel_disable(handle, channel);
1988 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
1989 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1990 		    "<== nxge_txdma_fixup_hung_channel: stopped again"
1991 		    "ring tdc %d passed channel",
1992 		    ring_p->tdc, channel));
1993 		return;
1994 	}
1995 
1996 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1997 	    "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
1998 	    "ring tdc %d passed channel",
1999 	    ring_p->tdc, channel));
2000 
2001 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
2002 }
2003 
2004 /*ARGSUSED*/
2005 void
2006 nxge_reclaim_rings(p_nxge_t nxgep)
2007 {
2008 	nxge_grp_set_t *set = &nxgep->tx_set;
2009 	int tdc;
2010 
2011 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
2012 
2013 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2014 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2015 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
2016 		return;
2017 	}
2018 
2019 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2020 		if ((1 << tdc) & set->owned.map) {
2021 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2022 			if (ring) {
2023 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
2024 				    "==> nxge_reclaim_rings: TDC %d", tdc));
2025 				MUTEX_ENTER(&ring->lock);
2026 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
2027 				MUTEX_EXIT(&ring->lock);
2028 			}
2029 		}
2030 	}
2031 
2032 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
2033 }
2034 
2035 void
2036 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2037 {
2038 	nxge_grp_set_t *set = &nxgep->tx_set;
2039 	npi_handle_t handle;
2040 	int tdc;
2041 
2042 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2043 
2044 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2045 
2046 	if (!isLDOMguest(nxgep)) {
2047 		(void) npi_txdma_dump_fzc_regs(handle);
2048 
2049 		/* Dump TXC registers. */
2050 		(void) npi_txc_dump_fzc_regs(handle);
2051 		(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2052 	}
2053 
2054 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2055 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2056 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
2057 		return;
2058 	}
2059 
2060 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2061 		if ((1 << tdc) & set->owned.map) {
2062 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2063 			if (ring) {
2064 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
2065 				    "==> nxge_txdma_regs_dump_channels: "
2066 				    "TDC %d", tdc));
2067 				(void) npi_txdma_dump_tdc_regs(handle, tdc);
2068 
2069 				/* Dump TXC registers, if able to. */
2070 				if (!isLDOMguest(nxgep)) {
2071 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
2072 					    "==> nxge_txdma_regs_dump_channels:"
2073 					    " FZC TDC %d", tdc));
2074 					(void) npi_txc_dump_tdc_fzc_regs
2075 					    (handle, tdc);
2076 				}
2077 				nxge_txdma_regs_dump(nxgep, tdc);
2078 			}
2079 		}
2080 	}
2081 
2082 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump"));
2083 }
2084 
2085 void
2086 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2087 {
2088 	npi_handle_t		handle;
2089 	tx_ring_hdl_t 		hdl;
2090 	tx_ring_kick_t 		kick;
2091 	tx_cs_t 		cs;
2092 	txc_control_t		control;
2093 	uint32_t		bitmap = 0;
2094 	uint32_t		burst = 0;
2095 	uint32_t		bytes = 0;
2096 	dma_log_page_t		cfg;
2097 
2098 	printf("\n\tfunc # %d tdc %d ",
2099 	    nxgep->function_num, channel);
2100 	cfg.page_num = 0;
2101 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2102 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
2103 	printf("\n\tlog page func %d valid page 0 %d",
2104 	    cfg.func_num, cfg.valid);
2105 	cfg.page_num = 1;
2106 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
2107 	printf("\n\tlog page func %d valid page 1 %d",
2108 	    cfg.func_num, cfg.valid);
2109 
2110 	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
2111 	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2112 	printf("\n\thead value is 0x%0llx",
2113 	    (long long)hdl.value);
2114 	printf("\n\thead index %d", hdl.bits.ldw.head);
2115 	printf("\n\tkick value is 0x%0llx",
2116 	    (long long)kick.value);
2117 	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2118 
2119 	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2120 	printf("\n\tControl statue is 0x%0llx", (long long)cs.value);
2121 	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2122 
2123 	(void) npi_txc_control(handle, OP_GET, &control);
2124 	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2125 	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2126 	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2127 
2128 	printf("\n\tTXC port control 0x%0llx",
2129 	    (long long)control.value);
2130 	printf("\n\tTXC port bitmap 0x%x", bitmap);
2131 	printf("\n\tTXC max burst %d", burst);
2132 	printf("\n\tTXC bytes xmt %d\n", bytes);
2133 
2134 	{
2135 		ipp_status_t status;
2136 
2137 		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2138 #if defined(__i386)
2139 		printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value);
2140 #else
2141 		printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value);
2142 #endif
2143 	}
2144 }
2145 
2146 /*
2147  * nxge_tdc_hvio_setup
2148  *
 *	Record the channel's original (pre-relocation) buffer I/O
 *	addresses and sizes -- data area and control area -- in its
 *	transmit ring, for later use by the hypervisor (HV) DMA
 *	binding code (consumer is outside this file).
2150  *
2151  * Arguments:
2152  * 	nxgep
2153  * 	channel	The channel to map.
2154  *
2155  * Notes:
2156  *
2157  * NPI/NXGE function calls:
2158  *	na
2159  *
2160  * Context:
2161  *	Service domain?
2162  */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
nxge_tdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	nxge_dma_common_t	*data;		/* channel's data buffer area */
	nxge_dma_common_t	*control;	/* channel's control buffer area */
	tx_ring_t 		*ring;

	ring = nxgep->tx_rings->rings[channel];
	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];

	/* No HV binding has been established for this ring yet. */
	ring->hv_set = B_FALSE;

	/* Data buffers: original I/O base address and total length. */
	ring->hv_tx_buf_base_ioaddr_pp =
	    (uint64_t)data->orig_ioaddr_pp;
	ring->hv_tx_buf_ioaddr_size =
	    (uint64_t)data->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
	    "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
	    "orig vatopa base io $%p orig_len 0x%llx (%d)",
	    ring->hv_tx_buf_base_ioaddr_pp,
	    ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
	    data->ioaddr_pp, data->orig_vatopa,
	    data->orig_alength, data->orig_alength));

	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];

	/* Control area: original I/O base address and total length. */
	ring->hv_tx_cntl_base_ioaddr_pp =
	    (uint64_t)control->orig_ioaddr_pp;
	ring->hv_tx_cntl_ioaddr_size =
	    (uint64_t)control->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
	    "hv cntl base io $%p orig ioaddr_pp ($%p) "
	    "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
	    ring->hv_tx_cntl_base_ioaddr_pp,
	    control->orig_ioaddr_pp, control->orig_vatopa,
	    ring->hv_tx_cntl_ioaddr_size,
	    control->orig_alength, control->orig_alength));
}
#endif
2206 
2207 static nxge_status_t
2208 nxge_map_txdma(p_nxge_t nxgep, int channel)
2209 {
2210 	nxge_dma_common_t	**pData;
2211 	nxge_dma_common_t	**pControl;
2212 	tx_ring_t 		**pRing, *ring;
2213 	tx_mbox_t		**mailbox;
2214 	uint32_t		num_chunks;
2215 
2216 	nxge_status_t		status = NXGE_OK;
2217 
2218 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2219 
2220 	if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2221 		if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2222 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2223 			    "<== nxge_map_txdma: buf not allocated"));
2224 			return (NXGE_ERROR);
2225 		}
2226 	}
2227 
2228 	if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2229 		return (NXGE_ERROR);
2230 
2231 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2232 	pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2233 	pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2234 	pRing = &nxgep->tx_rings->rings[channel];
2235 	mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2236 
2237 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2238 	    "tx_rings $%p tx_desc_rings $%p",
2239 	    nxgep->tx_rings, nxgep->tx_rings->rings));
2240 
2241 	/*
2242 	 * Map descriptors from the buffer pools for <channel>.
2243 	 */
2244 
2245 	/*
2246 	 * Set up and prepare buffer blocks, descriptors
2247 	 * and mailbox.
2248 	 */
2249 	status = nxge_map_txdma_channel(nxgep, channel,
2250 	    pData, pRing, num_chunks, pControl, mailbox);
2251 	if (status != NXGE_OK) {
2252 		NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2253 		    "==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
2254 		    "returned 0x%x",
2255 		    nxgep, channel, status));
2256 		return (status);
2257 	}
2258 
2259 	ring = *pRing;
2260 
2261 	ring->index = (uint16_t)channel;
2262 	ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2263 
2264 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2265 	if (isLDOMguest(nxgep)) {
2266 		(void) nxge_tdc_lp_conf(nxgep, channel);
2267 	} else {
2268 		nxge_tdc_hvio_setup(nxgep, channel);
2269 	}
2270 #endif
2271 
2272 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2273 	    "(status 0x%x channel %d)", status, channel));
2274 
2275 	return (status);
2276 }
2277 
/*
 * nxge_map_txdma_channel
 *
 *	Map one channel's buffer blocks, then its descriptor ring and
 *	mailbox.  On buffer-ring failure, nothing needs unwinding.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 *	dma_buf_p	The channel's buffer chunks.
 *	tx_desc_p	Out: the channel's transmit ring.
 *	num_chunks	Number of chunks in dma_buf_p.
 *	dma_cntl_p	The channel's control (descriptor/mailbox) area.
 *	tx_mbox_p	Out: the channel's mailbox.
 */
static nxge_status_t
nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
	p_nxge_dma_common_t *dma_buf_p,
	p_tx_ring_t *tx_desc_p,
	uint32_t num_chunks,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_mbox_t *tx_mbox_p)
{
	int	status = NXGE_OK;

	/*
	 * Set up and prepare buffer blocks, descriptors
	 * and mailbox.
	 */
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel (channel %d)", channel));
	/*
	 * Transmit buffer blocks
	 */
	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
	    dma_buf_p, tx_desc_p, num_chunks);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_txdma_channel (channel %d): "
		    "map buffer failed 0x%x", channel, status));
		goto nxge_map_txdma_channel_exit;
	}

	/*
	 * Transmit block ring, and mailbox.
	 */
	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
	    tx_mbox_p);

	goto nxge_map_txdma_channel_exit;

	/*
	 * NOTE(review): this label is currently unreachable --
	 * nxge_map_txdma_channel_cfg_ring() returns void and nothing
	 * jumps here.  Kept as the unwind point should cfg_ring ever
	 * gain a failure mode.
	 */
nxge_map_txdma_channel_fail1:
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel: unmap buf"
	    "(status 0x%x channel %d)",
	    status, channel));
	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);

nxge_map_txdma_channel_exit:
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel: "
	    "(status 0x%x channel %d)",
	    status, channel));

	return (status);
}
2329 
/*
 * nxge_unmap_txdma_channel
 *
 *	Tear down one TDC: free its configuration ring and mailbox,
 *	then its buffer ring and transmit buffers, and finally clear
 *	the per-channel ring pointer.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	tx_ring_t *ring;
	tx_mbox_t *mailbox;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel (channel %d)", channel));
	/*
	 * unmap tx block ring, and mailbox.
	 */
	ring = nxgep->tx_rings->rings[channel];
	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];

	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);

	/* unmap buffer blocks */
	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);

	/* Release the channel's transmit buffers. */
	nxge_free_txb(nxgep, channel);

	/*
	 * Cleanup the reference to the ring now that it does not exist.
	 */
	nxgep->tx_rings->rings[channel] = NULL;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
}
2359 
2360 /*
2361  * nxge_map_txdma_channel_cfg_ring
2362  *
2363  *	Map a TDC into our kernel space.
2364  *	This function allocates all of the per-channel data structures.
2365  *
2366  * Arguments:
2367  * 	nxgep
2368  * 	dma_channel	The channel to map.
2369  *	dma_cntl_p
2370  *	tx_ring_p	dma_channel's transmit ring
2371  *	tx_mbox_p	dma_channel's mailbox
2372  *
2373  * Notes:
2374  *
2375  * NPI/NXGE function calls:
2376  *	nxge_setup_dma_common()
2377  *
2378  * Registers accessed:
2379  *	none.
2380  *
2381  * Context:
2382  *	Any domain
2383  */
/*ARGSUSED*/
static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	cntl_dmap;
	p_nxge_dma_common_t 	dmap;
	p_tx_rng_cfig_t		tx_ring_cfig_p;
	p_tx_ring_kick_t	tx_ring_kick_p;
	p_tx_cs_t		tx_cs_p;
	p_tx_dma_ent_msk_t	tx_evmask_p;
	p_txdma_mbh_t		mboxh_p;
	p_txdma_mbl_t		mboxl_p;
	uint64_t		tx_desc_len;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Carve the descriptor ring out of the channel's control area. */
	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
	    sizeof (tx_desc_t));
	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	/* Clear the ring's shadow register copies. */
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
	    dma_channel,
	    dmap->dma_cookie.dmac_laddress));

	/*
	 * Compose the ring configuration: DMA base address plus the
	 * length field, which encodes the ring size in 8-descriptor
	 * units (hence the >> 3).
	 */
	tx_ring_cfig_p->value = 0;
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
	tx_ring_cfig_p->value =
	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
	    dma_channel,
	    tx_ring_cfig_p->value));

	/* Prime the shadow control/status with the reset bit set. */
	tx_cs_p->bits.ldw.rst = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	/* Carve one mailbox entry out of the control area. */
	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));

	/* Split the mailbox DMA address into the high/low shadow regs. */
	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);

	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
	    "mbox $%p",
	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
	/* Logical-page shadow registers: clear, then enable pages 0 and 1. */
	tx_ring_p->page_valid.value = 0;
	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
	tx_ring_p->page_hdl.value = 0;

	tx_ring_p->page_valid.bits.ldw.page0 = 1;
	tx_ring_p->page_valid.bits.ldw.page1 = 1;

	/* Default the TXC maximum burst size. */
	tx_ring_p->max_burst.value = 0;
	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;

	/* Return the newly allocated mailbox to the caller. */
	*tx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel_cfg_ring"));
}
2484 
/*
 * nxge_unmap_txdma_channel_cfg_ring
 *
 *	Undo nxge_map_txdma_channel_cfg_ring(): free the mailbox
 *	structure.  tx_ring_p is used only for the debug message;
 *	the ring's shadow registers need no teardown.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
	    tx_ring_p->tdc));

	/* Allocated in nxge_map_txdma_channel_cfg_ring(). */
	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_unmap_txdma_channel_cfg_ring"));
}
2499 
2500 /*
2501  * nxge_map_txdma_channel_buf_ring
2502  *
2503  *
2504  * Arguments:
2505  * 	nxgep
2506  * 	channel		The channel to map.
2507  *	dma_buf_p
2508  *	tx_desc_p	channel's descriptor ring
2509  *	num_chunks
2510  *
2511  * Notes:
2512  *
2513  * NPI/NXGE function calls:
2514  *	nxge_setup_dma_common()
2515  *
2516  * Registers accessed:
2517  *	none.
2518  *
2519  * Context:
2520  *	Any domain
2521  */
2522 static nxge_status_t
2523 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2524 	p_nxge_dma_common_t *dma_buf_p,
2525 	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2526 {
2527 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
2528 	p_nxge_dma_common_t 	dmap;
2529 	nxge_os_dma_handle_t	tx_buf_dma_handle;
2530 	p_tx_ring_t 		tx_ring_p;
2531 	p_tx_msg_t 		tx_msg_ring;
2532 	nxge_status_t		status = NXGE_OK;
2533 	int			ddi_status = DDI_SUCCESS;
2534 	int			i, j, index;
2535 	uint32_t		size, bsize;
2536 	uint32_t 		nblocks, nmsgs;
2537 	char			qname[TASKQ_NAMELEN];
2538 
2539 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2540 	    "==> nxge_map_txdma_channel_buf_ring"));
2541 
2542 	dma_bufp = tmp_bufp = *dma_buf_p;
2543 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2544 		" nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2545 		"chunks bufp $%p",
2546 		    channel, num_chunks, dma_bufp));
2547 
2548 	nmsgs = 0;
2549 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2550 		nmsgs += tmp_bufp->nblocks;
2551 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2552 		    "==> nxge_map_txdma_channel_buf_ring: channel %d "
2553 		    "bufp $%p nblocks %d nmsgs %d",
2554 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2555 	}
2556 	if (!nmsgs) {
2557 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2558 		    "<== nxge_map_txdma_channel_buf_ring: channel %d "
2559 		    "no msg blocks",
2560 		    channel));
2561 		status = NXGE_ERROR;
2562 		goto nxge_map_txdma_channel_buf_ring_exit;
2563 	}
2564 
2565 	tx_ring_p = (p_tx_ring_t)
2566 	    KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2567 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2568 	    (void *)nxgep->interrupt_cookie);
2569 
2570 	(void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2571 	tx_ring_p->tx_ring_busy = B_FALSE;
2572 	tx_ring_p->nxgep = nxgep;
2573 	tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL;
2574 	(void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d",
2575 	    nxgep->instance, channel);
2576 	tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1,
2577 	    TASKQ_DEFAULTPRI, 0);
2578 	if (tx_ring_p->taskq == NULL) {
2579 		goto nxge_map_txdma_channel_buf_ring_fail1;
2580 	}
2581 
2582 	/*
2583 	 * Allocate transmit message rings and handles for packets
2584 	 * not to be copied to premapped buffers.
2585 	 */
2586 	size = nmsgs * sizeof (tx_msg_t);
2587 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2588 	for (i = 0; i < nmsgs; i++) {
2589 		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2590 		    DDI_DMA_DONTWAIT, 0,
2591 		    &tx_msg_ring[i].dma_handle);
2592 		if (ddi_status != DDI_SUCCESS) {
2593 			status |= NXGE_DDI_FAILED;
2594 			break;
2595 		}
2596 	}
2597 	if (i < nmsgs) {
2598 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2599 		    "Allocate handles failed."));
2600 		goto nxge_map_txdma_channel_buf_ring_fail1;
2601 	}
2602 
2603 	tx_ring_p->tdc = channel;
2604 	tx_ring_p->tx_msg_ring = tx_msg_ring;
2605 	tx_ring_p->tx_ring_size = nmsgs;
2606 	tx_ring_p->num_chunks = num_chunks;
2607 	if (!nxge_tx_intr_thres) {
2608 		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2609 	}
2610 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2611 	tx_ring_p->rd_index = 0;
2612 	tx_ring_p->wr_index = 0;
2613 	tx_ring_p->ring_head.value = 0;
2614 	tx_ring_p->ring_kick_tail.value = 0;
2615 	tx_ring_p->descs_pending = 0;
2616 
2617 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2618 	    "==> nxge_map_txdma_channel_buf_ring: channel %d "
2619 	    "actual tx desc max %d nmsgs %d "
2620 	    "(config nxge_tx_ring_size %d)",
2621 	    channel, tx_ring_p->tx_ring_size, nmsgs,
2622 	    nxge_tx_ring_size));
2623 
2624 	/*
2625 	 * Map in buffers from the buffer pool.
2626 	 */
2627 	index = 0;
2628 	bsize = dma_bufp->block_size;
2629 
2630 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2631 	    "dma_bufp $%p tx_rng_p $%p "
2632 	    "tx_msg_rng_p $%p bsize %d",
2633 	    dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2634 
2635 	tx_buf_dma_handle = dma_bufp->dma_handle;
2636 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
2637 		bsize = dma_bufp->block_size;
2638 		nblocks = dma_bufp->nblocks;
2639 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2640 		    "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2641 		    "size %d dma_bufp $%p",
2642 		    i, sizeof (nxge_dma_common_t), dma_bufp));
2643 
2644 		for (j = 0; j < nblocks; j++) {
2645 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2646 			dmap = &tx_msg_ring[index++].buf_dma;
2647 #ifdef TX_MEM_DEBUG
2648 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2649 			    "==> nxge_map_txdma_channel_buf_ring: j %d"
2650 			    "dmap $%p", i, dmap));
2651 #endif
2652 			nxge_setup_dma_common(dmap, dma_bufp, 1,
2653 			    bsize);
2654 		}
2655 	}
2656 
2657 	if (i < num_chunks) {
2658 		status = NXGE_ERROR;
2659 		goto nxge_map_txdma_channel_buf_ring_fail1;
2660 	}
2661 
2662 	*tx_desc_p = tx_ring_p;
2663 
2664 	goto nxge_map_txdma_channel_buf_ring_exit;
2665 
2666 nxge_map_txdma_channel_buf_ring_fail1:
2667 	if (tx_ring_p->taskq) {
2668 		ddi_taskq_destroy(tx_ring_p->taskq);
2669 		tx_ring_p->taskq = NULL;
2670 	}
2671 
2672 	index--;
2673 	for (; index >= 0; index--) {
2674 		if (tx_msg_ring[index].dma_handle != NULL) {
2675 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2676 		}
2677 	}
2678 	MUTEX_DESTROY(&tx_ring_p->lock);
2679 	KMEM_FREE(tx_msg_ring, size);
2680 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2681 
2682 	status = NXGE_ERROR;
2683 
2684 nxge_map_txdma_channel_buf_ring_exit:
2685 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2686 	    "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2687 
2688 	return (status);
2689 }
2690 
/*
 * nxge_unmap_txdma_channel_buf_ring
 *
 *	Undo nxge_map_txdma_channel_buf_ring(): reclaim buffers the
 *	hardware has finished with, free any pending messages and the
 *	per-message DMA handles, destroy the taskq and lock, and free
 *	the ring itself.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
{
	p_tx_msg_t 		tx_msg_ring;
	p_tx_msg_t 		tx_msg_p;
	int			i;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring"));
	if (tx_ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
		return;
	}
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
	    tx_ring_p->tdc));

	tx_msg_ring = tx_ring_p->tx_msg_ring;

	/*
	 * Since the serialization thread, timer thread and
	 * interrupt thread can all call the transmit reclaim,
	 * the unmapping function needs to acquire the lock
	 * to free those buffers which were transmitted
	 * by the hardware already.
	 */
	MUTEX_ENTER(&tx_ring_p->lock);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
	    "channel %d",
	    tx_ring_p->tdc));
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	/* Free any messages still pending in the ring. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		tx_msg_p = &tx_msg_ring[i];
		if (tx_msg_p->tx_message != NULL) {
			freemsg(tx_msg_p->tx_message);
			tx_msg_p->tx_message = NULL;
		}
	}

	/* Free the per-message DMA handles. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		if (tx_msg_ring[i].dma_handle != NULL) {
			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
		}
		tx_msg_ring[i].dma_handle = NULL;
	}

	MUTEX_EXIT(&tx_ring_p->lock);

	if (tx_ring_p->taskq) {
		ddi_taskq_destroy(tx_ring_p->taskq);
		tx_ring_p->taskq = NULL;
	}

	MUTEX_DESTROY(&tx_ring_p->lock);
	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_unmap_txdma_channel_buf_ring"));
}
2755 
/*
 * nxge_txdma_hw_start
 *
 *	Start the TXDMA hardware for one channel: validate that the
 *	ring and mailbox tables exist, then start the channel via
 *	nxge_txdma_start_channel() with its descriptor ring and mailbox.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to start.
 */
static nxge_status_t
nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
{
	p_tx_rings_t 		tx_rings;
	p_tx_ring_t 		*tx_desc_rings;
	p_tx_mbox_areas_t 	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_start: NULL ring pointer"));
		return (NXGE_ERROR);
	}
	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_start: NULL ring pointers"));
		return (NXGE_ERROR);
	}

	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));

	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;

	status = nxge_txdma_start_channel(nxgep, channel,
	    (p_tx_ring_t)tx_desc_rings[channel],
	    (p_tx_mbox_t)tx_mbox_p[channel]);
	if (status != NXGE_OK) {
		goto nxge_txdma_hw_start_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p rings $%p",
	    nxgep->tx_rings, nxgep->tx_rings->rings));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p",
	    nxgep->tx_rings, tx_desc_rings));

	goto nxge_txdma_hw_start_exit;

nxge_txdma_hw_start_fail1:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_start: disable "
	    "(status 0x%x channel %d)", status, channel));

nxge_txdma_hw_start_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_start: (status 0x%x)", status));

	return (status);
}
2813 
2814 /*
2815  * nxge_txdma_start_channel
2816  *
2817  *	Start a TDC.
2818  *
2819  * Arguments:
2820  * 	nxgep
2821  * 	channel		The channel to start.
2822  * 	tx_ring_p	channel's transmit descriptor ring.
2823  * 	tx_mbox_p	channel' smailbox.
2824  *
2825  * Notes:
2826  *
2827  * NPI/NXGE function calls:
2828  *	nxge_reset_txdma_channel()
2829  *	nxge_init_txdma_channel_event_mask()
2830  *	nxge_enable_txdma_channel()
2831  *
2832  * Registers accessed:
2833  *	none directly (see functions above).
2834  *
2835  * Context:
2836  *	Any domain
2837  */
static nxge_status_t
nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)

{
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
		"==> nxge_txdma_start_channel (channel %d)", channel));
	/*
	 * TXDMA/TXC must be in stopped state.
	 */
	(void) nxge_txdma_stop_inj_err(nxgep, channel);

	/*
	 * Reset TXDMA channel: write the shadow control/status
	 * with only the reset bit set.
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.ldw.rst = 1;
	status = nxge_reset_txdma_channel(nxgep, channel,
			tx_ring_p->tx_cs.value);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"==> nxge_txdma_start_channel (channel %d)"
			" reset channel failed 0x%x", channel, status));
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel specific FZC control
	 * configurations. These FZC registers are pertaining
	 * to each TX channel (i.e. logical pages).
	 * Guest domains have no FZC access, so this step is
	 * service/primary-domain only.
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_txdma_channel(nxgep, channel,
		    tx_ring_p, tx_mbox_p);
		if (status != NXGE_OK) {
			goto nxge_txdma_start_channel_exit;
		}
	}

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = nxge_init_txdma_channel_event_mask(nxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Load TXDMA descriptors, buffers, mailbox,
	 * initialise the DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_txdma_channel(nxgep, channel,
			tx_ring_p, tx_mbox_p);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

nxge_txdma_start_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));

	return (status);
}
2905 
2906 /*
2907  * nxge_txdma_stop_channel
2908  *
2909  *	Stop a TDC.
2910  *
2911  * Arguments:
2912  * 	nxgep
2913  * 	channel		The channel to stop.
2914  * 	tx_ring_p	channel's transmit descriptor ring.
2915  * 	tx_mbox_p	channel' smailbox.
2916  *
2917  * Notes:
2918  *
2919  * NPI/NXGE function calls:
2920  *	nxge_txdma_stop_inj_err()
2921  *	nxge_reset_txdma_channel()
2922  *	nxge_init_txdma_channel_event_mask()
2923  *	nxge_init_txdma_channel_cntl_stat()
2924  *	nxge_disable_txdma_channel()
2925  *
2926  * Registers accessed:
2927  *	none directly (see functions above).
2928  *
2929  * Context:
2930  *	Any domain
2931  */
2932 /*ARGSUSED*/
2933 static nxge_status_t
2934 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2935 {
2936 	p_tx_ring_t tx_ring_p;
2937 	int status = NXGE_OK;
2938 
2939 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2940 	    "==> nxge_txdma_stop_channel: channel %d", channel));
2941 
2942 	/*
2943 	 * Stop (disable) TXDMA and TXC (if stop bit is set
2944 	 * and STOP_N_GO bit not set, the TXDMA reset state will
2945 	 * not be set if reset TXDMA.
2946 	 */
2947 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
2948 
2949 	tx_ring_p = nxgep->tx_rings->rings[channel];
2950 
2951 	/*
2952 	 * Reset TXDMA channel
2953 	 */
2954 	tx_ring_p->tx_cs.value = 0;
2955 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
2956 	status = nxge_reset_txdma_channel(nxgep, channel,
2957 	    tx_ring_p->tx_cs.value);
2958 	if (status != NXGE_OK) {
2959 		goto nxge_txdma_stop_channel_exit;
2960 	}
2961 
2962 #ifdef HARDWARE_REQUIRED
2963 	/* Set up the interrupt event masks. */
2964 	tx_ring_p->tx_evmask.value = 0;
2965 	status = nxge_init_txdma_channel_event_mask(nxgep,
2966 	    channel, &tx_ring_p->tx_evmask);
2967 	if (status != NXGE_OK) {
2968 		goto nxge_txdma_stop_channel_exit;
2969 	}
2970 
2971 	/* Initialize the DMA control and status register */
2972 	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2973 	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2974 	    tx_ring_p->tx_cs.value);
2975 	if (status != NXGE_OK) {
2976 		goto nxge_txdma_stop_channel_exit;
2977 	}
2978 
2979 	tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2980 
2981 	/* Disable channel */
2982 	status = nxge_disable_txdma_channel(nxgep, channel,
2983 	    tx_ring_p, tx_mbox_p);
2984 	if (status != NXGE_OK) {
2985 		goto nxge_txdma_start_channel_exit;
2986 	}
2987 
2988 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2989 	    "==> nxge_txdma_stop_channel: event done"));
2990 
2991 #endif
2992 
2993 nxge_txdma_stop_channel_exit:
2994 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
2995 	return (status);
2996 }
2997 
2998 /*
2999  * nxge_txdma_get_ring
3000  *
3001  *	Get the ring for a TDC.
3002  *
3003  * Arguments:
3004  * 	nxgep
3005  * 	channel
3006  *
3007  * Notes:
3008  *
3009  * NPI/NXGE function calls:
3010  *
3011  * Registers accessed:
3012  *
3013  * Context:
3014  *	Any domain
3015  */
3016 static p_tx_ring_t
3017 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
3018 {
3019 	nxge_grp_set_t *set = &nxgep->tx_set;
3020 	int tdc;
3021 
3022 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
3023 
3024 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3025 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3026 		    "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
3027 		goto return_null;
3028 	}
3029 
3030 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3031 		if ((1 << tdc) & set->owned.map) {
3032 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3033 			if (ring) {
3034 				if (channel == ring->tdc) {
3035 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
3036 					    "<== nxge_txdma_get_ring: "
3037 					    "tdc %d ring $%p", tdc, ring));
3038 					return (ring);
3039 				}
3040 			}
3041 		}
3042 	}
3043 
3044 return_null:
3045 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
3046 	    "ring not found"));
3047 
3048 	return (NULL);
3049 }
3050 
3051 /*
3052  * nxge_txdma_get_mbox
3053  *
3054  *	Get the mailbox for a TDC.
3055  *
3056  * Arguments:
3057  * 	nxgep
3058  * 	channel
3059  *
3060  * Notes:
3061  *
3062  * NPI/NXGE function calls:
3063  *
3064  * Registers accessed:
3065  *
3066  * Context:
3067  *	Any domain
3068  */
3069 static p_tx_mbox_t
3070 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3071 {
3072 	nxge_grp_set_t *set = &nxgep->tx_set;
3073 	int tdc;
3074 
3075 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3076 
3077 	if (nxgep->tx_mbox_areas_p == 0 ||
3078 	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3079 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3080 		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3081 		goto return_null;
3082 	}
3083 
3084 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3085 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3086 		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3087 		goto return_null;
3088 	}
3089 
3090 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3091 		if ((1 << tdc) & set->owned.map) {
3092 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3093 			if (ring) {
3094 				if (channel == ring->tdc) {
3095 					tx_mbox_t *mailbox = nxgep->
3096 					    tx_mbox_areas_p->
3097 					    txmbox_areas_p[tdc];
3098 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
3099 					    "<== nxge_txdma_get_mbox: tdc %d "
3100 					    "ring $%p", tdc, mailbox));
3101 					return (mailbox);
3102 				}
3103 			}
3104 		}
3105 	}
3106 
3107 return_null:
3108 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3109 	    "mailbox not found"));
3110 
3111 	return (NULL);
3112 }
3113 
3114 /*
3115  * nxge_tx_err_evnts
3116  *
3117  *	Recover a TDC.
3118  *
3119  * Arguments:
3120  * 	nxgep
3121  * 	index	The index to the TDC ring.
3122  * 	ldvp	Used to get the channel number ONLY.
3123  * 	cs	A copy of the bits from TX_CS.
3124  *
3125  * Notes:
3126  *	Calling tree:
3127  *	 nxge_tx_intr()
3128  *
3129  * NPI/NXGE function calls:
3130  *	npi_txdma_ring_error_get()
3131  *	npi_txdma_inj_par_error_get()
3132  *	nxge_txdma_fatal_err_recover()
3133  *
3134  * Registers accessed:
3135  *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
3136  *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3137  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3138  *
3139  * Context:
3140  *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3141  */
3142 /*ARGSUSED*/
3143 static nxge_status_t
3144 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3145 {
3146 	npi_handle_t		handle;
3147 	npi_status_t		rs;
3148 	uint8_t			channel;
3149 	p_tx_ring_t 		*tx_rings;
3150 	p_tx_ring_t 		tx_ring_p;
3151 	p_nxge_tx_ring_stats_t	tdc_stats;
3152 	boolean_t		txchan_fatal = B_FALSE;
3153 	nxge_status_t		status = NXGE_OK;
3154 	tdmc_inj_par_err_t	par_err;
3155 	uint32_t		value;
3156 
3157 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3158 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3159 	channel = ldvp->channel;
3160 
3161 	tx_rings = nxgep->tx_rings->rings;
3162 	tx_ring_p = tx_rings[index];
3163 	tdc_stats = tx_ring_p->tdc_stats;
3164 	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3165 	    (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3166 	    (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3167 		if ((rs = npi_txdma_ring_error_get(handle, channel,
3168 		    &tdc_stats->errlog)) != NPI_SUCCESS)
3169 			return (NXGE_ERROR | rs);
3170 	}
3171 
3172 	if (cs.bits.ldw.mbox_err) {
3173 		tdc_stats->mbox_err++;
3174 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3175 		    NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3176 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3177 		    "==> nxge_tx_err_evnts(channel %d): "
3178 		    "fatal error: mailbox", channel));
3179 		txchan_fatal = B_TRUE;
3180 	}
3181 	if (cs.bits.ldw.pkt_size_err) {
3182 		tdc_stats->pkt_size_err++;
3183 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3184 		    NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3185 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3186 		    "==> nxge_tx_err_evnts(channel %d): "
3187 		    "fatal error: pkt_size_err", channel));
3188 		txchan_fatal = B_TRUE;
3189 	}
3190 	if (cs.bits.ldw.tx_ring_oflow) {
3191 		tdc_stats->tx_ring_oflow++;
3192 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3193 		    NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3194 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3195 		    "==> nxge_tx_err_evnts(channel %d): "
3196 		    "fatal error: tx_ring_oflow", channel));
3197 		txchan_fatal = B_TRUE;
3198 	}
3199 	if (cs.bits.ldw.pref_buf_par_err) {
3200 		tdc_stats->pre_buf_par_err++;
3201 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3202 		    NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3203 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3204 		    "==> nxge_tx_err_evnts(channel %d): "
3205 		    "fatal error: pre_buf_par_err", channel));
3206 		/* Clear error injection source for parity error */
3207 		(void) npi_txdma_inj_par_error_get(handle, &value);
3208 		par_err.value = value;
3209 		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3210 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3211 		txchan_fatal = B_TRUE;
3212 	}
3213 	if (cs.bits.ldw.nack_pref) {
3214 		tdc_stats->nack_pref++;
3215 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3216 		    NXGE_FM_EREPORT_TDMC_NACK_PREF);
3217 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3218 		    "==> nxge_tx_err_evnts(channel %d): "
3219 		    "fatal error: nack_pref", channel));
3220 		txchan_fatal = B_TRUE;
3221 	}
3222 	if (cs.bits.ldw.nack_pkt_rd) {
3223 		tdc_stats->nack_pkt_rd++;
3224 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3225 		    NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3226 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3227 		    "==> nxge_tx_err_evnts(channel %d): "
3228 		    "fatal error: nack_pkt_rd", channel));
3229 		txchan_fatal = B_TRUE;
3230 	}
3231 	if (cs.bits.ldw.conf_part_err) {
3232 		tdc_stats->conf_part_err++;
3233 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3234 		    NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3235 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3236 		    "==> nxge_tx_err_evnts(channel %d): "
3237 		    "fatal error: config_partition_err", channel));
3238 		txchan_fatal = B_TRUE;
3239 	}
3240 	if (cs.bits.ldw.pkt_prt_err) {
3241 		tdc_stats->pkt_part_err++;
3242 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3243 		    NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3244 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3245 		    "==> nxge_tx_err_evnts(channel %d): "
3246 		    "fatal error: pkt_prt_err", channel));
3247 		txchan_fatal = B_TRUE;
3248 	}
3249 
3250 	/* Clear error injection source in case this is an injected error */
3251 	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3252 
3253 	if (txchan_fatal) {
3254 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3255 		    " nxge_tx_err_evnts: "
3256 		    " fatal error on channel %d cs 0x%llx\n",
3257 		    channel, cs.value));
3258 		status = nxge_txdma_fatal_err_recover(nxgep, channel,
3259 		    tx_ring_p);
3260 		if (status == NXGE_OK) {
3261 			FM_SERVICE_RESTORED(nxgep);
3262 		}
3263 	}
3264 
3265 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3266 
3267 	return (status);
3268 }
3269 
3270 static nxge_status_t
3271 nxge_txdma_fatal_err_recover(
3272 	p_nxge_t nxgep,
3273 	uint16_t channel,
3274 	p_tx_ring_t tx_ring_p)
3275 {
3276 	npi_handle_t	handle;
3277 	npi_status_t	rs = NPI_SUCCESS;
3278 	p_tx_mbox_t	tx_mbox_p;
3279 	nxge_status_t	status = NXGE_OK;
3280 
3281 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3282 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3283 	    "Recovering from TxDMAChannel#%d error...", channel));
3284 
3285 	/*
3286 	 * Stop the dma channel waits for the stop done.
3287 	 * If the stop done bit is not set, then create
3288 	 * an error.
3289 	 */
3290 
3291 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3292 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3293 	MUTEX_ENTER(&tx_ring_p->lock);
3294 	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3295 	if (rs != NPI_SUCCESS) {
3296 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3297 		    "==> nxge_txdma_fatal_err_recover (channel %d): "
3298 		    "stop failed ", channel));
3299 		goto fail;
3300 	}
3301 
3302 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3303 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3304 
3305 	/*
3306 	 * Reset TXDMA channel
3307 	 */
3308 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3309 	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3310 	    NPI_SUCCESS) {
3311 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3312 		    "==> nxge_txdma_fatal_err_recover (channel %d)"
3313 		    " reset channel failed 0x%x", channel, rs));
3314 		goto fail;
3315 	}
3316 
3317 	/*
3318 	 * Reset the tail (kick) register to 0.
3319 	 * (Hardware will not reset it. Tx overflow fatal
3320 	 * error if tail is not set to 0 after reset!
3321 	 */
3322 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3323 
3324 	/* Restart TXDMA channel */
3325 
3326 	if (!isLDOMguest(nxgep)) {
3327 		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3328 
3329 		// XXX This is a problem in HIO!
3330 		/*
3331 		 * Initialize the TXDMA channel specific FZC control
3332 		 * configurations. These FZC registers are pertaining
3333 		 * to each TX channel (i.e. logical pages).
3334 		 */
3335 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3336 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
3337 		    tx_ring_p, tx_mbox_p);
3338 		if (status != NXGE_OK)
3339 			goto fail;
3340 	}
3341 
3342 	/*
3343 	 * Initialize the event masks.
3344 	 */
3345 	tx_ring_p->tx_evmask.value = 0;
3346 	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3347 	    &tx_ring_p->tx_evmask);
3348 	if (status != NXGE_OK)
3349 		goto fail;
3350 
3351 	tx_ring_p->wr_index_wrap = B_FALSE;
3352 	tx_ring_p->wr_index = 0;
3353 	tx_ring_p->rd_index = 0;
3354 
3355 	/*
3356 	 * Load TXDMA descriptors, buffers, mailbox,
3357 	 * initialise the DMA channels and
3358 	 * enable each DMA channel.
3359 	 */
3360 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3361 	status = nxge_enable_txdma_channel(nxgep, channel,
3362 	    tx_ring_p, tx_mbox_p);
3363 	MUTEX_EXIT(&tx_ring_p->lock);
3364 	if (status != NXGE_OK)
3365 		goto fail;
3366 
3367 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3368 	    "Recovery Successful, TxDMAChannel#%d Restored",
3369 	    channel));
3370 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3371 
3372 	return (NXGE_OK);
3373 
3374 fail:
3375 	MUTEX_EXIT(&tx_ring_p->lock);
3376 
3377 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
3378 	    "nxge_txdma_fatal_err_recover (channel %d): "
3379 	    "failed to recover this txdma channel", channel));
3380 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3381 
3382 	return (status);
3383 }
3384 
3385 /*
3386  * nxge_tx_port_fatal_err_recover
3387  *
3388  *	Attempt to recover from a fatal port error.
3389  *
3390  * Arguments:
3391  * 	nxgep
3392  *
3393  * Notes:
3394  *	How would a guest do this?
3395  *
3396  * NPI/NXGE function calls:
3397  *
3398  * Registers accessed:
3399  *
3400  * Context:
3401  *	Service domain
3402  */
3403 nxge_status_t
3404 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3405 {
3406 	nxge_grp_set_t *set = &nxgep->tx_set;
3407 	nxge_channel_t tdc;
3408 
3409 	tx_ring_t	*ring;
3410 	tx_mbox_t	*mailbox;
3411 
3412 	npi_handle_t	handle;
3413 	nxge_status_t	status;
3414 	npi_status_t	rs;
3415 
3416 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3417 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3418 	    "Recovering from TxPort error..."));
3419 
3420 	if (isLDOMguest(nxgep)) {
3421 		return (NXGE_OK);
3422 	}
3423 
3424 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3425 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3426 		    "<== nxge_tx_port_fatal_err_recover: not initialized"));
3427 		return (NXGE_ERROR);
3428 	}
3429 
3430 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3431 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3432 		    "<== nxge_tx_port_fatal_err_recover: "
3433 		    "NULL ring pointer(s)"));
3434 		return (NXGE_ERROR);
3435 	}
3436 
3437 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3438 		if ((1 << tdc) & set->owned.map) {
3439 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3440 			if (ring)
3441 				MUTEX_ENTER(&ring->lock);
3442 		}
3443 	}
3444 
3445 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3446 
3447 	/*
3448 	 * Stop all the TDCs owned by us.
3449 	 * (The shared TDCs will have been stopped by their owners.)
3450 	 */
3451 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3452 		if ((1 << tdc) & set->owned.map) {
3453 			ring = nxgep->tx_rings->rings[tdc];
3454 			if (ring) {
3455 				rs = npi_txdma_channel_control
3456 				    (handle, TXDMA_STOP, tdc);
3457 				if (rs != NPI_SUCCESS) {
3458 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3459 					    "nxge_tx_port_fatal_err_recover "
3460 					    "(channel %d): stop failed ", tdc));
3461 					goto fail;
3462 				}
3463 			}
3464 		}
3465 	}
3466 
3467 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3468 
3469 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3470 		if ((1 << tdc) & set->owned.map) {
3471 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3472 			if (ring) {
3473 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
3474 			}
3475 		}
3476 	}
3477 
3478 	/*
3479 	 * Reset all the TDCs.
3480 	 */
3481 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3482 
3483 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3484 		if ((1 << tdc) & set->owned.map) {
3485 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3486 			if (ring) {
3487 				if ((rs = npi_txdma_channel_control
3488 				    (handle, TXDMA_RESET, tdc))
3489 				    != NPI_SUCCESS) {
3490 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3491 					    "nxge_tx_port_fatal_err_recover "
3492 					    "(channel %d) reset channel "
3493 					    "failed 0x%x", tdc, rs));
3494 					goto fail;
3495 				}
3496 			}
3497 			/*
3498 			 * Reset the tail (kick) register to 0.
3499 			 * (Hardware will not reset it. Tx overflow fatal
3500 			 * error if tail is not set to 0 after reset!
3501 			 */
3502 			TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3503 		}
3504 	}
3505 
3506 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3507 
3508 	/* Restart all the TDCs */
3509 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3510 		if ((1 << tdc) & set->owned.map) {
3511 			ring = nxgep->tx_rings->rings[tdc];
3512 			if (ring) {
3513 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3514 				status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3515 				    ring, mailbox);
3516 				ring->tx_evmask.value = 0;
3517 				/*
3518 				 * Initialize the event masks.
3519 				 */
3520 				status = nxge_init_txdma_channel_event_mask
3521 				    (nxgep, tdc, &ring->tx_evmask);
3522 
3523 				ring->wr_index_wrap = B_FALSE;
3524 				ring->wr_index = 0;
3525 				ring->rd_index = 0;
3526 
3527 				if (status != NXGE_OK)
3528 					goto fail;
3529 				if (status != NXGE_OK)
3530 					goto fail;
3531 			}
3532 		}
3533 	}
3534 
3535 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3536 
3537 	/* Re-enable all the TDCs */
3538 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3539 		if ((1 << tdc) & set->owned.map) {
3540 			ring = nxgep->tx_rings->rings[tdc];
3541 			if (ring) {
3542 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3543 				status = nxge_enable_txdma_channel(nxgep, tdc,
3544 				    ring, mailbox);
3545 				if (status != NXGE_OK)
3546 					goto fail;
3547 			}
3548 		}
3549 	}
3550 
3551 	/*
3552 	 * Unlock all the TDCs.
3553 	 */
3554 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3555 		if ((1 << tdc) & set->owned.map) {
3556 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3557 			if (ring)
3558 				MUTEX_EXIT(&ring->lock);
3559 		}
3560 	}
3561 
3562 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
3563 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3564 
3565 	return (NXGE_OK);
3566 
3567 fail:
3568 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3569 		if ((1 << tdc) & set->owned.map) {
3570 			ring = nxgep->tx_rings->rings[tdc];
3571 			if (ring)
3572 				MUTEX_EXIT(&ring->lock);
3573 		}
3574 	}
3575 
3576 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3577 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3578 
3579 	return (status);
3580 }
3581 
3582 /*
3583  * nxge_txdma_inject_err
3584  *
3585  *	Inject an error into a TDC.
3586  *
3587  * Arguments:
3588  * 	nxgep
3589  * 	err_id	The error to inject.
3590  * 	chan	The channel to inject into.
3591  *
3592  * Notes:
3593  *	This is called from nxge_main.c:nxge_err_inject()
3594  *	Has this ioctl ever been used?
3595  *
3596  * NPI/NXGE function calls:
3597  *	npi_txdma_inj_par_error_get()
3598  *	npi_txdma_inj_par_error_set()
3599  *
3600  * Registers accessed:
3601  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3602  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
3603  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
3604  *
3605  * Context:
3606  *	Service domain
3607  */
3608 void
3609 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3610 {
3611 	tdmc_intr_dbg_t		tdi;
3612 	tdmc_inj_par_err_t	par_err;
3613 	uint32_t		value;
3614 	npi_handle_t		handle;
3615 
3616 	switch (err_id) {
3617 
3618 	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3619 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
3620 		/* Clear error injection source for parity error */
3621 		(void) npi_txdma_inj_par_error_get(handle, &value);
3622 		par_err.value = value;
3623 		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3624 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3625 
3626 		par_err.bits.ldw.inject_parity_error = (1 << chan);
3627 		(void) npi_txdma_inj_par_error_get(handle, &value);
3628 		par_err.value = value;
3629 		par_err.bits.ldw.inject_parity_error |= (1 << chan);
3630 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3631 		    (unsigned long long)par_err.value);
3632 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3633 		break;
3634 
3635 	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3636 	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3637 	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3638 	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3639 	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3640 	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3641 	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3642 		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3643 		    chan, &tdi.value);
3644 		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3645 			tdi.bits.ldw.pref_buf_par_err = 1;
3646 		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3647 			tdi.bits.ldw.mbox_err = 1;
3648 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3649 			tdi.bits.ldw.nack_pref = 1;
3650 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3651 			tdi.bits.ldw.nack_pkt_rd = 1;
3652 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3653 			tdi.bits.ldw.pkt_size_err = 1;
3654 		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3655 			tdi.bits.ldw.tx_ring_oflow = 1;
3656 		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3657 			tdi.bits.ldw.conf_part_err = 1;
3658 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3659 			tdi.bits.ldw.pkt_part_err = 1;
3660 #if defined(__i386)
3661 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3662 		    tdi.value);
3663 #else
3664 		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3665 		    tdi.value);
3666 #endif
3667 		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3668 		    chan, tdi.value);
3669 
3670 		break;
3671 	}
3672 }
3673