xref: /titanic_44/usr/src/uts/common/io/nxge/nxge_txdma.c (revision 22eb7cb54d8a6bcf6fe2674cb4b1f0cf2d85cfb6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/nxge/nxge_impl.h>
29 #include <sys/nxge/nxge_txdma.h>
30 #include <sys/nxge/nxge_hio.h>
31 #include <npi_tx_rd64.h>
32 #include <npi_tx_wr64.h>
33 #include <sys/llc1.h>
34 
35 uint32_t 	nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
36 uint32_t	nxge_tx_minfree = 32;
37 uint32_t	nxge_tx_intr_thres = 0;
38 uint32_t	nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
39 uint32_t	nxge_tx_tiny_pack = 1;
40 uint32_t	nxge_tx_use_bcopy = 1;
41 
42 extern uint32_t 	nxge_tx_ring_size;
43 extern uint32_t 	nxge_bcopy_thresh;
44 extern uint32_t 	nxge_dvma_thresh;
45 extern uint32_t 	nxge_dma_stream_thresh;
46 extern dma_method_t 	nxge_force_dma;
47 extern uint32_t		nxge_cksum_offload;
48 
49 /* Device register access attributes for PIO.  */
50 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
51 /* Device descriptor access attributes for DMA.  */
52 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
53 /* Device buffer access attributes for DMA.  */
54 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
55 extern ddi_dma_attr_t nxge_desc_dma_attr;
56 extern ddi_dma_attr_t nxge_tx_dma_attr;
57 
58 extern int nxge_serial_tx(mblk_t *mp, void *arg);
59 
60 static nxge_status_t nxge_map_txdma(p_nxge_t, int);
61 
62 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
63 
64 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
65 	p_nxge_dma_common_t *, p_tx_ring_t *,
66 	uint32_t, p_nxge_dma_common_t *,
67 	p_tx_mbox_t *);
68 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
69 
70 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
71 	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
72 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
73 
74 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
75 	p_nxge_dma_common_t *, p_tx_ring_t,
76 	p_tx_mbox_t *);
77 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
78 	p_tx_ring_t, p_tx_mbox_t);
79 
80 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
81     p_tx_ring_t, p_tx_mbox_t);
82 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
83 
84 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
85 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
86 	p_nxge_ldv_t, tx_cs_t);
87 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
88 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
89 	uint16_t, p_tx_ring_t);
90 
91 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
92     p_tx_ring_t ring_p, uint16_t channel);
93 
94 nxge_status_t
95 nxge_init_txdma_channels(p_nxge_t nxgep)
96 {
97 	nxge_grp_set_t *set = &nxgep->tx_set;
98 	int i, count;
99 
100 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
101 
102 	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
103 		if ((1 << i) & set->lg.map) {
104 			int tdc;
105 			nxge_grp_t *group = set->group[i];
106 			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
107 				if ((1 << tdc) & group->map) {
108 					if ((nxge_grp_dc_add(nxgep,
109 					    (vr_handle_t)group,
110 					    VP_BOUND_TX, tdc)))
111 						return (NXGE_ERROR);
112 				}
113 			}
114 		}
115 		if (++count == set->lg.count)
116 			break;
117 	}
118 
119 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
120 
121 	return (NXGE_OK);
122 }
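
/*
 * Aside (illustrative sketch, not part of the driver): the loop above
 * walks two levels of bitmaps -- set->lg.map has one bit per configured
 * logical group, and each group's map has one bit per TDC bound to that
 * group.  The helper below, with hypothetical names, shows the same
 * bit-testing pattern in a self-contained form.
 */
typedef struct example_group {
	uint32_t	map;		/* one bit per TDC in this group */
} example_group_t;

static int
example_group_walk(uint32_t group_map, example_group_t *groups, int ngroups,
    int max_tdcs, int (*add)(int group, int tdc))
{
	int g, tdc;

	for (g = 0; g < ngroups; g++) {
		if (!((1 << g) & group_map))
			continue;	/* this group is not configured */
		for (tdc = 0; tdc < max_tdcs; tdc++) {
			if (((1 << tdc) & groups[g].map) &&
			    add(g, tdc) != 0)
				return (-1);	/* stop on the first failure */
		}
	}
	return (0);
}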
123 
124 nxge_status_t
125 nxge_init_txdma_channel(
126 	p_nxge_t nxge,
127 	int channel)
128 {
129 	nxge_status_t status;
130 
131 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
132 
133 	status = nxge_map_txdma(nxge, channel);
134 	if (status != NXGE_OK) {
135 		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
136 		    "<== nxge_init_txdma_channel: status 0x%x", status));
137 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
138 		return (status);
139 	}
140 
141 	status = nxge_txdma_hw_start(nxge, channel);
142 	if (status != NXGE_OK) {
143 		(void) nxge_unmap_txdma_channel(nxge, channel);
144 		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
145 		return (status);
146 	}
147 
148 	if (!nxge->statsp->tdc_ksp[channel])
149 		nxge_setup_tdc_kstats(nxge, channel);
150 
151 	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
152 
153 	return (status);
154 }
155 
156 void
157 nxge_uninit_txdma_channels(p_nxge_t nxgep)
158 {
159 	nxge_grp_set_t *set = &nxgep->tx_set;
160 	int tdc;
161 
162 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
163 
164 	if (set->owned.map == 0) {
165 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
166 		    "nxge_uninit_txdma_channels: no channels"));
167 		return;
168 	}
169 
170 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
171 		if ((1 << tdc) & set->owned.map) {
172 			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
173 		}
174 	}
175 
176 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
177 }
178 
179 void
180 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
181 {
182 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
183 
184 	if (nxgep->statsp->tdc_ksp[channel]) {
185 		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
186 		nxgep->statsp->tdc_ksp[channel] = 0;
187 	}
188 
189 	(void) nxge_txdma_stop_channel(nxgep, channel);
190 	nxge_unmap_txdma_channel(nxgep, channel);
191 
192 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
193 	    "<== nxge_uninit_txdma_channel"));
194 }
195 
196 void
197 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
198 	uint32_t entries, uint32_t size)
199 {
200 	size_t		tsize;
201 	*dest_p = *src_p;
202 	tsize = size * entries;
203 	dest_p->alength = tsize;
204 	dest_p->nblocks = entries;
205 	dest_p->block_size = size;
206 	dest_p->offset += tsize;
207 
208 	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
209 	src_p->alength -= tsize;
210 	src_p->dma_cookie.dmac_laddress += tsize;
211 	src_p->dma_cookie.dmac_size -= tsize;
212 }
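
/*
 * Aside (illustrative sketch, not part of the driver): the routine
 * above carves entries * size bytes off the front of a larger DMA
 * area.  The destination takes over that front region, and the
 * source's kernel address, remaining length, and DMA cookie are all
 * advanced past it.  The struct and function names below are
 * hypothetical; only the arithmetic mirrors nxge_setup_dma_common().
 */
typedef struct example_dma_area {
	caddr_t		kaddrp;		/* kernel virtual address */
	size_t		alength;	/* usable length remaining */
	uint64_t	ioaddr;		/* device (DMA) address */
} example_dma_area_t;

static void
example_dma_carve(example_dma_area_t *dst, example_dma_area_t *src,
    size_t entries, size_t size)
{
	size_t	tsize = entries * size;

	/* The destination covers the front tsize bytes of the source. */
	dst->kaddrp = src->kaddrp;
	dst->alength = tsize;
	dst->ioaddr = src->ioaddr;

	/* Advance the source past the region just handed out. */
	src->kaddrp += tsize;
	src->alength -= tsize;
	src->ioaddr += tsize;
}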
213 
214 /*
215  * nxge_reset_txdma_channel
216  *
217  *	Reset a TDC.
218  *
219  * Arguments:
220  * 	nxgep
221  * 	channel		The channel to reset.
222  * 	reg_data	The current TX_CS.
223  *
224  * Notes:
225  *
226  * NPI/NXGE function calls:
227  *	npi_txdma_channel_reset()
228  *	npi_txdma_channel_control()
229  *
230  * Registers accessed:
231  *	TX_CS		DMC+0x40028 Transmit Control And Status
232  *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
233  *
234  * Context:
235  *	Any domain
236  */
237 nxge_status_t
238 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
239 {
240 	npi_status_t		rs = NPI_SUCCESS;
241 	nxge_status_t		status = NXGE_OK;
242 	npi_handle_t		handle;
243 
244 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
245 
246 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
247 	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
248 		rs = npi_txdma_channel_reset(handle, channel);
249 	} else {
250 		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
251 		    channel);
252 	}
253 
254 	if (rs != NPI_SUCCESS) {
255 		status = NXGE_ERROR | rs;
256 	}
257 
258 	/*
259 	 * Reset the tail (kick) register to 0.
260 	 * (Hardware will not reset it; a Tx overflow fatal
261 	 * error occurs if the tail is not set to 0 after reset.)
262 	 */
263 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
264 
265 	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
266 	return (status);
267 }
268 
269 /*
270  * nxge_init_txdma_channel_event_mask
271  *
272  *	Enable interrupts for a set of events.
273  *
274  * Arguments:
275  * 	nxgep
276  * 	channel	The channel whose event interrupts to enable.
277  * 	mask_p	The events to enable.
278  *
279  * Notes:
280  *
281  * NPI/NXGE function calls:
282  *	npi_txdma_event_mask()
283  *
284  * Registers accessed:
285  *	TX_ENT_MSK	DMC+0x40020 Transmit Event Mask
286  *
287  * Context:
288  *	Any domain
289  */
290 nxge_status_t
291 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
292 		p_tx_dma_ent_msk_t mask_p)
293 {
294 	npi_handle_t		handle;
295 	npi_status_t		rs = NPI_SUCCESS;
296 	nxge_status_t		status = NXGE_OK;
297 
298 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
299 	    "<== nxge_init_txdma_channel_event_mask"));
300 
301 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
302 	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
303 	if (rs != NPI_SUCCESS) {
304 		status = NXGE_ERROR | rs;
305 	}
306 
307 	return (status);
308 }
309 
310 /*
311  * nxge_init_txdma_channel_cntl_stat
312  *
313  *	Initialize a TDC's control and status (TX_CS) register with reg_data.
314  *
315  * Arguments:
316  * 	nxgep
317  * 	channel		The channel to initialize.
318  *
319  * Notes:
320  *
321  * NPI/NXGE function calls:
322  *	npi_txdma_control_status()
323  *
324  * Registers accessed:
325  *	TX_CS		DMC+0x40028 Transmit Control And Status
326  *
327  * Context:
328  *	Any domain
329  */
330 nxge_status_t
331 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
332 	uint64_t reg_data)
333 {
334 	npi_handle_t		handle;
335 	npi_status_t		rs = NPI_SUCCESS;
336 	nxge_status_t		status = NXGE_OK;
337 
338 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
339 	    "<== nxge_init_txdma_channel_cntl_stat"));
340 
341 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
342 	rs = npi_txdma_control_status(handle, OP_SET, channel,
343 	    (p_tx_cs_t)&reg_data);
344 
345 	if (rs != NPI_SUCCESS) {
346 		status = NXGE_ERROR | rs;
347 	}
348 
349 	return (status);
350 }
351 
352 /*
353  * nxge_enable_txdma_channel
354  *
355  *	Enable a TDC.
356  *
357  * Arguments:
358  * 	nxgep
359  * 	channel		The channel to enable.
360  * 	tx_desc_p	channel's transmit descriptor ring.
361  * 	mbox_p		channel's mailbox.
362  *
363  * Notes:
364  *
365  * NPI/NXGE function calls:
366  *	npi_txdma_ring_config()
367  *	npi_txdma_mbox_config()
368  *	npi_txdma_channel_init_enable()
369  *
370  * Registers accessed:
371  *	TX_RNG_CFIG	DMC+0x40000 Transmit Ring Configuration
372  *	TXDMA_MBH	DMC+0x40030 TXDMA Mailbox High
373  *	TXDMA_MBL	DMC+0x40038 TXDMA Mailbox Low
374  *	TX_CS		DMC+0x40028 Transmit Control And Status
375  *
376  * Context:
377  *	Any domain
378  */
379 nxge_status_t
380 nxge_enable_txdma_channel(p_nxge_t nxgep,
381 	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
382 {
383 	npi_handle_t		handle;
384 	npi_status_t		rs = NPI_SUCCESS;
385 	nxge_status_t		status = NXGE_OK;
386 
387 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
388 
389 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
390 	/*
391 	 * Use configuration data composed at init time.
392 	 * Write to hardware the transmit ring configurations.
393 	 */
394 	rs = npi_txdma_ring_config(handle, OP_SET, channel,
395 	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
396 
397 	if (rs != NPI_SUCCESS) {
398 		return (NXGE_ERROR | rs);
399 	}
400 
401 	if (isLDOMguest(nxgep)) {
402 		/* Add interrupt handler for this channel. */
403 		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
404 			return (NXGE_ERROR);
405 	}
406 
407 	/* Write to hardware the mailbox */
408 	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
409 	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
410 
411 	if (rs != NPI_SUCCESS) {
412 		return (NXGE_ERROR | rs);
413 	}
414 
415 	/* Start the DMA engine. */
416 	rs = npi_txdma_channel_init_enable(handle, channel);
417 
418 	if (rs != NPI_SUCCESS) {
419 		return (NXGE_ERROR | rs);
420 	}
421 
422 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
423 
424 	return (status);
425 }
426 
427 void
428 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
429 		boolean_t l4_cksum, int pkt_len, uint8_t npads,
430 		p_tx_pkt_hdr_all_t pkthdrp,
431 		t_uscalar_t start_offset,
432 		t_uscalar_t stuff_offset)
433 {
434 	p_tx_pkt_header_t	hdrp;
435 	p_mblk_t 		nmp;
436 	uint64_t		tmp;
437 	size_t 			mblk_len;
438 	size_t 			iph_len;
439 	size_t 			hdrs_size;
440 	uint8_t			hdrs_buf[sizeof (struct ether_header) +
441 	    64 + sizeof (uint32_t)];
442 	uint8_t			*cursor;
443 	uint8_t 		*ip_buf;
444 	uint16_t		eth_type;
445 	uint8_t			ipproto;
446 	boolean_t		is_vlan = B_FALSE;
447 	size_t			eth_hdr_size;
448 
449 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
450 
451 	/*
452 	 * Caller should zero out the headers first.
453 	 */
454 	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
455 
456 	if (fill_len) {
457 		NXGE_DEBUG_MSG((NULL, TX_CTL,
458 		    "==> nxge_fill_tx_hdr: pkt_len %d "
459 		    "npads %d", pkt_len, npads));
460 		tmp = (uint64_t)pkt_len;
461 		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
462 		goto fill_tx_header_done;
463 	}
464 
465 	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);
466 
467 	/*
468 	 * mp is the original data packet (does not include the
469 	 * Neptune transmit header).
470 	 */
471 	nmp = mp;
472 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
473 	    "mp $%p b_rptr $%p len %d",
474 	    mp, nmp->b_rptr, MBLKL(nmp)));
475 	/* copy ether_header from mblk to hdrs_buf */
476 	cursor = &hdrs_buf[0];
477 	tmp = sizeof (struct ether_vlan_header);
478 	while ((nmp != NULL) && (tmp > 0)) {
479 		size_t buflen;
480 		mblk_len = MBLKL(nmp);
481 		buflen = min((size_t)tmp, mblk_len);
482 		bcopy(nmp->b_rptr, cursor, buflen);
483 		cursor += buflen;
484 		tmp -= buflen;
485 		nmp = nmp->b_cont;
486 	}
487 
488 	nmp = mp;
489 	mblk_len = MBLKL(nmp);
490 	ip_buf = NULL;
491 	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
492 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: (value 0x%llx) "
493 	    "ether type 0x%x", hdrp->value, eth_type));
494 
495 	if (eth_type < ETHERMTU) {
496 		tmp = 1ull;
497 		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
498 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
499 		    "value 0x%llx", hdrp->value));
500 		if (*(hdrs_buf + sizeof (struct ether_header))
501 		    == LLC_SNAP_SAP) {
502 			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
503 			    sizeof (struct ether_header) + 6)));
504 			NXGE_DEBUG_MSG((NULL, TX_CTL,
505 			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
506 			    eth_type));
507 		} else {
508 			goto fill_tx_header_done;
509 		}
510 	} else if (eth_type == VLAN_ETHERTYPE) {
511 		tmp = 1ull;
512 		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
513 
514 		eth_type = ntohs(((struct ether_vlan_header *)
515 		    hdrs_buf)->ether_type);
516 		is_vlan = B_TRUE;
517 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
518 		    "value 0x%llx", hdrp->value));
519 	}
520 
521 	if (!is_vlan) {
522 		eth_hdr_size = sizeof (struct ether_header);
523 	} else {
524 		eth_hdr_size = sizeof (struct ether_vlan_header);
525 	}
526 
527 	switch (eth_type) {
528 	case ETHERTYPE_IP:
529 		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
530 			ip_buf = nmp->b_rptr + eth_hdr_size;
531 			mblk_len -= eth_hdr_size;
532 			iph_len = ((*ip_buf) & 0x0f);
533 			if (mblk_len > (iph_len + sizeof (uint32_t))) {
534 				ip_buf = nmp->b_rptr;
535 				ip_buf += eth_hdr_size;
536 			} else {
537 				ip_buf = NULL;
538 			}
539 
540 		}
541 		if (ip_buf == NULL) {
542 			hdrs_size = 0;
543 			((p_ether_header_t)hdrs_buf)->ether_type = 0;
544 			while ((nmp) && (hdrs_size <
545 			    sizeof (hdrs_buf))) {
546 				mblk_len = (size_t)nmp->b_wptr -
547 				    (size_t)nmp->b_rptr;
548 				if (mblk_len >=
549 				    (sizeof (hdrs_buf) - hdrs_size))
550 					mblk_len = sizeof (hdrs_buf) -
551 					    hdrs_size;
552 				bcopy(nmp->b_rptr,
553 				    &hdrs_buf[hdrs_size], mblk_len);
554 				hdrs_size += mblk_len;
555 				nmp = nmp->b_cont;
556 			}
557 			ip_buf = hdrs_buf;
558 			ip_buf += eth_hdr_size;
559 			iph_len = ((*ip_buf) & 0x0f);
560 		}
561 
562 		ipproto = ip_buf[9];
563 
564 		tmp = (uint64_t)iph_len;
565 		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
566 		tmp = (uint64_t)(eth_hdr_size >> 1);
567 		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
568 
569 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
570 		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x "
571 		    "tmp 0x%x",
572 		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
573 		    ipproto, tmp));
574 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
575 		    "value 0x%llx", hdrp->value));
576 
577 		break;
578 
579 	case ETHERTYPE_IPV6:
580 		hdrs_size = 0;
581 		((p_ether_header_t)hdrs_buf)->ether_type = 0;
582 		while ((nmp) && (hdrs_size <
583 		    sizeof (hdrs_buf))) {
584 			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
585 			if (mblk_len >=
586 			    (sizeof (hdrs_buf) - hdrs_size))
587 				mblk_len = sizeof (hdrs_buf) -
588 				    hdrs_size;
589 			bcopy(nmp->b_rptr,
590 			    &hdrs_buf[hdrs_size], mblk_len);
591 			hdrs_size += mblk_len;
592 			nmp = nmp->b_cont;
593 		}
594 		ip_buf = hdrs_buf;
595 		ip_buf += eth_hdr_size;
596 
597 		tmp = 1ull;
598 		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
599 
600 		tmp = (eth_hdr_size >> 1);
601 		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
602 
603 		/* byte 6 is the next header protocol */
604 		ipproto = ip_buf[6];
605 
606 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
607 		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
608 		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
609 		    ipproto));
610 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
611 		    "value 0x%llx", hdrp->value));
612 
613 		break;
614 
615 	default:
616 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
617 		goto fill_tx_header_done;
618 	}
619 
620 	switch (ipproto) {
621 	case IPPROTO_TCP:
622 		NXGE_DEBUG_MSG((NULL, TX_CTL,
623 		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
624 		if (l4_cksum) {
625 			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
626 			hdrp->value |=
627 			    (((uint64_t)(start_offset >> 1)) <<
628 			    TX_PKT_HEADER_L4START_SHIFT);
629 			hdrp->value |=
630 			    (((uint64_t)(stuff_offset >> 1)) <<
631 			    TX_PKT_HEADER_L4STUFF_SHIFT);
632 
633 			NXGE_DEBUG_MSG((NULL, TX_CTL,
634 			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
635 			    "value 0x%llx", hdrp->value));
636 		}
637 
638 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
639 		    "value 0x%llx", hdrp->value));
640 		break;
641 
642 	case IPPROTO_UDP:
643 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
644 		if (l4_cksum) {
645 			if (!nxge_cksum_offload) {
646 				uint16_t	*up;
647 				uint16_t	cksum;
648 				t_uscalar_t	stuff_len;
649 
650 				/*
651 				 * The checksum field has the
652 				 * partial checksum.
653 				 * IP_CSUM() macro calls ip_cksum() which
654 				 * can add in the partial checksum.
655 				 */
656 				cksum = IP_CSUM(mp, start_offset, 0);
657 				stuff_len = stuff_offset;
658 				nmp = mp;
659 				mblk_len = MBLKL(nmp);
660 				while ((nmp != NULL) &&
661 				    (mblk_len < stuff_len)) {
662 					stuff_len -= mblk_len;
663 					nmp = nmp->b_cont;
664 				}
665 				ASSERT(nmp);
666 				up = (uint16_t *)(nmp->b_rptr + stuff_len);
667 
668 				*up = cksum;
669 				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
670 				NXGE_DEBUG_MSG((NULL, TX_CTL,
671 				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
672 				    "use sw cksum "
673 				    "write to $%p cksum 0x%x content up 0x%x",
674 				    stuff_len,
675 				    up,
676 				    cksum,
677 				    *up));
678 			} else {
679 				/* Hardware will compute the full checksum */
680 				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
681 				hdrp->value |=
682 				    (((uint64_t)(start_offset >> 1)) <<
683 				    TX_PKT_HEADER_L4START_SHIFT);
684 				hdrp->value |=
685 				    (((uint64_t)(stuff_offset >> 1)) <<
686 				    TX_PKT_HEADER_L4STUFF_SHIFT);
687 
688 				NXGE_DEBUG_MSG((NULL, TX_CTL,
689 				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
690 				    " use partial checksum "
691 				    "cksum 0x%x "
692 				    "value 0x%llx",
693 				    stuff_offset,
694 				    IP_CSUM(mp, start_offset, 0),
695 				    hdrp->value));
696 			}
697 		}
698 
699 		NXGE_DEBUG_MSG((NULL, TX_CTL,
700 		    "==> nxge_tx_pkt_hdr_init: UDP "
701 		    "value 0x%llx", hdrp->value));
702 		break;
703 
704 	default:
705 		goto fill_tx_header_done;
706 	}
707 
708 fill_tx_header_done:
709 	NXGE_DEBUG_MSG((NULL, TX_CTL,
710 	    "==> nxge_fill_tx_hdr: pkt_len %d  "
711 	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));
712 
713 	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
714 }
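
/*
 * Aside (illustrative sketch, not part of the driver): the routine
 * above packs the Neptune transmit header with offsets in hardware
 * units -- the L3 start and the L4 start/stuff offsets in 2-byte
 * units, and the IP header length in 32-bit words (the IPv4 IHL
 * field).  The helper below shows that packing for a hypothetical
 * untagged IPv4/TCP frame; its name and the hck_start/hck_stuff
 * parameters are assumptions for the example, not driver interfaces.
 */
static uint64_t
example_pack_tx_hdr(size_t eth_hdr_size, uint8_t ihl_words,
    t_uscalar_t hck_start, t_uscalar_t hck_stuff)
{
	uint64_t	value = 0;

	/* L3 header start, counted in 2-byte units (14 bytes => 7). */
	value |= ((uint64_t)(eth_hdr_size >> 1)) <<
	    TX_PKT_HEADER_L3START_SHIFT;

	/* IPv4 header length in 32-bit words (20-byte header => 5). */
	value |= ((uint64_t)ihl_words) << TX_PKT_HEADER_IHL_SHIFT;

	/* L4 checksum start and stuff offsets, also in 2-byte units. */
	value |= TX_CKSUM_EN_PKT_TYPE_TCP;
	value |= ((uint64_t)(hck_start >> 1)) << TX_PKT_HEADER_L4START_SHIFT;
	value |= ((uint64_t)(hck_stuff >> 1)) << TX_PKT_HEADER_L4STUFF_SHIFT;

	return (value);
}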
715 
716 /*ARGSUSED*/
717 p_mblk_t
718 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
719 {
720 	p_mblk_t 		newmp = NULL;
721 
722 	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
723 		NXGE_DEBUG_MSG((NULL, TX_CTL,
724 		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
725 		return (NULL);
726 	}
727 
728 	NXGE_DEBUG_MSG((NULL, TX_CTL,
729 	    "==> nxge_tx_pkt_header_reserve: get new mp"));
730 	DB_TYPE(newmp) = M_DATA;
731 	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
732 	linkb(newmp, mp);
733 	newmp->b_rptr -= TX_PKT_HEADER_SIZE;
734 
735 	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
736 	    "b_rptr $%p b_wptr $%p",
737 	    newmp->b_rptr, newmp->b_wptr));
738 
739 	NXGE_DEBUG_MSG((NULL, TX_CTL,
740 	    "<== nxge_tx_pkt_header_reserve: use new mp"));
741 
742 	return (newmp);
743 }
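
/*
 * A hypothetical caller of nxge_tx_pkt_header_reserve(): the routine
 * prepends a fresh mblk whose read pointer has been backed up by
 * TX_PKT_HEADER_SIZE bytes, so the caller can fill in the Neptune
 * transmit header in place before queueing the chain to a TDC.  This
 * is a sketch only; error handling is reduced to freeing the original
 * message.
 */
static mblk_t *
example_prepend_tx_header(mblk_t *pkt)
{
	uint8_t	npads = 0;
	mblk_t	*hmp;

	if ((hmp = nxge_tx_pkt_header_reserve(pkt, &npads)) == NULL) {
		freemsg(pkt);	/* could not allocate header space */
		return (NULL);
	}

	/* hmp->b_rptr now points at TX_PKT_HEADER_SIZE reserved bytes. */
	bzero(hmp->b_rptr, TX_PKT_HEADER_SIZE);
	return (hmp);
}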
744 
745 int
746 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
747 {
748 	uint_t 			nmblks;
749 	ssize_t			len;
750 	uint_t 			pkt_len;
751 	p_mblk_t 		nmp, bmp, tmp;
752 	uint8_t 		*b_wptr;
753 
754 	NXGE_DEBUG_MSG((NULL, TX_CTL,
755 	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
756 	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
757 
758 	nmp = mp;
759 	bmp = mp;
760 	nmblks = 0;
761 	pkt_len = 0;
762 	*tot_xfer_len_p = 0;
763 
764 	while (nmp) {
765 		len = MBLKL(nmp);
766 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
767 		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
768 		    len, pkt_len, nmblks,
769 		    *tot_xfer_len_p));
770 
771 		if (len <= 0) {
772 			bmp = nmp;
773 			nmp = nmp->b_cont;
774 			NXGE_DEBUG_MSG((NULL, TX_CTL,
775 			    "==> nxge_tx_pkt_nmblocks: "
776 			    "len (0) pkt_len %d nmblks %d",
777 			    pkt_len, nmblks));
778 			continue;
779 		}
780 
781 		*tot_xfer_len_p += len;
782 		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
783 		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
784 		    len, pkt_len, nmblks,
785 		    *tot_xfer_len_p));
786 
787 		if (len < nxge_bcopy_thresh) {
788 			NXGE_DEBUG_MSG((NULL, TX_CTL,
789 			    "==> nxge_tx_pkt_nmblocks: "
790 			    "len %d (< thresh) pkt_len %d nmblks %d",
791 			    len, pkt_len, nmblks));
792 			if (pkt_len == 0)
793 				nmblks++;
794 			pkt_len += len;
795 			if (pkt_len >= nxge_bcopy_thresh) {
796 				pkt_len = 0;
797 				len = 0;
798 				nmp = bmp;
799 			}
800 		} else {
801 			NXGE_DEBUG_MSG((NULL, TX_CTL,
802 			    "==> nxge_tx_pkt_nmblocks: "
803 			    "len %d (> thresh) pkt_len %d nmblks %d",
804 			    len, pkt_len, nmblks));
805 			pkt_len = 0;
806 			nmblks++;
807 			/*
808 			 * Hardware limits the transfer length to 4K.
809 			 * If len is more than 4K, we need to break
810 			 * it up to at most 2 more blocks.
811 			 */
812 			if (len > TX_MAX_TRANSFER_LENGTH) {
813 				uint32_t	nsegs;
814 
815 				nsegs = 1;
816 				NXGE_DEBUG_MSG((NULL, TX_CTL,
817 				    "==> nxge_tx_pkt_nmblocks: "
818 				    "len %d pkt_len %d nmblks %d nsegs %d",
819 				    len, pkt_len, nmblks, nsegs));
820 				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
821 					++nsegs;
822 				}
823 				do {
824 					b_wptr = nmp->b_rptr +
825 					    TX_MAX_TRANSFER_LENGTH;
826 					nmp->b_wptr = b_wptr;
827 					if ((tmp = dupb(nmp)) == NULL) {
828 						return (0);
829 					}
830 					tmp->b_rptr = b_wptr;
831 					tmp->b_wptr = nmp->b_wptr;
832 					tmp->b_cont = nmp->b_cont;
833 					nmp->b_cont = tmp;
834 					nmblks++;
835 					if (--nsegs) {
836 						nmp = tmp;
837 					}
838 				} while (nsegs);
839 				nmp = tmp;
840 			}
841 		}
842 
843 		/*
844 		 * Hardware limits the transmit gather pointers to 15.
845 		 */
846 		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
847 		    TX_MAX_GATHER_POINTERS) {
848 			NXGE_DEBUG_MSG((NULL, TX_CTL,
849 			    "==> nxge_tx_pkt_nmblocks: pull msg - "
850 			    "len %d pkt_len %d nmblks %d",
851 			    len, pkt_len, nmblks));
852 			/* Pull all message blocks from b_cont */
853 			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
854 				return (0);
855 			}
856 			freemsg(nmp->b_cont);
857 			nmp->b_cont = tmp;
858 			pkt_len = 0;
859 		}
860 		bmp = nmp;
861 		nmp = nmp->b_cont;
862 	}
863 
864 	NXGE_DEBUG_MSG((NULL, TX_CTL,
865 	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
866 	    "nmblks %d len %d tot_xfer_len %d",
867 	    mp->b_rptr, mp->b_wptr, nmblks,
868 	    MBLKL(mp), *tot_xfer_len_p));
869 
870 	return (nmblks);
871 }
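
/*
 * The hardware limits used above are worth restating: each gather
 * descriptor can move at most TX_MAX_TRANSFER_LENGTH (4K) bytes and a
 * packet may use at most TX_MAX_GATHER_POINTERS (15) descriptors,
 * which is why the routine splits oversized blocks with dupb() and
 * falls back to msgpullup() when the chain grows too long.  As a
 * simple arithmetic sketch (hypothetical helper, not a driver
 * interface), the minimum number of descriptors one block needs is a
 * ceiling division:
 */
static uint32_t
example_min_descriptors(size_t len)
{
	/* assuming a 4096-byte limit: len = 9000 => ceil(9000 / 4096) = 3 */
	return ((uint32_t)((len + TX_MAX_TRANSFER_LENGTH - 1) /
	    TX_MAX_TRANSFER_LENGTH));
}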
872 
873 boolean_t
874 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
875 {
876 	boolean_t 		status = B_TRUE;
877 	p_nxge_dma_common_t	tx_desc_dma_p;
878 	nxge_dma_common_t	desc_area;
879 	p_tx_desc_t 		tx_desc_ring_vp;
880 	p_tx_desc_t 		tx_desc_p;
881 	p_tx_desc_t 		tx_desc_pp;
882 	tx_desc_t 		r_tx_desc;
883 	p_tx_msg_t 		tx_msg_ring;
884 	p_tx_msg_t 		tx_msg_p;
885 	npi_handle_t		handle;
886 	tx_ring_hdl_t		tx_head;
887 	uint32_t 		pkt_len;
888 	uint_t			tx_rd_index;
889 	uint16_t		head_index, tail_index;
890 	uint8_t			tdc;
891 	boolean_t		head_wrap, tail_wrap;
892 	p_nxge_tx_ring_stats_t tdc_stats;
893 	int			rc;
894 
895 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
896 
897 	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
898 	    (nmblks != 0));
899 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
900 	    "==> nxge_txdma_reclaim: pending %d  reclaim %d nmblks %d",
901 	    tx_ring_p->descs_pending, nxge_reclaim_pending,
902 	    nmblks));
903 	if (!status) {
904 		tx_desc_dma_p = &tx_ring_p->tdc_desc;
905 		desc_area = tx_ring_p->tdc_desc;
906 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
907 		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
908 		tx_desc_ring_vp =
909 		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
910 		tx_rd_index = tx_ring_p->rd_index;
911 		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
912 		tx_msg_ring = tx_ring_p->tx_msg_ring;
913 		tx_msg_p = &tx_msg_ring[tx_rd_index];
914 		tdc = tx_ring_p->tdc;
915 		tdc_stats = tx_ring_p->tdc_stats;
916 		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
917 			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
918 		}
919 
920 		tail_index = tx_ring_p->wr_index;
921 		tail_wrap = tx_ring_p->wr_index_wrap;
922 
923 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
924 		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
925 		    "tail_index %d tail_wrap %d "
926 		    "tx_desc_p $%p ($%p) ",
927 		    tdc, tx_rd_index, tail_index, tail_wrap,
928 		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
929 		/*
930 		 * Read the hardware maintained transmit head
931 		 * and wrap around bit.
932 		 */
933 		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
934 		head_index =  tx_head.bits.ldw.head;
935 		head_wrap = tx_head.bits.ldw.wrap;
936 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
937 		    "==> nxge_txdma_reclaim: "
938 		    "tx_rd_index %d tail %d tail_wrap %d "
939 		    "head %d wrap %d",
940 		    tx_rd_index, tail_index, tail_wrap,
941 		    head_index, head_wrap));
942 
943 		if (head_index == tail_index) {
944 			if (TXDMA_RING_EMPTY(head_index, head_wrap,
945 			    tail_index, tail_wrap) &&
946 			    (head_index == tx_rd_index)) {
947 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
948 				    "==> nxge_txdma_reclaim: EMPTY"));
949 				return (B_TRUE);
950 			}
951 
952 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
953 			    "==> nxge_txdma_reclaim: Checking "
954 			    "if ring full"));
955 			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
956 			    tail_wrap)) {
957 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
958 				    "==> nxge_txdma_reclaim: full"));
959 				return (B_FALSE);
960 			}
961 		}
962 
963 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
964 		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));
965 
966 		tx_desc_pp = &r_tx_desc;
967 		while ((tx_rd_index != head_index) &&
968 		    (tx_ring_p->descs_pending != 0)) {
969 
970 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
971 			    "==> nxge_txdma_reclaim: Checking if pending"));
972 
973 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
974 			    "==> nxge_txdma_reclaim: "
975 			    "descs_pending %d ",
976 			    tx_ring_p->descs_pending));
977 
978 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
979 			    "==> nxge_txdma_reclaim: "
980 			    "(tx_rd_index %d head_index %d "
981 			    "(tx_desc_p $%p)",
982 			    tx_rd_index, head_index,
983 			    tx_desc_p));
984 
985 			tx_desc_pp->value = tx_desc_p->value;
986 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
987 			    "==> nxge_txdma_reclaim: "
988 			    "(tx_rd_index %d head_index %d "
989 			    "tx_desc_p $%p (desc value 0x%llx) ",
990 			    tx_rd_index, head_index,
991 			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
992 
993 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
994 			    "==> nxge_txdma_reclaim: dump desc:"));
995 
996 			pkt_len = tx_desc_pp->bits.hdw.tr_len;
997 			tdc_stats->obytes += pkt_len;
998 			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
999 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1000 			    "==> nxge_txdma_reclaim: pkt_len %d "
1001 			    "tdc channel %d opackets %d",
1002 			    pkt_len,
1003 			    tdc,
1004 			    tdc_stats->opackets));
1005 
1006 			if (tx_msg_p->flags.dma_type == USE_DVMA) {
1007 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1008 				    "tx_desc_p = $%p "
1009 				    "tx_desc_pp = $%p "
1010 				    "index = %d",
1011 				    tx_desc_p,
1012 				    tx_desc_pp,
1013 				    tx_ring_p->rd_index));
1014 				(void) dvma_unload(tx_msg_p->dvma_handle,
1015 				    0, -1);
1016 				tx_msg_p->dvma_handle = NULL;
1017 				if (tx_ring_p->dvma_wr_index ==
1018 				    tx_ring_p->dvma_wrap_mask) {
1019 					tx_ring_p->dvma_wr_index = 0;
1020 				} else {
1021 					tx_ring_p->dvma_wr_index++;
1022 				}
1023 				tx_ring_p->dvma_pending--;
1024 			} else if (tx_msg_p->flags.dma_type ==
1025 			    USE_DMA) {
1026 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1027 				    "==> nxge_txdma_reclaim: "
1028 				    "USE DMA"));
1029 				if (rc = ddi_dma_unbind_handle
1030 				    (tx_msg_p->dma_handle)) {
1031 					cmn_err(CE_WARN, "!nxge_reclaim: "
1032 					    "ddi_dma_unbind_handle "
1033 					    "failed. status %d", rc);
1034 				}
1035 			}
1036 			NXGE_DEBUG_MSG((nxgep, TX_CTL,
1037 			    "==> nxge_txdma_reclaim: count packets"));
1038 			/*
1039 			 * count a chained packet only once.
1040 			 */
1041 			if (tx_msg_p->tx_message != NULL) {
1042 				freemsg(tx_msg_p->tx_message);
1043 				tx_msg_p->tx_message = NULL;
1044 			}
1045 
1046 			tx_msg_p->flags.dma_type = USE_NONE;
1047 			tx_rd_index = tx_ring_p->rd_index;
1048 			tx_rd_index = (tx_rd_index + 1) &
1049 			    tx_ring_p->tx_wrap_mask;
1050 			tx_ring_p->rd_index = tx_rd_index;
1051 			tx_ring_p->descs_pending--;
1052 			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
1053 			tx_msg_p = &tx_msg_ring[tx_rd_index];
1054 		}
1055 
1056 		status = (nmblks <= (tx_ring_p->tx_ring_size -
1057 		    tx_ring_p->descs_pending -
1058 		    TX_FULL_MARK));
1059 		if (status) {
1060 			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
1061 		}
1062 	} else {
1063 		status = (nmblks <=
1064 		    (tx_ring_p->tx_ring_size -
1065 		    tx_ring_p->descs_pending -
1066 		    TX_FULL_MARK));
1067 	}
1068 
1069 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1070 	    "<== nxge_txdma_reclaim status = 0x%08x", status));
1071 
1072 	return (status);
1073 }
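
/*
 * The empty/full tests above rely on the wrap bits that both the
 * software tail and the hardware head carry alongside their indices.
 * With a single wrap bit the two ambiguous "head == tail" cases can
 * be told apart: equal indices with equal wrap bits means the ring is
 * empty, while equal indices with different wrap bits means the
 * producer has lapped the consumer and the ring is full.  The helpers
 * below are a sketch of that convention, which the driver's
 * TXDMA_RING_EMPTY()/TXDMA_RING_FULL() macros are assumed to follow;
 * the names are hypothetical.
 */
static boolean_t
example_ring_empty(uint16_t head, boolean_t head_wrap,
    uint16_t tail, boolean_t tail_wrap)
{
	/* Same index, same wrap bit: the consumer has caught up. */
	return (((head == tail) && (head_wrap == tail_wrap)) ?
	    B_TRUE : B_FALSE);
}

static boolean_t
example_ring_full(uint16_t head, boolean_t head_wrap,
    uint16_t tail, boolean_t tail_wrap)
{
	/* Same index, different wrap bit: the producer has lapped. */
	return (((head == tail) && (head_wrap != tail_wrap)) ?
	    B_TRUE : B_FALSE);
}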
1074 
1075 /*
1076  * nxge_tx_intr
1077  *
1078  *	Process a TDC interrupt
1079  *
1080  * Arguments:
1081  * 	arg1	A Logical Device state Vector (LSV) data structure.
1082  * 	arg2	nxge_t *
1083  *
1084  * Notes:
1085  *
1086  * NPI/NXGE function calls:
1087  *	npi_txdma_control_status()
1088  *	npi_intr_ldg_mgmt_set()
1089  *
1090  *	nxge_tx_err_evnts()
1091  *	nxge_txdma_reclaim()
1092  *
1093  * Registers accessed:
1094  *	TX_CS		DMC+0x40028 Transmit Control And Status
1095  *	PIO_LDSV
1096  *
1097  * Context:
1098  *	Any domain
1099  */
1100 uint_t
1101 nxge_tx_intr(void *arg1, void *arg2)
1102 {
1103 	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
1104 	p_nxge_t		nxgep = (p_nxge_t)arg2;
1105 	p_nxge_ldg_t		ldgp;
1106 	uint8_t			channel;
1107 	uint32_t		vindex;
1108 	npi_handle_t		handle;
1109 	tx_cs_t			cs;
1110 	p_tx_ring_t 		*tx_rings;
1111 	p_tx_ring_t 		tx_ring_p;
1112 	npi_status_t		rs = NPI_SUCCESS;
1113 	uint_t 			serviced = DDI_INTR_UNCLAIMED;
1114 	nxge_status_t 		status = NXGE_OK;
1115 
1116 	if (ldvp == NULL) {
1117 		NXGE_DEBUG_MSG((NULL, INT_CTL,
1118 		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
1119 		    nxgep, ldvp));
1120 		return (DDI_INTR_UNCLAIMED);
1121 	}
1122 
1123 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1124 		nxgep = ldvp->nxgep;
1125 	}
1126 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1127 	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1128 	    nxgep, ldvp));
1129 
1130 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1131 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1132 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1133 		    "<== nxge_tx_intr: interface not started or initialized"));
1134 		return (DDI_INTR_CLAIMED);
1135 	}
1136 
1137 	/*
1138 	 * This interrupt handler is for a specific
1139 	 * transmit dma channel.
1140 	 */
1141 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1142 	/* Get the control and status for this channel. */
1143 	channel = ldvp->channel;
1144 	ldgp = ldvp->ldgp;
1145 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1146 	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1147 	    "channel %d",
1148 	    nxgep, ldvp, channel));
1149 
1150 	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1151 	vindex = ldvp->vdma_index;
1152 	NXGE_DEBUG_MSG((nxgep, INT_CTL,
1153 	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1154 	    channel, vindex, rs));
1155 	if (!rs && cs.bits.ldw.mk) {
1156 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1157 		    "==> nxge_tx_intr:channel %d ring index %d "
1158 		    "status 0x%08x (mk bit set)",
1159 		    channel, vindex, rs));
1160 		tx_rings = nxgep->tx_rings->rings;
1161 		tx_ring_p = tx_rings[vindex];
1162 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1163 		    "==> nxge_tx_intr:channel %d ring index %d "
1164 		    "status 0x%08x (mk bit set, calling reclaim)",
1165 		    channel, vindex, rs));
1166 
1167 		MUTEX_ENTER(&tx_ring_p->lock);
1168 		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
1169 		MUTEX_EXIT(&tx_ring_p->lock);
1170 		mac_tx_update(nxgep->mach);
1171 	}
1172 
1173 	/*
1174 	 * Process other transmit control and status.
1175 	 * Check the ldv state.
1176 	 */
1177 	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1178 	/*
1179 	 * Rearm this logical group if this is a single device
1180 	 * group.
1181 	 */
1182 	if (ldgp->nldvs == 1) {
1183 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
1184 		    "==> nxge_tx_intr: rearm"));
1185 		if (status == NXGE_OK) {
1186 			if (isLDOMguest(nxgep)) {
1187 				nxge_hio_ldgimgn(nxgep, ldgp);
1188 			} else {
1189 				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1190 				    B_TRUE, ldgp->ldg_timer);
1191 			}
1192 		}
1193 	}
1194 
1195 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1196 	serviced = DDI_INTR_CLAIMED;
1197 	return (serviced);
1198 }
1199 
1200 void
1201 nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
1202 {
1203 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
1204 
1205 	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1206 
1207 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
1208 }
1209 
1210 void
1211 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
1212 {
1213 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
1214 
1215 	(void) nxge_txdma_stop(nxgep);
1216 
1217 	(void) nxge_fixup_txdma_rings(nxgep);
1218 	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1219 	(void) nxge_tx_mac_enable(nxgep);
1220 	(void) nxge_txdma_hw_kick(nxgep);
1221 
1222 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
1223 }
1224 
1225 npi_status_t
1226 nxge_txdma_channel_disable(
1227 	nxge_t *nxge,
1228 	int channel)
1229 {
1230 	npi_handle_t	handle = NXGE_DEV_NPI_HANDLE(nxge);
1231 	npi_status_t	rs;
1232 	tdmc_intr_dbg_t	intr_dbg;
1233 
1234 	/*
1235 	 * Stop the dma channel and wait for the stop-done.
1236 	 * If the stop-done bit is not present, then force
1237 	 * an error so TXC will stop.
1238 	 * All channels bound to this port need to be stopped
1239 	 * and reset after injecting an interrupt error.
1240 	 */
1241 	rs = npi_txdma_channel_disable(handle, channel);
1242 	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1243 	    "==> nxge_txdma_channel_disable(%d) "
1244 	    "rs 0x%x", channel, rs));
1245 	if (rs != NPI_SUCCESS) {
1246 		/* Inject any error */
1247 		intr_dbg.value = 0;
1248 		intr_dbg.bits.ldw.nack_pref = 1;
1249 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1250 		    "==> nxge_txdma_channel_disable: "
1251 		    "channel %d (stop failed 0x%x) "
1252 		    "(inject err)", channel, rs));
1253 		(void) npi_txdma_inj_int_error_set(
1254 		    handle, channel, &intr_dbg);
1255 		rs = npi_txdma_channel_disable(handle, channel);
1256 		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1257 		    "==> nxge_txdma_channel_disable: "
1258 		    "channel %d (stop again 0x%x) "
1259 		    "(after inject err)",
1260 		    channel, rs));
1261 	}
1262 
1263 	return (rs);
1264 }
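
/*
 * The stop sequence above recurs throughout this file: try to disable
 * the channel; if the stop-done bit never appears, inject a
 * NACK-on-prefetch debug error (TDMC_INTR_DBG) to force the channel to
 * stop, and then disable it again.  A condensed sketch of that retry
 * pattern, using the same NPI calls as the routine above (only the
 * wrapper name is hypothetical):
 */
static npi_status_t
example_force_channel_stop(npi_handle_t handle, int channel)
{
	tdmc_intr_dbg_t	intr_dbg;
	npi_status_t	rs;

	if ((rs = npi_txdma_channel_disable(handle, channel)) == NPI_SUCCESS)
		return (rs);

	/* Force an error so the channel reaches the stopped state. */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	return (npi_txdma_channel_disable(handle, channel));
}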
1265 
1266 /*
1267  * nxge_txdma_hw_mode
1268  *
1269  *	Toggle all TDCs on (enable) or off (disable).
1270  *
1271  * Arguments:
1272  * 	nxgep
1273  * 	enable	Enable or disable a TDC.
1274  *
1275  * Notes:
1276  *
1277  * NPI/NXGE function calls:
1278  *	npi_txdma_channel_enable(TX_CS)
1279  *	npi_txdma_channel_disable(TX_CS)
1280  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1281  *
1282  * Registers accessed:
1283  *	TX_CS		DMC+0x40028 Transmit Control And Status
1284  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1285  *
1286  * Context:
1287  *	Any domain
1288  */
1289 nxge_status_t
1290 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1291 {
1292 	nxge_grp_set_t *set = &nxgep->tx_set;
1293 
1294 	npi_handle_t	handle;
1295 	nxge_status_t	status;
1296 	npi_status_t	rs;
1297 	int		tdc;
1298 
1299 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1300 	    "==> nxge_txdma_hw_mode: enable mode %d", enable));
1301 
1302 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1303 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1304 		    "<== nxge_txdma_mode: not initialized"));
1305 		return (NXGE_ERROR);
1306 	}
1307 
1308 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1309 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1310 		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1311 		return (NXGE_ERROR);
1312 	}
1313 
1314 	/* Enable or disable all of the TDCs owned by us. */
1315 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1316 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1317 		if ((1 << tdc) & set->owned.map) {
1318 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1319 			if (ring) {
1320 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1321 				    "==> nxge_txdma_hw_mode: channel %d", tdc));
1322 				if (enable) {
1323 					rs = npi_txdma_channel_enable
1324 					    (handle, tdc);
1325 					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1326 					    "==> nxge_txdma_hw_mode: "
1327 					    "channel %d (enable) rs 0x%x",
1328 					    tdc, rs));
1329 				} else {
1330 					rs = nxge_txdma_channel_disable
1331 					    (nxgep, tdc);
1332 				}
1333 			}
1334 		}
1335 	}
1336 
1337 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1338 
1339 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1340 	    "<== nxge_txdma_hw_mode: status 0x%x", status));
1341 
1342 	return (status);
1343 }
1344 
1345 void
1346 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1347 {
1348 	npi_handle_t		handle;
1349 
1350 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1351 	    "==> nxge_txdma_enable_channel: channel %d", channel));
1352 
1353 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1354 	/* enable the transmit dma channels */
1355 	(void) npi_txdma_channel_enable(handle, channel);
1356 
1357 	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1358 }
1359 
1360 void
1361 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1362 {
1363 	npi_handle_t		handle;
1364 
1365 	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1366 	    "==> nxge_txdma_disable_channel: channel %d", channel));
1367 
1368 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1369 	/* stop the transmit dma channels */
1370 	(void) npi_txdma_channel_disable(handle, channel);
1371 
1372 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1373 }
1374 
1375 /*
1376  * nxge_txdma_stop_inj_err
1377  *
1378  *	Stop a TDC.  If at first we don't succeed, inject an error.
1379  *
1380  * Arguments:
1381  * 	nxgep
1382  * 	channel		The channel to stop.
1383  *
1384  * Notes:
1385  *
1386  * NPI/NXGE function calls:
1387  *	npi_txdma_channel_disable()
1388  *	npi_txdma_inj_int_error_set()
1389  * #if defined(NXGE_DEBUG)
1390  *	nxge_txdma_regs_dump_channels(nxgep);
1391  * #endif
1392  *
1393  * Registers accessed:
1394  *	TX_CS		DMC+0x40028 Transmit Control And Status
1395  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1396  *
1397  * Context:
1398  *	Any domain
1399  */
1400 int
1401 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1402 {
1403 	npi_handle_t		handle;
1404 	tdmc_intr_dbg_t		intr_dbg;
1405 	int			status;
1406 	npi_status_t		rs = NPI_SUCCESS;
1407 
1408 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1409 	/*
1410 	 * Stop the dma channel and wait for the stop-done bit.
1411 	 * If the stop done bit is not set, then create
1412 	 * an error.
1413 	 */
1414 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1415 	rs = npi_txdma_channel_disable(handle, channel);
1416 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1417 	if (status == NXGE_OK) {
1418 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1419 		    "<== nxge_txdma_stop_inj_err (channel %d): "
1420 		    "stopped OK", channel));
1421 		return (status);
1422 	}
1423 
1424 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1425 	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1426 	    "injecting error", channel, rs));
1427 	/* Inject any error */
1428 	intr_dbg.value = 0;
1429 	intr_dbg.bits.ldw.nack_pref = 1;
1430 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1431 
1432 	/* Stop done bit will be set as a result of error injection */
1433 	rs = npi_txdma_channel_disable(handle, channel);
1434 	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1435 	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1436 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1437 		    "<== nxge_txdma_stop_inj_err (channel %d): "
1438 		    "stopped OK ", channel));
1439 		return (status);
1440 	}
1441 
1442 #if	defined(NXGE_DEBUG)
1443 	nxge_txdma_regs_dump_channels(nxgep);
1444 #endif
1445 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1446 	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1447 	    " (injected error but still not stopped)", channel, rs));
1448 
1449 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1450 	return (status);
1451 }
1452 
1453 /*ARGSUSED*/
1454 void
1455 nxge_fixup_txdma_rings(p_nxge_t nxgep)
1456 {
1457 	nxge_grp_set_t *set = &nxgep->tx_set;
1458 	int tdc;
1459 
1460 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1461 
1462 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1463 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1464 		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1465 		return;
1466 	}
1467 
1468 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1469 		if ((1 << tdc) & set->owned.map) {
1470 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1471 			if (ring) {
1472 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1473 				    "==> nxge_fixup_txdma_rings: channel %d",
1474 				    tdc));
1475 				nxge_txdma_fixup_channel(nxgep, ring, tdc);
1476 			}
1477 		}
1478 	}
1479 
1480 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1481 }
1482 
1483 /*ARGSUSED*/
1484 void
1485 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1486 {
1487 	p_tx_ring_t	ring_p;
1488 
1489 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1490 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1491 	if (ring_p == NULL) {
1492 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1493 		return;
1494 	}
1495 
1496 	if (ring_p->tdc != channel) {
1497 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1498 		    "<== nxge_txdma_fix_channel: channel not matched "
1499 		    "ring tdc %d passed channel %d",
1500 		    ring_p->tdc, channel));
1501 		return;
1502 	}
1503 
1504 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1505 
1506 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1507 }
1508 
1509 /*ARGSUSED*/
1510 void
1511 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1512 {
1513 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1514 
1515 	if (ring_p == NULL) {
1516 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1517 		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1518 		return;
1519 	}
1520 
1521 	if (ring_p->tdc != channel) {
1522 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1523 		    "<== nxge_txdma_fixup_channel: channel not matched "
1524 		    "ring tdc %d passed channel %d",
1525 		    ring_p->tdc, channel));
1526 		return;
1527 	}
1528 
1529 	MUTEX_ENTER(&ring_p->lock);
1530 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1531 	ring_p->rd_index = 0;
1532 	ring_p->wr_index = 0;
1533 	ring_p->ring_head.value = 0;
1534 	ring_p->ring_kick_tail.value = 0;
1535 	ring_p->descs_pending = 0;
1536 	MUTEX_EXIT(&ring_p->lock);
1537 
1538 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1539 }
1540 
1541 /*ARGSUSED*/
1542 void
1543 nxge_txdma_hw_kick(p_nxge_t nxgep)
1544 {
1545 	nxge_grp_set_t *set = &nxgep->tx_set;
1546 	int tdc;
1547 
1548 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1549 
1550 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1551 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1552 		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1553 		return;
1554 	}
1555 
1556 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1557 		if ((1 << tdc) & set->owned.map) {
1558 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1559 			if (ring) {
1560 				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1561 				    "==> nxge_txdma_hw_kick: channel %d", tdc));
1562 				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1563 			}
1564 		}
1565 	}
1566 
1567 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1568 }
1569 
1570 /*ARGSUSED*/
1571 void
1572 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1573 {
1574 	p_tx_ring_t	ring_p;
1575 
1576 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1577 
1578 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1579 	if (ring_p == NULL) {
1580 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1581 		    " nxge_txdma_kick_channel"));
1582 		return;
1583 	}
1584 
1585 	if (ring_p->tdc != channel) {
1586 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1587 		    "<== nxge_txdma_kick_channel: channel not matched "
1588 		    "ring tdc %d passed channel %d",
1589 		    ring_p->tdc, channel));
1590 		return;
1591 	}
1592 
1593 	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1594 
1595 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1596 }
1597 
1598 /*ARGSUSED*/
1599 void
1600 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1601 {
1602 
1603 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1604 
1605 	if (ring_p == NULL) {
1606 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1607 		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1608 		return;
1609 	}
1610 
1611 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1612 }
1613 
1614 /*
1615  * nxge_check_tx_hang
1616  *
1617  *	Check the state of all TDCs belonging to nxgep.
1618  *
1619  * Arguments:
1620  * 	nxgep
1621  *
1622  * Notes:
1623  *	Called by nxge_hw.c:nxge_check_hw_state().
1624  *
1625  * NPI/NXGE function calls:
1626  *
1627  * Registers accessed:
1628  *
1629  * Context:
1630  *	Any domain
1631  */
1632 /*ARGSUSED*/
1633 void
1634 nxge_check_tx_hang(p_nxge_t nxgep)
1635 {
1636 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1637 
1638 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1639 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1640 		goto nxge_check_tx_hang_exit;
1641 	}
1642 
1643 	/*
1644 	 * Needs inputs from hardware for regs:
1645 	 *	head index had not moved since last timeout.
1646 	 *	packets not transmitted or stuffed registers.
1647 	 */
1648 	if (nxge_txdma_hung(nxgep)) {
1649 		nxge_fixup_hung_txdma_rings(nxgep);
1650 	}
1651 
1652 nxge_check_tx_hang_exit:
1653 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1654 }
1655 
1656 /*
1657  * nxge_txdma_hung
1658  *
1659  *	Determine whether any TDC owned by this instance is hung.
1660  *
1661  * Arguments:
1662  * 	nxgep
1665  *
1666  * Notes:
1667  *	Called by nxge_check_tx_hang()
1668  *
1669  * NPI/NXGE function calls:
1670  *	nxge_txdma_channel_hung()
1671  *
1672  * Registers accessed:
1673  *
1674  * Context:
1675  *	Any domain
1676  */
1677 int
1678 nxge_txdma_hung(p_nxge_t nxgep)
1679 {
1680 	nxge_grp_set_t *set = &nxgep->tx_set;
1681 	int tdc;
1682 
1683 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1684 
1685 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1686 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1687 		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
1688 		return (B_FALSE);
1689 	}
1690 
1691 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1692 		if ((1 << tdc) & set->owned.map) {
1693 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1694 			if (ring) {
1695 				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1696 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
1697 					    "==> nxge_txdma_hung: TDC %d hung",
1698 					    tdc));
1699 					return (B_TRUE);
1700 				}
1701 			}
1702 		}
1703 	}
1704 
1705 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1706 
1707 	return (B_FALSE);
1708 }
1709 
1710 /*
1711  * nxge_txdma_channel_hung
1712  *
1713  *	Determine whether a TDC is hung.
1714  *
1715  * Arguments:
1716  * 	nxgep
1717  * 	ring		<channel>'s ring.
1718  * 	channel		The channel to check.
1719  *
1720  * Notes:
1721  *	Called by nxge_txdma.c:nxge_txdma_hung()
1722  *
1723  * NPI/NXGE function calls:
1724  *	npi_txdma_ring_head_get()
1725  *
1726  * Registers accessed:
1727  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
1728  *
1729  * Context:
1730  *	Any domain
1731  */
1732 int
1733 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1734 {
1735 	uint16_t		head_index, tail_index;
1736 	boolean_t		head_wrap, tail_wrap;
1737 	npi_handle_t		handle;
1738 	tx_ring_hdl_t		tx_head;
1739 	uint_t			tx_rd_index;
1740 
1741 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1742 
1743 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1744 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1745 	    "==> nxge_txdma_channel_hung: channel %d", channel));
1746 	MUTEX_ENTER(&tx_ring_p->lock);
1747 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1748 
1749 	tail_index = tx_ring_p->wr_index;
1750 	tail_wrap = tx_ring_p->wr_index_wrap;
1751 	tx_rd_index = tx_ring_p->rd_index;
1752 	MUTEX_EXIT(&tx_ring_p->lock);
1753 
1754 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1755 	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1756 	    "tail_index %d tail_wrap %d ",
1757 	    channel, tx_rd_index, tail_index, tail_wrap));
1758 	/*
1759 	 * Read the hardware maintained transmit head
1760 	 * and wrap around bit.
1761 	 */
1762 	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1763 	head_index =  tx_head.bits.ldw.head;
1764 	head_wrap = tx_head.bits.ldw.wrap;
1765 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1766 	    "==> nxge_txdma_channel_hung: "
1767 	    "tx_rd_index %d tail %d tail_wrap %d "
1768 	    "head %d wrap %d",
1769 	    tx_rd_index, tail_index, tail_wrap,
1770 	    head_index, head_wrap));
1771 
1772 	if (TXDMA_RING_EMPTY(head_index, head_wrap,
1773 	    tail_index, tail_wrap) &&
1774 	    (head_index == tx_rd_index)) {
1775 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1776 		    "==> nxge_txdma_channel_hung: EMPTY"));
1777 		return (B_FALSE);
1778 	}
1779 
1780 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1781 	    "==> nxge_txdma_channel_hung: Checking if ring full"));
1782 	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1783 	    tail_wrap)) {
1784 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1785 		    "==> nxge_txdma_channel_hung: full"));
1786 		return (B_TRUE);
1787 	}
1788 
1789 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1790 
1791 	return (B_FALSE);
1792 }
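
/*
 * Putting the checks above together: a TDC is reported as hung only
 * when, even after a reclaim pass, the descriptor ring still looks
 * full to the head/tail comparison; an empty ring whose head has also
 * caught up with the software read index is healthy.  A condensed
 * sketch of that decision, reusing the hypothetical ring helpers from
 * the reclaim example earlier in this file:
 */
static boolean_t
example_channel_hung(uint16_t head, boolean_t head_wrap, uint16_t tail,
    boolean_t tail_wrap, uint_t rd_index)
{
	if (example_ring_empty(head, head_wrap, tail, tail_wrap) &&
	    (head == rd_index))
		return (B_FALSE);	/* nothing left outstanding */

	/* Still full after reclaiming: the channel is not draining. */
	return (example_ring_full(head, head_wrap, tail, tail_wrap));
}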
1793 
1794 /*
1795  * nxge_fixup_hung_txdma_rings
1796  *
1797  *	'Fix' every hung TDC owned by this instance.
1798  *
1799  * Arguments:
1800  * 	nxgep
1803  *
1804  * Notes:
1805  *	Called by nxge_check_tx_hang()
1806  *
1807  * NPI/NXGE function calls:
1808  *	npi_txdma_ring_head_get()
1809  *
1810  * Registers accessed:
1811  *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
1812  *
1813  * Context:
1814  *	Any domain
1815  */
1816 /*ARGSUSED*/
1817 void
1818 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1819 {
1820 	nxge_grp_set_t *set = &nxgep->tx_set;
1821 	int tdc;
1822 
1823 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1824 
1825 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1826 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1827 		    "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1828 		return;
1829 	}
1830 
1831 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1832 		if ((1 << tdc) & set->owned.map) {
1833 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1834 			if (ring) {
1835 				nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1836 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1837 				    "==> nxge_fixup_hung_txdma_rings: TDC %d",
1838 				    tdc));
1839 			}
1840 		}
1841 	}
1842 
1843 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1844 }
1845 
1846 /*
1847  * nxge_txdma_fixup_hung_channel
1848  *
1849  *	'Fix' a hung TDC.
1850  *
1851  * Arguments:
1852  * 	nxgep
1853  * 	channel		The channel to fix.
1854  *
1855  * Notes:
1856  *	Called by nxge_fixup_hung_txdma_rings()
1857  *
1858  *	1. Reclaim the TDC.
1859  *	2. Disable the TDC.
1860  *
1861  * NPI/NXGE function calls:
1862  *	nxge_txdma_reclaim()
1863  *	npi_txdma_channel_disable(TX_CS)
1864  *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1865  *
1866  * Registers accessed:
1867  *	TX_CS		DMC+0x40028 Transmit Control And Status
1868  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
1869  *
1870  * Context:
1871  *	Any domain
1872  */
1873 /*ARGSUSED*/
1874 void
1875 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1876 {
1877 	p_tx_ring_t	ring_p;
1878 
1879 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1880 	ring_p = nxge_txdma_get_ring(nxgep, channel);
1881 	if (ring_p == NULL) {
1882 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1883 		    "<== nxge_txdma_fix_hung_channel"));
1884 		return;
1885 	}
1886 
1887 	if (ring_p->tdc != channel) {
1888 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1889 		    "<== nxge_txdma_fix_hung_channel: channel not matched "
1890 		    "ring tdc %d passed channel %d",
1891 		    ring_p->tdc, channel));
1892 		return;
1893 	}
1894 
1895 	nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1896 
1897 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1898 }
1899 
1900 /*ARGSUSED*/
1901 void
1902 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1903 	uint16_t channel)
1904 {
1905 	npi_handle_t		handle;
1906 	tdmc_intr_dbg_t		intr_dbg;
1907 	int			status = NXGE_OK;
1908 
1909 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1910 
1911 	if (ring_p == NULL) {
1912 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1913 		    "<== nxge_txdma_fixup_hung_channel: NULL ring pointer"));
1914 		return;
1915 	}
1916 
1917 	if (ring_p->tdc != channel) {
1918 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1919 		    "<== nxge_txdma_fixup_hung_channel: channel "
1920 		    "not matched "
1921 		    "ring tdc %d passed channel %d",
1922 		    ring_p->tdc, channel));
1923 		return;
1924 	}
1925 
1926 	/* Reclaim descriptors */
1927 	MUTEX_ENTER(&ring_p->lock);
1928 	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1929 	MUTEX_EXIT(&ring_p->lock);
1930 
1931 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
1932 	/*
1933 	 * Stop the DMA channel and wait for the stop-done bit.
1934 	 * If the stop-done bit is not set, then force an error
1935 	 * by injecting one so that the channel will stop.
1936 	 */
1937 	status = npi_txdma_channel_disable(handle, channel);
1938 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
1939 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1940 		    "<== nxge_txdma_fixup_hung_channel: stopped OK "
1941 		    "ring tdc %d passed channel %d",
1942 		    ring_p->tdc, channel));
1943 		return;
1944 	}
1945 
1946 	/* The disable did not complete; inject an error to force the stop. */
1947 	intr_dbg.value = 0;
1948 	intr_dbg.bits.ldw.nack_pref = 1;
1949 	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1950 
1951 	/* Stop done bit will be set as a result of error injection */
1952 	status = npi_txdma_channel_disable(handle, channel);
1953 	if (!(status & NPI_TXDMA_STOP_FAILED)) {
1954 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1955 		    "<== nxge_txdma_fixup_hung_channel: stopped again "
1956 		    "ring tdc %d passed channel %d",
1957 		    ring_p->tdc, channel));
1958 		return;
1959 	}
1960 
1961 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
1962 	    "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
1963 	    "ring tdc %d passed channel %d",
1964 	    ring_p->tdc, channel));
1965 
1966 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
1967 }
1968 
1969 /*ARGSUSED*/
1970 void
1971 nxge_reclaim_rings(p_nxge_t nxgep)
1972 {
1973 	nxge_grp_set_t *set = &nxgep->tx_set;
1974 	int tdc;
1975 
1976 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
1977 
1978 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1979 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
1980 		    "<== nxge_reclaim_rings: NULL ring pointer(s)"));
1981 		return;
1982 	}
1983 
1984 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1985 		if ((1 << tdc) & set->owned.map) {
1986 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1987 			if (ring) {
1988 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
1989 				    "==> nxge_reclaim_rings: TDC %d", tdc));
1990 				MUTEX_ENTER(&ring->lock);
1991 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
1992 				MUTEX_EXIT(&ring->lock);
1993 			}
1994 		}
1995 	}
1996 
1997 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
1998 }
1999 
2000 void
2001 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2002 {
2003 	nxge_grp_set_t *set = &nxgep->tx_set;
2004 	npi_handle_t handle;
2005 	int tdc;
2006 
2007 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2008 
2009 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2010 
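	/*
	 * FZC (global) register space is only accessible from the
	 * service domain, so skip those dumps in a guest domain.
	 */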
2011 	if (!isLDOMguest(nxgep)) {
2012 		(void) npi_txdma_dump_fzc_regs(handle);
2013 
2014 		/* Dump TXC registers. */
2015 		(void) npi_txc_dump_fzc_regs(handle);
2016 		(void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2017 	}
2018 
2019 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2020 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2021 		    "<== nxge_txdma_regs_dump_channels: NULL ring pointer(s)"));
2022 		return;
2023 	}
2024 
2025 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2026 		if ((1 << tdc) & set->owned.map) {
2027 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2028 			if (ring) {
2029 				NXGE_DEBUG_MSG((nxgep, TX_CTL,
2030 				    "==> nxge_txdma_regs_dump_channels: "
2031 				    "TDC %d", tdc));
2032 				(void) npi_txdma_dump_tdc_regs(handle, tdc);
2033 
2034 				/* Dump TXC registers, if able to. */
2035 				if (!isLDOMguest(nxgep)) {
2036 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
2037 					    "==> nxge_txdma_regs_dump_channels:"
2038 					    " FZC TDC %d", tdc));
2039 					(void) npi_txc_dump_tdc_fzc_regs
2040 					    (handle, tdc);
2041 				}
2042 				nxge_txdma_regs_dump(nxgep, tdc);
2043 			}
2044 		}
2045 	}
2046 
2047 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
2048 }
2049 
2050 void
2051 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2052 {
2053 	npi_handle_t		handle;
2054 	tx_ring_hdl_t 		hdl;
2055 	tx_ring_kick_t 		kick;
2056 	tx_cs_t 		cs;
2057 	txc_control_t		control;
2058 	uint32_t		bitmap = 0;
2059 	uint32_t		burst = 0;
2060 	uint32_t		bytes = 0;
2061 	dma_log_page_t		cfg;
2062 
2063 	printf("\n\tfunc # %d tdc %d ",
2064 	    nxgep->function_num, channel);
2065 	cfg.page_num = 0;
2066 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
2067 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
2068 	printf("\n\tlog page func %d valid page 0 %d",
2069 	    cfg.func_num, cfg.valid);
2070 	cfg.page_num = 1;
2071 	(void) npi_txdma_log_page_get(handle, channel, &cfg);
2072 	printf("\n\tlog page func %d valid page 1 %d",
2073 	    cfg.func_num, cfg.valid);
2074 
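	/* Dump the ring head and tail (kick) registers. */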
2075 	(void) npi_txdma_ring_head_get(handle, channel, &hdl);
2076 	(void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2077 	printf("\n\thead value is 0x%0llx",
2078 	    (long long)hdl.value);
2079 	printf("\n\thead index %d", hdl.bits.ldw.head);
2080 	printf("\n\tkick value is 0x%0llx",
2081 	    (long long)kick.value);
2082 	printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2083 
2084 	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2085 	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2086 	printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2087 
2088 	(void) npi_txc_control(handle, OP_GET, &control);
2089 	(void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2090 	(void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2091 	(void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2092 
2093 	printf("\n\tTXC port control 0x%0llx",
2094 	    (long long)control.value);
2095 	printf("\n\tTXC port bitmap 0x%x", bitmap);
2096 	printf("\n\tTXC max burst %d", burst);
2097 	printf("\n\tTXC bytes xmt %d\n", bytes);
2098 
2099 	{
2100 		ipp_status_t status;
2101 
2102 		(void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2103 #if defined(__i386)
2104 		printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
2105 #else
2106 		printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
2107 #endif
2108 	}
2109 }
2110 
2111 /*
2112  * nxge_tdc_hvio_setup
2113  *
2114  *	Record the channel's buffer/control I/O addresses and sizes for HV use.
2115  *
2116  * Arguments:
2117  * 	nxgep
2118  * 	channel	The channel to map.
2119  *
2120  * Notes:
2121  *
2122  * NPI/NXGE function calls:
2123  *	na
2124  *
2125  * Context:
2126  *	Service domain
2127  */
2128 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2129 static void
2130 nxge_tdc_hvio_setup(
2131 	nxge_t *nxgep, int channel)
2132 {
2133 	nxge_dma_common_t	*data;
2134 	nxge_dma_common_t	*control;
2135 	tx_ring_t 		*ring;
2136 
2137 	ring = nxgep->tx_rings->rings[channel];
2138 	data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2139 
2140 	ring->hv_set = B_FALSE;
2141 
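	/*
	 * Record the data buffer's original I/O address and length; these
	 * are presumably handed to the hypervisor when the channel's
	 * logical pages are configured.
	 */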
2142 	ring->hv_tx_buf_base_ioaddr_pp =
2143 	    (uint64_t)data->orig_ioaddr_pp;
2144 	ring->hv_tx_buf_ioaddr_size =
2145 	    (uint64_t)data->orig_alength;
2146 
2147 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2148 	    "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
2149 	    "orig vatopa base io $%p orig_len 0x%llx (%d)",
2150 	    ring->hv_tx_buf_base_ioaddr_pp,
2151 	    ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
2152 	    data->ioaddr_pp, data->orig_vatopa,
2153 	    data->orig_alength, data->orig_alength));
2154 
2155 	control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2156 
2157 	ring->hv_tx_cntl_base_ioaddr_pp =
2158 	    (uint64_t)control->orig_ioaddr_pp;
2159 	ring->hv_tx_cntl_ioaddr_size =
2160 	    (uint64_t)control->orig_alength;
2161 
2162 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2163 	    "hv cntl base io $%p orig ioaddr_pp ($%p) "
2164 	    "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
2165 	    ring->hv_tx_cntl_base_ioaddr_pp,
2166 	    control->orig_ioaddr_pp, control->orig_vatopa,
2167 	    ring->hv_tx_cntl_ioaddr_size,
2168 	    control->orig_alength, control->orig_alength));
2169 }
2170 #endif
2171 
2172 static nxge_status_t
2173 nxge_map_txdma(p_nxge_t nxgep, int channel)
2174 {
2175 	nxge_dma_common_t	**pData;
2176 	nxge_dma_common_t	**pControl;
2177 	tx_ring_t 		**pRing, *ring;
2178 	tx_mbox_t		**mailbox;
2179 	uint32_t		num_chunks;
2180 
2181 	nxge_status_t		status = NXGE_OK;
2182 
2183 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2184 
2185 	if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2186 		if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2187 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2188 			    "<== nxge_map_txdma: buf not allocated"));
2189 			return (NXGE_ERROR);
2190 		}
2191 	}
2192 
2193 	if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2194 		return (NXGE_ERROR);
2195 
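	/*
	 * Look up this channel's entries in the buffer, control,
	 * ring and mailbox pools.
	 */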
2196 	num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2197 	pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2198 	pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2199 	pRing = &nxgep->tx_rings->rings[channel];
2200 	mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2201 
2202 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2203 	    "tx_rings $%p tx_desc_rings $%p",
2204 	    nxgep->tx_rings, nxgep->tx_rings->rings));
2205 
2206 	/*
2207 	 * Map descriptors from the buffer pools for <channel>.
2208 	 */
2209 
2210 	/*
2211 	 * Set up and prepare buffer blocks, descriptors
2212 	 * and mailbox.
2213 	 */
2214 	status = nxge_map_txdma_channel(nxgep, channel,
2215 	    pData, pRing, num_chunks, pControl, mailbox);
2216 	if (status != NXGE_OK) {
2217 		NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2218 		    "==> nxge_map_txdma($%p, %d): nxge_map_txdma_channel() "
2219 		    "returned 0x%x",
2220 		    nxgep, channel, status));
2221 		return (status);
2222 	}
2223 
2224 	ring = *pRing;
2225 
2226 	ring->index = (uint16_t)channel;
2227 	ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2228 
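	/*
	 * sun4v only: a guest domain configures its logical pages via
	 * nxge_tdc_lp_conf(); the service domain records the HV buffer
	 * addresses via nxge_tdc_hvio_setup().
	 */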
2229 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2230 	if (isLDOMguest(nxgep)) {
2231 		(void) nxge_tdc_lp_conf(nxgep, channel);
2232 	} else {
2233 		nxge_tdc_hvio_setup(nxgep, channel);
2234 	}
2235 #endif
2236 
2237 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2238 	    "(status 0x%x channel %d)", status, channel));
2239 
2240 	return (status);
2241 }
2242 
2243 static nxge_status_t
2244 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2245 	p_nxge_dma_common_t *dma_buf_p,
2246 	p_tx_ring_t *tx_desc_p,
2247 	uint32_t num_chunks,
2248 	p_nxge_dma_common_t *dma_cntl_p,
2249 	p_tx_mbox_t *tx_mbox_p)
2250 {
2251 	int	status = NXGE_OK;
2252 
2253 	/*
2254 	 * Set up and prepare buffer blocks, descriptors
2255 	 * and mailbox.
2256 	 */
2257 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2258 	    "==> nxge_map_txdma_channel (channel %d)", channel));
2259 	/*
2260 	 * Transmit buffer blocks
2261 	 */
2262 	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2263 	    dma_buf_p, tx_desc_p, num_chunks);
2264 	if (status != NXGE_OK) {
2265 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2266 		    "==> nxge_map_txdma_channel (channel %d): "
2267 		    "map buffer failed 0x%x", channel, status));
2268 		goto nxge_map_txdma_channel_exit;
2269 	}
2270 
2271 	/*
2272 	 * Transmit block ring, and mailbox.
2273 	 */
2274 	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2275 	    tx_mbox_p);
2276 
2277 	goto nxge_map_txdma_channel_exit;
2278 
2279 nxge_map_txdma_channel_fail1:
2280 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2281 	    "==> nxge_map_txdma_channel: unmap buf "
2282 	    "(status 0x%x channel %d)",
2283 	    status, channel));
2284 	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2285 
2286 nxge_map_txdma_channel_exit:
2287 	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2288 	    "<== nxge_map_txdma_channel: "
2289 	    "(status 0x%x channel %d)",
2290 	    status, channel));
2291 
2292 	return (status);
2293 }
2294 
2295 /*ARGSUSED*/
2296 static void
2297 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
2298 {
2299 	tx_ring_t *ring;
2300 	tx_mbox_t *mailbox;
2301 
2302 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2303 	    "==> nxge_unmap_txdma_channel (channel %d)", channel));
2304 	/*
2305 	 * unmap tx block ring, and mailbox.
2306 	 */
2307 	ring = nxgep->tx_rings->rings[channel];
2308 	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2309 
2310 	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
2311 
2312 	/* unmap buffer blocks */
2313 	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2314 
2315 	nxge_free_txb(nxgep, channel);
2316 
2317 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
2318 }
2319 
2320 /*
2321  * nxge_map_txdma_channel_cfg_ring
2322  *
2323  *	Map a TDC into our kernel space.
2324  *	This function allocates all of the per-channel data structures.
2325  *
2326  * Arguments:
2327  * 	nxgep
2328  * 	dma_channel	The channel to map.
2329  *	dma_cntl_p
2330  *	tx_ring_p	dma_channel's transmit ring
2331  *	tx_mbox_p	dma_channel's mailbox
2332  *
2333  * Notes:
2334  *
2335  * NPI/NXGE function calls:
2336  *	nxge_setup_dma_common()
2337  *
2338  * Registers accessed:
2339  *	none.
2340  *
2341  * Context:
2342  *	Any domain
2343  */
2344 /*ARGSUSED*/
2345 static void
2346 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
2347 	p_nxge_dma_common_t *dma_cntl_p,
2348 	p_tx_ring_t tx_ring_p,
2349 	p_tx_mbox_t *tx_mbox_p)
2350 {
2351 	p_tx_mbox_t 		mboxp;
2352 	p_nxge_dma_common_t 	cntl_dmap;
2353 	p_nxge_dma_common_t 	dmap;
2354 	p_tx_rng_cfig_t		tx_ring_cfig_p;
2355 	p_tx_ring_kick_t	tx_ring_kick_p;
2356 	p_tx_cs_t		tx_cs_p;
2357 	p_tx_dma_ent_msk_t	tx_evmask_p;
2358 	p_txdma_mbh_t		mboxh_p;
2359 	p_txdma_mbl_t		mboxl_p;
2360 	uint64_t		tx_desc_len;
2361 
2362 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2363 	    "==> nxge_map_txdma_channel_cfg_ring"));
2364 
2365 	cntl_dmap = *dma_cntl_p;
2366 
2367 	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
2368 	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
2369 	    sizeof (tx_desc_t));
2370 	/*
2371 	 * Zero out transmit ring descriptors.
2372 	 */
2373 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
2374 	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
2375 	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
2376 	tx_cs_p = &(tx_ring_p->tx_cs);
2377 	tx_evmask_p = &(tx_ring_p->tx_evmask);
2378 	tx_ring_cfig_p->value = 0;
2379 	tx_ring_kick_p->value = 0;
2380 	tx_cs_p->value = 0;
2381 	tx_evmask_p->value = 0;
2382 
2383 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2384 	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
2385 	    dma_channel,
2386 	    dmap->dma_cookie.dmac_laddress));
2387 
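	/*
	 * Build the TX_RNG_CFIG value: the ring's DMA base address plus
	 * its length, which the hardware apparently expects in units of
	 * eight descriptors (hence the shift by 3).
	 */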
2388 	tx_ring_cfig_p->value = 0;
2389 	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
2390 	tx_ring_cfig_p->value =
2391 	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
2392 	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
2393 
2394 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2395 	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
2396 	    dma_channel,
2397 	    tx_ring_cfig_p->value));
2398 
2399 	tx_cs_p->bits.ldw.rst = 1;
2400 
2401 	/* Map in mailbox */
2402 	mboxp = (p_tx_mbox_t)
2403 	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
2404 	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
2405 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
2406 	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
2407 	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
2408 	mboxh_p->value = mboxl_p->value = 0;
2409 
2410 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2411 	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2412 	    dmap->dma_cookie.dmac_laddress));
2413 
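	/*
	 * Split the mailbox DMA address between the high (MBH) and
	 * low (MBL) mailbox address fields.
	 */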
2414 	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
2415 	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
2416 
2417 	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
2418 	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
2419 
2420 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2421 	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2422 	    dmap->dma_cookie.dmac_laddress));
2423 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2424 	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
2425 	    "mbox $%p",
2426 	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
2427 	tx_ring_p->page_valid.value = 0;
2428 	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
2429 	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
2430 	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
2431 	tx_ring_p->page_hdl.value = 0;
2432 
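	/* Mark both logical pages valid; masks/values are left at zero here. */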
2433 	tx_ring_p->page_valid.bits.ldw.page0 = 1;
2434 	tx_ring_p->page_valid.bits.ldw.page1 = 1;
2435 
2436 	tx_ring_p->max_burst.value = 0;
2437 	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
2438 
2439 	*tx_mbox_p = mboxp;
2440 
2441 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2442 	    "<== nxge_map_txdma_channel_cfg_ring"));
2443 }
2444 
2445 /*ARGSUSED*/
2446 static void
2447 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
2448 	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2449 {
2450 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2451 	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
2452 	    tx_ring_p->tdc));
2453 
2454 	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
2455 
2456 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2457 	    "<== nxge_unmap_txdma_channel_cfg_ring"));
2458 }
2459 
2460 /*
2461  * nxge_map_txdma_channel_buf_ring
2462  *
2463  *
2464  * Arguments:
2465  * 	nxgep
2466  * 	channel		The channel to map.
2467  *	dma_buf_p
2468  *	tx_desc_p	channel's descriptor ring
2469  *	num_chunks
2470  *
2471  * Notes:
2472  *
2473  * NPI/NXGE function calls:
2474  *	nxge_setup_dma_common()
2475  *
2476  * Registers accessed:
2477  *	none.
2478  *
2479  * Context:
2480  *	Any domain
2481  */
2482 static nxge_status_t
2483 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2484 	p_nxge_dma_common_t *dma_buf_p,
2485 	p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2486 {
2487 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
2488 	p_nxge_dma_common_t 	dmap;
2489 	nxge_os_dma_handle_t	tx_buf_dma_handle;
2490 	p_tx_ring_t 		tx_ring_p;
2491 	p_tx_msg_t 		tx_msg_ring;
2492 	nxge_status_t		status = NXGE_OK;
2493 	int			ddi_status = DDI_SUCCESS;
2494 	int			i, j, index;
2495 	uint32_t		size, bsize;
2496 	uint32_t 		nblocks, nmsgs;
2497 
2498 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2499 	    "==> nxge_map_txdma_channel_buf_ring"));
2500 
2501 	dma_bufp = tmp_bufp = *dma_buf_p;
2502 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2503 	    " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2504 	    "chunks bufp $%p",
2505 	    channel, num_chunks, dma_bufp));
2506 
2507 	nmsgs = 0;
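	/*
	 * Count the total number of transmit buffer blocks across all
	 * chunks; this becomes the ring size (one descriptor per block).
	 */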
2508 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2509 		nmsgs += tmp_bufp->nblocks;
2510 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2511 		    "==> nxge_map_txdma_channel_buf_ring: channel %d "
2512 		    "bufp $%p nblocks %d nmsgs %d",
2513 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2514 	}
2515 	if (!nmsgs) {
2516 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2517 		    "<== nxge_map_txdma_channel_buf_ring: channel %d "
2518 		    "no msg blocks",
2519 		    channel));
2520 		status = NXGE_ERROR;
2521 		goto nxge_map_txdma_channel_buf_ring_exit;
2522 	}
2523 
2524 	tx_ring_p = (p_tx_ring_t)
2525 	    KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2526 	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2527 	    (void *)nxgep->interrupt_cookie);
2528 
2529 	(void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2530 	tx_ring_p->tx_ring_busy = B_FALSE;
2531 	tx_ring_p->nxgep = nxgep;
2532 	tx_ring_p->serial = nxge_serialize_create(nmsgs,
2533 	    nxge_serial_tx, tx_ring_p);
2534 	/*
2535 	 * Allocate transmit message rings and handles for packets
2536 	 * not to be copied to premapped buffers.
2537 	 */
2538 	size = nmsgs * sizeof (tx_msg_t);
2539 	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2540 	for (i = 0; i < nmsgs; i++) {
2541 		ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2542 		    DDI_DMA_DONTWAIT, 0,
2543 		    &tx_msg_ring[i].dma_handle);
2544 		if (ddi_status != DDI_SUCCESS) {
2545 			status |= NXGE_DDI_FAILED;
2546 			break;
2547 		}
2548 	}
2549 	if (i < nmsgs) {
2550 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2551 		    "Allocate handles failed."));
2552 		goto nxge_map_txdma_channel_buf_ring_fail1;
2553 	}
2554 
2555 	tx_ring_p->tdc = channel;
2556 	tx_ring_p->tx_msg_ring = tx_msg_ring;
2557 	tx_ring_p->tx_ring_size = nmsgs;
2558 	tx_ring_p->num_chunks = num_chunks;
2559 	if (!nxge_tx_intr_thres) {
2560 		nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2561 	}
2562 	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2563 	tx_ring_p->rd_index = 0;
2564 	tx_ring_p->wr_index = 0;
2565 	tx_ring_p->ring_head.value = 0;
2566 	tx_ring_p->ring_kick_tail.value = 0;
2567 	tx_ring_p->descs_pending = 0;
2568 
2569 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2570 	    "==> nxge_map_txdma_channel_buf_ring: channel %d "
2571 	    "actual tx desc max %d nmsgs %d "
2572 	    "(config nxge_tx_ring_size %d)",
2573 	    channel, tx_ring_p->tx_ring_size, nmsgs,
2574 	    nxge_tx_ring_size));
2575 
2576 	/*
2577 	 * Map in buffers from the buffer pool.
2578 	 */
2579 	index = 0;
2580 	bsize = dma_bufp->block_size;
2581 
2582 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2583 	    "dma_bufp $%p tx_rng_p $%p "
2584 	    "tx_msg_rng_p $%p bsize %d",
2585 	    dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2586 
2587 	tx_buf_dma_handle = dma_bufp->dma_handle;
2588 	for (i = 0; i < num_chunks; i++, dma_bufp++) {
2589 		bsize = dma_bufp->block_size;
2590 		nblocks = dma_bufp->nblocks;
2591 		NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2592 		    "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2593 		    "size %d dma_bufp $%p",
2594 		    i, sizeof (nxge_dma_common_t), dma_bufp));
2595 
2596 		for (j = 0; j < nblocks; j++) {
2597 			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2598 			dmap = &tx_msg_ring[index++].buf_dma;
2599 #ifdef TX_MEM_DEBUG
2600 			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2601 			    "==> nxge_map_txdma_channel_buf_ring: j %d "
2602 			    "dmap $%p", j, dmap));
2603 #endif
2604 			nxge_setup_dma_common(dmap, dma_bufp, 1,
2605 			    bsize);
2606 		}
2607 	}
2608 
2609 	if (i < num_chunks) {
2610 		status = NXGE_ERROR;
2611 		goto nxge_map_txdma_channel_buf_ring_fail1;
2612 	}
2613 
2614 	*tx_desc_p = tx_ring_p;
2615 
2616 	goto nxge_map_txdma_channel_buf_ring_exit;
2617 
2618 nxge_map_txdma_channel_buf_ring_fail1:
2619 	if (tx_ring_p->serial) {
2620 		nxge_serialize_destroy(tx_ring_p->serial);
2621 		tx_ring_p->serial = NULL;
2622 	}
2623 
2624 	index--;
2625 	for (; index >= 0; index--) {
2626 		if (tx_msg_ring[index].dma_handle != NULL) {
2627 			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2628 		}
2629 	}
2630 	MUTEX_DESTROY(&tx_ring_p->lock);
2631 	KMEM_FREE(tx_msg_ring, size);
2632 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2633 
2634 	status = NXGE_ERROR;
2635 
2636 nxge_map_txdma_channel_buf_ring_exit:
2637 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2638 	    "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2639 
2640 	return (status);
2641 }
2642 
2643 /*ARGSUSED*/
2644 static void
2645 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2646 {
2647 	p_tx_msg_t 		tx_msg_ring;
2648 	p_tx_msg_t 		tx_msg_p;
2649 	int			i;
2650 
2651 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2652 	    "==> nxge_unmap_txdma_channel_buf_ring"));
2653 	if (tx_ring_p == NULL) {
2654 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2655 		    "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2656 		return;
2657 	}
2658 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2659 	    "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2660 	    tx_ring_p->tdc));
2661 
2662 	tx_msg_ring = tx_ring_p->tx_msg_ring;
2663 
2664 	/*
2665 	 * Since the serialization thread, timer thread and
2666 	 * interrupt thread can all call the transmit reclaim,
2667 	 * the unmapping function needs to acquire the lock
2668 	 * to free those buffers which were transmitted
2669 	 * by the hardware already.
2670 	 */
2671 	MUTEX_ENTER(&tx_ring_p->lock);
2672 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
2673 	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2674 	    "channel %d",
2675 	    tx_ring_p->tdc));
2676 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2677 
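	/* Free any message blocks (mblks) still held by the ring. */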
2678 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2679 		tx_msg_p = &tx_msg_ring[i];
2680 		if (tx_msg_p->tx_message != NULL) {
2681 			freemsg(tx_msg_p->tx_message);
2682 			tx_msg_p->tx_message = NULL;
2683 		}
2684 	}
2685 
2686 	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2687 		if (tx_msg_ring[i].dma_handle != NULL) {
2688 			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2689 		}
2690 		tx_msg_ring[i].dma_handle = NULL;
2691 	}
2692 
2693 	MUTEX_EXIT(&tx_ring_p->lock);
2694 
2695 	if (tx_ring_p->serial) {
2696 		nxge_serialize_destroy(tx_ring_p->serial);
2697 		tx_ring_p->serial = NULL;
2698 	}
2699 
2700 	MUTEX_DESTROY(&tx_ring_p->lock);
2701 	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2702 	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2703 
2704 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2705 	    "<== nxge_unmap_txdma_channel_buf_ring"));
2706 }
2707 
2708 static nxge_status_t
2709 nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
2710 {
2711 	p_tx_rings_t 		tx_rings;
2712 	p_tx_ring_t 		*tx_desc_rings;
2713 	p_tx_mbox_areas_t 	tx_mbox_areas_p;
2714 	p_tx_mbox_t		*tx_mbox_p;
2715 	nxge_status_t		status = NXGE_OK;
2716 
2717 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
2718 
2719 	tx_rings = nxgep->tx_rings;
2720 	if (tx_rings == NULL) {
2721 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2722 		    "<== nxge_txdma_hw_start: NULL ring pointer"));
2723 		return (NXGE_ERROR);
2724 	}
2725 	tx_desc_rings = tx_rings->rings;
2726 	if (tx_desc_rings == NULL) {
2727 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2728 		    "<== nxge_txdma_hw_start: NULL ring pointers"));
2729 		return (NXGE_ERROR);
2730 	}
2731 
2732 	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2733 	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2734 
2735 	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
2736 	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2737 
2738 	status = nxge_txdma_start_channel(nxgep, channel,
2739 	    (p_tx_ring_t)tx_desc_rings[channel],
2740 	    (p_tx_mbox_t)tx_mbox_p[channel]);
2741 	if (status != NXGE_OK) {
2742 		goto nxge_txdma_hw_start_fail1;
2743 	}
2744 
2745 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2746 	    "tx_rings $%p rings $%p",
2747 	    nxgep->tx_rings, nxgep->tx_rings->rings));
2748 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2749 	    "tx_rings $%p tx_desc_rings $%p",
2750 	    nxgep->tx_rings, tx_desc_rings));
2751 
2752 	goto nxge_txdma_hw_start_exit;
2753 
2754 nxge_txdma_hw_start_fail1:
2755 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2756 	    "==> nxge_txdma_hw_start: disable "
2757 	    "(status 0x%x channel %d)", status, channel));
2758 
2759 nxge_txdma_hw_start_exit:
2760 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2761 	    "==> nxge_txdma_hw_start: (status 0x%x)", status));
2762 
2763 	return (status);
2764 }
2765 
2766 /*
2767  * nxge_txdma_start_channel
2768  *
2769  *	Start a TDC.
2770  *
2771  * Arguments:
2772  * 	nxgep
2773  * 	channel		The channel to start.
2774  * 	tx_ring_p	channel's transmit descriptor ring.
2775  * 	tx_mbox_p	channel's mailbox.
2776  *
2777  * Notes:
2778  *
2779  * NPI/NXGE function calls:
2780  *	nxge_reset_txdma_channel()
2781  *	nxge_init_txdma_channel_event_mask()
2782  *	nxge_enable_txdma_channel()
2783  *
2784  * Registers accessed:
2785  *	none directly (see functions above).
2786  *
2787  * Context:
2788  *	Any domain
2789  */
2790 static nxge_status_t
2791 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2792     p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2793 
2794 {
2795 	nxge_status_t		status = NXGE_OK;
2796 
2797 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2798 		"==> nxge_txdma_start_channel (channel %d)", channel));
2799 	/*
2800 	 * TXDMA/TXC must be in stopped state.
2801 	 */
2802 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
2803 
2804 	/*
2805 	 * Reset TXDMA channel
2806 	 */
2807 	tx_ring_p->tx_cs.value = 0;
2808 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
2809 	status = nxge_reset_txdma_channel(nxgep, channel,
2810 			tx_ring_p->tx_cs.value);
2811 	if (status != NXGE_OK) {
2812 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2813 			"==> nxge_txdma_start_channel (channel %d)"
2814 			" reset channel failed 0x%x", channel, status));
2815 		goto nxge_txdma_start_channel_exit;
2816 	}
2817 
2818 	/*
2819 	 * Initialize the TXDMA channel specific FZC control
2820 	 * configurations. These FZC registers are pertaining
2821 	 * to each TX channel (i.e. logical pages).
2822 	 */
2823 	if (!isLDOMguest(nxgep)) {
2824 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
2825 		    tx_ring_p, tx_mbox_p);
2826 		if (status != NXGE_OK) {
2827 			goto nxge_txdma_start_channel_exit;
2828 		}
2829 	}
2830 
2831 	/*
2832 	 * Initialize the event masks.
2833 	 */
2834 	tx_ring_p->tx_evmask.value = 0;
2835 	status = nxge_init_txdma_channel_event_mask(nxgep,
2836 	    channel, &tx_ring_p->tx_evmask);
2837 	if (status != NXGE_OK) {
2838 		goto nxge_txdma_start_channel_exit;
2839 	}
2840 
2841 	/*
2842 	 * Load TXDMA descriptors, buffers, mailbox,
2843 	 * initialise the DMA channels and
2844 	 * enable each DMA channel.
2845 	 */
2846 	status = nxge_enable_txdma_channel(nxgep, channel,
2847 			tx_ring_p, tx_mbox_p);
2848 	if (status != NXGE_OK) {
2849 		goto nxge_txdma_start_channel_exit;
2850 	}
2851 
2852 nxge_txdma_start_channel_exit:
2853 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2854 
2855 	return (status);
2856 }
2857 
2858 /*
2859  * nxge_txdma_stop_channel
2860  *
2861  *	Stop a TDC.
2862  *
2863  * Arguments:
2864  * 	nxgep
2865  * 	channel		The channel to stop.
2866  * 	tx_ring_p	channel's transmit descriptor ring.
2867  * 	tx_mbox_p	channel's mailbox.
2868  *
2869  * Notes:
2870  *
2871  * NPI/NXGE function calls:
2872  *	nxge_txdma_stop_inj_err()
2873  *	nxge_reset_txdma_channel()
2874  *	nxge_init_txdma_channel_event_mask()
2875  *	nxge_init_txdma_channel_cntl_stat()
2876  *	nxge_disable_txdma_channel()
2877  *
2878  * Registers accessed:
2879  *	none directly (see functions above).
2880  *
2881  * Context:
2882  *	Any domain
2883  */
2884 /*ARGSUSED*/
2885 static nxge_status_t
2886 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2887 {
2888 	p_tx_ring_t tx_ring_p;
2889 	int status = NXGE_OK;
2890 
2891 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2892 	    "==> nxge_txdma_stop_channel: channel %d", channel));
2893 
2894 	/*
2895 	 * Stop (disable) the TXDMA channel and TXC.  If the stop bit is
2896 	 * set and the STOP_N_GO bit is not set, the TXDMA reset state
2897 	 * will not be set when the TXDMA channel is reset.
2898 	 */
2899 	(void) nxge_txdma_stop_inj_err(nxgep, channel);
2900 
2901 	tx_ring_p = nxgep->tx_rings->rings[channel];
2902 
2903 	/*
2904 	 * Reset TXDMA channel
2905 	 */
2906 	tx_ring_p->tx_cs.value = 0;
2907 	tx_ring_p->tx_cs.bits.ldw.rst = 1;
2908 	status = nxge_reset_txdma_channel(nxgep, channel,
2909 	    tx_ring_p->tx_cs.value);
2910 	if (status != NXGE_OK) {
2911 		goto nxge_txdma_stop_channel_exit;
2912 	}
2913 
2914 #ifdef HARDWARE_REQUIRED
2915 	/* Set up the interrupt event masks. */
2916 	tx_ring_p->tx_evmask.value = 0;
2917 	status = nxge_init_txdma_channel_event_mask(nxgep,
2918 	    channel, &tx_ring_p->tx_evmask);
2919 	if (status != NXGE_OK) {
2920 		goto nxge_txdma_stop_channel_exit;
2921 	}
2922 
2923 	/* Initialize the DMA control and status register */
2924 	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2925 	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2926 	    tx_ring_p->tx_cs.value);
2927 	if (status != NXGE_OK) {
2928 		goto nxge_txdma_stop_channel_exit;
2929 	}
2930 
2931 	tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2932 
2933 	/* Disable channel */
2934 	status = nxge_disable_txdma_channel(nxgep, channel,
2935 	    tx_ring_p, tx_mbox_p);
2936 	if (status != NXGE_OK) {
2937 		goto nxge_txdma_stop_channel_exit;
2938 	}
2939 
2940 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2941 	    "==> nxge_txdma_stop_channel: event done"));
2942 
2943 #endif
2944 
2945 nxge_txdma_stop_channel_exit:
2946 	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
2947 	return (status);
2948 }
2949 
2950 /*
2951  * nxge_txdma_get_ring
2952  *
2953  *	Get the ring for a TDC.
2954  *
2955  * Arguments:
2956  * 	nxgep
2957  * 	channel
2958  *
2959  * Notes:
2960  *
2961  * NPI/NXGE function calls:
2962  *
2963  * Registers accessed:
2964  *
2965  * Context:
2966  *	Any domain
2967  */
2968 static p_tx_ring_t
2969 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
2970 {
2971 	nxge_grp_set_t *set = &nxgep->tx_set;
2972 	int tdc;
2973 
2974 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
2975 
2976 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2977 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
2978 		    "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
2979 		goto return_null;
2980 	}
2981 
2982 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2983 		if ((1 << tdc) & set->owned.map) {
2984 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2985 			if (ring) {
2986 				if (channel == ring->tdc) {
2987 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
2988 					    "<== nxge_txdma_get_ring: "
2989 					    "tdc %d ring $%p", tdc, ring));
2990 					return (ring);
2991 				}
2992 			}
2993 		}
2994 	}
2995 
2996 return_null:
2997 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
2998 	    "ring not found"));
2999 
3000 	return (NULL);
3001 }
3002 
3003 /*
3004  * nxge_txdma_get_mbox
3005  *
3006  *	Get the mailbox for a TDC.
3007  *
3008  * Arguments:
3009  * 	nxgep
3010  * 	channel
3011  *
3012  * Notes:
3013  *
3014  * NPI/NXGE function calls:
3015  *
3016  * Registers accessed:
3017  *
3018  * Context:
3019  *	Any domain
3020  */
3021 static p_tx_mbox_t
3022 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3023 {
3024 	nxge_grp_set_t *set = &nxgep->tx_set;
3025 	int tdc;
3026 
3027 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3028 
3029 	if (nxgep->tx_mbox_areas_p == 0 ||
3030 	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3031 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3032 		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3033 		goto return_null;
3034 	}
3035 
3036 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3037 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3038 		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3039 		goto return_null;
3040 	}
3041 
3042 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3043 		if ((1 << tdc) & set->owned.map) {
3044 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3045 			if (ring) {
3046 				if (channel == ring->tdc) {
3047 					tx_mbox_t *mailbox = nxgep->
3048 					    tx_mbox_areas_p->
3049 					    txmbox_areas_p[tdc];
3050 					NXGE_DEBUG_MSG((nxgep, TX_CTL,
3051 					    "<== nxge_txdma_get_mbox: tdc %d "
3052 					    "mailbox $%p", tdc, mailbox));
3053 					return (mailbox);
3054 				}
3055 			}
3056 		}
3057 	}
3058 
3059 return_null:
3060 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3061 	    "mailbox not found"));
3062 
3063 	return (NULL);
3064 }
3065 
3066 /*
3067  * nxge_tx_err_evnts
3068  *
3069  *	Process a TDC's error events; recover the channel if fatal.
3070  *
3071  * Arguments:
3072  * 	nxgep
3073  * 	index	The index to the TDC ring.
3074  * 	ldvp	Used to get the channel number ONLY.
3075  * 	cs	A copy of the bits from TX_CS.
3076  *
3077  * Notes:
3078  *	Calling tree:
3079  *	 nxge_tx_intr()
3080  *
3081  * NPI/NXGE function calls:
3082  *	npi_txdma_ring_error_get()
3083  *	npi_txdma_inj_par_error_get()
3084  *	nxge_txdma_fatal_err_recover()
3085  *
3086  * Registers accessed:
3087  *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
3088  *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3089  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3090  *
3091  * Context:
3092  *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3093  */
3094 /*ARGSUSED*/
3095 static nxge_status_t
3096 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3097 {
3098 	npi_handle_t		handle;
3099 	npi_status_t		rs;
3100 	uint8_t			channel;
3101 	p_tx_ring_t 		*tx_rings;
3102 	p_tx_ring_t 		tx_ring_p;
3103 	p_nxge_tx_ring_stats_t	tdc_stats;
3104 	boolean_t		txchan_fatal = B_FALSE;
3105 	nxge_status_t		status = NXGE_OK;
3106 	tdmc_inj_par_err_t	par_err;
3107 	uint32_t		value;
3108 
3109 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3110 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3111 	channel = ldvp->channel;
3112 
3113 	tx_rings = nxgep->tx_rings->rings;
3114 	tx_ring_p = tx_rings[index];
3115 	tdc_stats = tx_ring_p->tdc_stats;
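	/*
	 * For these error types the hardware records details in the ring
	 * error log registers; snapshot them into the channel statistics
	 * before examining the individual error bits.
	 */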
3116 	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3117 	    (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3118 	    (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3119 		if ((rs = npi_txdma_ring_error_get(handle, channel,
3120 		    &tdc_stats->errlog)) != NPI_SUCCESS)
3121 			return (NXGE_ERROR | rs);
3122 	}
3123 
3124 	if (cs.bits.ldw.mbox_err) {
3125 		tdc_stats->mbox_err++;
3126 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3127 		    NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3128 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3129 		    "==> nxge_tx_err_evnts(channel %d): "
3130 		    "fatal error: mailbox", channel));
3131 		txchan_fatal = B_TRUE;
3132 	}
3133 	if (cs.bits.ldw.pkt_size_err) {
3134 		tdc_stats->pkt_size_err++;
3135 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3136 		    NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3137 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3138 		    "==> nxge_tx_err_evnts(channel %d): "
3139 		    "fatal error: pkt_size_err", channel));
3140 		txchan_fatal = B_TRUE;
3141 	}
3142 	if (cs.bits.ldw.tx_ring_oflow) {
3143 		tdc_stats->tx_ring_oflow++;
3144 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3145 		    NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3146 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3147 		    "==> nxge_tx_err_evnts(channel %d): "
3148 		    "fatal error: tx_ring_oflow", channel));
3149 		txchan_fatal = B_TRUE;
3150 	}
3151 	if (cs.bits.ldw.pref_buf_par_err) {
3152 		tdc_stats->pre_buf_par_err++;
3153 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3154 		    NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3155 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3156 		    "==> nxge_tx_err_evnts(channel %d): "
3157 		    "fatal error: pre_buf_par_err", channel));
3158 		/* Clear error injection source for parity error */
3159 		(void) npi_txdma_inj_par_error_get(handle, &value);
3160 		par_err.value = value;
3161 		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3162 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3163 		txchan_fatal = B_TRUE;
3164 	}
3165 	if (cs.bits.ldw.nack_pref) {
3166 		tdc_stats->nack_pref++;
3167 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3168 		    NXGE_FM_EREPORT_TDMC_NACK_PREF);
3169 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3170 		    "==> nxge_tx_err_evnts(channel %d): "
3171 		    "fatal error: nack_pref", channel));
3172 		txchan_fatal = B_TRUE;
3173 	}
3174 	if (cs.bits.ldw.nack_pkt_rd) {
3175 		tdc_stats->nack_pkt_rd++;
3176 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3177 		    NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3178 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3179 		    "==> nxge_tx_err_evnts(channel %d): "
3180 		    "fatal error: nack_pkt_rd", channel));
3181 		txchan_fatal = B_TRUE;
3182 	}
3183 	if (cs.bits.ldw.conf_part_err) {
3184 		tdc_stats->conf_part_err++;
3185 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3186 		    NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3187 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3188 		    "==> nxge_tx_err_evnts(channel %d): "
3189 		    "fatal error: config_partition_err", channel));
3190 		txchan_fatal = B_TRUE;
3191 	}
3192 	if (cs.bits.ldw.pkt_prt_err) {
3193 		tdc_stats->pkt_part_err++;
3194 		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3195 		    NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3196 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3197 		    "==> nxge_tx_err_evnts(channel %d): "
3198 		    "fatal error: pkt_prt_err", channel));
3199 		txchan_fatal = B_TRUE;
3200 	}
3201 
3202 	/* Clear error injection source in case this is an injected error */
3203 	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3204 
3205 	if (txchan_fatal) {
3206 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3207 		    " nxge_tx_err_evnts: "
3208 		    " fatal error on channel %d cs 0x%llx\n",
3209 		    channel, cs.value));
3210 		status = nxge_txdma_fatal_err_recover(nxgep, channel,
3211 		    tx_ring_p);
3212 		if (status == NXGE_OK) {
3213 			FM_SERVICE_RESTORED(nxgep);
3214 		}
3215 	}
3216 
3217 	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3218 
3219 	return (status);
3220 }
3221 
3222 static nxge_status_t
3223 nxge_txdma_fatal_err_recover(
3224 	p_nxge_t nxgep,
3225 	uint16_t channel,
3226 	p_tx_ring_t tx_ring_p)
3227 {
3228 	npi_handle_t	handle;
3229 	npi_status_t	rs = NPI_SUCCESS;
3230 	p_tx_mbox_t	tx_mbox_p;
3231 	nxge_status_t	status = NXGE_OK;
3232 
3233 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3234 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3235 	    "Recovering from TxDMAChannel#%d error...", channel));
3236 
3237 	/*
3238 	 * Stop the DMA channel and wait for the stop-done bit.
3239 	 * If the stop-done bit is not set, the recovery attempt
3240 	 * fails.
3241 	 */
3242 
3243 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3244 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3245 	MUTEX_ENTER(&tx_ring_p->lock);
3246 	rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3247 	if (rs != NPI_SUCCESS) {
3248 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3249 		    "==> nxge_txdma_fatal_err_recover (channel %d): "
3250 		    "stop failed ", channel));
3251 		goto fail;
3252 	}
3253 
3254 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3255 	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3256 
3257 	/*
3258 	 * Reset TXDMA channel
3259 	 */
3260 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3261 	if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3262 	    NPI_SUCCESS) {
3263 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3264 		    "==> nxge_txdma_fatal_err_recover (channel %d)"
3265 		    " reset channel failed 0x%x", channel, rs));
3266 		goto fail;
3267 	}
3268 
3269 	/*
3270 	 * Reset the tail (kick) register to 0.
3271 	 * (Hardware will not reset it.  A Tx ring overflow fatal
3272 	 * error results if the tail is not set to 0 after reset.)
3273 	 */
3274 	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3275 
3276 	/* Restart TXDMA channel */
3277 
3278 	if (!isLDOMguest(nxgep)) {
3279 		tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3280 
3281 		// XXX This is a problem in HIO!
3282 		/*
3283 		 * Initialize the TXDMA channel specific FZC control
3284 		 * configurations. These FZC registers are pertaining
3285 		 * to each TX channel (i.e. logical pages).
3286 		 */
3287 		NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3288 		status = nxge_init_fzc_txdma_channel(nxgep, channel,
3289 		    tx_ring_p, tx_mbox_p);
3290 		if (status != NXGE_OK)
3291 			goto fail;
3292 	}
3293 
3294 	/*
3295 	 * Initialize the event masks.
3296 	 */
3297 	tx_ring_p->tx_evmask.value = 0;
3298 	status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3299 	    &tx_ring_p->tx_evmask);
3300 	if (status != NXGE_OK)
3301 		goto fail;
3302 
3303 	tx_ring_p->wr_index_wrap = B_FALSE;
3304 	tx_ring_p->wr_index = 0;
3305 	tx_ring_p->rd_index = 0;
3306 
3307 	/*
3308 	 * Load TXDMA descriptors, buffers, mailbox,
3309 	 * initialise the DMA channels and
3310 	 * enable each DMA channel.
3311 	 */
3312 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3313 	status = nxge_enable_txdma_channel(nxgep, channel,
3314 	    tx_ring_p, tx_mbox_p);
3315 	MUTEX_EXIT(&tx_ring_p->lock);
3316 	if (status != NXGE_OK)
3317 		goto fail;
3318 
3319 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3320 	    "Recovery Successful, TxDMAChannel#%d Restored",
3321 	    channel));
3322 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3323 
3324 	return (NXGE_OK);
3325 
3326 fail:
3327 	MUTEX_EXIT(&tx_ring_p->lock);
3328 	NXGE_DEBUG_MSG((nxgep, TX_CTL,
3329 	    "nxge_txdma_fatal_err_recover (channel %d): "
3330 	    "failed to recover this txdma channel", channel));
3331 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3332 
3333 	return (status);
3334 }
3335 
3336 /*
3337  * nxge_tx_port_fatal_err_recover
3338  *
3339  *	Attempt to recover from a fatal port error.
3340  *
3341  * Arguments:
3342  * 	nxgep
3343  *
3344  * Notes:
3345  *	How would a guest do this?
3346  *
3347  * NPI/NXGE function calls:
3348  *
3349  * Registers accessed:
3350  *
3351  * Context:
3352  *	Service domain
3353  */
3354 nxge_status_t
3355 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3356 {
3357 	nxge_grp_set_t *set = &nxgep->tx_set;
3358 	nxge_channel_t tdc;
3359 
3360 	tx_ring_t	*ring;
3361 	tx_mbox_t	*mailbox;
3362 
3363 	npi_handle_t	handle;
3364 	nxge_status_t	status = NXGE_ERROR;
3365 	npi_status_t	rs;
3366 
3367 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3368 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3369 	    "Recovering from TxPort error..."));
3370 
3371 	if (isLDOMguest(nxgep)) {
3372 		return (NXGE_OK);
3373 	}
3374 
3375 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3376 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3377 		    "<== nxge_tx_port_fatal_err_recover: not initialized"));
3378 		return (NXGE_ERROR);
3379 	}
3380 
3381 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3382 		NXGE_DEBUG_MSG((nxgep, TX_CTL,
3383 		    "<== nxge_tx_port_fatal_err_recover: "
3384 		    "NULL ring pointer(s)"));
3385 		return (NXGE_ERROR);
3386 	}
3387 
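	/*
	 * Lock every ring we own so that no transmits or reclaims can
	 * run while the port is being recovered.
	 */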
3388 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3389 		if ((1 << tdc) & set->owned.map) {
3390 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3391 			if (ring)
3392 				MUTEX_ENTER(&ring->lock);
3393 		}
3394 	}
3395 
3396 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
3397 
3398 	/*
3399 	 * Stop all the TDCs owned by us.
3400 	 * (The shared TDCs will have been stopped by their owners.)
3401 	 */
3402 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3403 		if ((1 << tdc) & set->owned.map) {
3404 			ring = nxgep->tx_rings->rings[tdc];
3405 			if (ring) {
3406 				rs = npi_txdma_channel_control
3407 				    (handle, TXDMA_STOP, tdc);
3408 				if (rs != NPI_SUCCESS) {
3409 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3410 					    "nxge_tx_port_fatal_err_recover "
3411 					    "(channel %d): stop failed ", tdc));
3412 					goto fail;
3413 				}
3414 			}
3415 		}
3416 	}
3417 
3418 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3419 
3420 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3421 		if ((1 << tdc) & set->owned.map) {
3422 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3423 			if (ring)
3424 				(void) nxge_txdma_reclaim(nxgep, ring, 0);
3425 		}
3426 	}
3427 
3428 	/*
3429 	 * Reset all the TDCs.
3430 	 */
3431 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3432 
3433 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3434 		if ((1 << tdc) & set->owned.map) {
3435 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3436 			if (ring) {
3437 				if ((rs = npi_txdma_channel_control
3438 				    (handle, TXDMA_RESET, tdc))
3439 				    != NPI_SUCCESS) {
3440 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3441 					    "nxge_tx_port_fatal_err_recover "
3442 					    "(channel %d) reset channel "
3443 					    "failed 0x%x", tdc, rs));
3444 					goto fail;
3445 				}
3446 			}
3447 			/*
3448 			 * Reset the tail (kick) register to 0.
3449 			 * (Hardware will not reset it.  A Tx ring overflow
3450 			 * fatal error results if the tail is not 0 after reset.)
3451 			 */
3452 			TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3453 		}
3454 	}
3455 
3456 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3457 
3458 	/* Restart all the TDCs */
3459 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3460 		if ((1 << tdc) & set->owned.map) {
3461 			ring = nxgep->tx_rings->rings[tdc];
3462 			if (ring) {
3463 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3464 				status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3465 				    ring, mailbox);
3466 				ring->tx_evmask.value = 0;
3467 				/*
3468 				 * Initialize the event masks.
3469 				 */
3470 				status = nxge_init_txdma_channel_event_mask
3471 				    (nxgep, tdc, &ring->tx_evmask);
3472 
3473 				ring->wr_index_wrap = B_FALSE;
3474 				ring->wr_index = 0;
3475 				ring->rd_index = 0;
3476 
3477 				if (status != NXGE_OK)
3478 					goto fail;
3481 			}
3482 		}
3483 	}
3484 
3485 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3486 
3487 	/* Re-enable all the TDCs */
3488 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3489 		if ((1 << tdc) & set->owned.map) {
3490 			ring = nxgep->tx_rings->rings[tdc];
3491 			if (ring) {
3492 				mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3493 				status = nxge_enable_txdma_channel(nxgep, tdc,
3494 				    ring, mailbox);
3495 				if (status != NXGE_OK)
3496 					goto fail;
3497 			}
3498 		}
3499 	}
3500 
3501 	/*
3502 	 * Unlock all the TDCs.
3503 	 */
3504 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3505 		if ((1 << tdc) & set->owned.map) {
3506 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3507 			if (ring)
3508 				MUTEX_EXIT(&ring->lock);
3509 		}
3510 	}
3511 
3512 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
3513 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3514 
3515 	return (NXGE_OK);
3516 
3517 fail:
3518 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3519 		if ((1 << tdc) & set->owned.map) {
3520 			ring = nxgep->tx_rings->rings[tdc];
3521 			if (ring)
3522 				MUTEX_EXIT(&ring->lock);
3523 		}
3524 	}
3525 
3526 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3527 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3528 
3529 	return (status);
3530 }
3531 
3532 /*
3533  * nxge_txdma_inject_err
3534  *
3535  *	Inject an error into a TDC.
3536  *
3537  * Arguments:
3538  * 	nxgep
3539  * 	err_id	The error to inject.
3540  * 	chan	The channel to inject into.
3541  *
3542  * Notes:
3543  *	This is called from nxge_main.c:nxge_err_inject()
3544  *	Has this ioctl ever been used?
3545  *
3546  * NPI/NXGE function calls:
3547  *	npi_txdma_inj_par_error_get()
3548  *	npi_txdma_inj_par_error_set()
3549  *
3550  * Registers accessed:
3551  *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3552  *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
3554  *
3555  * Context:
3556  *	Service domain
3557  */
3558 void
3559 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3560 {
3561 	tdmc_intr_dbg_t		tdi;
3562 	tdmc_inj_par_err_t	par_err;
3563 	uint32_t		value;
3564 	npi_handle_t		handle;
3565 
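	/*
	 * Parity errors are injected through TDMC_INJ_PAR_ERR; all other
	 * TDC errors are injected through the per-channel TDMC_INTR_DBG
	 * debug register.
	 */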
3566 	switch (err_id) {
3567 
3568 	case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3569 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
3570 		/* Clear error injection source for parity error */
3571 		(void) npi_txdma_inj_par_error_get(handle, &value);
3572 		par_err.value = value;
3573 		par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3574 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3575 
3576 		par_err.bits.ldw.inject_parity_error = (1 << chan);
3577 		(void) npi_txdma_inj_par_error_get(handle, &value);
3578 		par_err.value = value;
3579 		par_err.bits.ldw.inject_parity_error |= (1 << chan);
3580 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3581 		    (unsigned long long)par_err.value);
3582 		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
3583 		break;
3584 
3585 	case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3586 	case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3587 	case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3588 	case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3589 	case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3590 	case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3591 	case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3592 		TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3593 		    chan, &tdi.value);
3594 		if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3595 			tdi.bits.ldw.pref_buf_par_err = 1;
3596 		else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3597 			tdi.bits.ldw.mbox_err = 1;
3598 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3599 			tdi.bits.ldw.nack_pref = 1;
3600 		else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3601 			tdi.bits.ldw.nack_pkt_rd = 1;
3602 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3603 			tdi.bits.ldw.pkt_size_err = 1;
3604 		else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3605 			tdi.bits.ldw.tx_ring_oflow = 1;
3606 		else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3607 			tdi.bits.ldw.conf_part_err = 1;
3608 		else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3609 			tdi.bits.ldw.pkt_part_err = 1;
3610 #if defined(__i386)
3611 		cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3612 		    tdi.value);
3613 #else
3614 		cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3615 		    tdi.value);
3616 #endif
3617 		TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3618 		    chan, tdi.value);
3619 
3620 		break;
3621 	}
3622 }
3623