1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_txdma.h>
29 #include <sys/nxge/nxge_hio.h>
30 #include <npi_tx_rd64.h>
31 #include <npi_tx_wr64.h>
32 #include <sys/llc1.h>
33
34 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
35 uint32_t nxge_tx_minfree = 64;
36 uint32_t nxge_tx_intr_thres = 0;
37 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
38 uint32_t nxge_tx_tiny_pack = 1;
39 uint32_t nxge_tx_use_bcopy = 1;
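/*
 * The globals above are driver tunables.  On a stock Solaris/illumos
 * system they can normally be overridden at boot time from /etc/system,
 * for example (values here are hypothetical):
 *
 *	set nxge:nxge_tx_minfree = 32
 *	set nxge:nxge_tx_use_bcopy = 0
 */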
40
41 extern uint32_t nxge_tx_ring_size;
42 extern uint32_t nxge_bcopy_thresh;
43 extern uint32_t nxge_dvma_thresh;
44 extern uint32_t nxge_dma_stream_thresh;
45 extern dma_method_t nxge_force_dma;
46 extern uint32_t nxge_cksum_offload;
47
48 /* Device register access attributes for PIO. */
49 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
50 /* Device descriptor access attributes for DMA. */
51 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
52 /* Device buffer access attributes for DMA. */
53 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
54 extern ddi_dma_attr_t nxge_desc_dma_attr;
55 extern ddi_dma_attr_t nxge_tx_dma_attr;
56
57 extern void nxge_tx_ring_task(void *arg);
58
59 static nxge_status_t nxge_map_txdma(p_nxge_t, int);
60
61 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
62
63 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
64 p_nxge_dma_common_t *, p_tx_ring_t *,
65 uint32_t, p_nxge_dma_common_t *,
66 p_tx_mbox_t *);
67 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
68
69 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
70 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
71 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
72
73 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
74 p_nxge_dma_common_t *, p_tx_ring_t,
75 p_tx_mbox_t *);
76 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
77 p_tx_ring_t, p_tx_mbox_t);
78
79 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
80 p_tx_ring_t, p_tx_mbox_t);
81 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
82
83 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
84 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
85 p_nxge_ldv_t, tx_cs_t);
86 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
87 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
88 uint16_t, p_tx_ring_t);
89
90 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
91 p_tx_ring_t ring_p, uint16_t channel);
92
93 nxge_status_t
94 nxge_init_txdma_channels(p_nxge_t nxgep)
95 {
96 nxge_grp_set_t *set = &nxgep->tx_set;
97 int i, tdc, count;
98 nxge_grp_t *group;
99 dc_map_t map;
100 int dev_gindex;
101
102 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
103
104 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
105 if ((1 << i) & set->lg.map) {
106 group = set->group[i];
107 dev_gindex =
108 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
109 map = nxgep->pt_config.tdc_grps[dev_gindex].map;
110 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
111 if ((1 << tdc) & map) {
112 if ((nxge_grp_dc_add(nxgep,
113 group, VP_BOUND_TX, tdc)))
114 goto init_txdma_channels_exit;
115 }
116 }
117 }
118 if (++count == set->lg.count)
119 break;
120 }
121
122 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
123 return (NXGE_OK);
124
125 init_txdma_channels_exit:
126 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
127 if ((1 << i) & set->lg.map) {
128 group = set->group[i];
129 dev_gindex =
130 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
131 map = nxgep->pt_config.tdc_grps[dev_gindex].map;
132 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
133 if ((1 << tdc) & map) {
134 nxge_grp_dc_remove(nxgep,
135 VP_BOUND_TX, tdc);
136 }
137 }
138 }
139 if (++count == set->lg.count)
140 break;
141 }
142
143 return (NXGE_ERROR);
144
145 }
146
147 nxge_status_t
148 nxge_init_txdma_channel(
149 p_nxge_t nxge,
150 int channel)
151 {
152 nxge_status_t status;
153
154 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
155
156 status = nxge_map_txdma(nxge, channel);
157 if (status != NXGE_OK) {
158 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
159 "<== nxge_init_txdma_channel: status 0x%x", status));
160 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
161 return (status);
162 }
163
164 status = nxge_txdma_hw_start(nxge, channel);
165 if (status != NXGE_OK) {
166 (void) nxge_unmap_txdma_channel(nxge, channel);
167 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
168 return (status);
169 }
170
171 if (!nxge->statsp->tdc_ksp[channel])
172 nxge_setup_tdc_kstats(nxge, channel);
173
174 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
175
176 return (status);
177 }
178
179 void
180 nxge_uninit_txdma_channels(p_nxge_t nxgep)
181 {
182 nxge_grp_set_t *set = &nxgep->tx_set;
183 int tdc;
184
185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
186
187 if (set->owned.map == 0) {
188 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
189 "nxge_uninit_txdma_channels: no channels"));
190 return;
191 }
192
193 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
194 if ((1 << tdc) & set->owned.map) {
195 nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
196 }
197 }
198
199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
200 }
201
202 void
203 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
204 {
205 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
206
207 if (nxgep->statsp->tdc_ksp[channel]) {
208 kstat_delete(nxgep->statsp->tdc_ksp[channel]);
209 nxgep->statsp->tdc_ksp[channel] = 0;
210 }
211
212 if (nxge_txdma_stop_channel(nxgep, channel) != NXGE_OK)
213 goto nxge_uninit_txdma_channel_exit;
214
215 nxge_unmap_txdma_channel(nxgep, channel);
216
217 nxge_uninit_txdma_channel_exit:
218 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_uninit_txdma_channel"));
219 }
220
221 void
222 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
223 uint32_t entries, uint32_t size)
224 {
225 size_t tsize;
226 *dest_p = *src_p;
227 tsize = size * entries;
228 dest_p->alength = tsize;
229 dest_p->nblocks = entries;
230 dest_p->block_size = size;
231 dest_p->offset += tsize;
232
233 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
234 src_p->alength -= tsize;
235 src_p->dma_cookie.dmac_laddress += tsize;
236 src_p->dma_cookie.dmac_size -= tsize;
237 }
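/*
 * Example of the carve-out performed above (a sketch; sizes are
 * hypothetical): given a source area of 8192 bytes,
 * nxge_setup_dma_common(&desc, &src, 512, 8) copies the common state
 * into 'desc', claims the first 512 * 8 = 4096 bytes for it, and then
 * advances the source's kernel address, DMA cookie address and
 * remaining length past the claimed region, so the next call starts
 * carving at the following 4096 bytes.
 */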
238
239 /*
240 * nxge_reset_txdma_channel
241 *
242 * Reset a TDC.
243 *
244 * Arguments:
245 * nxgep
246 * channel The channel to reset.
247 * reg_data The current TX_CS.
248 *
249 * Notes:
250 *
251 * NPI/NXGE function calls:
252 * npi_txdma_channel_reset()
253 * npi_txdma_channel_control()
254 *
255 * Registers accessed:
256 * TX_CS DMC+0x40028 Transmit Control And Status
257 * TX_RING_KICK DMC+0x40018 Transmit Ring Kick
258 *
259 * Context:
260 * Any domain
261 */
262 nxge_status_t
263 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
264 {
265 npi_status_t rs = NPI_SUCCESS;
266 nxge_status_t status = NXGE_OK;
267 npi_handle_t handle;
268
269 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
270
271 handle = NXGE_DEV_NPI_HANDLE(nxgep);
272 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
273 rs = npi_txdma_channel_reset(handle, channel);
274 } else {
275 rs = npi_txdma_channel_control(handle, TXDMA_RESET,
276 channel);
277 }
278
279 if (rs != NPI_SUCCESS) {
280 status = NXGE_ERROR | rs;
281 }
282
283 /*
284 * Reset the tail (kick) register to 0.
285 * (Hardware will not reset it; a Tx overflow fatal
286 * error results if the tail is not set to 0 after reset.)
287 */
288 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
289
290 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
291 return (status);
292 }
293
294 /*
295 * nxge_init_txdma_channel_event_mask
296 *
297 * Enable interrupts for a set of events.
298 *
299 * Arguments:
300 * nxgep
301 * channel The channel to map.
302 * mask_p The events to enable.
303 *
304 * Notes:
305 *
306 * NPI/NXGE function calls:
307 * npi_txdma_event_mask()
308 *
309 * Registers accessed:
310 * TX_ENT_MSK DMC+0x40020 Transmit Event Mask
311 *
312 * Context:
313 * Any domain
314 */
315 nxge_status_t
316 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
317 p_tx_dma_ent_msk_t mask_p)
318 {
319 npi_handle_t handle;
320 npi_status_t rs = NPI_SUCCESS;
321 nxge_status_t status = NXGE_OK;
322
323 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
324 "<== nxge_init_txdma_channel_event_mask"));
325
326 handle = NXGE_DEV_NPI_HANDLE(nxgep);
327 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
328 if (rs != NPI_SUCCESS) {
329 status = NXGE_ERROR | rs;
330 }
331
332 return (status);
333 }
334
335 /*
336 * nxge_init_txdma_channel_cntl_stat
337 *
338 * Set a TDC's transmit control and status (TX_CS) register.
339 *
340 * Arguments:
341 * nxgep
342 * channel The channel whose TX_CS register to set.
343 *
344 * Notes:
345 *
346 * NPI/NXGE function calls:
347 * npi_txdma_control_status()
348 *
349 * Registers accessed:
350 * TX_CS DMC+0x40028 Transmit Control And Status
351 *
352 * Context:
353 * Any domain
354 */
355 nxge_status_t
356 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
357 uint64_t reg_data)
358 {
359 npi_handle_t handle;
360 npi_status_t rs = NPI_SUCCESS;
361 nxge_status_t status = NXGE_OK;
362
363 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
364 "<== nxge_init_txdma_channel_cntl_stat"));
365
366 handle = NXGE_DEV_NPI_HANDLE(nxgep);
367 rs = npi_txdma_control_status(handle, OP_SET, channel,
368 (p_tx_cs_t)&reg_data);
369
370 if (rs != NPI_SUCCESS) {
371 status = NXGE_ERROR | rs;
372 }
373
374 return (status);
375 }
376
377 /*
378 * nxge_enable_txdma_channel
379 *
380 * Enable a TDC.
381 *
382 * Arguments:
383 * nxgep
384 * channel The channel to enable.
385 * tx_desc_p channel's transmit descriptor ring.
386 * mbox_p channel's mailbox,
387 *
388 * Notes:
389 *
390 * NPI/NXGE function calls:
391 * npi_txdma_ring_config()
392 * npi_txdma_mbox_config()
393 * npi_txdma_channel_init_enable()
394 *
395 * Registers accessed:
396 * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration
397 * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High
398 * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low
399 * TX_CS DMC+0x40028 Transmit Control And Status
400 *
401 * Context:
402 * Any domain
403 */
404 nxge_status_t
405 nxge_enable_txdma_channel(p_nxge_t nxgep,
406 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
407 {
408 npi_handle_t handle;
409 npi_status_t rs = NPI_SUCCESS;
410 nxge_status_t status = NXGE_OK;
411
412 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
413
414 handle = NXGE_DEV_NPI_HANDLE(nxgep);
415 /*
416 * Use configuration data composed at init time.
417 * Write to hardware the transmit ring configurations.
418 */
419 rs = npi_txdma_ring_config(handle, OP_SET, channel,
420 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
421
422 if (rs != NPI_SUCCESS) {
423 return (NXGE_ERROR | rs);
424 }
425
426 if (isLDOMguest(nxgep)) {
427 /* Add interrupt handler for this channel. */
428 if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
429 return (NXGE_ERROR);
430 }
431
432 /* Write to hardware the mailbox */
433 rs = npi_txdma_mbox_config(handle, OP_SET, channel,
434 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
435
436 if (rs != NPI_SUCCESS) {
437 return (NXGE_ERROR | rs);
438 }
439
440 /* Start the DMA engine. */
441 rs = npi_txdma_channel_init_enable(handle, channel);
442
443 if (rs != NPI_SUCCESS) {
444 return (NXGE_ERROR | rs);
445 }
446
447 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
448
449 return (status);
450 }
451
452 void
453 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
454 boolean_t l4_cksum, int pkt_len, uint8_t npads,
455 p_tx_pkt_hdr_all_t pkthdrp,
456 t_uscalar_t start_offset,
457 t_uscalar_t stuff_offset)
458 {
459 p_tx_pkt_header_t hdrp;
460 p_mblk_t nmp;
461 uint64_t tmp;
462 size_t mblk_len;
463 size_t iph_len;
464 size_t hdrs_size;
465 uint8_t hdrs_buf[sizeof (struct ether_header) +
466 64 + sizeof (uint32_t)];
467 uint8_t *cursor;
468 uint8_t *ip_buf;
469 uint16_t eth_type;
470 uint8_t ipproto;
471 boolean_t is_vlan = B_FALSE;
472 size_t eth_hdr_size;
473
474 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
475
476 /*
477 * Caller should zero out the headers first.
478 */
479 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
480
481 if (fill_len) {
482 NXGE_DEBUG_MSG((NULL, TX_CTL,
483 "==> nxge_fill_tx_hdr: pkt_len %d "
484 "npads %d", pkt_len, npads));
485 tmp = (uint64_t)pkt_len;
486 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
487 goto fill_tx_header_done;
488 }
489
490 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);
491
492 /*
493 * mp is the original data packet (does not include the
494 * Neptune transmit header).
495 */
496 nmp = mp;
497 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
498 "mp $%p b_rptr $%p len %d",
499 mp, nmp->b_rptr, MBLKL(nmp)));
500 /* copy ether_header from mblk to hdrs_buf */
501 cursor = &hdrs_buf[0];
502 tmp = sizeof (struct ether_vlan_header);
503 while ((nmp != NULL) && (tmp > 0)) {
504 size_t buflen;
505 mblk_len = MBLKL(nmp);
506 buflen = min((size_t)tmp, mblk_len);
507 bcopy(nmp->b_rptr, cursor, buflen);
508 cursor += buflen;
509 tmp -= buflen;
510 nmp = nmp->b_cont;
511 }
512
513 nmp = mp;
514 mblk_len = MBLKL(nmp);
515 ip_buf = NULL;
516 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
517 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: (value 0x%llx) "
518 "ether type 0x%x", hdrp->value, eth_type));
519
520 if (eth_type < ETHERMTU) {
521 tmp = 1ull;
522 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
523 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
524 "value 0x%llx", hdrp->value));
525 if (*(hdrs_buf + sizeof (struct ether_header))
526 == LLC_SNAP_SAP) {
527 eth_type = ntohs(*((uint16_t *)(hdrs_buf +
528 sizeof (struct ether_header) + 6)));
529 NXGE_DEBUG_MSG((NULL, TX_CTL,
530 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
531 eth_type));
532 } else {
533 goto fill_tx_header_done;
534 }
535 } else if (eth_type == VLAN_ETHERTYPE) {
536 tmp = 1ull;
537 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
538
539 eth_type = ntohs(((struct ether_vlan_header *)
540 hdrs_buf)->ether_type);
541 is_vlan = B_TRUE;
542 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
543 "value 0x%llx", hdrp->value));
544 }
545
546 if (!is_vlan) {
547 eth_hdr_size = sizeof (struct ether_header);
548 } else {
549 eth_hdr_size = sizeof (struct ether_vlan_header);
550 }
551
552 switch (eth_type) {
553 case ETHERTYPE_IP:
554 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
555 ip_buf = nmp->b_rptr + eth_hdr_size;
556 mblk_len -= eth_hdr_size;
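/*
 * The low nibble of the IP version/IHL byte is the header
 * length in 32-bit words.
 */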
557 iph_len = ((*ip_buf) & 0x0f);
558 if (mblk_len > (iph_len + sizeof (uint32_t))) {
559 ip_buf = nmp->b_rptr;
560 ip_buf += eth_hdr_size;
561 } else {
562 ip_buf = NULL;
563 }
564
565 }
566 if (ip_buf == NULL) {
567 hdrs_size = 0;
568 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
569 while ((nmp) && (hdrs_size <
570 sizeof (hdrs_buf))) {
571 mblk_len = (size_t)nmp->b_wptr -
572 (size_t)nmp->b_rptr;
573 if (mblk_len >=
574 (sizeof (hdrs_buf) - hdrs_size))
575 mblk_len = sizeof (hdrs_buf) -
576 hdrs_size;
577 bcopy(nmp->b_rptr,
578 &hdrs_buf[hdrs_size], mblk_len);
579 hdrs_size += mblk_len;
580 nmp = nmp->b_cont;
581 }
582 ip_buf = hdrs_buf;
583 ip_buf += eth_hdr_size;
584 iph_len = ((*ip_buf) & 0x0f);
585 }
586
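/* Byte 9 of the IPv4 header is the protocol field. */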
587 ipproto = ip_buf[9];
588
589 tmp = (uint64_t)iph_len;
590 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
591 tmp = (uint64_t)(eth_hdr_size >> 1);
592 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
593
594 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
595 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
596 "tmp 0x%x",
597 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
598 ipproto, tmp));
599 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
600 "value 0x%llx", hdrp->value));
601
602 break;
603
604 case ETHERTYPE_IPV6:
605 hdrs_size = 0;
606 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
607 while ((nmp) && (hdrs_size <
608 sizeof (hdrs_buf))) {
609 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
610 if (mblk_len >=
611 (sizeof (hdrs_buf) - hdrs_size))
612 mblk_len = sizeof (hdrs_buf) -
613 hdrs_size;
614 bcopy(nmp->b_rptr,
615 &hdrs_buf[hdrs_size], mblk_len);
616 hdrs_size += mblk_len;
617 nmp = nmp->b_cont;
618 }
619 ip_buf = hdrs_buf;
620 ip_buf += eth_hdr_size;
621
622 tmp = 1ull;
623 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
624
625 tmp = (eth_hdr_size >> 1);
626 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
627
628 /* byte 6 is the next header protocol */
629 ipproto = ip_buf[6];
630
631 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
632 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
633 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
634 ipproto));
635 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
636 "value 0x%llx", hdrp->value));
637
638 break;
639
640 default:
641 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
642 goto fill_tx_header_done;
643 }
644
645 switch (ipproto) {
646 case IPPROTO_TCP:
647 NXGE_DEBUG_MSG((NULL, TX_CTL,
648 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
649 if (l4_cksum) {
650 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
651 hdrp->value |=
652 (((uint64_t)(start_offset >> 1)) <<
653 TX_PKT_HEADER_L4START_SHIFT);
654 hdrp->value |=
655 (((uint64_t)(stuff_offset >> 1)) <<
656 TX_PKT_HEADER_L4STUFF_SHIFT);
657
658 NXGE_DEBUG_MSG((NULL, TX_CTL,
659 "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
660 "value 0x%llx", hdrp->value));
661 }
662
663 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
664 "value 0x%llx", hdrp->value));
665 break;
666
667 case IPPROTO_UDP:
668 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
669 if (l4_cksum) {
670 if (!nxge_cksum_offload) {
671 uint16_t *up;
672 uint16_t cksum;
673 t_uscalar_t stuff_len;
674
675 /*
676 * The checksum field has the
677 * partial checksum.
678 * IP_CSUM() macro calls ip_cksum() which
679 * can add in the partial checksum.
680 */
681 cksum = IP_CSUM(mp, start_offset, 0);
682 stuff_len = stuff_offset;
683 nmp = mp;
684 mblk_len = MBLKL(nmp);
685 while ((nmp != NULL) &&
686 (mblk_len < stuff_len)) {
687 stuff_len -= mblk_len;
688 nmp = nmp->b_cont;
689 if (nmp)
690 mblk_len = MBLKL(nmp);
691 }
692 ASSERT(nmp);
693 up = (uint16_t *)(nmp->b_rptr + stuff_len);
694
695 *up = cksum;
696 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
697 NXGE_DEBUG_MSG((NULL, TX_CTL,
698 "==> nxge_tx_pkt_hdr_init: UDP offset %d "
699 "use sw cksum "
700 "write to $%p cksum 0x%x content up 0x%x",
701 stuff_len,
702 up,
703 cksum,
704 *up));
705 } else {
706 /* Hardware will compute the full checksum */
707 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
708 hdrp->value |=
709 (((uint64_t)(start_offset >> 1)) <<
710 TX_PKT_HEADER_L4START_SHIFT);
711 hdrp->value |=
712 (((uint64_t)(stuff_offset >> 1)) <<
713 TX_PKT_HEADER_L4STUFF_SHIFT);
714
715 NXGE_DEBUG_MSG((NULL, TX_CTL,
716 "==> nxge_tx_pkt_hdr_init: UDP offset %d "
717 " use partial checksum "
718 "cksum 0x%x ",
719 "value 0x%llx",
720 stuff_offset,
721 IP_CSUM(mp, start_offset, 0),
722 hdrp->value));
723 }
724 }
725
726 NXGE_DEBUG_MSG((NULL, TX_CTL,
727 "==> nxge_tx_pkt_hdr_init: UDP"
728 "value 0x%llx", hdrp->value));
729 break;
730
731 default:
732 goto fill_tx_header_done;
733 }
734
735 fill_tx_header_done:
736 NXGE_DEBUG_MSG((NULL, TX_CTL,
737 "==> nxge_fill_tx_hdr: pkt_len %d "
738 "npads %d value 0x%llx", pkt_len, npads, hdrp->value));
739
740 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
741 }
742
743 /*ARGSUSED*/
744 p_mblk_t
745 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
746 {
747 p_mblk_t newmp = NULL;
748
749 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
750 NXGE_DEBUG_MSG((NULL, TX_CTL,
751 "<== nxge_tx_pkt_header_reserve: allocb failed"));
752 return (NULL);
753 }
754
755 NXGE_DEBUG_MSG((NULL, TX_CTL,
756 "==> nxge_tx_pkt_header_reserve: get new mp"));
757 DB_TYPE(newmp) = M_DATA;
758 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
759 linkb(newmp, mp);
760 newmp->b_rptr -= TX_PKT_HEADER_SIZE;
761
762 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
763 "b_rptr $%p b_wptr $%p",
764 newmp->b_rptr, newmp->b_wptr));
765
766 NXGE_DEBUG_MSG((NULL, TX_CTL,
767 "<== nxge_tx_pkt_header_reserve: use new mp"));
768
769 return (newmp);
770 }
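/*
 * Typical pairing of the two routines above (a sketch only; the real
 * transmit path elsewhere in the driver zeroes the reserved header and
 * computes the checksum offsets before calling nxge_fill_tx_hdr()):
 *
 *	if ((hmp = nxge_tx_pkt_header_reserve(mp, &npads)) != NULL) {
 *		pkthdrp = (p_tx_pkt_hdr_all_t)hmp->b_rptr;
 *		bzero(pkthdrp, TX_PKT_HEADER_SIZE);
 *		nxge_fill_tx_hdr(mp, B_FALSE, l4_cksum, pkt_len, npads,
 *		    pkthdrp, start_offset, stuff_offset);
 *	}
 */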
771
772 int
773 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
774 {
775 uint_t nmblks;
776 ssize_t len;
777 uint_t pkt_len;
778 p_mblk_t nmp, bmp, tmp;
779 uint8_t *b_wptr;
780
781 NXGE_DEBUG_MSG((NULL, TX_CTL,
782 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
783 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
784
785 nmp = mp;
786 bmp = mp;
787 nmblks = 0;
788 pkt_len = 0;
789 *tot_xfer_len_p = 0;
790
791 while (nmp) {
792 len = MBLKL(nmp);
793 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
794 "len %d pkt_len %d nmblks %d tot_xfer_len %d",
795 len, pkt_len, nmblks,
796 *tot_xfer_len_p));
797
798 if (len <= 0) {
799 bmp = nmp;
800 nmp = nmp->b_cont;
801 NXGE_DEBUG_MSG((NULL, TX_CTL,
802 "==> nxge_tx_pkt_nmblocks: "
803 "len (0) pkt_len %d nmblks %d",
804 pkt_len, nmblks));
805 continue;
806 }
807
808 *tot_xfer_len_p += len;
809 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
810 "len %d pkt_len %d nmblks %d tot_xfer_len %d",
811 len, pkt_len, nmblks,
812 *tot_xfer_len_p));
813
814 if (len < nxge_bcopy_thresh) {
815 NXGE_DEBUG_MSG((NULL, TX_CTL,
816 "==> nxge_tx_pkt_nmblocks: "
817 "len %d (< thresh) pkt_len %d nmblks %d",
818 len, pkt_len, nmblks));
819 if (pkt_len == 0)
820 nmblks++;
821 pkt_len += len;
822 if (pkt_len >= nxge_bcopy_thresh) {
823 pkt_len = 0;
824 len = 0;
825 nmp = bmp;
826 }
827 } else {
828 NXGE_DEBUG_MSG((NULL, TX_CTL,
829 "==> nxge_tx_pkt_nmblocks: "
830 "len %d (> thresh) pkt_len %d nmblks %d",
831 len, pkt_len, nmblks));
832 pkt_len = 0;
833 nmblks++;
834 /*
835 * Hardware limits the transfer length to 4K.
836 * If len is more than 4K, we need to break
837 * it up to at most 2 more blocks.
838 */
839 if (len > TX_MAX_TRANSFER_LENGTH) {
840 uint32_t nsegs;
841
842 nsegs = 1;
843 NXGE_DEBUG_MSG((NULL, TX_CTL,
844 "==> nxge_tx_pkt_nmblocks: "
845 "len %d pkt_len %d nmblks %d nsegs %d",
846 len, pkt_len, nmblks, nsegs));
847 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
848 ++nsegs;
849 }
850 do {
851 b_wptr = nmp->b_rptr +
852 TX_MAX_TRANSFER_LENGTH;
853 nmp->b_wptr = b_wptr;
854 if ((tmp = dupb(nmp)) == NULL) {
855 return (0);
856 }
857 tmp->b_rptr = b_wptr;
858 tmp->b_wptr = nmp->b_wptr;
859 tmp->b_cont = nmp->b_cont;
860 nmp->b_cont = tmp;
861 nmblks++;
862 if (--nsegs) {
863 nmp = tmp;
864 }
865 } while (nsegs);
866 nmp = tmp;
867 }
868 }
869
870 /*
871 * Hardware limits the transmit gather pointers to 15.
872 */
873 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
874 TX_MAX_GATHER_POINTERS) {
875 NXGE_DEBUG_MSG((NULL, TX_CTL,
876 "==> nxge_tx_pkt_nmblocks: pull msg - "
877 "len %d pkt_len %d nmblks %d",
878 len, pkt_len, nmblks));
879 /* Pull all message blocks from b_cont */
880 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
881 return (0);
882 }
883 freemsg(nmp->b_cont);
884 nmp->b_cont = tmp;
885 pkt_len = 0;
886 }
887 bmp = nmp;
888 nmp = nmp->b_cont;
889 }
890
891 NXGE_DEBUG_MSG((NULL, TX_CTL,
892 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
893 "nmblks %d len %d tot_xfer_len %d",
894 mp->b_rptr, mp->b_wptr, nmblks,
895 MBLKL(mp), *tot_xfer_len_p));
896
897 return (nmblks);
898 }
899
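/*
 * nxge_txdma_reclaim
 *
 * Reclaim descriptors that the hardware has finished transmitting.
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	The ring to reclaim descriptors from.
 * 	nmblks		The number of free descriptors the caller needs.
 *
 * Notes:
 * 	Callers are expected to hold the ring lock (see, for example,
 * 	nxge_reclaim_rings() and nxge_txdma_fixup_channel()).  Returns
 * 	B_TRUE when at least <nmblks> descriptors are free after the
 * 	reclaim, B_FALSE when the ring is still too full to accept them.
 *
 * Registers accessed:
 * 	TX_RING_HDL	DMC+0x40010	Transmit Ring Head Low
 *
 * Context:
 * 	Any domain
 */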
900 boolean_t
901 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
902 {
903 boolean_t status = B_TRUE;
904 p_nxge_dma_common_t tx_desc_dma_p;
905 nxge_dma_common_t desc_area;
906 p_tx_desc_t tx_desc_ring_vp;
907 p_tx_desc_t tx_desc_p;
908 p_tx_desc_t tx_desc_pp;
909 tx_desc_t r_tx_desc;
910 p_tx_msg_t tx_msg_ring;
911 p_tx_msg_t tx_msg_p;
912 npi_handle_t handle;
913 tx_ring_hdl_t tx_head;
914 uint32_t pkt_len;
915 uint_t tx_rd_index;
916 uint16_t head_index, tail_index;
917 uint8_t tdc;
918 boolean_t head_wrap, tail_wrap;
919 p_nxge_tx_ring_stats_t tdc_stats;
920 int rc;
921
922 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
923
924 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
925 (nmblks != 0));
926 NXGE_DEBUG_MSG((nxgep, TX_CTL,
927 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
928 tx_ring_p->descs_pending, nxge_reclaim_pending,
929 nmblks));
930 if (!status) {
931 tx_desc_dma_p = &tx_ring_p->tdc_desc;
932 desc_area = tx_ring_p->tdc_desc;
933 handle = NXGE_DEV_NPI_HANDLE(nxgep);
934 tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
935 tx_desc_ring_vp =
936 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
937 tx_rd_index = tx_ring_p->rd_index;
938 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
939 tx_msg_ring = tx_ring_p->tx_msg_ring;
940 tx_msg_p = &tx_msg_ring[tx_rd_index];
941 tdc = tx_ring_p->tdc;
942 tdc_stats = tx_ring_p->tdc_stats;
943 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
944 tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
945 }
946
947 tail_index = tx_ring_p->wr_index;
948 tail_wrap = tx_ring_p->wr_index_wrap;
949
950 NXGE_DEBUG_MSG((nxgep, TX_CTL,
951 "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
952 "tail_index %d tail_wrap %d "
953 "tx_desc_p $%p ($%p) ",
954 tdc, tx_rd_index, tail_index, tail_wrap,
955 tx_desc_p, (*(uint64_t *)tx_desc_p)));
956 /*
957 * Read the hardware maintained transmit head
958 * and wrap around bit.
959 */
960 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
961 head_index = tx_head.bits.ldw.head;
962 head_wrap = tx_head.bits.ldw.wrap;
963 NXGE_DEBUG_MSG((nxgep, TX_CTL,
964 "==> nxge_txdma_reclaim: "
965 "tx_rd_index %d tail %d tail_wrap %d "
966 "head %d wrap %d",
967 tx_rd_index, tail_index, tail_wrap,
968 head_index, head_wrap));
969
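/*
 * Ring-state convention: head == tail with matching wrap bits
 * means the ring is empty; head == tail with differing wrap bits
 * means the ring is full.  TXDMA_RING_EMPTY() and TXDMA_RING_FULL()
 * encode these two tests.
 */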
970 if (head_index == tail_index) {
971 if (TXDMA_RING_EMPTY(head_index, head_wrap,
972 tail_index, tail_wrap) &&
973 (head_index == tx_rd_index)) {
974 NXGE_DEBUG_MSG((nxgep, TX_CTL,
975 "==> nxge_txdma_reclaim: EMPTY"));
976 return (B_TRUE);
977 }
978
979 NXGE_DEBUG_MSG((nxgep, TX_CTL,
980 "==> nxge_txdma_reclaim: Checking "
981 "if ring full"));
982 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
983 tail_wrap)) {
984 NXGE_DEBUG_MSG((nxgep, TX_CTL,
985 "==> nxge_txdma_reclaim: full"));
986 return (B_FALSE);
987 }
988 }
989
990 NXGE_DEBUG_MSG((nxgep, TX_CTL,
991 "==> nxge_txdma_reclaim: tx_rd_index and head_index"));
992
993 tx_desc_pp = &r_tx_desc;
994 while ((tx_rd_index != head_index) &&
995 (tx_ring_p->descs_pending != 0)) {
996
997 NXGE_DEBUG_MSG((nxgep, TX_CTL,
998 "==> nxge_txdma_reclaim: Checking if pending"));
999
1000 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1001 "==> nxge_txdma_reclaim: "
1002 "descs_pending %d ",
1003 tx_ring_p->descs_pending));
1004
1005 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1006 "==> nxge_txdma_reclaim: "
1007 "(tx_rd_index %d head_index %d "
1008 "(tx_desc_p $%p)",
1009 tx_rd_index, head_index,
1010 tx_desc_p));
1011
1012 tx_desc_pp->value = tx_desc_p->value;
1013 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1014 "==> nxge_txdma_reclaim: "
1015 "(tx_rd_index %d head_index %d "
1016 "tx_desc_p $%p (desc value 0x%llx) ",
1017 tx_rd_index, head_index,
1018 tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
1019
1020 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1021 "==> nxge_txdma_reclaim: dump desc:"));
1022
1023 pkt_len = tx_desc_pp->bits.hdw.tr_len;
1024 tdc_stats->obytes += (pkt_len - TX_PKT_HEADER_SIZE);
1025 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
1026 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1027 "==> nxge_txdma_reclaim: pkt_len %d "
1028 "tdc channel %d opackets %d",
1029 pkt_len,
1030 tdc,
1031 tdc_stats->opackets));
1032
1033 if (tx_msg_p->flags.dma_type == USE_DVMA) {
1034 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1035 "tx_desc_p = $%p "
1036 "tx_desc_pp = $%p "
1037 "index = %d",
1038 tx_desc_p,
1039 tx_desc_pp,
1040 tx_ring_p->rd_index));
1041 (void) dvma_unload(tx_msg_p->dvma_handle,
1042 0, -1);
1043 tx_msg_p->dvma_handle = NULL;
1044 if (tx_ring_p->dvma_wr_index ==
1045 tx_ring_p->dvma_wrap_mask) {
1046 tx_ring_p->dvma_wr_index = 0;
1047 } else {
1048 tx_ring_p->dvma_wr_index++;
1049 }
1050 tx_ring_p->dvma_pending--;
1051 } else if (tx_msg_p->flags.dma_type ==
1052 USE_DMA) {
1053 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1054 "==> nxge_txdma_reclaim: "
1055 "USE DMA"));
1056 if (rc = ddi_dma_unbind_handle
1057 (tx_msg_p->dma_handle)) {
1058 cmn_err(CE_WARN, "!nxge_reclaim: "
1059 "ddi_dma_unbind_handle "
1060 "failed. status %d", rc);
1061 }
1062 }
1063 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1064 "==> nxge_txdma_reclaim: count packets"));
1065 /*
1066 * count a chained packet only once.
1067 */
1068 if (tx_msg_p->tx_message != NULL) {
1069 freemsg(tx_msg_p->tx_message);
1070 tx_msg_p->tx_message = NULL;
1071 }
1072
1073 tx_msg_p->flags.dma_type = USE_NONE;
1074 tx_rd_index = tx_ring_p->rd_index;
1075 tx_rd_index = (tx_rd_index + 1) &
1076 tx_ring_p->tx_wrap_mask;
1077 tx_ring_p->rd_index = tx_rd_index;
1078 tx_ring_p->descs_pending--;
1079 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
1080 tx_msg_p = &tx_msg_ring[tx_rd_index];
1081 }
1082
1083 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1084 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1085 if (status) {
1086 (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
1087 1, 0);
1088 }
1089 } else {
1090 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1091 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1092 }
1093
1094 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1095 "<== nxge_txdma_reclaim status = 0x%08x", status));
1096
1097 return (status);
1098 }
1099
1100 /*
1101 * nxge_tx_intr
1102 *
1103 * Process a TDC interrupt
1104 *
1105 * Arguments:
1106 * arg1 A Logical Device state Vector (LSV) data structure.
1107 * arg2 nxge_t *
1108 *
1109 * Notes:
1110 *
1111 * NPI/NXGE function calls:
1112 * npi_txdma_control_status()
1113 * npi_intr_ldg_mgmt_set()
1114 *
1115 * nxge_tx_err_evnts()
1116 * nxge_txdma_reclaim()
1117 *
1118 * Registers accessed:
1119 * TX_CS DMC+0x40028 Transmit Control And Status
1120 * PIO_LDSV
1121 *
1122 * Context:
1123 * Any domain
1124 */
1125 uint_t
1126 nxge_tx_intr(void *arg1, void *arg2)
1127 {
1128 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1129 p_nxge_t nxgep = (p_nxge_t)arg2;
1130 p_nxge_ldg_t ldgp;
1131 uint8_t channel;
1132 uint32_t vindex;
1133 npi_handle_t handle;
1134 tx_cs_t cs;
1135 p_tx_ring_t *tx_rings;
1136 p_tx_ring_t tx_ring_p;
1137 npi_status_t rs = NPI_SUCCESS;
1138 uint_t serviced = DDI_INTR_UNCLAIMED;
1139 nxge_status_t status = NXGE_OK;
1140
1141 if (ldvp == NULL) {
1142 NXGE_DEBUG_MSG((NULL, INT_CTL,
1143 "<== nxge_tx_intr: nxgep $%p ldvp $%p",
1144 nxgep, ldvp));
1145 return (DDI_INTR_UNCLAIMED);
1146 }
1147
1148 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1149 nxgep = ldvp->nxgep;
1150 }
1151 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1152 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1153 nxgep, ldvp));
1154
1155 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1156 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1157 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1158 "<== nxge_tx_intr: interface not started or intialized"));
1159 return (DDI_INTR_CLAIMED);
1160 }
1161
1162 /*
1163 * This interrupt handler is for a specific
1164 * transmit dma channel.
1165 */
1166 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1167 /* Get the control and status for this channel. */
1168 channel = ldvp->channel;
1169 ldgp = ldvp->ldgp;
1170 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1171 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1172 "channel %d",
1173 nxgep, ldvp, channel));
1174
1175 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1176 vindex = ldvp->vdma_index;
1177 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1178 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1179 channel, vindex, rs));
1180 if (!rs && cs.bits.ldw.mk) {
1181 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1182 "==> nxge_tx_intr:channel %d ring index %d "
1183 "status 0x%08x (mk bit set)",
1184 channel, vindex, rs));
1185 tx_rings = nxgep->tx_rings->rings;
1186 tx_ring_p = tx_rings[vindex];
1187 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1188 "==> nxge_tx_intr:channel %d ring index %d "
1189 "status 0x%08x (mk bit set, calling reclaim)",
1190 channel, vindex, rs));
1191
1192 nxge_tx_ring_task((void *)tx_ring_p);
1193 }
1194
1195 /*
1196 * Process other transmit control and status.
1197 * Check the ldv state.
1198 */
1199 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1200 /*
1201 * Rearm this logical group if this is a single device
1202 * group.
1203 */
1204 if (ldgp->nldvs == 1) {
1205 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1206 "==> nxge_tx_intr: rearm"));
1207 if (status == NXGE_OK) {
1208 if (isLDOMguest(nxgep)) {
1209 nxge_hio_ldgimgn(nxgep, ldgp);
1210 } else {
1211 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1212 B_TRUE, ldgp->ldg_timer);
1213 }
1214 }
1215 }
1216
1217 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1218 serviced = DDI_INTR_CLAIMED;
1219 return (serviced);
1220 }
1221
1222 void
1223 nxge_txdma_stop(p_nxge_t nxgep) /* Dead */
1224 {
1225 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
1226
1227 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1228
1229 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
1230 }
1231
1232 void
1233 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
1234 {
1235 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
1236
1237 (void) nxge_txdma_stop(nxgep);
1238
1239 (void) nxge_fixup_txdma_rings(nxgep);
1240 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1241 (void) nxge_tx_mac_enable(nxgep);
1242 (void) nxge_txdma_hw_kick(nxgep);
1243
1244 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
1245 }
1246
1247 npi_status_t
1248 nxge_txdma_channel_disable(
1249 nxge_t *nxge,
1250 int channel)
1251 {
1252 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
1253 npi_status_t rs;
1254 tdmc_intr_dbg_t intr_dbg;
1255
1256 /*
1257 * Stop the dma channel and wait for the stop-done.
1258 * If the stop-done bit is not present, then force
1259 * an error so TXC will stop.
1260 * All channels bound to this port need to be stopped
1261 * and reset after injecting an interrupt error.
1262 */
1263 rs = npi_txdma_channel_disable(handle, channel);
1264 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1265 "==> nxge_txdma_channel_disable(%d) "
1266 "rs 0x%x", channel, rs));
1267 if (rs != NPI_SUCCESS) {
1268 /* Inject any error */
1269 intr_dbg.value = 0;
1270 intr_dbg.bits.ldw.nack_pref = 1;
1271 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1272 "==> nxge_txdma_hw_mode: "
1273 "channel %d (stop failed 0x%x) "
1274 "(inject err)", rs, channel));
1275 (void) npi_txdma_inj_int_error_set(
1276 handle, channel, &intr_dbg);
1277 rs = npi_txdma_channel_disable(handle, channel);
1278 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1279 "==> nxge_txdma_hw_mode: "
1280 "channel %d (stop again 0x%x) "
1281 "(after inject err)",
1282 rs, channel));
1283 }
1284
1285 return (rs);
1286 }
1287
1288 /*
1289 * nxge_txdma_hw_mode
1290 *
1291 * Toggle all TDCs on (enable) or off (disable).
1292 *
1293 * Arguments:
1294 * nxgep
1295 * enable Enable or disable a TDC.
1296 *
1297 * Notes:
1298 *
1299 * NPI/NXGE function calls:
1300 * npi_txdma_channel_enable(TX_CS)
1301 * npi_txdma_channel_disable(TX_CS)
1302 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1303 *
1304 * Registers accessed:
1305 * TX_CS DMC+0x40028 Transmit Control And Status
1306 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1307 *
1308 * Context:
1309 * Any domain
1310 */
1311 nxge_status_t
1312 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1313 {
1314 nxge_grp_set_t *set = &nxgep->tx_set;
1315
1316 npi_handle_t handle;
1317 nxge_status_t status;
1318 npi_status_t rs;
1319 int tdc;
1320
1321 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1322 "==> nxge_txdma_hw_mode: enable mode %d", enable));
1323
1324 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1325 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1326 "<== nxge_txdma_mode: not initialized"));
1327 return (NXGE_ERROR);
1328 }
1329
1330 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1331 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1332 "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1333 return (NXGE_ERROR);
1334 }
1335
1336 /* Enable or disable all of the TDCs owned by us. */
1337 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1338 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1339 if ((1 << tdc) & set->owned.map) {
1340 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1341 if (ring) {
1342 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1343 "==> nxge_txdma_hw_mode: channel %d", tdc));
1344 if (enable) {
1345 rs = npi_txdma_channel_enable
1346 (handle, tdc);
1347 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1348 "==> nxge_txdma_hw_mode: "
1349 "channel %d (enable) rs 0x%x",
1350 tdc, rs));
1351 } else {
1352 rs = nxge_txdma_channel_disable
1353 (nxgep, tdc);
1354 }
1355 }
1356 }
1357 }
1358
1359 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1360
1361 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1362 "<== nxge_txdma_hw_mode: status 0x%x", status));
1363
1364 return (status);
1365 }
1366
1367 void
1368 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1369 {
1370 npi_handle_t handle;
1371
1372 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1373 "==> nxge_txdma_enable_channel: channel %d", channel));
1374
1375 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1376 /* enable the transmit dma channels */
1377 (void) npi_txdma_channel_enable(handle, channel);
1378
1379 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1380 }
1381
1382 void
1383 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1384 {
1385 npi_handle_t handle;
1386
1387 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1388 "==> nxge_txdma_disable_channel: channel %d", channel));
1389
1390 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1391 /* stop the transmit dma channels */
1392 (void) npi_txdma_channel_disable(handle, channel);
1393
1394 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1395 }
1396
1397 /*
1398 * nxge_txdma_stop_inj_err
1399 *
1400 * Stop a TDC. If at first we don't succeed, inject an error.
1401 *
1402 * Arguments:
1403 * nxgep
1404 * channel The channel to stop.
1405 *
1406 * Notes:
1407 *
1408 * NPI/NXGE function calls:
1409 * npi_txdma_channel_disable()
1410 * npi_txdma_inj_int_error_set()
1411 * #if defined(NXGE_DEBUG)
1412 * nxge_txdma_regs_dump_channels(nxgep);
1413 * #endif
1414 *
1415 * Registers accessed:
1416 * TX_CS DMC+0x40028 Transmit Control And Status
1417 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1418 *
1419 * Context:
1420 * Any domain
1421 */
1422 int
1423 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1424 {
1425 npi_handle_t handle;
1426 tdmc_intr_dbg_t intr_dbg;
1427 int status;
1428 npi_status_t rs = NPI_SUCCESS;
1429
1430 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1431 /*
1432 * Stop the DMA channel and wait for the stop-done bit.
1433 * If the stop-done bit is not set, then inject
1434 * an error to force the stop.
1435 */
1436 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1437 rs = npi_txdma_channel_disable(handle, channel);
1438 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1439 if (status == NXGE_OK) {
1440 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1441 "<== nxge_txdma_stop_inj_err (channel %d): "
1442 "stopped OK", channel));
1443 return (status);
1444 }
1445
1446 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1447 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1448 "injecting error", channel, rs));
1449 /* Inject any error */
1450 intr_dbg.value = 0;
1451 intr_dbg.bits.ldw.nack_pref = 1;
1452 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1453
1454 /* Stop done bit will be set as a result of error injection */
1455 rs = npi_txdma_channel_disable(handle, channel);
1456 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1457 if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1458 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1459 "<== nxge_txdma_stop_inj_err (channel %d): "
1460 "stopped OK ", channel));
1461 return (status);
1462 }
1463
1464 #if defined(NXGE_DEBUG)
1465 nxge_txdma_regs_dump_channels(nxgep);
1466 #endif
1467 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1468 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
1469 " (injected error but still not stopped)", channel, rs));
1470
1471 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1472 return (status);
1473 }
1474
1475 /*ARGSUSED*/
1476 void
1477 nxge_fixup_txdma_rings(p_nxge_t nxgep)
1478 {
1479 nxge_grp_set_t *set = &nxgep->tx_set;
1480 int tdc;
1481
1482 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1483
1484 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1485 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1486 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1487 return;
1488 }
1489
1490 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1491 if ((1 << tdc) & set->owned.map) {
1492 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1493 if (ring) {
1494 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1495 "==> nxge_fixup_txdma_rings: channel %d",
1496 tdc));
1497 nxge_txdma_fixup_channel(nxgep, ring, tdc);
1498 }
1499 }
1500 }
1501
1502 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1503 }
1504
1505 /*ARGSUSED*/
1506 void
1507 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1508 {
1509 p_tx_ring_t ring_p;
1510
1511 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1512 ring_p = nxge_txdma_get_ring(nxgep, channel);
1513 if (ring_p == NULL) {
1514 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1515 return;
1516 }
1517
1518 if (ring_p->tdc != channel) {
1519 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1520 "<== nxge_txdma_fix_channel: channel not matched "
1521 "ring tdc %d passed channel",
1522 ring_p->tdc, channel));
1523 return;
1524 }
1525
1526 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1527
1528 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1529 }
1530
1531 /*ARGSUSED*/
1532 void
1533 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1534 {
1535 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1536
1537 if (ring_p == NULL) {
1538 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1539 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1540 return;
1541 }
1542
1543 if (ring_p->tdc != channel) {
1544 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1545 "<== nxge_txdma_fixup_channel: channel not matched "
1546 "ring tdc %d passed channel",
1547 ring_p->tdc, channel));
1548 return;
1549 }
1550
1551 MUTEX_ENTER(&ring_p->lock);
1552 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1553 ring_p->rd_index = 0;
1554 ring_p->wr_index = 0;
1555 ring_p->ring_head.value = 0;
1556 ring_p->ring_kick_tail.value = 0;
1557 ring_p->descs_pending = 0;
1558 MUTEX_EXIT(&ring_p->lock);
1559
1560 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1561 }
1562
1563 /*ARGSUSED*/
1564 void
1565 nxge_txdma_hw_kick(p_nxge_t nxgep)
1566 {
1567 nxge_grp_set_t *set = &nxgep->tx_set;
1568 int tdc;
1569
1570 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1571
1572 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1573 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1574 "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1575 return;
1576 }
1577
1578 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1579 if ((1 << tdc) & set->owned.map) {
1580 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1581 if (ring) {
1582 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1583 "==> nxge_txdma_hw_kick: channel %d", tdc));
1584 nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1585 }
1586 }
1587 }
1588
1589 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1590 }
1591
1592 /*ARGSUSED*/
1593 void
1594 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1595 {
1596 p_tx_ring_t ring_p;
1597
1598 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1599
1600 ring_p = nxge_txdma_get_ring(nxgep, channel);
1601 if (ring_p == NULL) {
1602 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1603 " nxge_txdma_kick_channel"));
1604 return;
1605 }
1606
1607 if (ring_p->tdc != channel) {
1608 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1609 "<== nxge_txdma_kick_channel: channel not matched "
1610 "ring tdc %d passed channel",
1611 ring_p->tdc, channel));
1612 return;
1613 }
1614
1615 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1616
1617 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1618 }
1619
1620 /*ARGSUSED*/
1621 void
1622 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1623 {
1624
1625 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1626
1627 if (ring_p == NULL) {
1628 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1629 "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1630 return;
1631 }
1632
1633 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1634 }
1635
1636 /*
1637 * nxge_check_tx_hang
1638 *
1639 * Check the state of all TDCs belonging to nxgep.
1640 *
1641 * Arguments:
1642 * nxgep
1643 *
1644 * Notes:
1645 * Called by nxge_hw.c:nxge_check_hw_state().
1646 *
1647 * NPI/NXGE function calls:
1648 *
1649 * Registers accessed:
1650 *
1651 * Context:
1652 * Any domain
1653 */
1654 /*ARGSUSED*/
1655 void
1656 nxge_check_tx_hang(p_nxge_t nxgep)
1657 {
1658 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1659
1660 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1661 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1662 goto nxge_check_tx_hang_exit;
1663 }
1664
1665 /*
1666 * Needs inputs from hardware for regs:
1667 * head index had not moved since last timeout.
1668 * packets not transmitted or stuffed registers.
1669 */
1670 if (nxge_txdma_hung(nxgep)) {
1671 nxge_fixup_hung_txdma_rings(nxgep);
1672 }
1673
1674 nxge_check_tx_hang_exit:
1675 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1676 }
1677
1678 /*
1679 * nxge_txdma_hung
1680 *
1681 * Check whether any TDC owned by this nxge instance appears hung.
1682 *
1683 * Arguments:
1684 * nxgep
1687 *
1688 * Notes:
1689 * Called by nxge_check_tx_hang()
1690 *
1691 * NPI/NXGE function calls:
1692 * nxge_txdma_channel_hung()
1693 *
1694 * Registers accessed:
1695 *
1696 * Context:
1697 * Any domain
1698 */
1699 int
1700 nxge_txdma_hung(p_nxge_t nxgep)
1701 {
1702 nxge_grp_set_t *set = &nxgep->tx_set;
1703 int tdc;
1704 boolean_t shared;
1705
1706 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1707
1708 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1709 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1710 "<== nxge_txdma_hung: NULL ring pointer(s)"));
1711 return (B_FALSE);
1712 }
1713
1714 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1715 /*
1716 * Grab the shared state of the TDC.
1717 */
1718 if (isLDOMservice(nxgep)) {
1719 nxge_hio_data_t *nhd =
1720 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1721
1722 MUTEX_ENTER(&nhd->lock);
1723 shared = nxgep->tdc_is_shared[tdc];
1724 MUTEX_EXIT(&nhd->lock);
1725 } else {
1726 shared = B_FALSE;
1727 }
1728
1729 /*
1730 * Now, check each owned channel that is not shared.
1731 */
1732 if (((1 << tdc) & set->owned.map) && !shared) {
1733 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1734 if (ring) {
1735 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1736 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1737 "==> nxge_txdma_hung: TDC %d hung",
1738 tdc));
1739 return (B_TRUE);
1740 }
1741 }
1742 }
1743 }
1744
1745 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1746
1747 return (B_FALSE);
1748 }
1749
1750 /*
1751 * nxge_txdma_channel_hung
1752 *
1753 * Check whether a single TDC appears to be hung.
1754 *
1755 * Arguments:
1756 * nxgep
1757 * ring <channel>'s ring.
1758 * channel The channel to check.
1759 *
1760 * Notes:
1761 * Called by nxge_txdma.c:nxge_txdma_hung()
1762 *
1763 * NPI/NXGE function calls:
1764 * npi_txdma_ring_head_get()
1765 *
1766 * Registers accessed:
1767 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1768 *
1769 * Context:
1770 * Any domain
1771 */
1772 int
1773 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1774 {
1775 uint16_t head_index, tail_index;
1776 boolean_t head_wrap, tail_wrap;
1777 npi_handle_t handle;
1778 tx_ring_hdl_t tx_head;
1779 uint_t tx_rd_index;
1780
1781 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1782
1783 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1784 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1785 "==> nxge_txdma_channel_hung: channel %d", channel));
1786 MUTEX_ENTER(&tx_ring_p->lock);
1787 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1788
1789 tail_index = tx_ring_p->wr_index;
1790 tail_wrap = tx_ring_p->wr_index_wrap;
1791 tx_rd_index = tx_ring_p->rd_index;
1792 MUTEX_EXIT(&tx_ring_p->lock);
1793
1794 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1795 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1796 "tail_index %d tail_wrap %d ",
1797 channel, tx_rd_index, tail_index, tail_wrap));
1798 /*
1799 * Read the hardware maintained transmit head
1800 * and wrap around bit.
1801 */
1802 (void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1803 head_index = tx_head.bits.ldw.head;
1804 head_wrap = tx_head.bits.ldw.wrap;
1805 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1806 "==> nxge_txdma_channel_hung: "
1807 "tx_rd_index %d tail %d tail_wrap %d "
1808 "head %d wrap %d",
1809 tx_rd_index, tail_index, tail_wrap,
1810 head_index, head_wrap));
1811
1812 if (TXDMA_RING_EMPTY(head_index, head_wrap,
1813 tail_index, tail_wrap) &&
1814 (head_index == tx_rd_index)) {
1815 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1816 "==> nxge_txdma_channel_hung: EMPTY"));
1817 return (B_FALSE);
1818 }
1819
1820 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1821 "==> nxge_txdma_channel_hung: Checking if ring full"));
1822 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1823 tail_wrap)) {
1824 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1825 "==> nxge_txdma_channel_hung: full"));
1826 return (B_TRUE);
1827 }
1828
1829 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1830
1831 return (B_FALSE);
1832 }
1833
1834 /*
1835 * nxge_fixup_hung_txdma_rings
1836 *
1837 * Attempt to recover every hung TDC owned by this nxge instance.
1838 *
1839 * Arguments:
1840 * nxgep
1843 *
1844 * Notes:
1845 * Called by nxge_check_tx_hang()
1846 *
1847 * NPI/NXGE function calls:
1848 * npi_txdma_ring_head_get()
1849 *
1850 * Registers accessed:
1851 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1852 *
1853 * Context:
1854 * Any domain
1855 */
1856 /*ARGSUSED*/
1857 void
1858 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1859 {
1860 nxge_grp_set_t *set = &nxgep->tx_set;
1861 int tdc;
1862
1863 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1864
1865 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1866 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1867 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1868 return;
1869 }
1870
1871 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1872 if ((1 << tdc) & set->owned.map) {
1873 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1874 if (ring) {
1875 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1876 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1877 "==> nxge_fixup_hung_txdma_rings: TDC %d",
1878 tdc));
1879 }
1880 }
1881 }
1882
1883 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1884 }
1885
1886 /*
1887 * nxge_txdma_fixup_hung_channel
1888 *
1889 * 'Fix' a hung TDC.
1890 *
1891 * Arguments:
1892 * nxgep
1893 * channel The channel to fix.
1894 *
1895 * Notes:
1896 * Called by nxge_fixup_hung_txdma_rings()
1897 *
1898 * 1. Reclaim the TDC.
1899 * 2. Disable the TDC.
1900 *
1901 * NPI/NXGE function calls:
1902 * nxge_txdma_reclaim()
1903 * npi_txdma_channel_disable(TX_CS)
1904 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1905 *
1906 * Registers accessed:
1907 * TX_CS DMC+0x40028 Transmit Control And Status
1908 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1909 *
1910 * Context:
1911 * Any domain
1912 */
1913 /*ARGSUSED*/
1914 void
1915 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1916 {
1917 p_tx_ring_t ring_p;
1918
1919 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1920 ring_p = nxge_txdma_get_ring(nxgep, channel);
1921 if (ring_p == NULL) {
1922 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1923 "<== nxge_txdma_fix_hung_channel"));
1924 return;
1925 }
1926
1927 if (ring_p->tdc != channel) {
1928 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1929 "<== nxge_txdma_fix_hung_channel: channel not matched "
1930 "ring tdc %d passed channel",
1931 ring_p->tdc, channel));
1932 return;
1933 }
1934
1935 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1936
1937 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1938 }
1939
1940 /*ARGSUSED*/
1941 void
1942 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1943 uint16_t channel)
1944 {
1945 npi_handle_t handle;
1946 tdmc_intr_dbg_t intr_dbg;
1947 int status = NXGE_OK;
1948
1949 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1950
1951 if (ring_p == NULL) {
1952 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1953 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1954 return;
1955 }
1956
1957 if (ring_p->tdc != channel) {
1958 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1959 "<== nxge_txdma_fixup_hung_channel: channel "
1960 "not matched "
1961 "ring tdc %d passed channel",
1962 ring_p->tdc, channel));
1963 return;
1964 }
1965
1966 /* Reclaim descriptors */
1967 MUTEX_ENTER(&ring_p->lock);
1968 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1969 MUTEX_EXIT(&ring_p->lock);
1970
1971 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1972 /*
1973 * Stop the DMA channel and wait for the stop-done bit.
1974 * If the stop-done bit is not set, then force
1975 * an error to complete the stop.
1976 */
1977 status = npi_txdma_channel_disable(handle, channel);
1978 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1979 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1980 "<== nxge_txdma_fixup_hung_channel: stopped OK "
1981 "ring tdc %d passed channel %d",
1982 ring_p->tdc, channel));
1983 return;
1984 }
1985
1986 /* Inject any error */
1987 intr_dbg.value = 0;
1988 intr_dbg.bits.ldw.nack_pref = 1;
1989 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1990
1991 /* Stop done bit will be set as a result of error injection */
1992 status = npi_txdma_channel_disable(handle, channel);
1993 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1994 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1995 "<== nxge_txdma_fixup_hung_channel: stopped again "
1996 "ring tdc %d passed channel %d",
1997 ring_p->tdc, channel));
1998 return;
1999 }
2000
2001 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2002 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
2003 "ring tdc %d passed channel %d",
2004 ring_p->tdc, channel));
2005
2006 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
2007 }
2008
2009 /*ARGSUSED*/
2010 void
2011 nxge_reclaim_rings(p_nxge_t nxgep)
2012 {
2013 nxge_grp_set_t *set = &nxgep->tx_set;
2014 int tdc;
2015
2016 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
2017
2018 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2019 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2020 "<== nxge_reclaim_rings: NULL ring pointer(s)"));
2021 return;
2022 }
2023
2024 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2025 if ((1 << tdc) & set->owned.map) {
2026 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2027 if (ring) {
2028 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2029 "==> nxge_reclaim_rings: TDC %d", tdc));
2030 MUTEX_ENTER(&ring->lock);
2031 (void) nxge_txdma_reclaim(nxgep, ring, 0);
2032 MUTEX_EXIT(&ring->lock);
2033 }
2034 }
2035 }
2036
2037 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
2038 }
2039
2040 void
2041 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2042 {
2043 nxge_grp_set_t *set = &nxgep->tx_set;
2044 npi_handle_t handle;
2045 int tdc;
2046
2047 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2048
2049 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2050
2051 if (!isLDOMguest(nxgep)) {
2052 (void) npi_txdma_dump_fzc_regs(handle);
2053
2054 /* Dump TXC registers. */
2055 (void) npi_txc_dump_fzc_regs(handle);
2056 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2057 }
2058
2059 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2060 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2061 "<== nxge_txdma_regs_dump_channels: NULL ring pointer(s)"));
2062 return;
2063 }
2064
2065 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2066 if ((1 << tdc) & set->owned.map) {
2067 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2068 if (ring) {
2069 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2070 "==> nxge_txdma_regs_dump_channels: "
2071 "TDC %d", tdc));
2072 (void) npi_txdma_dump_tdc_regs(handle, tdc);
2073
2074 /* Dump TXC registers, if able to. */
2075 if (!isLDOMguest(nxgep)) {
2076 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2077 "==> nxge_txdma_regs_dump_channels:"
2078 " FZC TDC %d", tdc));
2079 (void) npi_txc_dump_tdc_fzc_regs
2080 (handle, tdc);
2081 }
2082 nxge_txdma_regs_dump(nxgep, tdc);
2083 }
2084 }
2085 }
2086
2087 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
2088 }
2089
2090 void
2091 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2092 {
2093 npi_handle_t handle;
2094 tx_ring_hdl_t hdl;
2095 tx_ring_kick_t kick;
2096 tx_cs_t cs;
2097 txc_control_t control;
2098 uint32_t bitmap = 0;
2099 uint32_t burst = 0;
2100 uint32_t bytes = 0;
2101 dma_log_page_t cfg;
2102
2103 printf("\n\tfunc # %d tdc %d ",
2104 nxgep->function_num, channel);
2105 cfg.page_num = 0;
2106 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2107 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2108 printf("\n\tlog page func %d valid page 0 %d",
2109 cfg.func_num, cfg.valid);
2110 cfg.page_num = 1;
2111 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2112 printf("\n\tlog page func %d valid page 1 %d",
2113 cfg.func_num, cfg.valid);
2114
2115 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
2116 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2117 printf("\n\thead value is 0x%0llx",
2118 (long long)hdl.value);
2119 printf("\n\thead index %d", hdl.bits.ldw.head);
2120 printf("\n\tkick value is 0x%0llx",
2121 (long long)kick.value);
2122 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2123
2124 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2125 printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2126 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2127
2128 (void) npi_txc_control(handle, OP_GET, &control);
2129 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2130 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2131 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2132
2133 printf("\n\tTXC port control 0x%0llx",
2134 (long long)control.value);
2135 printf("\n\tTXC port bitmap 0x%x", bitmap);
2136 printf("\n\tTXC max burst %d", burst);
2137 printf("\n\tTXC bytes xmt %d\n", bytes);
2138
2139 {
2140 ipp_status_t status;
2141
2142 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2143 #if defined(__i386)
2144 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
2145 #else
2146 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
2147 #endif
2148 }
2149 }
2150
2151 /*
2152 * nxge_tdc_hvio_setup
2153 *
2154 * Record the I/O base addresses and sizes of the channel's transmit buffer
2155 * and control areas for later sun4v logical-page (hypervisor) configuration.
2156 * Arguments:
2157 * nxgep
2158 * channel The channel to map.
2159 *
2160 * Notes:
2161 *
2162 * NPI/NXGE function calls:
2163 * na
2164 *
2165 * Context:
2166 * Service domain (sun4v, non-guest only)
2167 */
2168 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2169 static void
2170 nxge_tdc_hvio_setup(
2171 nxge_t *nxgep, int channel)
2172 {
2173 nxge_dma_common_t *data;
2174 nxge_dma_common_t *control;
2175 tx_ring_t *ring;
2176
2177 ring = nxgep->tx_rings->rings[channel];
2178 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2179
2180 ring->hv_set = B_FALSE;
2181
2182 ring->hv_tx_buf_base_ioaddr_pp =
2183 (uint64_t)data->orig_ioaddr_pp;
2184 ring->hv_tx_buf_ioaddr_size =
2185 (uint64_t)data->orig_alength;
2186
2187 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
2188 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
2189 "orig vatopa base io $%p orig_len 0x%llx (%d)",
2190 ring->hv_tx_buf_base_ioaddr_pp,
2191 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
2192 data->ioaddr_pp, data->orig_vatopa,
2193 data->orig_alength, data->orig_alength));
2194
2195 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2196
2197 ring->hv_tx_cntl_base_ioaddr_pp =
2198 (uint64_t)control->orig_ioaddr_pp;
2199 ring->hv_tx_cntl_ioaddr_size =
2200 (uint64_t)control->orig_alength;
2201
2202 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
2203 "hv cntl base io $%p orig ioaddr_pp ($%p) "
2204 "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
2205 ring->hv_tx_cntl_base_ioaddr_pp,
2206 control->orig_ioaddr_pp, control->orig_vatopa,
2207 ring->hv_tx_cntl_ioaddr_size,
2208 control->orig_alength, control->orig_alength));
2209 }
2210 #endif
2211
2212 static nxge_status_t
2213 nxge_map_txdma(p_nxge_t nxgep, int channel)
2214 {
2215 nxge_dma_common_t **pData;
2216 nxge_dma_common_t **pControl;
2217 tx_ring_t **pRing, *ring;
2218 tx_mbox_t **mailbox;
2219 uint32_t num_chunks;
2220
2221 nxge_status_t status = NXGE_OK;
2222
2223 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2224
2225 if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2226 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2227 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2228 "<== nxge_map_txdma: buf not allocated"));
2229 return (NXGE_ERROR);
2230 }
2231 }
2232
2233 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2234 return (NXGE_ERROR);
2235
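/*
 * Per-channel slots in the shared buffer/control pools and in the ring
 * and mailbox arrays; the mapping routines below fill these in.
 */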
2236 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2237 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2238 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2239 pRing = &nxgep->tx_rings->rings[channel];
2240 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2241
2242 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2243 "tx_rings $%p tx_desc_rings $%p",
2244 nxgep->tx_rings, nxgep->tx_rings->rings));
2245
2246 /*
2247 * Map descriptors from the buffer pools for <channel>.
2248 */
2249
2250 /*
2251 * Set up and prepare buffer blocks, descriptors
2252 * and mailbox.
2253 */
2254 status = nxge_map_txdma_channel(nxgep, channel,
2255 pData, pRing, num_chunks, pControl, mailbox);
2256 if (status != NXGE_OK) {
2257 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2258 "==> nxge_map_txdma($%p, %d): nxge_map_txdma_channel() "
2259 "returned 0x%x",
2260 nxgep, channel, status));
2261 return (status);
2262 }
2263
2264 ring = *pRing;
2265
2266 ring->index = (uint16_t)channel;
2267 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2268
2269 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2270 if (isLDOMguest(nxgep)) {
2271 (void) nxge_tdc_lp_conf(nxgep, channel);
2272 } else {
2273 nxge_tdc_hvio_setup(nxgep, channel);
2274 }
2275 #endif
2276
2277 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2278 "(status 0x%x channel %d)", status, channel));
2279
2280 return (status);
2281 }
2282
2283 static nxge_status_t
2284 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2285 p_nxge_dma_common_t *dma_buf_p,
2286 p_tx_ring_t *tx_desc_p,
2287 uint32_t num_chunks,
2288 p_nxge_dma_common_t *dma_cntl_p,
2289 p_tx_mbox_t *tx_mbox_p)
2290 {
2291 int status = NXGE_OK;
2292
2293 /*
2294 * Set up and prepare buffer blocks, descriptors
2295 * and mailbox.
2296 */
2297 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2298 "==> nxge_map_txdma_channel (channel %d)", channel));
2299 /*
2300 * Transmit buffer blocks
2301 */
2302 status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2303 dma_buf_p, tx_desc_p, num_chunks);
2304 if (status != NXGE_OK) {
2305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2306 "==> nxge_map_txdma_channel (channel %d): "
2307 "map buffer failed 0x%x", channel, status));
2308 goto nxge_map_txdma_channel_exit;
2309 }
2310
2311 /*
2312 * Transmit block ring, and mailbox.
2313 */
2314 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2315 tx_mbox_p);
2316
2317 goto nxge_map_txdma_channel_exit;
2318
2319 nxge_map_txdma_channel_fail1:
2320 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2321 "==> nxge_map_txdma_channel: unmap buf"
2322 "(status 0x%x channel %d)",
2323 status, channel));
2324 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2325
2326 nxge_map_txdma_channel_exit:
2327 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2328 "<== nxge_map_txdma_channel: "
2329 "(status 0x%x channel %d)",
2330 status, channel));
2331
2332 return (status);
2333 }
2334
2335 /*ARGSUSED*/
2336 static void
2337 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
2338 {
2339 tx_ring_t *ring;
2340 tx_mbox_t *mailbox;
2341
2342 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2343 "==> nxge_unmap_txdma_channel (channel %d)", channel));
2344 /*
2345 * unmap tx block ring, and mailbox.
2346 */
2347 ring = nxgep->tx_rings->rings[channel];
2348 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2349
2350 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
2351
2352 /* unmap buffer blocks */
2353 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2354
2355 nxge_free_txb(nxgep, channel);
2356
2357 /*
2358 * Cleanup the reference to the ring now that it does not exist.
2359 */
2360 nxgep->tx_rings->rings[channel] = NULL;
2361
2362 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
2363 }
2364
2365 /*
2366 * nxge_map_txdma_channel_cfg_ring
2367 *
2368 * Set up a TDC's descriptor ring configuration and its mailbox.
2369 * This function initializes the shadow register values and allocates the mailbox.
2370 *
2371 * Arguments:
2372 * nxgep
2373 * dma_channel The channel to map.
2374 * dma_cntl_p
2375 * tx_ring_p dma_channel's transmit ring
2376 * tx_mbox_p dma_channel's mailbox
2377 *
2378 * Notes:
2379 *
2380 * NPI/NXGE function calls:
2381 * nxge_setup_dma_common()
2382 *
2383 * Registers accessed:
2384 * none.
2385 *
2386 * Context:
2387 * Any domain
2388 */
2389 /*ARGSUSED*/
2390 static void
2391 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
2392 p_nxge_dma_common_t *dma_cntl_p,
2393 p_tx_ring_t tx_ring_p,
2394 p_tx_mbox_t *tx_mbox_p)
2395 {
2396 p_tx_mbox_t mboxp;
2397 p_nxge_dma_common_t cntl_dmap;
2398 p_nxge_dma_common_t dmap;
2399 p_tx_rng_cfig_t tx_ring_cfig_p;
2400 p_tx_ring_kick_t tx_ring_kick_p;
2401 p_tx_cs_t tx_cs_p;
2402 p_tx_dma_ent_msk_t tx_evmask_p;
2403 p_txdma_mbh_t mboxh_p;
2404 p_txdma_mbl_t mboxl_p;
2405 uint64_t tx_desc_len;
2406
2407 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2408 "==> nxge_map_txdma_channel_cfg_ring"));
2409
2410 cntl_dmap = *dma_cntl_p;
2411
2412 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
2413 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
2414 sizeof (tx_desc_t));
2415 /*
2416 * Zero out transmit ring descriptors.
2417 */
2418 bzero((caddr_t)dmap->kaddrp, dmap->alength);
2419 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
2420 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
2421 tx_cs_p = &(tx_ring_p->tx_cs);
2422 tx_evmask_p = &(tx_ring_p->tx_evmask);
2423 tx_ring_cfig_p->value = 0;
2424 tx_ring_kick_p->value = 0;
2425 tx_cs_p->value = 0;
2426 tx_evmask_p->value = 0;
2427
2428 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2429 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
2430 dma_channel,
2431 dmap->dma_cookie.dmac_laddress));
2432
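/*
 * Program the ring configuration shadow value.  The length field is
 * expressed in units of 8 descriptors (tx_ring_size >> 3), presumably
 * because each tx_desc_t is 8 bytes and the hardware counts the ring
 * length in 64-byte blocks.
 */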
2433 tx_ring_cfig_p->value = 0;
2434 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
2435 tx_ring_cfig_p->value =
2436 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
2437 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
2438
2439 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2440 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
2441 dma_channel,
2442 tx_ring_cfig_p->value));
2443
2444 tx_cs_p->bits.ldw.rst = 1;
2445
2446 /* Map in mailbox */
2447 mboxp = (p_tx_mbox_t)
2448 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
2449 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
2450 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
2451 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
2452 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
2453 mboxh_p->value = mboxl_p->value = 0;
2454
2455 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2456 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2457 dmap->dma_cookie.dmac_laddress));
2458
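/*
 * The mailbox DMA address is split across two registers: the high-order
 * bits go into the mailbox-high (MBH) field and the low-order bits,
 * 64-byte aligned, into the mailbox-low (MBL) field, per the
 * TXDMA_MBH and TXDMA_MBL shift/mask macros.
 */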
2459 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
2460 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
2461
2462 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
2463 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
2464
2465 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2466 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2467 dmap->dma_cookie.dmac_laddress));
2468 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2469 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
2470 "mbox $%p",
2471 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
2472 tx_ring_p->page_valid.value = 0;
2473 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
2474 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
2475 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
2476 tx_ring_p->page_hdl.value = 0;
2477
2478 tx_ring_p->page_valid.bits.ldw.page0 = 1;
2479 tx_ring_p->page_valid.bits.ldw.page1 = 1;
2480
2481 tx_ring_p->max_burst.value = 0;
2482 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
2483
2484 *tx_mbox_p = mboxp;
2485
2486 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2487 "<== nxge_map_txdma_channel_cfg_ring"));
2488 }
2489
2490 /*ARGSUSED*/
2491 static void
2492 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
2493 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2494 {
2495 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2496 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
2497 tx_ring_p->tdc));
2498
2499 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
2500
2501 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2502 "<== nxge_unmap_txdma_channel_cfg_ring"));
2503 }
2504
2505 /*
2506 * nxge_map_txdma_channel_buf_ring
2507 *
2508 * Map the transmit buffer blocks for a TDC and build its tx_msg ring.
2509 * Arguments:
2510 * nxgep
2511 * channel The channel to map.
2512 * dma_buf_p
2513 * tx_desc_p channel's descriptor ring
2514 * num_chunks
2515 *
2516 * Notes:
2517 *
2518 * NPI/NXGE function calls:
2519 * nxge_setup_dma_common()
2520 *
2521 * Registers accessed:
2522 * none.
2523 *
2524 * Context:
2525 * Any domain
2526 */
2527 static nxge_status_t
2528 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2529 p_nxge_dma_common_t *dma_buf_p,
2530 p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2531 {
2532 p_nxge_dma_common_t dma_bufp, tmp_bufp;
2533 p_nxge_dma_common_t dmap;
2534 nxge_os_dma_handle_t tx_buf_dma_handle;
2535 p_tx_ring_t tx_ring_p;
2536 p_tx_msg_t tx_msg_ring = NULL;
2537 nxge_status_t status = NXGE_OK;
2538 int ddi_status = DDI_SUCCESS;
2539 int i, j, index;
2540 uint32_t size, bsize;
2541 uint32_t nblocks, nmsgs;
2542 char qname[TASKQ_NAMELEN];
2543
2544 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2545 "==> nxge_map_txdma_channel_buf_ring"));
2546
2547 dma_bufp = tmp_bufp = *dma_buf_p;
2548 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2549 " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2550 "chunks bufp $%p",
2551 channel, num_chunks, dma_bufp));
2552
2553 nmsgs = 0;
2554 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2555 nmsgs += tmp_bufp->nblocks;
2556 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2557 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2558 "bufp $%p nblocks %d nmsgs %d",
2559 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2560 }
2561 if (!nmsgs) {
2562 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2563 "<== nxge_map_txdma_channel_buf_ring: channel %d "
2564 "no msg blocks",
2565 channel));
2566 status = NXGE_ERROR;
2567 goto nxge_map_txdma_channel_buf_ring_exit;
2568 }
2569
2570 tx_ring_p = (p_tx_ring_t)
2571 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2572 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2573 (void *)nxgep->interrupt_cookie);
2574
2575 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2576 tx_ring_p->tx_ring_busy = B_FALSE;
2577 tx_ring_p->nxgep = nxgep;
2578 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL;
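/*
 * Create a single-threaded per-ring taskq.  It is assumed (from the
 * taskq name and nxge_tx_ring_task()) that this is used to defer
 * transmit-ring maintenance, such as reclaim, out of interrupt context.
 */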
2579 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d",
2580 nxgep->instance, channel);
2581 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1,
2582 TASKQ_DEFAULTPRI, 0);
2583 if (tx_ring_p->taskq == NULL) {
2584 goto nxge_map_txdma_channel_buf_ring_fail1;
2585 }
2586
2587 /*
2588 * Allocate transmit message rings and handles for packets
2589 * not to be copied to premapped buffers.
2590 */
2591 size = nmsgs * sizeof (tx_msg_t);
2592 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2593 for (i = 0; i < nmsgs; i++) {
2594 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2595 DDI_DMA_DONTWAIT, 0,
2596 &tx_msg_ring[i].dma_handle);
2597 if (ddi_status != DDI_SUCCESS) {
2598 status |= NXGE_DDI_FAILED;
2599 break;
2600 }
2601 }
2602 if (i < nmsgs) {
2603 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2604 "Allocate handles failed."));
2605 goto nxge_map_txdma_channel_buf_ring_fail1;
2606 }
2607
2608 tx_ring_p->tdc = channel;
2609 tx_ring_p->tx_msg_ring = tx_msg_ring;
2610 tx_ring_p->tx_ring_size = nmsgs;
2611 tx_ring_p->num_chunks = num_chunks;
2612 if (!nxge_tx_intr_thres) {
2613 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2614 }
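/*
 * tx_ring_size is assumed to be a power of two here, so that
 * (size - 1) can serve as the descriptor index wrap mask.
 */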
2615 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2616 tx_ring_p->rd_index = 0;
2617 tx_ring_p->wr_index = 0;
2618 tx_ring_p->ring_head.value = 0;
2619 tx_ring_p->ring_kick_tail.value = 0;
2620 tx_ring_p->descs_pending = 0;
2621
2622 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2623 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2624 "actual tx desc max %d nmsgs %d "
2625 "(config nxge_tx_ring_size %d)",
2626 channel, tx_ring_p->tx_ring_size, nmsgs,
2627 nxge_tx_ring_size));
2628
2629 /*
2630 * Map in buffers from the buffer pool.
2631 */
2632 index = 0;
2633 bsize = dma_bufp->block_size;
2634
2635 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2636 "dma_bufp $%p tx_rng_p $%p "
2637 "tx_msg_rng_p $%p bsize %d",
2638 dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2639
2640 tx_buf_dma_handle = dma_bufp->dma_handle;
2641 for (i = 0; i < num_chunks; i++, dma_bufp++) {
2642 bsize = dma_bufp->block_size;
2643 nblocks = dma_bufp->nblocks;
2644 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2645 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2646 "size %d dma_bufp $%p",
2647 i, sizeof (nxge_dma_common_t), dma_bufp));
2648
2649 for (j = 0; j < nblocks; j++) {
2650 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2651 dmap = &tx_msg_ring[index++].buf_dma;
2652 #ifdef TX_MEM_DEBUG
2653 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2654 "==> nxge_map_txdma_channel_buf_ring: j %d "
2655 "dmap $%p", j, dmap));
2656 #endif
2657 nxge_setup_dma_common(dmap, dma_bufp, 1,
2658 bsize);
2659 }
2660 }
2661
2662 if (i < num_chunks) {
2663 status = NXGE_ERROR;
2664 goto nxge_map_txdma_channel_buf_ring_fail1;
2665 }
2666
2667 *tx_desc_p = tx_ring_p;
2668
2669 goto nxge_map_txdma_channel_buf_ring_exit;
2670
2671 nxge_map_txdma_channel_buf_ring_fail1:
2672 if (tx_ring_p->taskq) {
2673 ddi_taskq_destroy(tx_ring_p->taskq);
2674 tx_ring_p->taskq = NULL;
2675 }
2676
2677 if (tx_msg_ring != NULL) {
2678 for (index--; index >= 0; index--) {
2679 if (tx_msg_ring[index].dma_handle != NULL)
2680 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2681 }
2682 KMEM_FREE(tx_msg_ring, size);
2683 }
2684 MUTEX_DESTROY(&tx_ring_p->lock);
2685 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2686
2687 status = NXGE_ERROR;
2688
2689 nxge_map_txdma_channel_buf_ring_exit:
2690 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2691 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2692
2693 return (status);
2694 }
2695
2696 /*ARGSUSED*/
2697 static void
2698 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2699 {
2700 p_tx_msg_t tx_msg_ring;
2701 p_tx_msg_t tx_msg_p;
2702 int i;
2703
2704 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2705 "==> nxge_unmap_txdma_channel_buf_ring"));
2706 if (tx_ring_p == NULL) {
2707 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2708 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2709 return;
2710 }
2711 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2712 "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2713 tx_ring_p->tdc));
2714
2715 tx_msg_ring = tx_ring_p->tx_msg_ring;
2716
2717 /*
2718 * Since the serialization thread, timer thread and
2719 * interrupt thread can all call the transmit reclaim,
2720 * the unmapping function needs to acquire the lock
2721 * to free those buffers which were transmitted
2722 * by the hardware already.
2723 */
2724 MUTEX_ENTER(&tx_ring_p->lock);
2725 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2726 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2727 "channel %d",
2728 tx_ring_p->tdc));
2729 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2730
2731 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2732 tx_msg_p = &tx_msg_ring[i];
2733 if (tx_msg_p->tx_message != NULL) {
2734 freemsg(tx_msg_p->tx_message);
2735 tx_msg_p->tx_message = NULL;
2736 }
2737 }
2738
2739 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2740 if (tx_msg_ring[i].dma_handle != NULL) {
2741 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2742 }
2743 tx_msg_ring[i].dma_handle = NULL;
2744 }
2745
2746 MUTEX_EXIT(&tx_ring_p->lock);
2747
2748 if (tx_ring_p->taskq) {
2749 ddi_taskq_destroy(tx_ring_p->taskq);
2750 tx_ring_p->taskq = NULL;
2751 }
2752
2753 MUTEX_DESTROY(&tx_ring_p->lock);
2754 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2755 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2756
2757 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2758 "<== nxge_unmap_txdma_channel_buf_ring"));
2759 }
2760
2761 static nxge_status_t
2762 nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
2763 {
2764 p_tx_rings_t tx_rings;
2765 p_tx_ring_t *tx_desc_rings;
2766 p_tx_mbox_areas_t tx_mbox_areas_p;
2767 p_tx_mbox_t *tx_mbox_p;
2768 nxge_status_t status = NXGE_OK;
2769
2770 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
2771
2772 tx_rings = nxgep->tx_rings;
2773 if (tx_rings == NULL) {
2774 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2775 "<== nxge_txdma_hw_start: NULL ring pointer"));
2776 return (NXGE_ERROR);
2777 }
2778 tx_desc_rings = tx_rings->rings;
2779 if (tx_desc_rings == NULL) {
2780 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2781 "<== nxge_txdma_hw_start: NULL ring pointers"));
2782 return (NXGE_ERROR);
2783 }
2784
2785 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2786 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2787
2788 tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
2789 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2790
2791 status = nxge_txdma_start_channel(nxgep, channel,
2792 (p_tx_ring_t)tx_desc_rings[channel],
2793 (p_tx_mbox_t)tx_mbox_p[channel]);
2794 if (status != NXGE_OK) {
2795 goto nxge_txdma_hw_start_fail1;
2796 }
2797
2798 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2799 "tx_rings $%p rings $%p",
2800 nxgep->tx_rings, nxgep->tx_rings->rings));
2801 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2802 "tx_rings $%p tx_desc_rings $%p",
2803 nxgep->tx_rings, tx_desc_rings));
2804
2805 goto nxge_txdma_hw_start_exit;
2806
2807 nxge_txdma_hw_start_fail1:
2808 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2809 "==> nxge_txdma_hw_start: disable "
2810 "(status 0x%x channel %d)", status, channel));
2811
2812 nxge_txdma_hw_start_exit:
2813 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2814 "==> nxge_txdma_hw_start: (status 0x%x)", status));
2815
2816 return (status);
2817 }
2818
2819 /*
2820 * nxge_txdma_start_channel
2821 *
2822 * Start a TDC.
2823 *
2824 * Arguments:
2825 * nxgep
2826 * channel The channel to start.
2827 * tx_ring_p channel's transmit descriptor ring.
2828 * tx_mbox_p channel's mailbox.
2829 *
2830 * Notes:
2831 *
2832 * NPI/NXGE function calls:
2833 * nxge_reset_txdma_channel()
2834 * nxge_init_txdma_channel_event_mask()
2835 * nxge_enable_txdma_channel()
2836 *
2837 * Registers accessed:
2838 * none directly (see functions above).
2839 *
2840 * Context:
2841 * Any domain
2842 */
2843 static nxge_status_t
2844 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2845 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2846
2847 {
2848 nxge_status_t status = NXGE_OK;
2849
2850 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2851 "==> nxge_txdma_start_channel (channel %d)", channel));
2852 /*
2853 * TXDMA/TXC must be in stopped state.
2854 */
2855 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2856
2857 /*
2858 * Reset TXDMA channel
2859 */
2860 tx_ring_p->tx_cs.value = 0;
2861 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2862 status = nxge_reset_txdma_channel(nxgep, channel,
2863 tx_ring_p->tx_cs.value);
2864 if (status != NXGE_OK) {
2865 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2866 "==> nxge_txdma_start_channel (channel %d)"
2867 " reset channel failed 0x%x", channel, status));
2868 goto nxge_txdma_start_channel_exit;
2869 }
2870
2871 /*
2872 * Initialize the TXDMA channel specific FZC control
2873 * configurations. These FZC registers are pertaining
2874 * to each TX channel (i.e. logical pages).
2875 */
2876 if (!isLDOMguest(nxgep)) {
2877 status = nxge_init_fzc_txdma_channel(nxgep, channel,
2878 tx_ring_p, tx_mbox_p);
2879 if (status != NXGE_OK) {
2880 goto nxge_txdma_start_channel_exit;
2881 }
2882 }
2883
2884 /*
2885 * Initialize the event masks.
2886 */
2887 tx_ring_p->tx_evmask.value = 0;
2888 status = nxge_init_txdma_channel_event_mask(nxgep,
2889 channel, &tx_ring_p->tx_evmask);
2890 if (status != NXGE_OK) {
2891 goto nxge_txdma_start_channel_exit;
2892 }
2893
2894 /*
2895 * Load TXDMA descriptors, buffers, mailbox,
2896 * initialise the DMA channels and
2897 * enable each DMA channel.
2898 */
2899 status = nxge_enable_txdma_channel(nxgep, channel,
2900 tx_ring_p, tx_mbox_p);
2901 if (status != NXGE_OK) {
2902 goto nxge_txdma_start_channel_exit;
2903 }
2904
2905 nxge_txdma_start_channel_exit:
2906 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2907
2908 return (status);
2909 }
2910
2911 /*
2912 * nxge_txdma_stop_channel
2913 *
2914 * Stop a TDC.
2915 *
2916 * Arguments:
2917 * nxgep
2918 * channel The channel to stop.
2919 * tx_ring_p channel's transmit descriptor ring.
2920 * tx_mbox_p channel's mailbox.
2921 *
2922 * Notes:
2923 *
2924 * NPI/NXGE function calls:
2925 * nxge_txdma_stop_inj_err()
2926 * nxge_reset_txdma_channel()
2927 * nxge_init_txdma_channel_event_mask()
2928 * nxge_init_txdma_channel_cntl_stat()
2929 * nxge_disable_txdma_channel()
2930 *
2931 * Registers accessed:
2932 * none directly (see functions above).
2933 *
2934 * Context:
2935 * Any domain
2936 */
2937 /*ARGSUSED*/
2938 static nxge_status_t
2939 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2940 {
2941 p_tx_ring_t tx_ring_p;
2942 int status = NXGE_OK;
2943
2944 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2945 "==> nxge_txdma_stop_channel: channel %d", channel));
2946
2947 /*
2948 * Stop (disable) TXDMA and TXC. (If the stop bit is set
2949 * and the STOP_N_GO bit is not set, the TXDMA reset state
2950 * will not be set when the TXDMA channel is reset.)
2951 */
2952 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2953
2954 if (nxgep->tx_rings == NULL) {
2955 status = NXGE_ERROR;
2956 goto nxge_txdma_stop_channel_exit;
2957 }
2958
2959 tx_ring_p = nxgep->tx_rings->rings[channel];
2960 if (tx_ring_p == NULL) {
2961 status = NXGE_ERROR;
2962 goto nxge_txdma_stop_channel_exit;
2963 }
2964
2965 /*
2966 * Reset TXDMA channel
2967 */
2968 tx_ring_p->tx_cs.value = 0;
2969 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2970 status = nxge_reset_txdma_channel(nxgep, channel,
2971 tx_ring_p->tx_cs.value);
2972 if (status != NXGE_OK) {
2973 goto nxge_txdma_stop_channel_exit;
2974 }
2975
2976 #ifdef HARDWARE_REQUIRED
2977 /* Set up the interrupt event masks. */
2978 tx_ring_p->tx_evmask.value = 0;
2979 status = nxge_init_txdma_channel_event_mask(nxgep,
2980 channel, &tx_ring_p->tx_evmask);
2981 if (status != NXGE_OK) {
2982 goto nxge_txdma_stop_channel_exit;
2983 }
2984
2985 /* Initialize the DMA control and status register */
2986 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2987 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2988 tx_ring_p->tx_cs.value);
2989 if (status != NXGE_OK) {
2990 goto nxge_txdma_stop_channel_exit;
2991 }
2992
2993 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2994
2995 /* Disable channel */
2996 status = nxge_disable_txdma_channel(nxgep, channel,
2997 tx_ring_p, tx_mbox_p);
2998 if (status != NXGE_OK) {
2999 goto nxge_txdma_stop_channel_exit;
3000 }
3001
3002 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3003 "==> nxge_txdma_stop_channel: event done"));
3004
3005 #endif
3006
3007 nxge_txdma_stop_channel_exit:
3008 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
3009 return (status);
3010 }
3011
3012 /*
3013 * nxge_txdma_get_ring
3014 *
3015 * Get the ring for a TDC.
3016 *
3017 * Arguments:
3018 * nxgep
3019 * channel
3020 *
3021 * Notes:
3022 *
3023 * NPI/NXGE function calls:
3024 *
3025 * Registers accessed:
3026 *
3027 * Context:
3028 * Any domain
3029 */
3030 static p_tx_ring_t
3031 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
3032 {
3033 nxge_grp_set_t *set = &nxgep->tx_set;
3034 int tdc;
3035
3036 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
3037
3038 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3039 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3040 "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
3041 goto return_null;
3042 }
3043
3044 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3045 if ((1 << tdc) & set->owned.map) {
3046 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3047 if (ring) {
3048 if (channel == ring->tdc) {
3049 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3050 "<== nxge_txdma_get_ring: "
3051 "tdc %d ring $%p", tdc, ring));
3052 return (ring);
3053 }
3054 }
3055 }
3056 }
3057
3058 return_null:
3059 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
3060 "ring not found"));
3061
3062 return (NULL);
3063 }
3064
3065 /*
3066 * nxge_txdma_get_mbox
3067 *
3068 * Get the mailbox for a TDC.
3069 *
3070 * Arguments:
3071 * nxgep
3072 * channel
3073 *
3074 * Notes:
3075 *
3076 * NPI/NXGE function calls:
3077 *
3078 * Registers accessed:
3079 *
3080 * Context:
3081 * Any domain
3082 */
3083 static p_tx_mbox_t
3084 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3085 {
3086 nxge_grp_set_t *set = &nxgep->tx_set;
3087 int tdc;
3088
3089 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3090
3091 if (nxgep->tx_mbox_areas_p == 0 ||
3092 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3093 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3094 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3095 goto return_null;
3096 }
3097
3098 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3099 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3100 "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3101 goto return_null;
3102 }
3103
3104 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3105 if ((1 << tdc) & set->owned.map) {
3106 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3107 if (ring) {
3108 if (channel == ring->tdc) {
3109 tx_mbox_t *mailbox = nxgep->
3110 tx_mbox_areas_p->
3111 txmbox_areas_p[tdc];
3112 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3113 "<== nxge_txdma_get_mbox: tdc %d "
3114 "ring $%p", tdc, mailbox));
3115 return (mailbox);
3116 }
3117 }
3118 }
3119 }
3120
3121 return_null:
3122 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3123 "mailbox not found"));
3124
3125 return (NULL);
3126 }
3127
3128 /*
3129 * nxge_tx_err_evnts
3130 *
3131 * Classify and count a TDC's error events; recover the TDC if the error is fatal.
3132 *
3133 * Arguments:
3134 * nxgep
3135 * index The index to the TDC ring.
3136 * ldvp Used to get the channel number ONLY.
3137 * cs A copy of the bits from TX_CS.
3138 *
3139 * Notes:
3140 * Calling tree:
3141 * nxge_tx_intr()
3142 *
3143 * NPI/NXGE function calls:
3144 * npi_txdma_ring_error_get()
3145 * npi_txdma_inj_par_error_get()
3146 * nxge_txdma_fatal_err_recover()
3147 *
3148 * Registers accessed:
3149 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High
3150 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3151 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3152 *
3153 * Context:
3154 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3155 */
3156 /*ARGSUSED*/
3157 static nxge_status_t
3158 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3159 {
3160 npi_handle_t handle;
3161 npi_status_t rs;
3162 uint8_t channel;
3163 p_tx_ring_t *tx_rings;
3164 p_tx_ring_t tx_ring_p;
3165 p_nxge_tx_ring_stats_t tdc_stats;
3166 boolean_t txchan_fatal = B_FALSE;
3167 nxge_status_t status = NXGE_OK;
3168 tdmc_inj_par_err_t par_err;
3169 uint32_t value;
3170
3171 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3172 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3173 channel = ldvp->channel;
3174
3175 tx_rings = nxgep->tx_rings->rings;
3176 tx_ring_p = tx_rings[index];
3177 tdc_stats = tx_ring_p->tdc_stats;
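/*
 * The ring error log registers (TX_RNG_ERR_LOGH/LOGL) are read only
 * when one of the error types that latches them is indicated in TX_CS.
 */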
3178 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3179 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3180 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3181 if ((rs = npi_txdma_ring_error_get(handle, channel,
3182 &tdc_stats->errlog)) != NPI_SUCCESS)
3183 return (NXGE_ERROR | rs);
3184 }
3185
3186 if (cs.bits.ldw.mbox_err) {
3187 tdc_stats->mbox_err++;
3188 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3189 NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3190 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3191 "==> nxge_tx_err_evnts(channel %d): "
3192 "fatal error: mailbox", channel));
3193 txchan_fatal = B_TRUE;
3194 }
3195 if (cs.bits.ldw.pkt_size_err) {
3196 tdc_stats->pkt_size_err++;
3197 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3198 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3199 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3200 "==> nxge_tx_err_evnts(channel %d): "
3201 "fatal error: pkt_size_err", channel));
3202 txchan_fatal = B_TRUE;
3203 }
3204 if (cs.bits.ldw.tx_ring_oflow) {
3205 tdc_stats->tx_ring_oflow++;
3206 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3207 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3208 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3209 "==> nxge_tx_err_evnts(channel %d): "
3210 "fatal error: tx_ring_oflow", channel));
3211 txchan_fatal = B_TRUE;
3212 }
3213 if (cs.bits.ldw.pref_buf_par_err) {
3214 tdc_stats->pre_buf_par_err++;
3215 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3216 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3217 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3218 "==> nxge_tx_err_evnts(channel %d): "
3219 "fatal error: pre_buf_par_err", channel));
3220 /* Clear error injection source for parity error */
3221 (void) npi_txdma_inj_par_error_get(handle, &value);
3222 par_err.value = value;
3223 par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3224 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3225 txchan_fatal = B_TRUE;
3226 }
3227 if (cs.bits.ldw.nack_pref) {
3228 tdc_stats->nack_pref++;
3229 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3230 NXGE_FM_EREPORT_TDMC_NACK_PREF);
3231 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3232 "==> nxge_tx_err_evnts(channel %d): "
3233 "fatal error: nack_pref", channel));
3234 txchan_fatal = B_TRUE;
3235 }
3236 if (cs.bits.ldw.nack_pkt_rd) {
3237 tdc_stats->nack_pkt_rd++;
3238 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3239 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3240 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3241 "==> nxge_tx_err_evnts(channel %d): "
3242 "fatal error: nack_pkt_rd", channel));
3243 txchan_fatal = B_TRUE;
3244 }
3245 if (cs.bits.ldw.conf_part_err) {
3246 tdc_stats->conf_part_err++;
3247 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3248 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3249 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3250 "==> nxge_tx_err_evnts(channel %d): "
3251 "fatal error: config_partition_err", channel));
3252 txchan_fatal = B_TRUE;
3253 }
3254 if (cs.bits.ldw.pkt_prt_err) {
3255 tdc_stats->pkt_part_err++;
3256 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3257 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3258 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3259 "==> nxge_tx_err_evnts(channel %d): "
3260 "fatal error: pkt_prt_err", channel));
3261 txchan_fatal = B_TRUE;
3262 }
3263
3264 /* Clear error injection source in case this is an injected error */
3265 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3266
3267 if (txchan_fatal) {
3268 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3269 " nxge_tx_err_evnts: "
3270 " fatal error on channel %d cs 0x%llx\n",
3271 channel, cs.value));
3272 status = nxge_txdma_fatal_err_recover(nxgep, channel,
3273 tx_ring_p);
3274 if (status == NXGE_OK) {
3275 FM_SERVICE_RESTORED(nxgep);
3276 }
3277 }
3278
3279 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3280
3281 return (status);
3282 }
3283
3284 static nxge_status_t
3285 nxge_txdma_fatal_err_recover(
3286 p_nxge_t nxgep,
3287 uint16_t channel,
3288 p_tx_ring_t tx_ring_p)
3289 {
3290 npi_handle_t handle;
3291 npi_status_t rs = NPI_SUCCESS;
3292 p_tx_mbox_t tx_mbox_p;
3293 nxge_status_t status = NXGE_OK;
3294
3295 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3296 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3297 "Recovering from TxDMAChannel#%d error...", channel));
3298
3299 /*
3300 * Stop the DMA channel and wait for the stop-done bit.
3301 * If the stop-done bit is not set, then force
3302 * an error.
3303 */
3304
3305 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3306 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3307 MUTEX_ENTER(&tx_ring_p->lock);
3308 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3309 if (rs != NPI_SUCCESS) {
3310 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3311 "==> nxge_txdma_fatal_err_recover (channel %d): "
3312 "stop failed ", channel));
3313 goto fail;
3314 }
3315
3316 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3317 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3318
3319 /*
3320 * Reset TXDMA channel
3321 */
3322 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3323 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3324 NPI_SUCCESS) {
3325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3326 "==> nxge_txdma_fatal_err_recover (channel %d)"
3327 " reset channel failed 0x%x", channel, rs));
3328 goto fail;
3329 }
3330
3331 /*
3332 * Reset the tail (kick) register to 0.
3333 * (Hardware will not reset it. A Tx overflow fatal
3334 * error results if the tail is not set to 0 after reset.)
3335 */
3336 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3337
3338 /* Restart TXDMA channel */
3339
3340 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3341 
3342 if (!isLDOMguest(nxgep)) {
3343 // XXX This is a problem in HIO!
3344 /*
3345 * Initialize the TXDMA channel specific FZC control
3346 * configurations. These FZC registers are pertaining
3347 * to each TX channel (i.e. logical pages).
3348 */
3349 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3350 status = nxge_init_fzc_txdma_channel(nxgep, channel,
3351 tx_ring_p, tx_mbox_p);
3352 if (status != NXGE_OK)
3353 goto fail;
3354 }
3355
3356 /*
3357 * Initialize the event masks.
3358 */
3359 tx_ring_p->tx_evmask.value = 0;
3360 status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3361 &tx_ring_p->tx_evmask);
3362 if (status != NXGE_OK)
3363 goto fail;
3364
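/*
 * Reset the software ring indices; the hardware head/tail were
 * cleared by the channel reset and the kick-register write above.
 */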
3365 tx_ring_p->wr_index_wrap = B_FALSE;
3366 tx_ring_p->wr_index = 0;
3367 tx_ring_p->rd_index = 0;
3368
3369 /*
3370 * Load TXDMA descriptors, buffers, mailbox,
3371 * initialise the DMA channels and
3372 * enable each DMA channel.
3373 */
3374 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3375 status = nxge_enable_txdma_channel(nxgep, channel,
3376 tx_ring_p, tx_mbox_p);
3377 MUTEX_EXIT(&tx_ring_p->lock);
3378 if (status != NXGE_OK)
3379 goto fail;
3380
3381 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3382 "Recovery Successful, TxDMAChannel#%d Restored",
3383 channel));
3384 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3385
3386 return (NXGE_OK);
3387
3388 fail:
3389 MUTEX_EXIT(&tx_ring_p->lock);
3390
3391 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3392 "nxge_txdma_fatal_err_recover (channel %d): "
3393 "failed to recover this txdma channel", channel));
3394 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3395
3396 return (status != NXGE_OK ? status : NXGE_ERROR);
3397 }
3398
3399 /*
3400 * nxge_tx_port_fatal_err_recover
3401 *
3402 * Attempt to recover from a fatal port error.
3403 *
3404 * Arguments:
3405 * nxgep
3406 *
3407 * Notes:
3408 * How would a guest do this?
3409 *
3410 * NPI/NXGE function calls:
3411 *
3412 * Registers accessed:
3413 *
3414 * Context:
3415 * Service domain
3416 */
3417 nxge_status_t
3418 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3419 {
3420 nxge_grp_set_t *set = &nxgep->tx_set;
3421 nxge_channel_t tdc;
3422
3423 tx_ring_t *ring;
3424 tx_mbox_t *mailbox;
3425
3426 npi_handle_t handle;
3427 nxge_status_t status = NXGE_ERROR;
3428 npi_status_t rs;
3429
3430 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3431 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3432 "Recovering from TxPort error..."));
3433
3434 if (isLDOMguest(nxgep)) {
3435 return (NXGE_OK);
3436 }
3437
3438 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3439 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3440 "<== nxge_tx_port_fatal_err_recover: not initialized"));
3441 return (NXGE_ERROR);
3442 }
3443
3444 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3445 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3446 "<== nxge_tx_port_fatal_err_recover: "
3447 "NULL ring pointer(s)"));
3448 return (NXGE_ERROR);
3449 }
3450
3451 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3452 if ((1 << tdc) & set->owned.map) {
3453 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3454 if (ring)
3455 MUTEX_ENTER(&ring->lock);
3456 }
3457 }
3458
3459 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3460
3461 /*
3462 * Stop all the TDCs owned by us.
3463 * (The shared TDCs will have been stopped by their owners.)
3464 */
3465 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3466 if ((1 << tdc) & set->owned.map) {
3467 ring = nxgep->tx_rings->rings[tdc];
3468 if (ring) {
3469 rs = npi_txdma_channel_control
3470 (handle, TXDMA_STOP, tdc);
3471 if (rs != NPI_SUCCESS) {
3472 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3473 "nxge_tx_port_fatal_err_recover "
3474 "(channel %d): stop failed ", tdc));
3475 goto fail;
3476 }
3477 }
3478 }
3479 }
3480
3481 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3482
3483 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3484 if ((1 << tdc) & set->owned.map) {
3485 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3486 if (ring) {
3487 (void) nxge_txdma_reclaim(nxgep, ring, 0);
3488 }
3489 }
3490 }
3491
3492 /*
3493 * Reset all the TDCs.
3494 */
3495 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3496
3497 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3498 if ((1 << tdc) & set->owned.map) {
3499 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3500 if (ring) {
3501 if ((rs = npi_txdma_channel_control
3502 (handle, TXDMA_RESET, tdc))
3503 != NPI_SUCCESS) {
3504 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3505 "nxge_tx_port_fatal_err_recover "
3506 "(channel %d) reset channel "
3507 "failed 0x%x", tdc, rs));
3508 goto fail;
3509 }
3510 }
3511 /*
3512 * Reset the tail (kick) register to 0.
3513 * (Hardware will not reset it. A Tx overflow fatal
3514 * error results if the tail is not set to 0 after reset.)
3515 */
3516 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3517 }
3518 }
3519
3520 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3521
3522 /* Restart all the TDCs */
3523 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3524 if ((1 << tdc) & set->owned.map) {
3525 ring = nxgep->tx_rings->rings[tdc];
3526 if (ring) {
3527 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3528 status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3529 ring, mailbox);
3530 if (status != NXGE_OK)
3531 goto fail;
3532 
3533 /*
3534 * Initialize the event masks.
3535 */
3536 ring->tx_evmask.value = 0;
3537 status = nxge_init_txdma_channel_event_mask
3538 (nxgep, tdc, &ring->tx_evmask);
3539 if (status != NXGE_OK)
3540 goto fail;
3541 
3542 ring->wr_index_wrap = B_FALSE;
3543 ring->wr_index = 0;
3544 ring->rd_index = 0;
3545 }
3546 }
3547 }
3548
3549 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3550
3551 /* Re-enable all the TDCs */
3552 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3553 if ((1 << tdc) & set->owned.map) {
3554 ring = nxgep->tx_rings->rings[tdc];
3555 if (ring) {
3556 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3557 status = nxge_enable_txdma_channel(nxgep, tdc,
3558 ring, mailbox);
3559 if (status != NXGE_OK)
3560 goto fail;
3561 }
3562 }
3563 }
3564
3565 /*
3566 * Unlock all the TDCs.
3567 */
3568 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3569 if ((1 << tdc) & set->owned.map) {
3570 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3571 if (ring)
3572 MUTEX_EXIT(&ring->lock);
3573 }
3574 }
3575
3576 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
3577 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3578
3579 return (NXGE_OK);
3580
3581 fail:
3582 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3583 if ((1 << tdc) & set->owned.map) {
3584 ring = nxgep->tx_rings->rings[tdc];
3585 if (ring)
3586 MUTEX_EXIT(&ring->lock);
3587 }
3588 }
3589
3590 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3591 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3592
3593 return (status);
3594 }
3595
3596 /*
3597 * nxge_txdma_inject_err
3598 *
3599 * Inject an error into a TDC.
3600 *
3601 * Arguments:
3602 * nxgep
3603 * err_id The error to inject.
3604 * chan The channel to inject into.
3605 *
3606 * Notes:
3607 * This is called from nxge_main.c:nxge_err_inject()
3608 * Has this ioctl ever been used?
3609 *
3610 * NPI/NXGE function calls:
3611 * npi_txdma_inj_par_error_get()
3612 * npi_txdma_inj_par_error_set()
3613 *
3614 * Registers accessed:
3615 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3616 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
3617 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
3618 *
3619 * Context:
3620 * Service domain
3621 */
3622 void
3623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3624 {
3625 tdmc_intr_dbg_t tdi;
3626 tdmc_inj_par_err_t par_err;
3627 uint32_t value;
3628 npi_handle_t handle;
3629
3630 switch (err_id) {
3631
3632 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3633 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3634 /* Clear error injection source for parity error */
3635 (void) npi_txdma_inj_par_error_get(handle, &value);
3636 par_err.value = value;
3637 par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3638 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3639
3640 /* Now set the injection bit for this channel */
3641 (void) npi_txdma_inj_par_error_get(handle, &value);
3642 par_err.value = value;
3643 par_err.bits.ldw.inject_parity_error |= (1 << chan);
3644 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3645 (unsigned long long)par_err.value);
3646 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3647 break;
3648
3649 case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3650 case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3651 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3652 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3653 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3654 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3655 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3656 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3657 chan, &tdi.value);
3658 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3659 tdi.bits.ldw.pref_buf_par_err = 1;
3660 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3661 tdi.bits.ldw.mbox_err = 1;
3662 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3663 tdi.bits.ldw.nack_pref = 1;
3664 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3665 tdi.bits.ldw.nack_pkt_rd = 1;
3666 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3667 tdi.bits.ldw.pkt_size_err = 1;
3668 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3669 tdi.bits.ldw.tx_ring_oflow = 1;
3670 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3671 tdi.bits.ldw.conf_part_err = 1;
3672 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3673 tdi.bits.ldw.pkt_part_err = 1;
3674 #if defined(__i386)
3675 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3676 tdi.value);
3677 #else
3678 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3679 tdi.value);
3680 #endif
3681 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3682 chan, tdi.value);
3683
3684 break;
3685 }
3686 }
3687