1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_txdma.h>
29 #include <sys/nxge/nxge_hio.h>
30 #include <npi_tx_rd64.h>
31 #include <npi_tx_wr64.h>
32 #include <sys/llc1.h>
33
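/*
 * Transmit path tunables.  nxge_reclaim_pending is the descs_pending
 * threshold at which nxge_txdma_reclaim() performs a reclaim pass, and
 * nxge_tx_max_gathers bounds the gather pointers used per packet (see
 * nxge_tx_pkt_nmblocks()).  The remaining values tune interrupt
 * thresholds and the bcopy-vs-DMA-bind decision in the transmit path
 * (descriptive note only; the code below is authoritative).
 */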
34 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
35 uint32_t nxge_tx_minfree = 64;
36 uint32_t nxge_tx_intr_thres = 0;
37 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
38 uint32_t nxge_tx_tiny_pack = 1;
39 uint32_t nxge_tx_use_bcopy = 1;
40
41 extern uint32_t nxge_tx_ring_size;
42 extern uint32_t nxge_bcopy_thresh;
43 extern uint32_t nxge_dvma_thresh;
44 extern uint32_t nxge_dma_stream_thresh;
45 extern dma_method_t nxge_force_dma;
46 extern uint32_t nxge_cksum_offload;
47
48 /* Device register access attributes for PIO. */
49 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
50 /* Device descriptor access attributes for DMA. */
51 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
52 /* Device buffer access attributes for DMA. */
53 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
54 extern ddi_dma_attr_t nxge_desc_dma_attr;
55 extern ddi_dma_attr_t nxge_tx_dma_attr;
56
57 extern void nxge_tx_ring_task(void *arg);
58
59 static nxge_status_t nxge_map_txdma(p_nxge_t, int);
60
61 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
62
63 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
64 p_nxge_dma_common_t *, p_tx_ring_t *,
65 uint32_t, p_nxge_dma_common_t *,
66 p_tx_mbox_t *);
67 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
68
69 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
70 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
71 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
72
73 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
74 p_nxge_dma_common_t *, p_tx_ring_t,
75 p_tx_mbox_t *);
76 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
77 p_tx_ring_t, p_tx_mbox_t);
78
79 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
80 p_tx_ring_t, p_tx_mbox_t);
81 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
82
83 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
84 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
85 p_nxge_ldv_t, tx_cs_t);
86 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
87 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
88 uint16_t, p_tx_ring_t);
89
90 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
91 p_tx_ring_t ring_p, uint16_t channel);
92
93 nxge_status_t
94 nxge_init_txdma_channels(p_nxge_t nxgep)
95 {
96 nxge_grp_set_t *set = &nxgep->tx_set;
97 int i, tdc, count;
98 nxge_grp_t *group;
99 dc_map_t map;
100 int dev_gindex;
101
102 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
103
104 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
105 if ((1 << i) & set->lg.map) {
106 group = set->group[i];
107 dev_gindex =
108 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
109 map = nxgep->pt_config.tdc_grps[dev_gindex].map;
110 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
111 if ((1 << tdc) & map) {
112 if ((nxge_grp_dc_add(nxgep,
113 group, VP_BOUND_TX, tdc)))
114 goto init_txdma_channels_exit;
115 }
116 }
117 }
118 if (++count == set->lg.count)
119 break;
120 }
121
122 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
123 return (NXGE_OK);
124
125 init_txdma_channels_exit:
126 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
127 if ((1 << i) & set->lg.map) {
128 group = set->group[i];
129 dev_gindex =
130 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
131 map = nxgep->pt_config.tdc_grps[dev_gindex].map;
132 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
133 if ((1 << tdc) & map) {
134 nxge_grp_dc_remove(nxgep,
135 VP_BOUND_TX, tdc);
136 }
137 }
138 }
139 if (++count == set->lg.count)
140 break;
141 }
142
143 return (NXGE_ERROR);
144
145 }
146
147 nxge_status_t
148 nxge_init_txdma_channel(
149 p_nxge_t nxge,
150 int channel)
151 {
152 nxge_status_t status;
153
154 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
155
156 status = nxge_map_txdma(nxge, channel);
157 if (status != NXGE_OK) {
158 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
159 "<== nxge_init_txdma_channel: status 0x%x", status));
160 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
161 return (status);
162 }
163
164 status = nxge_txdma_hw_start(nxge, channel);
165 if (status != NXGE_OK) {
166 (void) nxge_unmap_txdma_channel(nxge, channel);
167 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
168 return (status);
169 }
170
171 if (!nxge->statsp->tdc_ksp[channel])
172 nxge_setup_tdc_kstats(nxge, channel);
173
174 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
175
176 return (status);
177 }
178
179 void
180 nxge_uninit_txdma_channels(p_nxge_t nxgep)
181 {
182 nxge_grp_set_t *set = &nxgep->tx_set;
183 int tdc;
184
185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
186
187 if (set->owned.map == 0) {
188 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
189 "nxge_uninit_txdma_channels: no channels"));
190 return;
191 }
192
193 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
194 if ((1 << tdc) & set->owned.map) {
195 nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
196 }
197 }
198
199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
200 }
201
202 void
203 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
204 {
205 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
206
207 if (nxgep->statsp->tdc_ksp[channel]) {
208 kstat_delete(nxgep->statsp->tdc_ksp[channel]);
209 nxgep->statsp->tdc_ksp[channel] = 0;
210 }
211
212 if (nxge_txdma_stop_channel(nxgep, channel) != NXGE_OK)
213 goto nxge_uninit_txdma_channel_exit;
214
215 nxge_unmap_txdma_channel(nxgep, channel);
216
217 nxge_uninit_txdma_channel_exit:
218 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_uninit_txdma_channel"));
219 }
220
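/*
 * nxge_setup_dma_common
 *
 * Carve <entries> blocks of <size> bytes each out of the pre-allocated
 * DMA area <src_p>.  The carved region is described by <dest_p>, and
 * <src_p> (kernel address, DMA cookie and remaining length) is advanced
 * past it so that the next carve starts where this one ended.
 */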
221 void
222 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
223 uint32_t entries, uint32_t size)
224 {
225 size_t tsize;
226 *dest_p = *src_p;
227 tsize = size * entries;
228 dest_p->alength = tsize;
229 dest_p->nblocks = entries;
230 dest_p->block_size = size;
231 dest_p->offset += tsize;
232
233 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
234 src_p->alength -= tsize;
235 src_p->dma_cookie.dmac_laddress += tsize;
236 src_p->dma_cookie.dmac_size -= tsize;
237 }
238
239 /*
240 * nxge_reset_txdma_channel
241 *
242 * Reset a TDC.
243 *
244 * Arguments:
245 * nxgep
246 * channel The channel to reset.
247 * reg_data The current TX_CS.
248 *
249 * Notes:
250 *
251 * NPI/NXGE function calls:
252 * npi_txdma_channel_reset()
253 * npi_txdma_channel_control()
254 *
255 * Registers accessed:
256 * TX_CS DMC+0x40028 Transmit Control And Status
257 * TX_RING_KICK DMC+0x40018 Transmit Ring Kick
258 *
259 * Context:
260 * Any domain
261 */
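/*
 * A minimal usage sketch (hypothetical caller, for illustration only;
 * handle, nxgep and channel are assumed to be in scope).  The current
 * TX_CS value is read first so that the reset path can tell whether
 * the RST bit is already set:
 *
 *	tx_cs_t cs;
 *
 *	(void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
 *	status = nxge_reset_txdma_channel(nxgep, channel, cs.value);
 */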
262 nxge_status_t
263 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
264 {
265 npi_status_t rs = NPI_SUCCESS;
266 nxge_status_t status = NXGE_OK;
267 npi_handle_t handle;
268
269 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
270
271 handle = NXGE_DEV_NPI_HANDLE(nxgep);
272 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
273 rs = npi_txdma_channel_reset(handle, channel);
274 } else {
275 rs = npi_txdma_channel_control(handle, TXDMA_RESET,
276 channel);
277 }
278
279 if (rs != NPI_SUCCESS) {
280 status = NXGE_ERROR | rs;
281 }
282
283 /*
284 * Reset the tail (kick) register to 0.
285 * (Hardware will not reset it. A Tx overflow fatal
286 * error results if the tail is not set to 0 after reset.)
287 */
288 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
289
290 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
291 return (status);
292 }
293
294 /*
295 * nxge_init_txdma_channel_event_mask
296 *
297 * Enable interrupts for a set of events.
298 *
299 * Arguments:
300 * nxgep
301 * channel The channel whose event mask to set.
302 * mask_p The events to enable.
303 *
304 * Notes:
305 *
306 * NPI/NXGE function calls:
307 * npi_txdma_event_mask()
308 *
309 * Registers accessed:
310 * TX_ENT_MSK DMC+0x40020 Transmit Event Mask
311 *
312 * Context:
313 * Any domain
314 */
315 nxge_status_t
316 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
317 p_tx_dma_ent_msk_t mask_p)
318 {
319 npi_handle_t handle;
320 npi_status_t rs = NPI_SUCCESS;
321 nxge_status_t status = NXGE_OK;
322
323 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
324 "<== nxge_init_txdma_channel_event_mask"));
325
326 handle = NXGE_DEV_NPI_HANDLE(nxgep);
327 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
328 if (rs != NPI_SUCCESS) {
329 status = NXGE_ERROR | rs;
330 }
331
332 return (status);
333 }
334
335 /*
336 * nxge_init_txdma_channel_cntl_stat
337 *
338 * Write reg_data to a TDC's transmit control and status (TX_CS) register.
339 *
340 * Arguments:
341 * nxgep
342 * channel The channel whose TX_CS register to write.
343 *
344 * Notes:
345 *
346 * NPI/NXGE function calls:
347 * npi_txdma_control_status()
348 *
349 * Registers accessed:
350 * TX_CS DMC+0x40028 Transmit Control And Status
351 *
352 * Context:
353 * Any domain
354 */
355 nxge_status_t
356 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
357 uint64_t reg_data)
358 {
359 npi_handle_t handle;
360 npi_status_t rs = NPI_SUCCESS;
361 nxge_status_t status = NXGE_OK;
362
363 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
364 "<== nxge_init_txdma_channel_cntl_stat"));
365
366 handle = NXGE_DEV_NPI_HANDLE(nxgep);
367 rs = npi_txdma_control_status(handle, OP_SET, channel,
368 (p_tx_cs_t)&reg_data);
369
370 if (rs != NPI_SUCCESS) {
371 status = NXGE_ERROR | rs;
372 }
373
374 return (status);
375 }
376
377 /*
378 * nxge_enable_txdma_channel
379 *
380 * Enable a TDC.
381 *
382 * Arguments:
383 * nxgep
384 * channel The channel to enable.
385 * tx_desc_p channel's transmit descriptor ring.
386 * mbox_p channel's mailbox.
387 *
388 * Notes:
389 *
390 * NPI/NXGE function calls:
391 * npi_txdma_ring_config()
392 * npi_txdma_mbox_config()
393 * npi_txdma_channel_init_enable()
394 *
395 * Registers accessed:
396 * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration
397 * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High
398 * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low
399 * TX_CS DMC+0x40028 Transmit Control And Status
400 *
401 * Context:
402 * Any domain
403 */
404 nxge_status_t
405 nxge_enable_txdma_channel(p_nxge_t nxgep,
406 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
407 {
408 npi_handle_t handle;
409 npi_status_t rs = NPI_SUCCESS;
410 nxge_status_t status = NXGE_OK;
411
412 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
413
414 handle = NXGE_DEV_NPI_HANDLE(nxgep);
415 /*
416 * Use configuration data composed at init time.
417 * Write to hardware the transmit ring configurations.
418 */
419 rs = npi_txdma_ring_config(handle, OP_SET, channel,
420 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
421
422 if (rs != NPI_SUCCESS) {
423 return (NXGE_ERROR | rs);
424 }
425
426 if (isLDOMguest(nxgep)) {
427 /* Add interrupt handler for this channel. */
428 if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
429 return (NXGE_ERROR);
430 }
431
432 /* Write to hardware the mailbox */
433 rs = npi_txdma_mbox_config(handle, OP_SET, channel,
434 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
435
436 if (rs != NPI_SUCCESS) {
437 return (NXGE_ERROR | rs);
438 }
439
440 /* Start the DMA engine. */
441 rs = npi_txdma_channel_init_enable(handle, channel);
442
443 if (rs != NPI_SUCCESS) {
444 return (NXGE_ERROR | rs);
445 }
446
447 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
448
449 return (status);
450 }
451
452 void
453 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
454 boolean_t l4_cksum, int pkt_len, uint8_t npads,
455 p_tx_pkt_hdr_all_t pkthdrp,
456 t_uscalar_t start_offset,
457 t_uscalar_t stuff_offset)
458 {
459 p_tx_pkt_header_t hdrp;
460 p_mblk_t nmp;
461 uint64_t tmp;
462 size_t mblk_len;
463 size_t iph_len;
464 size_t hdrs_size;
465 uint8_t hdrs_buf[sizeof (struct ether_header) +
466 64 + sizeof (uint32_t)];
467 uint8_t *cursor;
468 uint8_t *ip_buf;
469 uint16_t eth_type;
470 uint8_t ipproto;
471 boolean_t is_vlan = B_FALSE;
472 size_t eth_hdr_size;
473
474 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
475
476 /*
477 * Caller should zero out the headers first.
478 */
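	/*
	 * Descriptive note: this routine fills in the internal transmit
	 * packet header that precedes the frame.  When fill_len is set,
	 * only the total transfer length is recorded.  Otherwise the L2
	 * header is copied into a local buffer, the L3 type (IPv4/IPv6,
	 * with optional VLAN or LLC/SNAP encapsulation) is decoded, and
	 * the IHL, L3START, L4START/L4STUFF and checksum-enable fields
	 * are set for TCP/UDP as appropriate.
	 */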
479 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
480
481 if (fill_len) {
482 NXGE_DEBUG_MSG((NULL, TX_CTL,
483 "==> nxge_fill_tx_hdr: pkt_len %d "
484 "npads %d", pkt_len, npads));
485 tmp = (uint64_t)pkt_len;
486 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
487 goto fill_tx_header_done;
488 }
489
490 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);
491
492 /*
493 * mp is the original data packet (does not include the
494 * Neptune transmit header).
495 */
496 nmp = mp;
497 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
498 "mp $%p b_rptr $%p len %d",
499 mp, nmp->b_rptr, MBLKL(nmp)));
500 /* copy ether_header from mblk to hdrs_buf */
501 cursor = &hdrs_buf[0];
502 tmp = sizeof (struct ether_vlan_header);
503 while ((nmp != NULL) && (tmp > 0)) {
504 size_t buflen;
505 mblk_len = MBLKL(nmp);
506 buflen = min((size_t)tmp, mblk_len);
507 bcopy(nmp->b_rptr, cursor, buflen);
508 cursor += buflen;
509 tmp -= buflen;
510 nmp = nmp->b_cont;
511 }
512
513 nmp = mp;
514 mblk_len = MBLKL(nmp);
515 ip_buf = NULL;
516 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
517 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: (value 0x%llx) "
518 "ether type 0x%x", hdrp->value, eth_type));
519
520 if (eth_type < ETHERMTU) {
521 tmp = 1ull;
522 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
523 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
524 "value 0x%llx", hdrp->value));
525 if (*(hdrs_buf + sizeof (struct ether_header))
526 == LLC_SNAP_SAP) {
527 eth_type = ntohs(*((uint16_t *)(hdrs_buf +
528 sizeof (struct ether_header) + 6)));
529 NXGE_DEBUG_MSG((NULL, TX_CTL,
530 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
531 eth_type));
532 } else {
533 goto fill_tx_header_done;
534 }
535 } else if (eth_type == VLAN_ETHERTYPE) {
536 tmp = 1ull;
537 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
538
539 eth_type = ntohs(((struct ether_vlan_header *)
540 hdrs_buf)->ether_type);
541 is_vlan = B_TRUE;
542 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
543 "value 0x%llx", hdrp->value));
544 }
545
546 if (!is_vlan) {
547 eth_hdr_size = sizeof (struct ether_header);
548 } else {
549 eth_hdr_size = sizeof (struct ether_vlan_header);
550 }
551
552 switch (eth_type) {
553 case ETHERTYPE_IP:
554 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
555 ip_buf = nmp->b_rptr + eth_hdr_size;
556 mblk_len -= eth_hdr_size;
557 iph_len = ((*ip_buf) & 0x0f);
558 if (mblk_len > (iph_len + sizeof (uint32_t))) {
559 ip_buf = nmp->b_rptr;
560 ip_buf += eth_hdr_size;
561 } else {
562 ip_buf = NULL;
563 }
564
565 }
566 if (ip_buf == NULL) {
567 hdrs_size = 0;
568 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
569 while ((nmp) && (hdrs_size <
570 sizeof (hdrs_buf))) {
571 mblk_len = (size_t)nmp->b_wptr -
572 (size_t)nmp->b_rptr;
573 if (mblk_len >=
574 (sizeof (hdrs_buf) - hdrs_size))
575 mblk_len = sizeof (hdrs_buf) -
576 hdrs_size;
577 bcopy(nmp->b_rptr,
578 &hdrs_buf[hdrs_size], mblk_len);
579 hdrs_size += mblk_len;
580 nmp = nmp->b_cont;
581 }
582 ip_buf = hdrs_buf;
583 ip_buf += eth_hdr_size;
584 iph_len = ((*ip_buf) & 0x0f);
585 }
586
587 ipproto = ip_buf[9];
588
589 tmp = (uint64_t)iph_len;
590 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
591 tmp = (uint64_t)(eth_hdr_size >> 1);
592 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
593
594 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
595 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
596 "tmp 0x%x",
597 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
598 ipproto, tmp));
599 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
600 "value 0x%llx", hdrp->value));
601
602 break;
603
604 case ETHERTYPE_IPV6:
605 hdrs_size = 0;
606 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
607 while ((nmp) && (hdrs_size <
608 sizeof (hdrs_buf))) {
609 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
610 if (mblk_len >=
611 (sizeof (hdrs_buf) - hdrs_size))
612 mblk_len = sizeof (hdrs_buf) -
613 hdrs_size;
614 bcopy(nmp->b_rptr,
615 &hdrs_buf[hdrs_size], mblk_len);
616 hdrs_size += mblk_len;
617 nmp = nmp->b_cont;
618 }
619 ip_buf = hdrs_buf;
620 ip_buf += eth_hdr_size;
621
622 tmp = 1ull;
623 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
624
625 tmp = (eth_hdr_size >> 1);
626 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
627
628 /* byte 6 is the next header protocol */
629 ipproto = ip_buf[6];
630
631 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
632 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
633 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
634 ipproto));
635 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
636 "value 0x%llx", hdrp->value));
637
638 break;
639
640 default:
641 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
642 goto fill_tx_header_done;
643 }
644
645 switch (ipproto) {
646 case IPPROTO_TCP:
647 NXGE_DEBUG_MSG((NULL, TX_CTL,
648 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
649 if (l4_cksum) {
650 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
651 hdrp->value |=
652 (((uint64_t)(start_offset >> 1)) <<
653 TX_PKT_HEADER_L4START_SHIFT);
654 hdrp->value |=
655 (((uint64_t)(stuff_offset >> 1)) <<
656 TX_PKT_HEADER_L4STUFF_SHIFT);
657
658 NXGE_DEBUG_MSG((NULL, TX_CTL,
659 "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
660 "value 0x%llx", hdrp->value));
661 }
662
663 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
664 "value 0x%llx", hdrp->value));
665 break;
666
667 case IPPROTO_UDP:
668 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
669 if (l4_cksum) {
670 if (!nxge_cksum_offload) {
671 uint16_t *up;
672 uint16_t cksum;
673 t_uscalar_t stuff_len;
674
675 /*
676 * The checksum field has the
677 * partial checksum.
678 * IP_CSUM() macro calls ip_cksum() which
679 * can add in the partial checksum.
680 */
681 cksum = IP_CSUM(mp, start_offset, 0);
682 stuff_len = stuff_offset;
683 nmp = mp;
684 mblk_len = MBLKL(nmp);
685 while ((nmp != NULL) &&
686 (mblk_len < stuff_len)) {
687 stuff_len -= mblk_len;
688 nmp = nmp->b_cont;
689 if (nmp)
690 mblk_len = MBLKL(nmp);
691 }
692 ASSERT(nmp);
693 up = (uint16_t *)(nmp->b_rptr + stuff_len);
694
695 *up = cksum;
696 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
697 NXGE_DEBUG_MSG((NULL, TX_CTL,
698 "==> nxge_tx_pkt_hdr_init: UDP offset %d "
699 "use sw cksum "
700 "write to $%p cksum 0x%x content up 0x%x",
701 stuff_len,
702 up,
703 cksum,
704 *up));
705 } else {
706 /* Hardware will compute the full checksum */
707 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
708 hdrp->value |=
709 (((uint64_t)(start_offset >> 1)) <<
710 TX_PKT_HEADER_L4START_SHIFT);
711 hdrp->value |=
712 (((uint64_t)(stuff_offset >> 1)) <<
713 TX_PKT_HEADER_L4STUFF_SHIFT);
714
715 NXGE_DEBUG_MSG((NULL, TX_CTL,
716 "==> nxge_tx_pkt_hdr_init: UDP offset %d "
717 " use partial checksum "
718 "cksum 0x%x ",
719 "value 0x%llx",
720 stuff_offset,
721 IP_CSUM(mp, start_offset, 0),
722 hdrp->value));
723 }
724 }
725
726 NXGE_DEBUG_MSG((NULL, TX_CTL,
727 "==> nxge_tx_pkt_hdr_init: UDP"
728 "value 0x%llx", hdrp->value));
729 break;
730
731 default:
732 goto fill_tx_header_done;
733 }
734
735 fill_tx_header_done:
736 NXGE_DEBUG_MSG((NULL, TX_CTL,
737 "==> nxge_fill_tx_hdr: pkt_len %d "
738 "npads %d value 0x%llx", pkt_len, npads, hdrp->value));
739
740 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
741 }
742
743 /*ARGSUSED*/
744 p_mblk_t
745 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
746 {
747 p_mblk_t newmp = NULL;
748
749 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
750 NXGE_DEBUG_MSG((NULL, TX_CTL,
751 "<== nxge_tx_pkt_header_reserve: allocb failed"));
752 return (NULL);
753 }
754
755 NXGE_DEBUG_MSG((NULL, TX_CTL,
756 "==> nxge_tx_pkt_header_reserve: get new mp"));
757 DB_TYPE(newmp) = M_DATA;
758 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
759 linkb(newmp, mp);
760 newmp->b_rptr -= TX_PKT_HEADER_SIZE;
761
762 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
763 "b_rptr $%p b_wptr $%p",
764 newmp->b_rptr, newmp->b_wptr));
765
766 NXGE_DEBUG_MSG((NULL, TX_CTL,
767 "<== nxge_tx_pkt_header_reserve: use new mp"));
768
769 return (newmp);
770 }
771
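/*
 * nxge_tx_pkt_nmblocks
 *
 * Walk the mblk chain and estimate how many transmit gather pointers
 * the packet will consume, accumulating the total transfer length in
 * *tot_xfer_len_p.  Blocks longer than TX_MAX_TRANSFER_LENGTH (4K) are
 * split with dupb(), and if the chain would exceed the hardware limit
 * of TX_MAX_GATHER_POINTERS, the remainder is pulled up into a single
 * block with msgpullup().  Returns 0 on allocation failure.
 */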
772 int
773 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
774 {
775 uint_t nmblks;
776 ssize_t len;
777 uint_t pkt_len;
778 p_mblk_t nmp, bmp, tmp;
779 uint8_t *b_wptr;
780
781 NXGE_DEBUG_MSG((NULL, TX_CTL,
782 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
783 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
784
785 nmp = mp;
786 bmp = mp;
787 nmblks = 0;
788 pkt_len = 0;
789 *tot_xfer_len_p = 0;
790
791 while (nmp) {
792 len = MBLKL(nmp);
793 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
794 "len %d pkt_len %d nmblks %d tot_xfer_len %d",
795 len, pkt_len, nmblks,
796 *tot_xfer_len_p));
797
798 if (len <= 0) {
799 bmp = nmp;
800 nmp = nmp->b_cont;
801 NXGE_DEBUG_MSG((NULL, TX_CTL,
802 "==> nxge_tx_pkt_nmblocks: "
803 "len (0) pkt_len %d nmblks %d",
804 pkt_len, nmblks));
805 continue;
806 }
807
808 *tot_xfer_len_p += len;
809 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
810 "len %d pkt_len %d nmblks %d tot_xfer_len %d",
811 len, pkt_len, nmblks,
812 *tot_xfer_len_p));
813
814 if (len < nxge_bcopy_thresh) {
815 NXGE_DEBUG_MSG((NULL, TX_CTL,
816 "==> nxge_tx_pkt_nmblocks: "
817 "len %d (< thresh) pkt_len %d nmblks %d",
818 len, pkt_len, nmblks));
819 if (pkt_len == 0)
820 nmblks++;
821 pkt_len += len;
822 if (pkt_len >= nxge_bcopy_thresh) {
823 pkt_len = 0;
824 len = 0;
825 nmp = bmp;
826 }
827 } else {
828 NXGE_DEBUG_MSG((NULL, TX_CTL,
829 "==> nxge_tx_pkt_nmblocks: "
830 "len %d (> thresh) pkt_len %d nmblks %d",
831 len, pkt_len, nmblks));
832 pkt_len = 0;
833 nmblks++;
834 /*
835 * Hardware limits the transfer length to 4K.
836 * If len is more than 4K, we need to break
837 * it up to at most 2 more blocks.
838 */
839 if (len > TX_MAX_TRANSFER_LENGTH) {
840 uint32_t nsegs;
841
842 nsegs = 1;
843 NXGE_DEBUG_MSG((NULL, TX_CTL,
844 "==> nxge_tx_pkt_nmblocks: "
845 "len %d pkt_len %d nmblks %d nsegs %d",
846 len, pkt_len, nmblks, nsegs));
847 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
848 ++nsegs;
849 }
850 do {
851 b_wptr = nmp->b_rptr +
852 TX_MAX_TRANSFER_LENGTH;
853 nmp->b_wptr = b_wptr;
854 if ((tmp = dupb(nmp)) == NULL) {
855 return (0);
856 }
857 tmp->b_rptr = b_wptr;
858 tmp->b_wptr = nmp->b_wptr;
859 tmp->b_cont = nmp->b_cont;
860 nmp->b_cont = tmp;
861 nmblks++;
862 if (--nsegs) {
863 nmp = tmp;
864 }
865 } while (nsegs);
866 nmp = tmp;
867 }
868 }
869
870 /*
871 * Hardware limits the transmit gather pointers to 15.
872 */
873 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
874 TX_MAX_GATHER_POINTERS) {
875 NXGE_DEBUG_MSG((NULL, TX_CTL,
876 "==> nxge_tx_pkt_nmblocks: pull msg - "
877 "len %d pkt_len %d nmblks %d",
878 len, pkt_len, nmblks));
879 /* Pull all message blocks from b_cont */
880 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
881 return (0);
882 }
883 freemsg(nmp->b_cont);
884 nmp->b_cont = tmp;
885 pkt_len = 0;
886 }
887 bmp = nmp;
888 nmp = nmp->b_cont;
889 }
890
891 NXGE_DEBUG_MSG((NULL, TX_CTL,
892 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
893 "nmblks %d len %d tot_xfer_len %d",
894 mp->b_rptr, mp->b_wptr, nmblks,
895 MBLKL(mp), *tot_xfer_len_p));
896
897 return (nmblks);
898 }
899
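/*
 * nxge_txdma_reclaim
 *
 * Reclaim transmit descriptors that the hardware has completed.  The
 * hardware-maintained head pointer (TX_RING_HDL) is compared with the
 * software read index; for each completed descriptor the DMA or DVMA
 * binding is released, the mblk chain is freed, and the per-channel
 * statistics are updated.  Returns B_TRUE if, after reclaiming, the
 * ring can accept <nmblks> more descriptors (allowing for TX_FULL_MARK
 * slack), B_FALSE otherwise.
 */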
900 boolean_t
901 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
902 {
903 boolean_t status = B_TRUE;
904 p_nxge_dma_common_t tx_desc_dma_p;
905 nxge_dma_common_t desc_area;
906 p_tx_desc_t tx_desc_ring_vp;
907 p_tx_desc_t tx_desc_p;
908 p_tx_desc_t tx_desc_pp;
909 tx_desc_t r_tx_desc;
910 p_tx_msg_t tx_msg_ring;
911 p_tx_msg_t tx_msg_p;
912 npi_handle_t handle;
913 tx_ring_hdl_t tx_head;
914 uint32_t pkt_len;
915 uint_t tx_rd_index;
916 uint16_t head_index, tail_index;
917 uint8_t tdc;
918 boolean_t head_wrap, tail_wrap;
919 p_nxge_tx_ring_stats_t tdc_stats;
920 int rc;
921
922 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
923
924 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
925 (nmblks != 0));
926 NXGE_DEBUG_MSG((nxgep, TX_CTL,
927 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
928 tx_ring_p->descs_pending, nxge_reclaim_pending,
929 nmblks));
930 if (!status) {
931 tx_desc_dma_p = &tx_ring_p->tdc_desc;
932 desc_area = tx_ring_p->tdc_desc;
933 handle = NXGE_DEV_NPI_HANDLE(nxgep);
934 tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
935 tx_desc_ring_vp =
936 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
937 tx_rd_index = tx_ring_p->rd_index;
938 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
939 tx_msg_ring = tx_ring_p->tx_msg_ring;
940 tx_msg_p = &tx_msg_ring[tx_rd_index];
941 tdc = tx_ring_p->tdc;
942 tdc_stats = tx_ring_p->tdc_stats;
943 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
944 tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
945 }
946
947 tail_index = tx_ring_p->wr_index;
948 tail_wrap = tx_ring_p->wr_index_wrap;
949
950 NXGE_DEBUG_MSG((nxgep, TX_CTL,
951 "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
952 "tail_index %d tail_wrap %d "
953 "tx_desc_p $%p ($%p) ",
954 tdc, tx_rd_index, tail_index, tail_wrap,
955 tx_desc_p, (*(uint64_t *)tx_desc_p)));
956 /*
957 * Read the hardware maintained transmit head
958 * and wrap around bit.
959 */
960 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
961 head_index = tx_head.bits.ldw.head;
962 head_wrap = tx_head.bits.ldw.wrap;
963 NXGE_DEBUG_MSG((nxgep, TX_CTL,
964 "==> nxge_txdma_reclaim: "
965 "tx_rd_index %d tail %d tail_wrap %d "
966 "head %d wrap %d",
967 tx_rd_index, tail_index, tail_wrap,
968 head_index, head_wrap));
969
970 if (head_index == tail_index) {
971 if (TXDMA_RING_EMPTY(head_index, head_wrap,
972 tail_index, tail_wrap) &&
973 (head_index == tx_rd_index)) {
974 NXGE_DEBUG_MSG((nxgep, TX_CTL,
975 "==> nxge_txdma_reclaim: EMPTY"));
976 return (B_TRUE);
977 }
978
979 NXGE_DEBUG_MSG((nxgep, TX_CTL,
980 "==> nxge_txdma_reclaim: Checking "
981 "if ring full"));
982 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
983 tail_wrap)) {
984 NXGE_DEBUG_MSG((nxgep, TX_CTL,
985 "==> nxge_txdma_reclaim: full"));
986 return (B_FALSE);
987 }
988 }
989
990 NXGE_DEBUG_MSG((nxgep, TX_CTL,
991 "==> nxge_txdma_reclaim: tx_rd_index and head_index"));
992
993 tx_desc_pp = &r_tx_desc;
994 while ((tx_rd_index != head_index) &&
995 (tx_ring_p->descs_pending != 0)) {
996
997 NXGE_DEBUG_MSG((nxgep, TX_CTL,
998 "==> nxge_txdma_reclaim: Checking if pending"));
999
1000 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1001 "==> nxge_txdma_reclaim: "
1002 "descs_pending %d ",
1003 tx_ring_p->descs_pending));
1004
1005 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1006 "==> nxge_txdma_reclaim: "
1007 "(tx_rd_index %d head_index %d "
1008 "(tx_desc_p $%p)",
1009 tx_rd_index, head_index,
1010 tx_desc_p));
1011
1012 tx_desc_pp->value = tx_desc_p->value;
1013 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1014 "==> nxge_txdma_reclaim: "
1015 "(tx_rd_index %d head_index %d "
1016 "tx_desc_p $%p (desc value 0x%llx) ",
1017 tx_rd_index, head_index,
1018 tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
1019
1020 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1021 "==> nxge_txdma_reclaim: dump desc:"));
1022
1023 pkt_len = tx_desc_pp->bits.hdw.tr_len;
1024 tdc_stats->obytes += (pkt_len - TX_PKT_HEADER_SIZE);
1025 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
1026 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1027 "==> nxge_txdma_reclaim: pkt_len %d "
1028 "tdc channel %d opackets %d",
1029 pkt_len,
1030 tdc,
1031 tdc_stats->opackets));
1032
1033 if (tx_msg_p->flags.dma_type == USE_DVMA) {
1034 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1035 "tx_desc_p = $%p "
1036 "tx_desc_pp = $%p "
1037 "index = %d",
1038 tx_desc_p,
1039 tx_desc_pp,
1040 tx_ring_p->rd_index));
1041 (void) dvma_unload(tx_msg_p->dvma_handle,
1042 0, -1);
1043 tx_msg_p->dvma_handle = NULL;
1044 if (tx_ring_p->dvma_wr_index ==
1045 tx_ring_p->dvma_wrap_mask) {
1046 tx_ring_p->dvma_wr_index = 0;
1047 } else {
1048 tx_ring_p->dvma_wr_index++;
1049 }
1050 tx_ring_p->dvma_pending--;
1051 } else if (tx_msg_p->flags.dma_type ==
1052 USE_DMA) {
1053 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1054 "==> nxge_txdma_reclaim: "
1055 "USE DMA"));
1056 if (rc = ddi_dma_unbind_handle
1057 (tx_msg_p->dma_handle)) {
1058 cmn_err(CE_WARN, "!nxge_reclaim: "
1059 "ddi_dma_unbind_handle "
1060 "failed. status %d", rc);
1061 }
1062 }
1063 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1064 "==> nxge_txdma_reclaim: count packets"));
1065 /*
1066 * count a chained packet only once.
1067 */
1068 if (tx_msg_p->tx_message != NULL) {
1069 freemsg(tx_msg_p->tx_message);
1070 tx_msg_p->tx_message = NULL;
1071 }
1072
1073 tx_msg_p->flags.dma_type = USE_NONE;
1074 tx_rd_index = tx_ring_p->rd_index;
1075 tx_rd_index = (tx_rd_index + 1) &
1076 tx_ring_p->tx_wrap_mask;
1077 tx_ring_p->rd_index = tx_rd_index;
1078 tx_ring_p->descs_pending--;
1079 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
1080 tx_msg_p = &tx_msg_ring[tx_rd_index];
1081 }
1082
1083 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1084 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1085 if (status) {
1086 (void) atomic_cas_32((uint32_t *)&tx_ring_p->queueing,
1087 1, 0);
1088 }
1089 } else {
1090 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1091 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1092 }
1093
1094 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1095 "<== nxge_txdma_reclaim status = 0x%08x", status));
1096
1097 return (status);
1098 }
1099
1100 /*
1101 * nxge_tx_intr
1102 *
1103 * Process a TDC interrupt
1104 *
1105 * Arguments:
1106 * arg1 A Logical Device state Vector (LSV) data structure.
1107 * arg2 nxge_t *
1108 *
1109 * Notes:
1110 *
1111 * NPI/NXGE function calls:
1112 * npi_txdma_control_status()
1113 * npi_intr_ldg_mgmt_set()
1114 *
1115 * nxge_tx_err_evnts()
1116 * nxge_txdma_reclaim()
1117 *
1118 * Registers accessed:
1119 * TX_CS DMC+0x40028 Transmit Control And Status
1120 * PIO_LDSV
1121 *
1122 * Context:
1123 * Any domain
1124 */
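/*
 * Note: when the "mark" (mk) bit is set in TX_CS, the handler below
 * invokes nxge_tx_ring_task() on the corresponding ring to reclaim
 * completed descriptors; remaining control/status bits are examined by
 * nxge_tx_err_evnts().
 */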
1125 uint_t
1126 nxge_tx_intr(char *arg1, char *arg2)
1127 {
1128 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1129 p_nxge_t nxgep = (p_nxge_t)arg2;
1130 p_nxge_ldg_t ldgp;
1131 uint8_t channel;
1132 uint32_t vindex;
1133 npi_handle_t handle;
1134 tx_cs_t cs;
1135 p_tx_ring_t *tx_rings;
1136 p_tx_ring_t tx_ring_p;
1137 npi_status_t rs = NPI_SUCCESS;
1138 uint_t serviced = DDI_INTR_UNCLAIMED;
1139 nxge_status_t status = NXGE_OK;
1140
1141 if (ldvp == NULL) {
1142 NXGE_DEBUG_MSG((NULL, INT_CTL,
1143 "<== nxge_tx_intr: nxgep $%p ldvp $%p",
1144 nxgep, ldvp));
1145 return (DDI_INTR_UNCLAIMED);
1146 }
1147
1148 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1149 nxgep = ldvp->nxgep;
1150 }
1151 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1152 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1153 nxgep, ldvp));
1154
1155 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1156 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1157 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1158 "<== nxge_tx_intr: interface not started or intialized"));
1159 return (DDI_INTR_CLAIMED);
1160 }
1161
1162 /*
1163 * This interrupt handler is for a specific
1164 * transmit dma channel.
1165 */
1166 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1167 /* Get the control and status for this channel. */
1168 channel = ldvp->channel;
1169 ldgp = ldvp->ldgp;
1170 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1171 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1172 "channel %d",
1173 nxgep, ldvp, channel));
1174
1175 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1176 vindex = ldvp->vdma_index;
1177 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1178 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1179 channel, vindex, rs));
1180 if (!rs && cs.bits.ldw.mk) {
1181 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1182 "==> nxge_tx_intr:channel %d ring index %d "
1183 "status 0x%08x (mk bit set)",
1184 channel, vindex, rs));
1185 tx_rings = nxgep->tx_rings->rings;
1186 tx_ring_p = tx_rings[vindex];
1187 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1188 "==> nxge_tx_intr:channel %d ring index %d "
1189 "status 0x%08x (mk bit set, calling reclaim)",
1190 channel, vindex, rs));
1191
1192 nxge_tx_ring_task((void *)tx_ring_p);
1193 }
1194
1195 /*
1196 * Process other transmit control and status.
1197 * Check the ldv state.
1198 */
1199 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1200 /*
1201 * Rearm this logical group if this is a single device
1202 * group.
1203 */
1204 if (ldgp->nldvs == 1) {
1205 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1206 "==> nxge_tx_intr: rearm"));
1207 if (status == NXGE_OK) {
1208 if (isLDOMguest(nxgep)) {
1209 nxge_hio_ldgimgn(nxgep, ldgp);
1210 } else {
1211 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1212 B_TRUE, ldgp->ldg_timer);
1213 }
1214 }
1215 }
1216
1217 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1218 serviced = DDI_INTR_CLAIMED;
1219 return (serviced);
1220 }
1221
1222 void
1223 nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
1224 {
1225 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
1226
1227 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1228
1229 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
1230 }
1231
1232 void
1233 nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
1234 {
1235 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
1236
1237 (void) nxge_txdma_stop(nxgep);
1238
1239 (void) nxge_fixup_txdma_rings(nxgep);
1240 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1241 (void) nxge_tx_mac_enable(nxgep);
1242 (void) nxge_txdma_hw_kick(nxgep);
1243
1244 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
1245 }
1246
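/*
 * nxge_txdma_channel_disable
 *
 * Disable a TDC.  If the stop-done bit is not observed after the first
 * disable attempt, a nack_pref error is injected through TDMC_INTR_DBG
 * and the disable is retried (see the comment in the function body).
 */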
1247 npi_status_t
1248 nxge_txdma_channel_disable(
1249 nxge_t *nxge,
1250 int channel)
1251 {
1252 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
1253 npi_status_t rs;
1254 tdmc_intr_dbg_t intr_dbg;
1255
1256 /*
1257 * Stop the dma channel and wait for the stop-done.
1258 * If the stop-done bit is not present, then force
1259 * an error so TXC will stop.
1260 * All channels bound to this port need to be stopped
1261 * and reset after injecting an interrupt error.
1262 */
1263 rs = npi_txdma_channel_disable(handle, channel);
1264 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1265 "==> nxge_txdma_channel_disable(%d) "
1266 "rs 0x%x", channel, rs));
1267 if (rs != NPI_SUCCESS) {
1268 /* Inject any error */
1269 intr_dbg.value = 0;
1270 intr_dbg.bits.ldw.nack_pref = 1;
1271 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1272 "==> nxge_txdma_hw_mode: "
1273 "channel %d (stop failed 0x%x) "
1274 "(inject err)", rs, channel));
1275 (void) npi_txdma_inj_int_error_set(
1276 handle, channel, &intr_dbg);
1277 rs = npi_txdma_channel_disable(handle, channel);
1278 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1279 "==> nxge_txdma_hw_mode: "
1280 "channel %d (stop again 0x%x) "
1281 "(after inject err)",
1282 rs, channel));
1283 }
1284
1285 return (rs);
1286 }
1287
1288 /*
1289 * nxge_txdma_hw_mode
1290 *
1291 * Toggle all TDCs on (enable) or off (disable).
1292 *
1293 * Arguments:
1294 * nxgep
1295 * enable B_TRUE to enable, B_FALSE to disable, the owned TDCs.
1296 *
1297 * Notes:
1298 *
1299 * NPI/NXGE function calls:
1300 * npi_txdma_channel_enable(TX_CS)
1301 * npi_txdma_channel_disable(TX_CS)
1302 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1303 *
1304 * Registers accessed:
1305 * TX_CS DMC+0x40028 Transmit Control And Status
1306 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1307 *
1308 * Context:
1309 * Any domain
1310 */
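/*
 * Usage sketch (illustrative only; mirrors nxge_txdma_stop_start() in
 * this file, which restarts all owned TDCs after a stop):
 *
 *	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
 */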
1311 nxge_status_t
1312 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1313 {
1314 nxge_grp_set_t *set = &nxgep->tx_set;
1315
1316 npi_handle_t handle;
1317 nxge_status_t status;
1318 npi_status_t rs;
1319 int tdc;
1320
1321 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1322 "==> nxge_txdma_hw_mode: enable mode %d", enable));
1323
1324 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1325 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1326 "<== nxge_txdma_mode: not initialized"));
1327 return (NXGE_ERROR);
1328 }
1329
1330 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1331 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1332 "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1333 return (NXGE_ERROR);
1334 }
1335
1336 /* Enable or disable all of the TDCs owned by us. */
1337 rs = 0;
1338 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1339 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1340 if ((1 << tdc) & set->owned.map) {
1341 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1342 if (ring) {
1343 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1344 "==> nxge_txdma_hw_mode: channel %d", tdc));
1345 if (enable) {
1346 rs = npi_txdma_channel_enable
1347 (handle, tdc);
1348 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1349 "==> nxge_txdma_hw_mode: "
1350 "channel %d (enable) rs 0x%x",
1351 tdc, rs));
1352 } else {
1353 rs = nxge_txdma_channel_disable
1354 (nxgep, tdc);
1355 }
1356 }
1357 }
1358 }
1359
1360 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1361
1362 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1363 "<== nxge_txdma_hw_mode: status 0x%x", status));
1364
1365 return (status);
1366 }
1367
1368 void
1369 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1370 {
1371 npi_handle_t handle;
1372
1373 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1374 "==> nxge_txdma_enable_channel: channel %d", channel));
1375
1376 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1377 /* enable the transmit dma channels */
1378 (void) npi_txdma_channel_enable(handle, channel);
1379
1380 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1381 }
1382
1383 void
1384 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1385 {
1386 npi_handle_t handle;
1387
1388 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1389 "==> nxge_txdma_disable_channel: channel %d", channel));
1390
1391 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1392 /* stop the transmit dma channels */
1393 (void) npi_txdma_channel_disable(handle, channel);
1394
1395 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1396 }
1397
1398 /*
1399 * nxge_txdma_stop_inj_err
1400 *
1401 * Stop a TDC. If at first we don't succeed, inject an error.
1402 *
1403 * Arguments:
1404 * nxgep
1405 * channel The channel to stop.
1406 *
1407 * Notes:
1408 *
1409 * NPI/NXGE function calls:
1410 * npi_txdma_channel_disable()
1411 * npi_txdma_inj_int_error_set()
1412 * #if defined(NXGE_DEBUG)
1413 * nxge_txdma_regs_dump_channels(nxgep);
1414 * #endif
1415 *
1416 * Registers accessed:
1417 * TX_CS DMC+0x40028 Transmit Control And Status
1418 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1419 *
1420 * Context:
1421 * Any domain
1422 */
1423 int
1424 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1425 {
1426 npi_handle_t handle;
1427 tdmc_intr_dbg_t intr_dbg;
1428 int status;
1429 npi_status_t rs = NPI_SUCCESS;
1430
1431 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1432 /*
1433 * Stop the dma channel and wait for the stop-done bit.
1434 * If the stop done bit is not set, then create
1435 * an error.
1436 */
1437 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1438 rs = npi_txdma_channel_disable(handle, channel);
1439 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1440 if (status == NXGE_OK) {
1441 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1442 "<== nxge_txdma_stop_inj_err (channel %d): "
1443 "stopped OK", channel));
1444 return (status);
1445 }
1446
1447 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1448 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1449 "injecting error", channel, rs));
1450 /* Inject any error */
1451 intr_dbg.value = 0;
1452 intr_dbg.bits.ldw.nack_pref = 1;
1453 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1454
1455 /* Stop done bit will be set as a result of error injection */
1456 rs = npi_txdma_channel_disable(handle, channel);
1457 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1458 if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1459 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1460 "<== nxge_txdma_stop_inj_err (channel %d): "
1461 "stopped OK ", channel));
1462 return (status);
1463 }
1464
1465 #if defined(NXGE_DEBUG)
1466 nxge_txdma_regs_dump_channels(nxgep);
1467 #endif
1468 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1469 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
1470 " (injected error but still not stopped)", channel, rs));
1471
1472 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1473 return (status);
1474 }
1475
1476 /*ARGSUSED*/
1477 void
1478 nxge_fixup_txdma_rings(p_nxge_t nxgep)
1479 {
1480 nxge_grp_set_t *set = &nxgep->tx_set;
1481 int tdc;
1482
1483 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1484
1485 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1486 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1487 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1488 return;
1489 }
1490
1491 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1492 if ((1 << tdc) & set->owned.map) {
1493 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1494 if (ring) {
1495 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1496 "==> nxge_fixup_txdma_rings: channel %d",
1497 tdc));
1498 nxge_txdma_fixup_channel(nxgep, ring, tdc);
1499 }
1500 }
1501 }
1502
1503 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1504 }
1505
1506 /*ARGSUSED*/
1507 void
1508 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1509 {
1510 p_tx_ring_t ring_p;
1511
1512 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1513 ring_p = nxge_txdma_get_ring(nxgep, channel);
1514 if (ring_p == NULL) {
1515 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1516 return;
1517 }
1518
1519 if (ring_p->tdc != channel) {
1520 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1521 "<== nxge_txdma_fix_channel: channel not matched "
1522 "ring tdc %d passed channel",
1523 ring_p->tdc, channel));
1524 return;
1525 }
1526
1527 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1528
1529 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1530 }
1531
1532 /*ARGSUSED*/
1533 void
1534 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1535 {
1536 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1537
1538 if (ring_p == NULL) {
1539 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1540 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1541 return;
1542 }
1543
1544 if (ring_p->tdc != channel) {
1545 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1546 "<== nxge_txdma_fixup_channel: channel not matched "
1547 "ring tdc %d passed channel",
1548 ring_p->tdc, channel));
1549 return;
1550 }
1551
1552 MUTEX_ENTER(&ring_p->lock);
1553 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1554 ring_p->rd_index = 0;
1555 ring_p->wr_index = 0;
1556 ring_p->ring_head.value = 0;
1557 ring_p->ring_kick_tail.value = 0;
1558 ring_p->descs_pending = 0;
1559 MUTEX_EXIT(&ring_p->lock);
1560
1561 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1562 }
1563
1564 /*ARGSUSED*/
1565 void
1566 nxge_txdma_hw_kick(p_nxge_t nxgep)
1567 {
1568 nxge_grp_set_t *set = &nxgep->tx_set;
1569 int tdc;
1570
1571 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1572
1573 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1574 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1575 "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1576 return;
1577 }
1578
1579 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1580 if ((1 << tdc) & set->owned.map) {
1581 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1582 if (ring) {
1583 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1584 "==> nxge_txdma_hw_kick: channel %d", tdc));
1585 nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1586 }
1587 }
1588 }
1589
1590 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1591 }
1592
1593 /*ARGSUSED*/
1594 void
1595 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1596 {
1597 p_tx_ring_t ring_p;
1598
1599 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1600
1601 ring_p = nxge_txdma_get_ring(nxgep, channel);
1602 if (ring_p == NULL) {
1603 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1604 " nxge_txdma_kick_channel"));
1605 return;
1606 }
1607
1608 if (ring_p->tdc != channel) {
1609 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1610 "<== nxge_txdma_kick_channel: channel not matched "
1611 "ring tdc %d passed channel",
1612 ring_p->tdc, channel));
1613 return;
1614 }
1615
1616 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1617
1618 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1619 }
1620
1621 /*ARGSUSED*/
1622 void
1623 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1624 {
1625
1626 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1627
1628 if (ring_p == NULL) {
1629 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1630 "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1631 return;
1632 }
1633
1634 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1635 }
1636
1637 /*
1638 * nxge_check_tx_hang
1639 *
1640 * Check the state of all TDCs belonging to nxgep.
1641 *
1642 * Arguments:
1643 * nxgep
1644 *
1645 * Notes:
1646 * Called by nxge_hw.c:nxge_check_hw_state().
1647 *
1648 * NPI/NXGE function calls:
1649 *
1650 * Registers accessed:
1651 *
1652 * Context:
1653 * Any domain
1654 */
1655 /*ARGSUSED*/
1656 void
1657 nxge_check_tx_hang(p_nxge_t nxgep)
1658 {
1659 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1660
1661 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1662 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1663 goto nxge_check_tx_hang_exit;
1664 }
1665
1666 /*
1667 * Needs inputs from hardware registers:
1668 * whether the head index has moved since the last timeout, and
1669 * the packets-not-transmitted or stuffed registers.
1670 */
1671 if (nxge_txdma_hung(nxgep)) {
1672 nxge_fixup_hung_txdma_rings(nxgep);
1673 }
1674
1675 nxge_check_tx_hang_exit:
1676 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1677 }
1678
1679 /*
1680 * nxge_txdma_hung
1681 *
1682 * Determine whether any TDC owned by this port appears to be hung.
1683 *
1684 * Arguments:
1685 * nxgep
1688 *
1689 * Notes:
1690 * Called by nxge_check_tx_hang()
1691 *
1692 * NPI/NXGE function calls:
1693 * nxge_txdma_channel_hung()
1694 *
1695 * Registers accessed:
1696 *
1697 * Context:
1698 * Any domain
1699 */
1700 int
1701 nxge_txdma_hung(p_nxge_t nxgep)
1702 {
1703 nxge_grp_set_t *set = &nxgep->tx_set;
1704 int tdc;
1705 boolean_t shared;
1706
1707 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1708
1709 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1710 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1711 "<== nxge_txdma_hung: NULL ring pointer(s)"));
1712 return (B_FALSE);
1713 }
1714
1715 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1716 /*
1717 * Grab the shared state of the TDC.
1718 */
1719 if (isLDOMservice(nxgep)) {
1720 nxge_hio_data_t *nhd =
1721 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1722
1723 MUTEX_ENTER(&nhd->lock);
1724 shared = nxgep->tdc_is_shared[tdc];
1725 MUTEX_EXIT(&nhd->lock);
1726 } else {
1727 shared = B_FALSE;
1728 }
1729
1730 /*
1731 * Now, continue processing the channel.
1732 */
1733 if (((1 << tdc) & set->owned.map) && !shared) {
1734 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1735 if (ring) {
1736 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1737 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1738 "==> nxge_txdma_hung: TDC %d hung",
1739 tdc));
1740 return (B_TRUE);
1741 }
1742 }
1743 }
1744 }
1745
1746 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1747
1748 return (B_FALSE);
1749 }
1750
1751 /*
1752 * nxge_txdma_channel_hung
1753 *
1754 * Determine whether a single TDC appears to be hung.
1755 *
1756 * Arguments:
1757 * nxgep
1758 * ring <channel>'s ring.
1759 * channel The channel to reset.
1760 *
1761 * Notes:
1762 * Called by nxge_txdma.c:nxge_txdma_hung()
1763 *
1764 * NPI/NXGE function calls:
1765 * npi_txdma_ring_head_get()
1766 *
1767 * Registers accessed:
1768 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1769 *
1770 * Context:
1771 * Any domain
1772 */
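/*
 * Note: after the reclaim pass below, the channel is reported hung
 * (B_TRUE) only when the descriptor ring is still full; a ring that is
 * empty, or neither empty nor full, is treated as making progress.
 */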
1773 int
1774 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1775 {
1776 uint16_t head_index, tail_index;
1777 boolean_t head_wrap, tail_wrap;
1778 npi_handle_t handle;
1779 tx_ring_hdl_t tx_head;
1780 uint_t tx_rd_index;
1781
1782 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1783
1784 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1785 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1786 "==> nxge_txdma_channel_hung: channel %d", channel));
1787 MUTEX_ENTER(&tx_ring_p->lock);
1788 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1789
1790 tail_index = tx_ring_p->wr_index;
1791 tail_wrap = tx_ring_p->wr_index_wrap;
1792 tx_rd_index = tx_ring_p->rd_index;
1793 MUTEX_EXIT(&tx_ring_p->lock);
1794
1795 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1796 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1797 "tail_index %d tail_wrap %d ",
1798 channel, tx_rd_index, tail_index, tail_wrap));
1799 /*
1800 * Read the hardware maintained transmit head
1801 * and wrap around bit.
1802 */
1803 (void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1804 head_index = tx_head.bits.ldw.head;
1805 head_wrap = tx_head.bits.ldw.wrap;
1806 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1807 "==> nxge_txdma_channel_hung: "
1808 "tx_rd_index %d tail %d tail_wrap %d "
1809 "head %d wrap %d",
1810 tx_rd_index, tail_index, tail_wrap,
1811 head_index, head_wrap));
1812
1813 if (TXDMA_RING_EMPTY(head_index, head_wrap,
1814 tail_index, tail_wrap) &&
1815 (head_index == tx_rd_index)) {
1816 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1817 "==> nxge_txdma_channel_hung: EMPTY"));
1818 return (B_FALSE);
1819 }
1820
1821 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1822 "==> nxge_txdma_channel_hung: Checking if ring full"));
1823 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1824 tail_wrap)) {
1825 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1826 "==> nxge_txdma_channel_hung: full"));
1827 return (B_TRUE);
1828 }
1829
1830 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1831
1832 return (B_FALSE);
1833 }
1834
1835 /*
1836 * nxge_fixup_hung_txdma_rings
1837 *
1838 * Reclaim and disable every hung TDC owned by this port.
1839 *
1840 * Arguments:
1841 * nxgep
1844 *
1845 * Notes:
1846 * Called by nxge_check_tx_hang()
1847 *
1848 * NPI/NXGE function calls:
1849 * npi_txdma_ring_head_get()
1850 *
1851 * Registers accessed:
1852 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1853 *
1854 * Context:
1855 * Any domain
1856 */
1857 /*ARGSUSED*/
1858 void
1859 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1860 {
1861 nxge_grp_set_t *set = &nxgep->tx_set;
1862 int tdc;
1863
1864 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1865
1866 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1867 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1868 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1869 return;
1870 }
1871
1872 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1873 if ((1 << tdc) & set->owned.map) {
1874 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1875 if (ring) {
1876 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1877 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1878 "==> nxge_fixup_hung_txdma_rings: TDC %d",
1879 tdc));
1880 }
1881 }
1882 }
1883
1884 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1885 }
1886
1887 /*
1888 * nxge_txdma_fixup_hung_channel
1889 *
1890 * 'Fix' a hung TDC.
1891 *
1892 * Arguments:
1893 * nxgep
1894 * channel The channel to fix.
1895 *
1896 * Notes:
1897 * Called by nxge_fixup_hung_txdma_rings()
1898 *
1899 * 1. Reclaim the TDC.
1900 * 2. Disable the TDC.
1901 *
1902 * NPI/NXGE function calls:
1903 * nxge_txdma_reclaim()
1904 * npi_txdma_channel_disable(TX_CS)
1905 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1906 *
1907 * Registers accessed:
1908 * TX_CS DMC+0x40028 Transmit Control And Status
1909 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1910 *
1911 * Context:
1912 * Any domain
1913 */
1914 /*ARGSUSED*/
1915 void
1916 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1917 {
1918 p_tx_ring_t ring_p;
1919
1920 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1921 ring_p = nxge_txdma_get_ring(nxgep, channel);
1922 if (ring_p == NULL) {
1923 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1924 "<== nxge_txdma_fix_hung_channel"));
1925 return;
1926 }
1927
1928 if (ring_p->tdc != channel) {
1929 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1930 "<== nxge_txdma_fix_hung_channel: channel not matched "
1931 "ring tdc %d passed channel",
1932 ring_p->tdc, channel));
1933 return;
1934 }
1935
1936 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1937
1938 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1939 }
1940
1941 /*ARGSUSED*/
1942 void
1943 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1944 uint16_t channel)
1945 {
1946 npi_handle_t handle;
1947 tdmc_intr_dbg_t intr_dbg;
1948 int status = NXGE_OK;
1949
1950 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1951
1952 if (ring_p == NULL) {
1953 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1954 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1955 return;
1956 }
1957
1958 if (ring_p->tdc != channel) {
1959 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1960 "<== nxge_txdma_fixup_hung_channel: channel "
1961 "not matched "
1962 "ring tdc %d passed channel",
1963 ring_p->tdc, channel));
1964 return;
1965 }
1966
1967 /* Reclaim descriptors */
1968 MUTEX_ENTER(&ring_p->lock);
1969 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1970 MUTEX_EXIT(&ring_p->lock);
1971
1972 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1973 /*
1974 * Stop the dma channel and wait for the stop-done bit.
1975 * If the stop done bit is not set, then force
1976 * an error.
1977 */
1978 status = npi_txdma_channel_disable(handle, channel);
1979 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1980 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1981 "<== nxge_txdma_fixup_hung_channel: stopped OK "
1982 "ring tdc %d passed channel %d",
1983 ring_p->tdc, channel));
1984 return;
1985 }
1986
1987 /* Inject any error */
1988 intr_dbg.value = 0;
1989 intr_dbg.bits.ldw.nack_pref = 1;
1990 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1991
1992 /* Stop done bit will be set as a result of error injection */
1993 status = npi_txdma_channel_disable(handle, channel);
1994 if (!(status & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_hung_channel: stopped again: "
		    "ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
1999 return;
2000 }
2001
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
	    "ring tdc %d passed channel %d",
	    ring_p->tdc, channel));
2006
2007 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
2008 }
2009
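/*
 * nxge_reclaim_rings
 *
 * Reclaim completed transmit descriptors on every TDC owned by
 * this instance.
 *
 * Arguments:
 * 	nxgep
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_reclaim()
 *
 * Context:
 *	Any domain
 */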
2010 /*ARGSUSED*/
2011 void
nxge_reclaim_rings(p_nxge_t nxgep)
2013 {
2014 nxge_grp_set_t *set = &nxgep->tx_set;
2015 int tdc;
2016
2017 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
2018
2019 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_reclaim_rings: NULL ring pointer(s)"));
2022 return;
2023 }
2024
2025 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2026 if ((1 << tdc) & set->owned.map) {
2027 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2028 if (ring) {
2029 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2030 "==> nxge_reclaim_rings: TDC %d", tdc));
2031 MUTEX_ENTER(&ring->lock);
2032 (void) nxge_txdma_reclaim(nxgep, ring, 0);
2033 MUTEX_EXIT(&ring->lock);
2034 }
2035 }
2036 }
2037
2038 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
2039 }
2040
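/*
 * nxge_txdma_regs_dump_channels
 *
 * Dump the shared FZC and TXC registers (service domain only) and
 * the per-channel TDC registers for every TDC owned by this
 * instance.
 *
 * Arguments:
 * 	nxgep
 *
 * NPI/NXGE function calls:
 *	npi_txdma_dump_fzc_regs()
 *	npi_txc_dump_fzc_regs()
 *	npi_txc_dump_port_fzc_regs()
 *	npi_txdma_dump_tdc_regs()
 *	npi_txc_dump_tdc_fzc_regs()
 *	nxge_txdma_regs_dump()
 *
 * Context:
 *	Any domain
 */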
2041 void
nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2043 {
2044 nxge_grp_set_t *set = &nxgep->tx_set;
2045 npi_handle_t handle;
2046 int tdc;
2047
2048 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2049
2050 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2051
2052 if (!isLDOMguest(nxgep)) {
2053 (void) npi_txdma_dump_fzc_regs(handle);
2054
2055 /* Dump TXC registers. */
2056 (void) npi_txc_dump_fzc_regs(handle);
2057 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2058 }
2059
2060 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_regs_dump_channels: "
		    "NULL ring pointer(s)"));
2063 return;
2064 }
2065
2066 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2067 if ((1 << tdc) & set->owned.map) {
2068 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2069 if (ring) {
2070 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2071 "==> nxge_txdma_regs_dump_channels: "
2072 "TDC %d", tdc));
2073 (void) npi_txdma_dump_tdc_regs(handle, tdc);
2074
2075 /* Dump TXC registers, if able to. */
2076 if (!isLDOMguest(nxgep)) {
2077 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2078 "==> nxge_txdma_regs_dump_channels:"
2079 " FZC TDC %d", tdc));
2080 (void) npi_txc_dump_tdc_fzc_regs
2081 (handle, tdc);
2082 }
2083 nxge_txdma_regs_dump(nxgep, tdc);
2084 }
2085 }
2086 }
2087
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
2089 }
2090
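/*
 * nxge_txdma_regs_dump
 *
 * Print the state of one TDC to the console: its logical page
 * configuration, head and kick (tail) registers, control/status,
 * TXC port settings, and the IPP status for this function.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to dump.
 */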
2091 void
nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2093 {
2094 npi_handle_t handle;
2095 tx_ring_hdl_t hdl;
2096 tx_ring_kick_t kick;
2097 tx_cs_t cs;
2098 txc_control_t control;
2099 uint32_t bitmap = 0;
2100 uint32_t burst = 0;
2101 uint32_t bytes = 0;
2102 dma_log_page_t cfg;
2103
2104 printf("\n\tfunc # %d tdc %d ",
2105 nxgep->function_num, channel);
2106 cfg.page_num = 0;
2107 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2108 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2109 printf("\n\tlog page func %d valid page 0 %d",
2110 cfg.func_num, cfg.valid);
2111 cfg.page_num = 1;
2112 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2113 printf("\n\tlog page func %d valid page 1 %d",
2114 cfg.func_num, cfg.valid);
2115
2116 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
2117 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2118 printf("\n\thead value is 0x%0llx",
2119 (long long)hdl.value);
2120 printf("\n\thead index %d", hdl.bits.ldw.head);
2121 printf("\n\tkick value is 0x%0llx",
2122 (long long)kick.value);
2123 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2124
2125 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
	printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2127 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2128
2129 (void) npi_txc_control(handle, OP_GET, &control);
2130 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2131 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2132 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2133
2134 printf("\n\tTXC port control 0x%0llx",
2135 (long long)control.value);
2136 printf("\n\tTXC port bitmap 0x%x", bitmap);
2137 printf("\n\tTXC max burst %d", burst);
2138 printf("\n\tTXC bytes xmt %d\n", bytes);
2139
2140 {
2141 ipp_status_t status;
2142
2143 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
		printf("\n\tIPP status 0x%llx\n",
		    (unsigned long long)status.value);
2145 }
2146 }
2147
2148 /*
2149 * nxge_tdc_hvio_setup
2150 *
 * Record the hypervisor (HV) I/O addresses and sizes of this channel's
 * transmit data buffer and control (descriptor/mailbox) areas in the
 * ring structure, for use by the NIU logical-page workaround.
2152 *
2153 * Arguments:
2154 * nxgep
2155 * channel The channel to map.
2156 *
2157 * Notes:
2158 *
2159 * NPI/NXGE function calls:
2160 * na
2161 *
 * Context:
 *	Service domain (called only when !isLDOMguest(nxgep)).
2164 */
2165 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2166 static void
nxge_tdc_hvio_setup(
2168 nxge_t *nxgep, int channel)
2169 {
2170 nxge_dma_common_t *data;
2171 nxge_dma_common_t *control;
2172 tx_ring_t *ring;
2173
2174 ring = nxgep->tx_rings->rings[channel];
2175 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2176
2177 ring->hv_set = B_FALSE;
2178
2179 ring->hv_tx_buf_base_ioaddr_pp =
2180 (uint64_t)data->orig_ioaddr_pp;
2181 ring->hv_tx_buf_ioaddr_size =
2182 (uint64_t)data->orig_alength;
2183
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2185 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
2186 "orig vatopa base io $%p orig_len 0x%llx (%d)",
2187 ring->hv_tx_buf_base_ioaddr_pp,
2188 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
2189 data->ioaddr_pp, data->orig_vatopa,
2190 data->orig_alength, data->orig_alength));
2191
2192 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2193
2194 ring->hv_tx_cntl_base_ioaddr_pp =
2195 (uint64_t)control->orig_ioaddr_pp;
2196 ring->hv_tx_cntl_ioaddr_size =
2197 (uint64_t)control->orig_alength;
2198
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2200 "hv cntl base io $%p orig ioaddr_pp ($%p) "
2201 "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
2202 ring->hv_tx_cntl_base_ioaddr_pp,
2203 control->orig_ioaddr_pp, control->orig_vatopa,
2204 ring->hv_tx_cntl_ioaddr_size,
2205 control->orig_alength, control->orig_alength));
2206 }
2207 #endif
2208
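/*
 * nxge_map_txdma
 *
 * Map a TDC: allocate the transmit memory pool if necessary, allocate
 * the channel's transmit buffers, and map the buffer blocks, descriptor
 * ring and mailbox.  On sun4v, also set up the logical-page information
 * for the channel (NIU_LP_WORKAROUND).
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 *
 * NPI/NXGE function calls:
 *	nxge_alloc_tx_mem_pool()
 *	nxge_alloc_txb()
 *	nxge_map_txdma_channel()
 *
 * Context:
 *	Any domain
 */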
2209 static nxge_status_t
nxge_map_txdma(p_nxge_t nxgep, int channel)
2211 {
2212 nxge_dma_common_t **pData;
2213 nxge_dma_common_t **pControl;
2214 tx_ring_t **pRing, *ring;
2215 tx_mbox_t **mailbox;
2216 uint32_t num_chunks;
2217
2218 nxge_status_t status = NXGE_OK;
2219
2220 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2221
2222 if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2223 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2224 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2225 "<== nxge_map_txdma: buf not allocated"));
2226 return (NXGE_ERROR);
2227 }
2228 }
2229
2230 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2231 return (NXGE_ERROR);
2232
2233 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2234 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2235 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2236 pRing = &nxgep->tx_rings->rings[channel];
2237 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2238
2239 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2240 "tx_rings $%p tx_desc_rings $%p",
2241 nxgep->tx_rings, nxgep->tx_rings->rings));
2242
2243 /*
2244 * Map descriptors from the buffer pools for <channel>.
2245 */
2246
2247 /*
2248 * Set up and prepare buffer blocks, descriptors
2249 * and mailbox.
2250 */
2251 status = nxge_map_txdma_channel(nxgep, channel,
2252 pData, pRing, num_chunks, pControl, mailbox);
2253 if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, MEM3_CTL,
		    "==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
		    "returned 0x%x",
		    channel, status));
2258 return (status);
2259 }
2260
2261 ring = *pRing;
2262
2263 ring->index = (uint16_t)channel;
2264 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2265
2266 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
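	/*
	 * Either perform the guest-domain logical-page setup
	 * (nxge_tdc_lp_conf()) or record this channel's HV buffer
	 * addresses for later logical-page programming
	 * (nxge_tdc_hvio_setup()).
	 */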
2267 if (isLDOMguest(nxgep)) {
2268 (void) nxge_tdc_lp_conf(nxgep, channel);
2269 } else {
2270 nxge_tdc_hvio_setup(nxgep, channel);
2271 }
2272 #endif
2273
2274 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2275 "(status 0x%x channel %d)", status, channel));
2276
2277 return (status);
2278 }
2279
2280 static nxge_status_t
nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2282 p_nxge_dma_common_t *dma_buf_p,
2283 p_tx_ring_t *tx_desc_p,
2284 uint32_t num_chunks,
2285 p_nxge_dma_common_t *dma_cntl_p,
2286 p_tx_mbox_t *tx_mbox_p)
2287 {
2288 int status = NXGE_OK;
2289
2290 /*
2291 * Set up and prepare buffer blocks, descriptors
2292 * and mailbox.
2293 */
2294 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2295 "==> nxge_map_txdma_channel (channel %d)", channel));
2296 /*
2297 * Transmit buffer blocks
2298 */
2299 status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2300 dma_buf_p, tx_desc_p, num_chunks);
2301 if (status != NXGE_OK) {
2302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2303 "==> nxge_map_txdma_channel (channel %d): "
2304 "map buffer failed 0x%x", channel, status));
2305 goto nxge_map_txdma_channel_exit;
2306 }
2307
2308 /*
2309 * Transmit block ring, and mailbox.
2310 */
2311 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2312 tx_mbox_p);
2313
2314 goto nxge_map_txdma_channel_exit;
2315
2316 nxge_map_txdma_channel_fail1:
2317 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2318 "==> nxge_map_txdma_channel: unmap buf"
2319 "(status 0x%x channel %d)",
2320 status, channel));
2321 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2322
2323 nxge_map_txdma_channel_exit:
2324 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2325 "<== nxge_map_txdma_channel: "
2326 "(status 0x%x channel %d)",
2327 status, channel));
2328
2329 return (status);
2330 }
2331
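/*
 * nxge_unmap_txdma_channel
 *
 * Unmap a TDC: free its mailbox and descriptor ring, free the transmit
 * buffer ring and buffers, and clear the channel's ring pointer.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to unmap.
 *
 * NPI/NXGE function calls:
 *	nxge_unmap_txdma_channel_cfg_ring()
 *	nxge_unmap_txdma_channel_buf_ring()
 *	nxge_free_txb()
 *
 * Context:
 *	Any domain
 */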
2332 /*ARGSUSED*/
2333 static void
nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
2335 {
2336 tx_ring_t *ring;
2337 tx_mbox_t *mailbox;
2338
2339 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2340 "==> nxge_unmap_txdma_channel (channel %d)", channel));
2341 /*
	 * Unmap the transmit descriptor ring and mailbox.
2343 */
2344 ring = nxgep->tx_rings->rings[channel];
2345 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2346
2347 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
2348
2349 /* unmap buffer blocks */
2350 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2351
2352 nxge_free_txb(nxgep, channel);
2353
2354 /*
2355 * Cleanup the reference to the ring now that it does not exist.
2356 */
2357 nxgep->tx_rings->rings[channel] = NULL;
2358
2359 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
2360 }
2361
2362 /*
2363 * nxge_map_txdma_channel_cfg_ring
2364 *
 * Set up a TDC's transmit descriptor ring configuration and
 * allocate its mailbox.
2367 *
2368 * Arguments:
2369 * nxgep
2370 * dma_channel The channel to map.
2371 * dma_cntl_p
2372 * tx_ring_p dma_channel's transmit ring
2373 * tx_mbox_p dma_channel's mailbox
2374 *
2375 * Notes:
2376 *
2377 * NPI/NXGE function calls:
2378 * nxge_setup_dma_common()
2379 *
2380 * Registers accessed:
2381 * none.
2382 *
2383 * Context:
2384 * Any domain
2385 */
2386 /*ARGSUSED*/
2387 static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
2389 p_nxge_dma_common_t *dma_cntl_p,
2390 p_tx_ring_t tx_ring_p,
2391 p_tx_mbox_t *tx_mbox_p)
2392 {
2393 p_tx_mbox_t mboxp;
2394 p_nxge_dma_common_t cntl_dmap;
2395 p_nxge_dma_common_t dmap;
2396 p_tx_rng_cfig_t tx_ring_cfig_p;
2397 p_tx_ring_kick_t tx_ring_kick_p;
2398 p_tx_cs_t tx_cs_p;
2399 p_tx_dma_ent_msk_t tx_evmask_p;
2400 p_txdma_mbh_t mboxh_p;
2401 p_txdma_mbl_t mboxl_p;
2402 uint64_t tx_desc_len;
2403
2404 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2405 "==> nxge_map_txdma_channel_cfg_ring"));
2406
2407 cntl_dmap = *dma_cntl_p;
2408
2409 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
2410 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
2411 sizeof (tx_desc_t));
2412 /*
2413 * Zero out transmit ring descriptors.
2414 */
2415 bzero((caddr_t)dmap->kaddrp, dmap->alength);
2416 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
2417 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
2418 tx_cs_p = &(tx_ring_p->tx_cs);
2419 tx_evmask_p = &(tx_ring_p->tx_evmask);
2420 tx_ring_cfig_p->value = 0;
2421 tx_ring_kick_p->value = 0;
2422 tx_cs_p->value = 0;
2423 tx_evmask_p->value = 0;
2424
2425 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2426 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
2427 dma_channel,
2428 dmap->dma_cookie.dmac_laddress));
2429
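	/*
	 * Build the ring configuration value: the descriptor ring base
	 * address (masked to the TX_RNG_CFIG address field) and the ring
	 * length scaled down by 8 (tx_ring_size >> 3) in the length field.
	 */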
2430 tx_ring_cfig_p->value = 0;
2431 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
2432 tx_ring_cfig_p->value =
2433 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
2434 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
2435
2436 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2437 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
2438 dma_channel,
2439 tx_ring_cfig_p->value));
2440
2441 tx_cs_p->bits.ldw.rst = 1;
2442
2443 /* Map in mailbox */
2444 mboxp = (p_tx_mbox_t)
2445 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
2446 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
2447 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
2448 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
2449 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
2450 mboxh_p->value = mboxl_p->value = 0;
2451
2452 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2453 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2454 dmap->dma_cookie.dmac_laddress));
2455
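	/*
	 * Split the mailbox DMA address between the high (MBH) and low
	 * (MBL) mailbox registers, shifted and masked per the hardware
	 * register layout.
	 */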
2456 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
2457 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
2458
2459 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
2460 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
2461
2462 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2463 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2464 dmap->dma_cookie.dmac_laddress));
2465 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2466 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
2467 "mbox $%p",
2468 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
2469 tx_ring_p->page_valid.value = 0;
2470 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
2471 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
2472 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
2473 tx_ring_p->page_hdl.value = 0;
2474
2475 tx_ring_p->page_valid.bits.ldw.page0 = 1;
2476 tx_ring_p->page_valid.bits.ldw.page1 = 1;
2477
2478 tx_ring_p->max_burst.value = 0;
2479 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
2480
2481 *tx_mbox_p = mboxp;
2482
2483 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2484 "<== nxge_map_txdma_channel_cfg_ring"));
2485 }
2486
2487 /*ARGSUSED*/
2488 static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
2490 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2491 {
2492 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2493 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
2494 tx_ring_p->tdc));
2495
2496 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
2497
2498 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2499 "<== nxge_unmap_txdma_channel_cfg_ring"));
2500 }
2501
2502 /*
2503 * nxge_map_txdma_channel_buf_ring
 *
 * Allocate the transmit ring structure for a TDC, create its taskq and
 * per-packet message ring, and map the channel's transmit buffer blocks.
 *
2506 * Arguments:
2507 * nxgep
2508 * channel The channel to map.
2509 * dma_buf_p
2510 * tx_desc_p channel's descriptor ring
2511 * num_chunks
2512 *
2513 * Notes:
2514 *
2515 * NPI/NXGE function calls:
2516 * nxge_setup_dma_common()
2517 *
2518 * Registers accessed:
2519 * none.
2520 *
2521 * Context:
2522 * Any domain
2523 */
2524 static nxge_status_t
nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2526 p_nxge_dma_common_t *dma_buf_p,
2527 p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2528 {
2529 p_nxge_dma_common_t dma_bufp, tmp_bufp;
2530 p_nxge_dma_common_t dmap;
2531 nxge_os_dma_handle_t tx_buf_dma_handle;
2532 p_tx_ring_t tx_ring_p;
2533 p_tx_msg_t tx_msg_ring = NULL;
2534 nxge_status_t status = NXGE_OK;
2535 int ddi_status = DDI_SUCCESS;
2536 int i, j, index = 0;
2537 uint32_t size = 0, bsize;
2538 uint32_t nblocks, nmsgs;
2539 char qname[TASKQ_NAMELEN];
2540
2541 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2542 "==> nxge_map_txdma_channel_buf_ring"));
2543
2544 dma_bufp = tmp_bufp = *dma_buf_p;
2545 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2546 " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2547 "chunks bufp $%p",
2548 channel, num_chunks, dma_bufp));
2549
2550 nmsgs = 0;
2551 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2552 nmsgs += tmp_bufp->nblocks;
2553 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2554 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2555 "bufp $%p nblocks %d nmsgs %d",
2556 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2557 }
2558 if (!nmsgs) {
2559 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2560 "<== nxge_map_txdma_channel_buf_ring: channel %d "
2561 "no msg blocks",
2562 channel));
2563 status = NXGE_ERROR;
2564 goto nxge_map_txdma_channel_buf_ring_exit;
2565 }
2566
2567 tx_ring_p = (p_tx_ring_t)
2568 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2569 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2570 (void *)nxgep->interrupt_cookie);
2571
2572 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2573 tx_ring_p->tx_ring_busy = B_FALSE;
2574 tx_ring_p->nxgep = nxgep;
2575 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL;
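
	/*
	 * Create a single-threaded taskq for deferred work on this ring.
	 */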
2576 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d",
2577 nxgep->instance, channel);
2578 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1,
2579 TASKQ_DEFAULTPRI, 0);
2580 if (tx_ring_p->taskq == NULL) {
2581 goto nxge_map_txdma_channel_buf_ring_fail1;
2582 }
2583
2584 /*
2585 * Allocate transmit message rings and handles for packets
2586 * not to be copied to premapped buffers.
2587 */
2588 size = nmsgs * sizeof (tx_msg_t);
2589 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2590 for (i = 0; i < nmsgs; i++) {
2591 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2592 DDI_DMA_DONTWAIT, 0,
2593 &tx_msg_ring[i].dma_handle);
2594 if (ddi_status != DDI_SUCCESS) {
2595 status |= NXGE_DDI_FAILED;
2596 break;
2597 }
2598 }
2599 if (i < nmsgs) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "Failed to allocate DMA handles."));
2602 goto nxge_map_txdma_channel_buf_ring_fail1;
2603 }
2604
2605 tx_ring_p->tdc = channel;
2606 tx_ring_p->tx_msg_ring = tx_msg_ring;
2607 tx_ring_p->tx_ring_size = nmsgs;
2608 tx_ring_p->num_chunks = num_chunks;
2609 if (!nxge_tx_intr_thres) {
2610 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2611 }
2612 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2613 tx_ring_p->rd_index = 0;
2614 tx_ring_p->wr_index = 0;
2615 tx_ring_p->ring_head.value = 0;
2616 tx_ring_p->ring_kick_tail.value = 0;
2617 tx_ring_p->descs_pending = 0;
2618
2619 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2620 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2621 "actual tx desc max %d nmsgs %d "
2622 "(config nxge_tx_ring_size %d)",
2623 channel, tx_ring_p->tx_ring_size, nmsgs,
2624 nxge_tx_ring_size));
2625
2626 /*
2627 * Map in buffers from the buffer pool.
2628 */
2629 index = 0;
2630 bsize = dma_bufp->block_size;
2631
2632 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2633 "dma_bufp $%p tx_rng_p $%p "
2634 "tx_msg_rng_p $%p bsize %d",
2635 dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2636
2637 tx_buf_dma_handle = dma_bufp->dma_handle;
2638 for (i = 0; i < num_chunks; i++, dma_bufp++) {
2639 bsize = dma_bufp->block_size;
2640 nblocks = dma_bufp->nblocks;
2641 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2642 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2643 "size %d dma_bufp $%p",
2644 i, sizeof (nxge_dma_common_t), dma_bufp));
2645
2646 for (j = 0; j < nblocks; j++) {
2647 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2648 dmap = &tx_msg_ring[index++].buf_dma;
2649 #ifdef TX_MEM_DEBUG
			NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
			    "==> nxge_map_txdma_channel_buf_ring: j %d "
			    "dmap $%p", j, dmap));
2653 #endif
2654 nxge_setup_dma_common(dmap, dma_bufp, 1,
2655 bsize);
2656 }
2657 }
2658
2659 if (i < num_chunks) {
2660 status = NXGE_ERROR;
2661 goto nxge_map_txdma_channel_buf_ring_fail1;
2662 }
2663
2664 *tx_desc_p = tx_ring_p;
2665
2666 goto nxge_map_txdma_channel_buf_ring_exit;
2667
2668 nxge_map_txdma_channel_buf_ring_fail1:
2669 if (tx_ring_p->taskq) {
2670 ddi_taskq_destroy(tx_ring_p->taskq);
2671 tx_ring_p->taskq = NULL;
2672 }
2673
2674 index--;
2675 for (; index >= 0; index--) {
2676 if (tx_msg_ring[index].dma_handle != NULL) {
2677 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2678 }
2679 }
2680 MUTEX_DESTROY(&tx_ring_p->lock);
2681 KMEM_FREE(tx_msg_ring, size);
2682 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2683
2684 status = NXGE_ERROR;
2685
2686 nxge_map_txdma_channel_buf_ring_exit:
2687 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2688 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2689
2690 return (status);
2691 }
2692
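/*
 * nxge_unmap_txdma_channel_buf_ring
 *
 * Reclaim any outstanding descriptors, free any pending messages and
 * DMA handles, destroy the ring's taskq and lock, and free the message
 * ring and the transmit ring structure itself.
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	The ring to tear down.
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_reclaim()
 *
 * Context:
 *	Any domain
 */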
2693 /*ARGSUSED*/
2694 static void
nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2696 {
2697 p_tx_msg_t tx_msg_ring;
2698 p_tx_msg_t tx_msg_p;
2699 int i;
2700
2701 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2702 "==> nxge_unmap_txdma_channel_buf_ring"));
2703 if (tx_ring_p == NULL) {
2704 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2705 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2706 return;
2707 }
2708 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2709 "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2710 tx_ring_p->tdc));
2711
2712 tx_msg_ring = tx_ring_p->tx_msg_ring;
2713
2714 /*
2715 * Since the serialization thread, timer thread and
2716 * interrupt thread can all call the transmit reclaim,
2717 * the unmapping function needs to acquire the lock
2718 * to free those buffers which were transmitted
2719 * by the hardware already.
2720 */
2721 MUTEX_ENTER(&tx_ring_p->lock);
2722 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2723 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2724 "channel %d",
2725 tx_ring_p->tdc));
2726 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2727
2728 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2729 tx_msg_p = &tx_msg_ring[i];
2730 if (tx_msg_p->tx_message != NULL) {
2731 freemsg(tx_msg_p->tx_message);
2732 tx_msg_p->tx_message = NULL;
2733 }
2734 }
2735
2736 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2737 if (tx_msg_ring[i].dma_handle != NULL) {
2738 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2739 }
2740 tx_msg_ring[i].dma_handle = NULL;
2741 }
2742
2743 MUTEX_EXIT(&tx_ring_p->lock);
2744
2745 if (tx_ring_p->taskq) {
2746 ddi_taskq_destroy(tx_ring_p->taskq);
2747 tx_ring_p->taskq = NULL;
2748 }
2749
2750 MUTEX_DESTROY(&tx_ring_p->lock);
2751 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2752 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2753
2754 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2755 "<== nxge_unmap_txdma_channel_buf_ring"));
2756 }
2757
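/*
 * nxge_txdma_hw_start
 *
 * Start a single TDC, using the descriptor ring and mailbox that were
 * previously mapped for it.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to start.
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_start_channel()
 *
 * Context:
 *	Any domain
 */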
2758 static nxge_status_t
nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
2760 {
2761 p_tx_rings_t tx_rings;
2762 p_tx_ring_t *tx_desc_rings;
2763 p_tx_mbox_areas_t tx_mbox_areas_p;
2764 p_tx_mbox_t *tx_mbox_p;
2765 nxge_status_t status = NXGE_OK;
2766
2767 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
2768
2769 tx_rings = nxgep->tx_rings;
2770 if (tx_rings == NULL) {
2771 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2772 "<== nxge_txdma_hw_start: NULL ring pointer"));
2773 return (NXGE_ERROR);
2774 }
2775 tx_desc_rings = tx_rings->rings;
2776 if (tx_desc_rings == NULL) {
2777 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2778 "<== nxge_txdma_hw_start: NULL ring pointers"));
2779 return (NXGE_ERROR);
2780 }
2781
2782 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2783 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2784
2785 tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
2786 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2787
2788 status = nxge_txdma_start_channel(nxgep, channel,
2789 (p_tx_ring_t)tx_desc_rings[channel],
2790 (p_tx_mbox_t)tx_mbox_p[channel]);
2791 if (status != NXGE_OK) {
2792 goto nxge_txdma_hw_start_fail1;
2793 }
2794
2795 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2796 "tx_rings $%p rings $%p",
2797 nxgep->tx_rings, nxgep->tx_rings->rings));
2798 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2799 "tx_rings $%p tx_desc_rings $%p",
2800 nxgep->tx_rings, tx_desc_rings));
2801
2802 goto nxge_txdma_hw_start_exit;
2803
2804 nxge_txdma_hw_start_fail1:
2805 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2806 "==> nxge_txdma_hw_start: disable "
2807 "(status 0x%x channel %d)", status, channel));
2808
2809 nxge_txdma_hw_start_exit:
2810 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2811 "==> nxge_txdma_hw_start: (status 0x%x)", status));
2812
2813 return (status);
2814 }
2815
2816 /*
2817 * nxge_txdma_start_channel
2818 *
2819 * Start a TDC.
2820 *
2821 * Arguments:
2822 * nxgep
2823 * channel The channel to start.
2824 * tx_ring_p channel's transmit descriptor ring.
2825 * tx_mbox_p channel' smailbox.
2826 *
2827 * Notes:
2828 *
2829 * NPI/NXGE function calls:
2830 * nxge_reset_txdma_channel()
2831 * nxge_init_txdma_channel_event_mask()
2832 * nxge_enable_txdma_channel()
2833 *
2834 * Registers accessed:
2835 * none directly (see functions above).
2836 *
2837 * Context:
2838 * Any domain
2839 */
2840 static nxge_status_t
nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2842 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2843 {
2844 nxge_status_t status = NXGE_OK;
2845
2846 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2847 "==> nxge_txdma_start_channel (channel %d)", channel));
2848 /*
2849 * TXDMA/TXC must be in stopped state.
2850 */
2851 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2852
2853 /*
2854 * Reset TXDMA channel
2855 */
2856 tx_ring_p->tx_cs.value = 0;
2857 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2858 status = nxge_reset_txdma_channel(nxgep, channel,
2859 tx_ring_p->tx_cs.value);
2860 if (status != NXGE_OK) {
2861 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2862 "==> nxge_txdma_start_channel (channel %d)"
2863 " reset channel failed 0x%x", channel, status));
2864 goto nxge_txdma_start_channel_exit;
2865 }
2866
2867 /*
2868 * Initialize the TXDMA channel specific FZC control
2869 * configurations. These FZC registers are pertaining
2870 * to each TX channel (i.e. logical pages).
2871 */
2872 if (!isLDOMguest(nxgep)) {
2873 status = nxge_init_fzc_txdma_channel(nxgep, channel,
2874 tx_ring_p, tx_mbox_p);
2875 if (status != NXGE_OK) {
2876 goto nxge_txdma_start_channel_exit;
2877 }
2878 }
2879
2880 /*
2881 * Initialize the event masks.
2882 */
2883 tx_ring_p->tx_evmask.value = 0;
2884 status = nxge_init_txdma_channel_event_mask(nxgep,
2885 channel, &tx_ring_p->tx_evmask);
2886 if (status != NXGE_OK) {
2887 goto nxge_txdma_start_channel_exit;
2888 }
2889
2890 /*
2891 * Load TXDMA descriptors, buffers, mailbox,
2892 * initialise the DMA channels and
2893 * enable each DMA channel.
2894 */
2895 status = nxge_enable_txdma_channel(nxgep, channel,
2896 tx_ring_p, tx_mbox_p);
2897 if (status != NXGE_OK) {
2898 goto nxge_txdma_start_channel_exit;
2899 }
2900
2901 nxge_txdma_start_channel_exit:
2902 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2903
2904 return (status);
2905 }
2906
2907 /*
2908 * nxge_txdma_stop_channel
2909 *
2910 * Stop a TDC.
2911 *
2912 * Arguments:
2913 * nxgep
2914 * channel The channel to stop.
2915 * tx_ring_p channel's transmit descriptor ring.
2916 * tx_mbox_p channel' smailbox.
2917 *
2918 * Notes:
2919 *
2920 * NPI/NXGE function calls:
2921 * nxge_txdma_stop_inj_err()
2922 * nxge_reset_txdma_channel()
2923 * nxge_init_txdma_channel_event_mask()
2924 * nxge_init_txdma_channel_cntl_stat()
2925 * nxge_disable_txdma_channel()
2926 *
2927 * Registers accessed:
2928 * none directly (see functions above).
2929 *
2930 * Context:
2931 * Any domain
2932 */
2933 /*ARGSUSED*/
2934 static nxge_status_t
nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2936 {
2937 p_tx_ring_t tx_ring_p;
2938 int status = NXGE_OK;
2939
2940 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2941 "==> nxge_txdma_stop_channel: channel %d", channel));
2942
2943 /*
	 * Stop (disable) the TXDMA channel and TXC.  (If the stop bit
	 * is set and the STOP_N_GO bit is not set, the TXDMA reset
	 * state will not be set when the channel is reset.)
2947 */
2948 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2949
2950 if (nxgep->tx_rings == NULL) {
2951 status = NXGE_ERROR;
2952 goto nxge_txdma_stop_channel_exit;
2953 }
2954
2955 tx_ring_p = nxgep->tx_rings->rings[channel];
2956 if (tx_ring_p == NULL) {
2957 status = NXGE_ERROR;
2958 goto nxge_txdma_stop_channel_exit;
2959 }
2960
2961 /*
2962 * Reset TXDMA channel
2963 */
2964 tx_ring_p->tx_cs.value = 0;
2965 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2966 status = nxge_reset_txdma_channel(nxgep, channel,
2967 tx_ring_p->tx_cs.value);
2968 if (status != NXGE_OK) {
2969 goto nxge_txdma_stop_channel_exit;
2970 }
2971
2972 #ifdef HARDWARE_REQUIRED
2973 /* Set up the interrupt event masks. */
2974 tx_ring_p->tx_evmask.value = 0;
2975 status = nxge_init_txdma_channel_event_mask(nxgep,
2976 channel, &tx_ring_p->tx_evmask);
2977 if (status != NXGE_OK) {
2978 goto nxge_txdma_stop_channel_exit;
2979 }
2980
2981 /* Initialize the DMA control and status register */
2982 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2983 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2984 tx_ring_p->tx_cs.value);
2985 if (status != NXGE_OK) {
2986 goto nxge_txdma_stop_channel_exit;
2987 }
2988
2989 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2990
2991 /* Disable channel */
2992 status = nxge_disable_txdma_channel(nxgep, channel,
2993 tx_ring_p, tx_mbox_p);
2994 if (status != NXGE_OK) {
		goto nxge_txdma_stop_channel_exit;
2996 }
2997
2998 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2999 "==> nxge_txdma_stop_channel: event done"));
3000
3001 #endif
3002
3003 nxge_txdma_stop_channel_exit:
3004 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
3005 return (status);
3006 }
3007
3008 /*
3009 * nxge_txdma_get_ring
3010 *
3011 * Get the ring for a TDC.
3012 *
3013 * Arguments:
3014 * nxgep
3015 * channel
3016 *
3017 * Notes:
3018 *
3019 * NPI/NXGE function calls:
3020 *
3021 * Registers accessed:
3022 *
3023 * Context:
3024 * Any domain
3025 */
3026 static p_tx_ring_t
nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
3028 {
3029 nxge_grp_set_t *set = &nxgep->tx_set;
3030 int tdc;
3031
3032 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
3033
3034 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3035 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3036 "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
3037 goto return_null;
3038 }
3039
3040 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3041 if ((1 << tdc) & set->owned.map) {
3042 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3043 if (ring) {
3044 if (channel == ring->tdc) {
3045 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3046 "<== nxge_txdma_get_ring: "
3047 "tdc %d ring $%p", tdc, ring));
3048 return (ring);
3049 }
3050 }
3051 }
3052 }
3053
3054 return_null:
3055 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
3056 "ring not found"));
3057
3058 return (NULL);
3059 }
3060
3061 /*
3062 * nxge_txdma_get_mbox
3063 *
3064 * Get the mailbox for a TDC.
3065 *
3066 * Arguments:
3067 * nxgep
3068 * channel
3069 *
3070 * Notes:
3071 *
3072 * NPI/NXGE function calls:
3073 *
3074 * Registers accessed:
3075 *
3076 * Context:
3077 * Any domain
3078 */
3079 static p_tx_mbox_t
nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3081 {
3082 nxge_grp_set_t *set = &nxgep->tx_set;
3083 int tdc;
3084
3085 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3086
3087 if (nxgep->tx_mbox_areas_p == 0 ||
3088 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3089 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3090 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3091 goto return_null;
3092 }
3093
3094 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3095 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3096 "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3097 goto return_null;
3098 }
3099
3100 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3101 if ((1 << tdc) & set->owned.map) {
3102 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3103 if (ring) {
3104 if (channel == ring->tdc) {
3105 tx_mbox_t *mailbox = nxgep->
3106 tx_mbox_areas_p->
3107 txmbox_areas_p[tdc];
3108 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3109 "<== nxge_txdma_get_mbox: tdc %d "
3110 "ring $%p", tdc, mailbox));
3111 return (mailbox);
3112 }
3113 }
3114 }
3115 }
3116
3117 return_null:
3118 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3119 "mailbox not found"));
3120
3121 return (NULL);
3122 }
3123
3124 /*
3125 * nxge_tx_err_evnts
3126 *
 * Process the error events for a TDC and, if the error is fatal,
 * attempt to recover the channel.
3128 *
3129 * Arguments:
3130 * nxgep
3131 * index The index to the TDC ring.
3132 * ldvp Used to get the channel number ONLY.
3133 * cs A copy of the bits from TX_CS.
3134 *
3135 * Notes:
3136 * Calling tree:
3137 * nxge_tx_intr()
3138 *
3139 * NPI/NXGE function calls:
3140 * npi_txdma_ring_error_get()
3141 * npi_txdma_inj_par_error_get()
3142 * nxge_txdma_fatal_err_recover()
3143 *
3144 * Registers accessed:
3145 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High
3146 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3147 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3148 *
3149 * Context:
3150 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3151 */
3152 /*ARGSUSED*/
3153 static nxge_status_t
nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3155 {
3156 npi_handle_t handle;
3157 npi_status_t rs;
3158 uint8_t channel;
3159 p_tx_ring_t *tx_rings;
3160 p_tx_ring_t tx_ring_p;
3161 p_nxge_tx_ring_stats_t tdc_stats;
3162 boolean_t txchan_fatal = B_FALSE;
3163 nxge_status_t status = NXGE_OK;
3164 tdmc_inj_par_err_t par_err;
3165 uint32_t value;
3166
3167 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3168 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3169 channel = ldvp->channel;
3170
3171 tx_rings = nxgep->tx_rings->rings;
3172 tx_ring_p = tx_rings[index];
3173 tdc_stats = tx_ring_p->tdc_stats;
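
	/*
	 * If any of the errors that are logged in the transmit ring
	 * error log registers (TX_RNG_ERR_LOGH/L) is indicated, capture
	 * the error log for this channel before handling the individual
	 * error bits.
	 */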
3174 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3175 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3176 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3177 if ((rs = npi_txdma_ring_error_get(handle, channel,
3178 &tdc_stats->errlog)) != NPI_SUCCESS)
3179 return (NXGE_ERROR | rs);
3180 }
3181
3182 if (cs.bits.ldw.mbox_err) {
3183 tdc_stats->mbox_err++;
3184 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3185 NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3186 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3187 "==> nxge_tx_err_evnts(channel %d): "
3188 "fatal error: mailbox", channel));
3189 txchan_fatal = B_TRUE;
3190 }
3191 if (cs.bits.ldw.pkt_size_err) {
3192 tdc_stats->pkt_size_err++;
3193 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3194 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3195 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3196 "==> nxge_tx_err_evnts(channel %d): "
3197 "fatal error: pkt_size_err", channel));
3198 txchan_fatal = B_TRUE;
3199 }
3200 if (cs.bits.ldw.tx_ring_oflow) {
3201 tdc_stats->tx_ring_oflow++;
3202 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3203 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3204 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3205 "==> nxge_tx_err_evnts(channel %d): "
3206 "fatal error: tx_ring_oflow", channel));
3207 txchan_fatal = B_TRUE;
3208 }
3209 if (cs.bits.ldw.pref_buf_par_err) {
3210 tdc_stats->pre_buf_par_err++;
3211 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3212 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3213 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3214 "==> nxge_tx_err_evnts(channel %d): "
3215 "fatal error: pre_buf_par_err", channel));
3216 /* Clear error injection source for parity error */
3217 (void) npi_txdma_inj_par_error_get(handle, &value);
3218 par_err.value = value;
3219 par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3220 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3221 txchan_fatal = B_TRUE;
3222 }
3223 if (cs.bits.ldw.nack_pref) {
3224 tdc_stats->nack_pref++;
3225 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3226 NXGE_FM_EREPORT_TDMC_NACK_PREF);
3227 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3228 "==> nxge_tx_err_evnts(channel %d): "
3229 "fatal error: nack_pref", channel));
3230 txchan_fatal = B_TRUE;
3231 }
3232 if (cs.bits.ldw.nack_pkt_rd) {
3233 tdc_stats->nack_pkt_rd++;
3234 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3235 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3236 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3237 "==> nxge_tx_err_evnts(channel %d): "
3238 "fatal error: nack_pkt_rd", channel));
3239 txchan_fatal = B_TRUE;
3240 }
3241 if (cs.bits.ldw.conf_part_err) {
3242 tdc_stats->conf_part_err++;
3243 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3244 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3245 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3246 "==> nxge_tx_err_evnts(channel %d): "
3247 "fatal error: config_partition_err", channel));
3248 txchan_fatal = B_TRUE;
3249 }
3250 if (cs.bits.ldw.pkt_prt_err) {
3251 tdc_stats->pkt_part_err++;
3252 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3253 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3254 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3255 "==> nxge_tx_err_evnts(channel %d): "
3256 "fatal error: pkt_prt_err", channel));
3257 txchan_fatal = B_TRUE;
3258 }
3259
3260 /* Clear error injection source in case this is an injected error */
3261 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3262
3263 if (txchan_fatal) {
3264 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3265 " nxge_tx_err_evnts: "
3266 " fatal error on channel %d cs 0x%llx\n",
3267 channel, cs.value));
3268 status = nxge_txdma_fatal_err_recover(nxgep, channel,
3269 tx_ring_p);
3270 if (status == NXGE_OK) {
3271 FM_SERVICE_RESTORED(nxgep);
3272 }
3273 }
3274
3275 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3276
3277 return (status);
3278 }
3279
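/*
 * nxge_txdma_fatal_err_recover
 *
 * Attempt to recover a single TDC from a fatal error: stop the channel,
 * reclaim its descriptors, reset it, reprogram the FZC configuration
 * (service domain only), reinitialize the event masks and ring indexes,
 * and finally re-enable the channel.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to recover.
 * 	tx_ring_p	The channel's transmit ring.
 *
 * Notes:
 *	Called by nxge_tx_err_evnts()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_control()
 *	nxge_txdma_reclaim()
 *	nxge_init_fzc_txdma_channel()
 *	nxge_init_txdma_channel_event_mask()
 *	nxge_enable_txdma_channel()
 *
 * Context:
 *	Any domain
 */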
3280 static nxge_status_t
nxge_txdma_fatal_err_recover(
3282 p_nxge_t nxgep,
3283 uint16_t channel,
3284 p_tx_ring_t tx_ring_p)
3285 {
3286 npi_handle_t handle;
3287 npi_status_t rs = NPI_SUCCESS;
3288 p_tx_mbox_t tx_mbox_p;
3289 nxge_status_t status = NXGE_OK;
3290
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3292 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3293 "Recovering from TxDMAChannel#%d error...", channel));
3294
3295 /*
	 * Stop the DMA channel and wait for the stop to complete.
	 * If the stop does not complete, treat it as an error and
	 * fail the recovery.
3299 */
3300
3301 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3302 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3303 MUTEX_ENTER(&tx_ring_p->lock);
3304 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3305 if (rs != NPI_SUCCESS) {
3306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3307 "==> nxge_txdma_fatal_err_recover (channel %d): "
3308 "stop failed ", channel));
3309 goto fail;
3310 }
3311
3312 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3313 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3314
3315 /*
3316 * Reset TXDMA channel
3317 */
3318 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3319 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3320 NPI_SUCCESS) {
3321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3322 "==> nxge_txdma_fatal_err_recover (channel %d)"
3323 " reset channel failed 0x%x", channel, rs));
3324 goto fail;
3325 }
3326
3327 /*
3328 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it; a Tx overflow fatal error
	 * results if the tail is not 0 after reset.)
3331 */
3332 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3333
3334 /* Restart TXDMA channel */
3335
3336 tx_mbox_p = NULL;
3337 if (!isLDOMguest(nxgep)) {
3338 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3339
		/* XXX This is a problem in HIO! */
3341 /*
3342 * Initialize the TXDMA channel specific FZC control
3343 * configurations. These FZC registers are pertaining
3344 * to each TX channel (i.e. logical pages).
3345 */
3346 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3347 status = nxge_init_fzc_txdma_channel(nxgep, channel,
3348 tx_ring_p, tx_mbox_p);
3349 if (status != NXGE_OK)
3350 goto fail;
3351 }
3352
3353 /*
3354 * Initialize the event masks.
3355 */
3356 tx_ring_p->tx_evmask.value = 0;
3357 status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3358 &tx_ring_p->tx_evmask);
3359 if (status != NXGE_OK)
3360 goto fail;
3361
3362 tx_ring_p->wr_index_wrap = B_FALSE;
3363 tx_ring_p->wr_index = 0;
3364 tx_ring_p->rd_index = 0;
3365
3366 /*
3367 * Load TXDMA descriptors, buffers, mailbox,
3368 * initialise the DMA channels and
3369 * enable each DMA channel.
3370 */
3371 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3372 status = nxge_enable_txdma_channel(nxgep, channel,
3373 tx_ring_p, tx_mbox_p);
3374 MUTEX_EXIT(&tx_ring_p->lock);
3375 if (status != NXGE_OK)
3376 goto fail;
3377
3378 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3379 "Recovery Successful, TxDMAChannel#%d Restored",
3380 channel));
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3382
3383 return (NXGE_OK);
3384
3385 fail:
3386 MUTEX_EXIT(&tx_ring_p->lock);
3387
3388 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3389 "nxge_txdma_fatal_err_recover (channel %d): "
3390 "failed to recover this txdma channel", channel));
3391 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3392
3393 return (status);
3394 }
3395
3396 /*
3397 * nxge_tx_port_fatal_err_recover
3398 *
3399 * Attempt to recover from a fatal port error.
3400 *
3401 * Arguments:
3402 * nxgep
3403 *
3404 * Notes:
3405 * How would a guest do this?
3406 *
3407 * NPI/NXGE function calls:
3408 *
3409 * Registers accessed:
3410 *
3411 * Context:
3412 * Service domain
3413 */
3414 nxge_status_t
nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3416 {
3417 nxge_grp_set_t *set = &nxgep->tx_set;
3418 nxge_channel_t tdc;
3419
3420 tx_ring_t *ring;
3421 tx_mbox_t *mailbox;
3422
3423 npi_handle_t handle;
3424 nxge_status_t status = NXGE_OK;
3425 npi_status_t rs;
3426
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3428 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3429 "Recovering from TxPort error..."));
3430
3431 if (isLDOMguest(nxgep)) {
3432 return (NXGE_OK);
3433 }
3434
3435 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3436 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3437 "<== nxge_tx_port_fatal_err_recover: not initialized"));
3438 return (NXGE_ERROR);
3439 }
3440
3441 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3442 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3443 "<== nxge_tx_port_fatal_err_recover: "
3444 "NULL ring pointer(s)"));
3445 return (NXGE_ERROR);
3446 }
3447
3448 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3449 if ((1 << tdc) & set->owned.map) {
3450 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3451 if (ring)
3452 MUTEX_ENTER(&ring->lock);
3453 }
3454 }
3455
3456 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3457
3458 /*
3459 * Stop all the TDCs owned by us.
3460 * (The shared TDCs will have been stopped by their owners.)
3461 */
3462 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3463 if ((1 << tdc) & set->owned.map) {
3464 ring = nxgep->tx_rings->rings[tdc];
3465 if (ring) {
3466 rs = npi_txdma_channel_control
3467 (handle, TXDMA_STOP, tdc);
3468 if (rs != NPI_SUCCESS) {
3469 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3470 "nxge_tx_port_fatal_err_recover "
3471 "(channel %d): stop failed ", tdc));
3472 goto fail;
3473 }
3474 }
3475 }
3476 }
3477
3478 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3479
3480 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3481 if ((1 << tdc) & set->owned.map) {
3482 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3483 if (ring) {
3484 (void) nxge_txdma_reclaim(nxgep, ring, 0);
3485 }
3486 }
3487 }
3488
3489 /*
3490 * Reset all the TDCs.
3491 */
3492 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3493
3494 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3495 if ((1 << tdc) & set->owned.map) {
3496 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3497 if (ring) {
3498 if ((rs = npi_txdma_channel_control
3499 (handle, TXDMA_RESET, tdc))
3500 != NPI_SUCCESS) {
3501 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3502 "nxge_tx_port_fatal_err_recover "
3503 "(channel %d) reset channel "
3504 "failed 0x%x", tdc, rs));
3505 goto fail;
3506 }
3507 }
3508 /*
3509 * Reset the tail (kick) register to 0.
			 * (Hardware will not reset it; a Tx overflow fatal
			 * error results if the tail is not 0 after reset.)
3512 */
3513 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3514 }
3515 }
3516
3517 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3518
3519 /* Restart all the TDCs */
3520 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3521 if ((1 << tdc) & set->owned.map) {
3522 ring = nxgep->tx_rings->rings[tdc];
3523 if (ring) {
3524 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
				status = nxge_init_fzc_txdma_channel(nxgep, tdc,
				    ring, mailbox);
				if (status != NXGE_OK)
					goto fail;

				/*
				 * Initialize the event masks.
				 */
				ring->tx_evmask.value = 0;
				status = nxge_init_txdma_channel_event_mask
				    (nxgep, tdc, &ring->tx_evmask);
				if (status != NXGE_OK)
					goto fail;

				ring->wr_index_wrap = B_FALSE;
				ring->wr_index = 0;
				ring->rd_index = 0;
3542 }
3543 }
3544 }
3545
3546 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3547
3548 /* Re-enable all the TDCs */
3549 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3550 if ((1 << tdc) & set->owned.map) {
3551 ring = nxgep->tx_rings->rings[tdc];
3552 if (ring) {
3553 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3554 status = nxge_enable_txdma_channel(nxgep, tdc,
3555 ring, mailbox);
3556 if (status != NXGE_OK)
3557 goto fail;
3558 }
3559 }
3560 }
3561
3562 /*
3563 * Unlock all the TDCs.
3564 */
3565 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3566 if ((1 << tdc) & set->owned.map) {
3567 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3568 if (ring)
3569 MUTEX_EXIT(&ring->lock);
3570 }
3571 }
3572
3573 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3575
3576 return (NXGE_OK);
3577
3578 fail:
3579 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3580 if ((1 << tdc) & set->owned.map) {
3581 ring = nxgep->tx_rings->rings[tdc];
3582 if (ring)
3583 MUTEX_EXIT(&ring->lock);
3584 }
3585 }
3586
3587 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3589
3590 return (status);
3591 }
3592
3593 /*
3594 * nxge_txdma_inject_err
3595 *
3596 * Inject an error into a TDC.
3597 *
3598 * Arguments:
3599 * nxgep
3600 * err_id The error to inject.
3601 * chan The channel to inject into.
3602 *
3603 * Notes:
3604 * This is called from nxge_main.c:nxge_err_inject()
3605 * Has this ioctl ever been used?
3606 *
3607 * NPI/NXGE function calls:
3608 * npi_txdma_inj_par_error_get()
3609 * npi_txdma_inj_par_error_set()
3610 *
3611 * Registers accessed:
3612 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3613 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
3614 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
3615 *
3616 * Context:
3617 * Service domain
3618 */
3619 void
nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3621 {
3622 tdmc_intr_dbg_t tdi;
3623 tdmc_inj_par_err_t par_err;
3624 uint32_t value;
3625 npi_handle_t handle;
3626
3627 switch (err_id) {
3628
3629 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3630 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3631 /* Clear error injection source for parity error */
3632 (void) npi_txdma_inj_par_error_get(handle, &value);
3633 par_err.value = value;
3634 par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3635 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3636
3637 par_err.bits.ldw.inject_parity_error = (1 << chan);
3638 (void) npi_txdma_inj_par_error_get(handle, &value);
3639 par_err.value = value;
3640 par_err.bits.ldw.inject_parity_error |= (1 << chan);
3641 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3642 (unsigned long long)par_err.value);
3643 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3644 break;
3645
3646 case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3647 case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3648 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3649 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3650 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3651 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3652 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3653 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3654 chan, &tdi.value);
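		/*
		 * Set the TDMC_INTR_DBG bit that corresponds to err_id;
		 * writing the register back below forces the matching
		 * error event on this channel.
		 */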
3655 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3656 tdi.bits.ldw.pref_buf_par_err = 1;
3657 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3658 tdi.bits.ldw.mbox_err = 1;
3659 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3660 tdi.bits.ldw.nack_pref = 1;
3661 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3662 tdi.bits.ldw.nack_pkt_rd = 1;
3663 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3664 tdi.bits.ldw.pkt_size_err = 1;
3665 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3666 tdi.bits.ldw.tx_ring_oflow = 1;
3667 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3668 tdi.bits.ldw.conf_part_err = 1;
3669 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3670 tdi.bits.ldw.pkt_part_err = 1;
3671 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3672 tdi.value);
3673 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3674 chan, tdi.value);
3675
3676 break;
3677 }
3678 }
3679