1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/nxge/nxge_impl.h> 29 #include <sys/nxge/nxge_txdma.h> 30 #include <sys/llc1.h> 31 32 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 33 uint32_t nxge_tx_minfree = 32; 34 uint32_t nxge_tx_intr_thres = 0; 35 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 36 uint32_t nxge_tx_tiny_pack = 1; 37 uint32_t nxge_tx_use_bcopy = 1; 38 39 extern uint32_t nxge_tx_ring_size; 40 extern uint32_t nxge_bcopy_thresh; 41 extern uint32_t nxge_dvma_thresh; 42 extern uint32_t nxge_dma_stream_thresh; 43 extern dma_method_t nxge_force_dma; 44 45 /* Device register access attributes for PIO. */ 46 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 47 /* Device descriptor access attributes for DMA. */ 48 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 49 /* Device buffer access attributes for DMA. 
*/ 50 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 51 extern ddi_dma_attr_t nxge_desc_dma_attr; 52 extern ddi_dma_attr_t nxge_tx_dma_attr; 53 54 extern int nxge_serial_tx(mblk_t *mp, void *arg); 55 56 static nxge_status_t nxge_map_txdma(p_nxge_t); 57 static void nxge_unmap_txdma(p_nxge_t); 58 59 static nxge_status_t nxge_txdma_hw_start(p_nxge_t); 60 static void nxge_txdma_hw_stop(p_nxge_t); 61 62 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 63 p_nxge_dma_common_t *, p_tx_ring_t *, 64 uint32_t, p_nxge_dma_common_t *, 65 p_tx_mbox_t *); 66 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t, 67 p_tx_ring_t, p_tx_mbox_t); 68 69 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 70 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 71 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 72 73 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 74 p_nxge_dma_common_t *, p_tx_ring_t, 75 p_tx_mbox_t *); 76 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 77 p_tx_ring_t, p_tx_mbox_t); 78 79 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 80 p_tx_ring_t, p_tx_mbox_t); 81 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t, 82 p_tx_ring_t, p_tx_mbox_t); 83 84 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 85 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 86 p_nxge_ldv_t, tx_cs_t); 87 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 88 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 89 uint16_t, p_tx_ring_t); 90 91 nxge_status_t 92 nxge_init_txdma_channels(p_nxge_t nxgep) 93 { 94 nxge_status_t status = NXGE_OK; 95 96 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_init_txdma_channels")); 97 98 status = nxge_map_txdma(nxgep); 99 if (status != NXGE_OK) { 100 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 101 "<== nxge_init_txdma_channels: status 0x%x", status)); 102 return (status); 103 } 104 105 status = nxge_txdma_hw_start(nxgep); 106 if (status != NXGE_OK) { 107 nxge_unmap_txdma(nxgep); 108 return (status); 109 } 110 111 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 112 "<== nxge_init_txdma_channels: status 0x%x", status)); 113 114 return (NXGE_OK); 115 } 116 117 void 118 nxge_uninit_txdma_channels(p_nxge_t nxgep) 119 { 120 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channels")); 121 122 nxge_txdma_hw_stop(nxgep); 123 nxge_unmap_txdma(nxgep); 124 125 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 126 "<== nxge_uinit_txdma_channels")); 127 } 128 129 void 130 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 131 uint32_t entries, uint32_t size) 132 { 133 size_t tsize; 134 *dest_p = *src_p; 135 tsize = size * entries; 136 dest_p->alength = tsize; 137 dest_p->nblocks = entries; 138 dest_p->block_size = size; 139 dest_p->offset += tsize; 140 141 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 142 src_p->alength -= tsize; 143 src_p->dma_cookie.dmac_laddress += tsize; 144 src_p->dma_cookie.dmac_size -= tsize; 145 } 146 147 nxge_status_t 148 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 149 { 150 npi_status_t rs = NPI_SUCCESS; 151 nxge_status_t status = NXGE_OK; 152 npi_handle_t handle; 153 154 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 155 156 handle = NXGE_DEV_NPI_HANDLE(nxgep); 157 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 158 rs = npi_txdma_channel_reset(handle, channel); 159 } else { 160 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 161 channel); 162 } 163 164 if (rs != 
NPI_SUCCESS) { 165 status = NXGE_ERROR | rs; 166 } 167 168 /* 169 * Reset the tail (kick) register to 0. 170 * (Hardware will not reset it. Tx overflow fatal 171 * error if tail is not set to 0 after reset!) 172 */ 173 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 174 175 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 176 return (status); 177 } 178 179 nxge_status_t 180 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 181 p_tx_dma_ent_msk_t mask_p) 182 { 183 npi_handle_t handle; 184 npi_status_t rs = NPI_SUCCESS; 185 nxge_status_t status = NXGE_OK; 186 187 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 188 "==> nxge_init_txdma_channel_event_mask")); 189 190 handle = NXGE_DEV_NPI_HANDLE(nxgep); 191 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 192 if (rs != NPI_SUCCESS) { 193 status = NXGE_ERROR | rs; 194 } 195 196 return (status); 197 } 198 199 nxge_status_t 200 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 201 uint64_t reg_data) 202 { 203 npi_handle_t handle; 204 npi_status_t rs = NPI_SUCCESS; 205 nxge_status_t status = NXGE_OK; 206 207 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 208 "==> nxge_init_txdma_channel_cntl_stat")); 209 210 handle = NXGE_DEV_NPI_HANDLE(nxgep); 211 rs = npi_txdma_control_status(handle, OP_SET, channel, 212 (p_tx_cs_t)&reg_data); 213 214 if (rs != NPI_SUCCESS) { 215 status = NXGE_ERROR | rs; 216 } 217 218 return (status); 219 } 220 221 nxge_status_t 222 nxge_enable_txdma_channel(p_nxge_t nxgep, 223 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 224 { 225 npi_handle_t handle; 226 npi_status_t rs = NPI_SUCCESS; 227 nxge_status_t status = NXGE_OK; 228 229 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel")); 230 231 handle = NXGE_DEV_NPI_HANDLE(nxgep); 232 /* 233 * Use configuration data composed at init time. 234 * Write to hardware the transmit ring configurations. 235 */ 236 rs = npi_txdma_ring_config(handle, OP_SET, channel, 237 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 238 239 if (rs != NPI_SUCCESS) { 240 return (NXGE_ERROR | rs); 241 } 242 243 /* Write to hardware the mailbox */ 244 rs = npi_txdma_mbox_config(handle, OP_SET, channel, 245 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 246 247 if (rs != NPI_SUCCESS) { 248 return (NXGE_ERROR | rs); 249 } 250 251 /* Start the DMA engine. */ 252 rs = npi_txdma_channel_init_enable(handle, channel); 253 254 if (rs != NPI_SUCCESS) { 255 return (NXGE_ERROR | rs); 256 } 257 258 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 259 260 return (status); 261 } 262 263 void 264 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 265 boolean_t l4_cksum, int pkt_len, uint8_t npads, 266 p_tx_pkt_hdr_all_t pkthdrp) 267 { 268 p_tx_pkt_header_t hdrp; 269 p_mblk_t nmp; 270 uint64_t tmp; 271 size_t mblk_len; 272 size_t iph_len; 273 size_t hdrs_size; 274 uint8_t hdrs_buf[sizeof (struct ether_header) + 275 64 + sizeof (uint32_t)]; 276 uint8_t *ip_buf; 277 uint16_t eth_type; 278 uint8_t ipproto; 279 boolean_t is_vlan = B_FALSE; 280 size_t eth_hdr_size; 281 282 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 283 284 /* 285 * Caller should zero out the headers first.
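 * The internal transmit packet header is assembled in hdrp->value
 * as a set of OR'ed, shifted bit fields (total transfer length,
 * pad count, L3 start offset, IP header length, packet type), so
 * any stale bits left in the header by the caller would be carried
 * through into the header handed to the hardware.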
286 */ 287 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 288 289 if (fill_len) { 290 NXGE_DEBUG_MSG((NULL, TX_CTL, 291 "==> nxge_fill_tx_hdr: pkt_len %d " 292 "npads %d", pkt_len, npads)); 293 tmp = (uint64_t)pkt_len; 294 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 295 goto fill_tx_header_done; 296 } 297 298 tmp = (uint64_t)npads; 299 hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT); 300 301 /* 302 * mp is the original data packet (does not include the 303 * Neptune transmit header). 304 */ 305 nmp = mp; 306 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 307 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 308 "mp $%p b_rptr $%p len %d", 309 mp, nmp->b_rptr, mblk_len)); 310 ip_buf = NULL; 311 bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header)); 312 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 313 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 314 "ether type 0x%x", eth_type, hdrp->value)); 315 316 if (eth_type < ETHERMTU) { 317 tmp = 1ull; 318 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 319 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 320 "value 0x%llx", hdrp->value)); 321 if (*(hdrs_buf + sizeof (struct ether_header)) 322 == LLC_SNAP_SAP) { 323 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 324 sizeof (struct ether_header) + 6))); 325 NXGE_DEBUG_MSG((NULL, TX_CTL, 326 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 327 eth_type)); 328 } else { 329 goto fill_tx_header_done; 330 } 331 } else if (eth_type == VLAN_ETHERTYPE) { 332 tmp = 1ull; 333 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 334 335 eth_type = ntohs(((struct ether_vlan_header *) 336 hdrs_buf)->ether_type); 337 is_vlan = B_TRUE; 338 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 339 "value 0x%llx", hdrp->value)); 340 } 341 342 if (!is_vlan) { 343 eth_hdr_size = sizeof (struct ether_header); 344 } else { 345 eth_hdr_size = sizeof (struct ether_vlan_header); 346 } 347 348 switch (eth_type) { 349 case ETHERTYPE_IP: 350 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 351 ip_buf = nmp->b_rptr + eth_hdr_size; 352 mblk_len -= eth_hdr_size; 353 iph_len = ((*ip_buf) & 0x0f); 354 if (mblk_len > (iph_len + sizeof (uint32_t))) { 355 ip_buf = nmp->b_rptr; 356 ip_buf += eth_hdr_size; 357 } else { 358 ip_buf = NULL; 359 } 360 361 } 362 if (ip_buf == NULL) { 363 hdrs_size = 0; 364 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 365 while ((nmp) && (hdrs_size < 366 sizeof (hdrs_buf))) { 367 mblk_len = (size_t)nmp->b_wptr - 368 (size_t)nmp->b_rptr; 369 if (mblk_len >= 370 (sizeof (hdrs_buf) - hdrs_size)) 371 mblk_len = sizeof (hdrs_buf) - 372 hdrs_size; 373 bcopy(nmp->b_rptr, 374 &hdrs_buf[hdrs_size], mblk_len); 375 hdrs_size += mblk_len; 376 nmp = nmp->b_cont; 377 } 378 ip_buf = hdrs_buf; 379 ip_buf += eth_hdr_size; 380 iph_len = ((*ip_buf) & 0x0f); 381 } 382 383 ipproto = ip_buf[9]; 384 385 tmp = (uint64_t)iph_len; 386 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 387 tmp = (uint64_t)(eth_hdr_size >> 1); 388 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 389 390 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 391 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 392 "tmp 0x%x", 393 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 394 ipproto, tmp)); 395 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 396 "value 0x%llx", hdrp->value)); 397 398 break; 399 400 case ETHERTYPE_IPV6: 401 hdrs_size = 0; 402 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 403 while ((nmp) && (hdrs_size < 404 sizeof 
(hdrs_buf))) { 405 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 406 if (mblk_len >= 407 (sizeof (hdrs_buf) - hdrs_size)) 408 mblk_len = sizeof (hdrs_buf) - 409 hdrs_size; 410 bcopy(nmp->b_rptr, 411 &hdrs_buf[hdrs_size], mblk_len); 412 hdrs_size += mblk_len; 413 nmp = nmp->b_cont; 414 } 415 ip_buf = hdrs_buf; 416 ip_buf += eth_hdr_size; 417 418 tmp = 1ull; 419 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 420 421 tmp = (eth_hdr_size >> 1); 422 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 423 424 /* byte 6 is the next header protocol */ 425 ipproto = ip_buf[6]; 426 427 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 428 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 429 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 430 ipproto)); 431 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 432 "value 0x%llx", hdrp->value)); 433 434 break; 435 436 default: 437 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 438 goto fill_tx_header_done; 439 } 440 441 switch (ipproto) { 442 case IPPROTO_TCP: 443 NXGE_DEBUG_MSG((NULL, TX_CTL, 444 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 445 if (l4_cksum) { 446 tmp = 1ull; 447 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 448 NXGE_DEBUG_MSG((NULL, TX_CTL, 449 "==> nxge_tx_pkt_hdr_init: TCP CKSUM" 450 "value 0x%llx", hdrp->value)); 451 } 452 453 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 454 "value 0x%llx", hdrp->value)); 455 break; 456 457 case IPPROTO_UDP: 458 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 459 if (l4_cksum) { 460 tmp = 0x2ull; 461 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 462 } 463 NXGE_DEBUG_MSG((NULL, TX_CTL, 464 "==> nxge_tx_pkt_hdr_init: UDP" 465 "value 0x%llx", hdrp->value)); 466 break; 467 468 default: 469 goto fill_tx_header_done; 470 } 471 472 fill_tx_header_done: 473 NXGE_DEBUG_MSG((NULL, TX_CTL, 474 "==> nxge_fill_tx_hdr: pkt_len %d " 475 "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 476 477 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 478 } 479 480 /*ARGSUSED*/ 481 p_mblk_t 482 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 483 { 484 p_mblk_t newmp = NULL; 485 486 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 487 NXGE_DEBUG_MSG((NULL, TX_CTL, 488 "<== nxge_tx_pkt_header_reserve: allocb failed")); 489 return (NULL); 490 } 491 492 NXGE_DEBUG_MSG((NULL, TX_CTL, 493 "==> nxge_tx_pkt_header_reserve: get new mp")); 494 DB_TYPE(newmp) = M_DATA; 495 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 496 linkb(newmp, mp); 497 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 498 499 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 500 "b_rptr $%p b_wptr $%p", 501 newmp->b_rptr, newmp->b_wptr)); 502 503 NXGE_DEBUG_MSG((NULL, TX_CTL, 504 "<== nxge_tx_pkt_header_reserve: use new mp")); 505 506 return (newmp); 507 } 508 509 int 510 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 511 { 512 uint_t nmblks; 513 ssize_t len; 514 uint_t pkt_len; 515 p_mblk_t nmp, bmp, tmp; 516 uint8_t *b_wptr; 517 518 NXGE_DEBUG_MSG((NULL, TX_CTL, 519 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 520 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 521 522 nmp = mp; 523 bmp = mp; 524 nmblks = 0; 525 pkt_len = 0; 526 *tot_xfer_len_p = 0; 527 528 while (nmp) { 529 len = MBLKL(nmp); 530 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 531 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 532 len, pkt_len, nmblks, 533 *tot_xfer_len_p)); 534 535 if (len <= 0) { 536 bmp = 
nmp; 537 nmp = nmp->b_cont; 538 NXGE_DEBUG_MSG((NULL, TX_CTL, 539 "==> nxge_tx_pkt_nmblocks: " 540 "len (0) pkt_len %d nmblks %d", 541 pkt_len, nmblks)); 542 continue; 543 } 544 545 *tot_xfer_len_p += len; 546 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 547 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 548 len, pkt_len, nmblks, 549 *tot_xfer_len_p)); 550 551 if (len < nxge_bcopy_thresh) { 552 NXGE_DEBUG_MSG((NULL, TX_CTL, 553 "==> nxge_tx_pkt_nmblocks: " 554 "len %d (< thresh) pkt_len %d nmblks %d", 555 len, pkt_len, nmblks)); 556 if (pkt_len == 0) 557 nmblks++; 558 pkt_len += len; 559 if (pkt_len >= nxge_bcopy_thresh) { 560 pkt_len = 0; 561 len = 0; 562 nmp = bmp; 563 } 564 } else { 565 NXGE_DEBUG_MSG((NULL, TX_CTL, 566 "==> nxge_tx_pkt_nmblocks: " 567 "len %d (> thresh) pkt_len %d nmblks %d", 568 len, pkt_len, nmblks)); 569 pkt_len = 0; 570 nmblks++; 571 /* 572 * Hardware limits the transfer length to 4K. 573 * If len is more than 4K, we need to break 574 * it up to at most 2 more blocks. 575 */ 576 if (len > TX_MAX_TRANSFER_LENGTH) { 577 uint32_t nsegs; 578 579 NXGE_DEBUG_MSG((NULL, TX_CTL, 580 "==> nxge_tx_pkt_nmblocks: " 581 "len %d pkt_len %d nmblks %d nsegs %d", 582 len, pkt_len, nmblks, nsegs)); 583 nsegs = 1; 584 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 585 ++nsegs; 586 } 587 do { 588 b_wptr = nmp->b_rptr + 589 TX_MAX_TRANSFER_LENGTH; 590 nmp->b_wptr = b_wptr; 591 if ((tmp = dupb(nmp)) == NULL) { 592 return (0); 593 } 594 tmp->b_rptr = b_wptr; 595 tmp->b_wptr = nmp->b_wptr; 596 tmp->b_cont = nmp->b_cont; 597 nmp->b_cont = tmp; 598 nmblks++; 599 if (--nsegs) { 600 nmp = tmp; 601 } 602 } while (nsegs); 603 nmp = tmp; 604 } 605 } 606 607 /* 608 * Hardware limits the transmit gather pointers to 15. 609 */ 610 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 611 TX_MAX_GATHER_POINTERS) { 612 NXGE_DEBUG_MSG((NULL, TX_CTL, 613 "==> nxge_tx_pkt_nmblocks: pull msg - " 614 "len %d pkt_len %d nmblks %d", 615 len, pkt_len, nmblks)); 616 /* Pull all message blocks from b_cont */ 617 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 618 return (0); 619 } 620 freemsg(nmp->b_cont); 621 nmp->b_cont = tmp; 622 pkt_len = 0; 623 } 624 bmp = nmp; 625 nmp = nmp->b_cont; 626 } 627 628 NXGE_DEBUG_MSG((NULL, TX_CTL, 629 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 630 "nmblks %d len %d tot_xfer_len %d", 631 mp->b_rptr, mp->b_wptr, nmblks, 632 MBLKL(mp), *tot_xfer_len_p)); 633 634 return (nmblks); 635 } 636 637 boolean_t 638 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 639 { 640 boolean_t status = B_TRUE; 641 p_nxge_dma_common_t tx_desc_dma_p; 642 nxge_dma_common_t desc_area; 643 p_tx_desc_t tx_desc_ring_vp; 644 p_tx_desc_t tx_desc_p; 645 p_tx_desc_t tx_desc_pp; 646 tx_desc_t r_tx_desc; 647 p_tx_msg_t tx_msg_ring; 648 p_tx_msg_t tx_msg_p; 649 npi_handle_t handle; 650 tx_ring_hdl_t tx_head; 651 uint32_t pkt_len; 652 uint_t tx_rd_index; 653 uint16_t head_index, tail_index; 654 uint8_t tdc; 655 boolean_t head_wrap, tail_wrap; 656 p_nxge_tx_ring_stats_t tdc_stats; 657 int rc; 658 659 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 660 661 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 662 (nmblks != 0)); 663 NXGE_DEBUG_MSG((nxgep, TX_CTL, 664 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 665 tx_ring_p->descs_pending, nxge_reclaim_pending, 666 nmblks)); 667 if (!status) { 668 tx_desc_dma_p = &tx_ring_p->tdc_desc; 669 desc_area = tx_ring_p->tdc_desc; 670 handle = NXGE_DEV_NPI_HANDLE(nxgep); 671 tx_desc_ring_vp = 
tx_desc_dma_p->kaddrp; 672 tx_desc_ring_vp = 673 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 674 tx_rd_index = tx_ring_p->rd_index; 675 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 676 tx_msg_ring = tx_ring_p->tx_msg_ring; 677 tx_msg_p = &tx_msg_ring[tx_rd_index]; 678 tdc = tx_ring_p->tdc; 679 tdc_stats = tx_ring_p->tdc_stats; 680 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 681 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 682 } 683 684 tail_index = tx_ring_p->wr_index; 685 tail_wrap = tx_ring_p->wr_index_wrap; 686 687 NXGE_DEBUG_MSG((nxgep, TX_CTL, 688 "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d " 689 "tail_index %d tail_wrap %d " 690 "tx_desc_p $%p ($%p) ", 691 tdc, tx_rd_index, tail_index, tail_wrap, 692 tx_desc_p, (*(uint64_t *)tx_desc_p))); 693 /* 694 * Read the hardware maintained transmit head 695 * and wrap around bit. 696 */ 697 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 698 head_index = tx_head.bits.ldw.head; 699 head_wrap = tx_head.bits.ldw.wrap; 700 NXGE_DEBUG_MSG((nxgep, TX_CTL, 701 "==> nxge_txdma_reclaim: " 702 "tx_rd_index %d tail %d tail_wrap %d " 703 "head %d wrap %d", 704 tx_rd_index, tail_index, tail_wrap, 705 head_index, head_wrap)); 706 707 if (head_index == tail_index) { 708 if (TXDMA_RING_EMPTY(head_index, head_wrap, 709 tail_index, tail_wrap) && 710 (head_index == tx_rd_index)) { 711 NXGE_DEBUG_MSG((nxgep, TX_CTL, 712 "==> nxge_txdma_reclaim: EMPTY")); 713 return (B_TRUE); 714 } 715 716 NXGE_DEBUG_MSG((nxgep, TX_CTL, 717 "==> nxge_txdma_reclaim: Checking " 718 "if ring full")); 719 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 720 tail_wrap)) { 721 NXGE_DEBUG_MSG((nxgep, TX_CTL, 722 "==> nxge_txdma_reclaim: full")); 723 return (B_FALSE); 724 } 725 } 726 727 NXGE_DEBUG_MSG((nxgep, TX_CTL, 728 "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 729 730 tx_desc_pp = &r_tx_desc; 731 while ((tx_rd_index != head_index) && 732 (tx_ring_p->descs_pending != 0)) { 733 734 NXGE_DEBUG_MSG((nxgep, TX_CTL, 735 "==> nxge_txdma_reclaim: Checking if pending")); 736 737 NXGE_DEBUG_MSG((nxgep, TX_CTL, 738 "==> nxge_txdma_reclaim: " 739 "descs_pending %d ", 740 tx_ring_p->descs_pending)); 741 742 NXGE_DEBUG_MSG((nxgep, TX_CTL, 743 "==> nxge_txdma_reclaim: " 744 "(tx_rd_index %d head_index %d " 745 "(tx_desc_p $%p)", 746 tx_rd_index, head_index, 747 tx_desc_p)); 748 749 tx_desc_pp->value = tx_desc_p->value; 750 NXGE_DEBUG_MSG((nxgep, TX_CTL, 751 "==> nxge_txdma_reclaim: " 752 "(tx_rd_index %d head_index %d " 753 "tx_desc_p $%p (desc value 0x%llx) ", 754 tx_rd_index, head_index, 755 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 756 757 NXGE_DEBUG_MSG((nxgep, TX_CTL, 758 "==> nxge_txdma_reclaim: dump desc:")); 759 760 pkt_len = tx_desc_pp->bits.hdw.tr_len; 761 tdc_stats->obytes += pkt_len; 762 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 763 NXGE_DEBUG_MSG((nxgep, TX_CTL, 764 "==> nxge_txdma_reclaim: pkt_len %d " 765 "tdc channel %d opackets %d", 766 pkt_len, 767 tdc, 768 tdc_stats->opackets)); 769 770 if (tx_msg_p->flags.dma_type == USE_DVMA) { 771 NXGE_DEBUG_MSG((nxgep, TX_CTL, 772 "tx_desc_p = $%p " 773 "tx_desc_pp = $%p " 774 "index = %d", 775 tx_desc_p, 776 tx_desc_pp, 777 tx_ring_p->rd_index)); 778 (void) dvma_unload(tx_msg_p->dvma_handle, 779 0, -1); 780 tx_msg_p->dvma_handle = NULL; 781 if (tx_ring_p->dvma_wr_index == 782 tx_ring_p->dvma_wrap_mask) { 783 tx_ring_p->dvma_wr_index = 0; 784 } else { 785 tx_ring_p->dvma_wr_index++; 786 } 787 tx_ring_p->dvma_pending--; 788 } else if (tx_msg_p->flags.dma_type == 789 USE_DMA) { 790 
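/*
 * The buffer was mapped through a DDI DMA handle; unbind it
 * here before the message block is freed further below.
 */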
NXGE_DEBUG_MSG((nxgep, TX_CTL, 791 "==> nxge_txdma_reclaim: " 792 "USE DMA")); 793 if (rc = ddi_dma_unbind_handle 794 (tx_msg_p->dma_handle)) { 795 cmn_err(CE_WARN, "!nxge_reclaim: " 796 "ddi_dma_unbind_handle " 797 "failed. status %d", rc); 798 } 799 } 800 NXGE_DEBUG_MSG((nxgep, TX_CTL, 801 "==> nxge_txdma_reclaim: count packets")); 802 /* 803 * count a chained packet only once. 804 */ 805 if (tx_msg_p->tx_message != NULL) { 806 freemsg(tx_msg_p->tx_message); 807 tx_msg_p->tx_message = NULL; 808 } 809 810 tx_msg_p->flags.dma_type = USE_NONE; 811 tx_rd_index = tx_ring_p->rd_index; 812 tx_rd_index = (tx_rd_index + 1) & 813 tx_ring_p->tx_wrap_mask; 814 tx_ring_p->rd_index = tx_rd_index; 815 tx_ring_p->descs_pending--; 816 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 817 tx_msg_p = &tx_msg_ring[tx_rd_index]; 818 } 819 820 status = (nmblks <= (tx_ring_p->tx_ring_size - 821 tx_ring_p->descs_pending - 822 TX_FULL_MARK)); 823 if (status) { 824 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 825 } 826 } else { 827 status = (nmblks <= 828 (tx_ring_p->tx_ring_size - 829 tx_ring_p->descs_pending - 830 TX_FULL_MARK)); 831 } 832 833 NXGE_DEBUG_MSG((nxgep, TX_CTL, 834 "<== nxge_txdma_reclaim status = 0x%08x", status)); 835 836 return (status); 837 } 838 839 uint_t 840 nxge_tx_intr(void *arg1, void *arg2) 841 { 842 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 843 p_nxge_t nxgep = (p_nxge_t)arg2; 844 p_nxge_ldg_t ldgp; 845 uint8_t channel; 846 uint32_t vindex; 847 npi_handle_t handle; 848 tx_cs_t cs; 849 p_tx_ring_t *tx_rings; 850 p_tx_ring_t tx_ring_p; 851 npi_status_t rs = NPI_SUCCESS; 852 uint_t serviced = DDI_INTR_UNCLAIMED; 853 nxge_status_t status = NXGE_OK; 854 855 if (ldvp == NULL) { 856 NXGE_DEBUG_MSG((NULL, INT_CTL, 857 "<== nxge_tx_intr: nxgep $%p ldvp $%p", 858 nxgep, ldvp)); 859 return (DDI_INTR_UNCLAIMED); 860 } 861 862 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 863 nxgep = ldvp->nxgep; 864 } 865 NXGE_DEBUG_MSG((nxgep, INT_CTL, 866 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 867 nxgep, ldvp)); 868 /* 869 * This interrupt handler is for a specific 870 * transmit dma channel. 871 */ 872 handle = NXGE_DEV_NPI_HANDLE(nxgep); 873 /* Get the control and status for this channel. */ 874 channel = ldvp->channel; 875 ldgp = ldvp->ldgp; 876 NXGE_DEBUG_MSG((nxgep, INT_CTL, 877 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 878 "channel %d", 879 nxgep, ldvp, channel)); 880 881 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 882 vindex = ldvp->vdma_index; 883 NXGE_DEBUG_MSG((nxgep, INT_CTL, 884 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 885 channel, vindex, rs)); 886 if (!rs && cs.bits.ldw.mk) { 887 NXGE_DEBUG_MSG((nxgep, INT_CTL, 888 "==> nxge_tx_intr:channel %d ring index %d " 889 "status 0x%08x (mk bit set)", 890 channel, vindex, rs)); 891 tx_rings = nxgep->tx_rings->rings; 892 tx_ring_p = tx_rings[vindex]; 893 NXGE_DEBUG_MSG((nxgep, INT_CTL, 894 "==> nxge_tx_intr:channel %d ring index %d " 895 "status 0x%08x (mk bit set, calling reclaim)", 896 channel, vindex, rs)); 897 898 MUTEX_ENTER(&tx_ring_p->lock); 899 (void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0); 900 MUTEX_EXIT(&tx_ring_p->lock); 901 mac_tx_update(nxgep->mach); 902 } 903 904 /* 905 * Process other transmit control and status. 906 * Check the ldv state. 907 */ 908 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 909 /* 910 * Rearm this logical group if this is a single device 911 * group. 
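 * Writing the group's timer value back with the arm bit set
 * re-enables interrupts for this group; a group shared by more
 * than one logical device is rearmed by the common group
 * interrupt handler once all members have been serviced.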
912 */ 913 if (ldgp->nldvs == 1) { 914 NXGE_DEBUG_MSG((nxgep, INT_CTL, 915 "==> nxge_tx_intr: rearm")); 916 if (status == NXGE_OK) { 917 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 918 B_TRUE, ldgp->ldg_timer); 919 } 920 } 921 922 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 923 serviced = DDI_INTR_CLAIMED; 924 return (serviced); 925 } 926 927 void 928 nxge_txdma_stop(p_nxge_t nxgep) 929 { 930 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 931 932 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 933 934 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 935 } 936 937 void 938 nxge_txdma_stop_start(p_nxge_t nxgep) 939 { 940 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 941 942 (void) nxge_txdma_stop(nxgep); 943 944 (void) nxge_fixup_txdma_rings(nxgep); 945 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 946 (void) nxge_tx_mac_enable(nxgep); 947 (void) nxge_txdma_hw_kick(nxgep); 948 949 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 950 } 951 952 nxge_status_t 953 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 954 { 955 int i, ndmas; 956 uint16_t channel; 957 p_tx_rings_t tx_rings; 958 p_tx_ring_t *tx_desc_rings; 959 npi_handle_t handle; 960 npi_status_t rs = NPI_SUCCESS; 961 nxge_status_t status = NXGE_OK; 962 963 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 964 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 965 966 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 967 NXGE_DEBUG_MSG((nxgep, TX_CTL, 968 "<== nxge_txdma_mode: not initialized")); 969 return (NXGE_ERROR); 970 } 971 972 tx_rings = nxgep->tx_rings; 973 if (tx_rings == NULL) { 974 NXGE_DEBUG_MSG((nxgep, TX_CTL, 975 "<== nxge_txdma_hw_mode: NULL global ring pointer")); 976 return (NXGE_ERROR); 977 } 978 979 tx_desc_rings = tx_rings->rings; 980 if (tx_desc_rings == NULL) { 981 NXGE_DEBUG_MSG((nxgep, TX_CTL, 982 "<== nxge_txdma_hw_mode: NULL rings pointer")); 983 return (NXGE_ERROR); 984 } 985 986 ndmas = tx_rings->ndmas; 987 if (!ndmas) { 988 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 989 "<== nxge_txdma_hw_mode: no dma channel allocated")); 990 return (NXGE_ERROR); 991 } 992 993 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_mode: " 994 "tx_rings $%p tx_desc_rings $%p ndmas %d", 995 tx_rings, tx_desc_rings, ndmas)); 996 997 handle = NXGE_DEV_NPI_HANDLE(nxgep); 998 for (i = 0; i < ndmas; i++) { 999 if (tx_desc_rings[i] == NULL) { 1000 continue; 1001 } 1002 channel = tx_desc_rings[i]->tdc; 1003 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1004 "==> nxge_txdma_hw_mode: channel %d", channel)); 1005 if (enable) { 1006 rs = npi_txdma_channel_enable(handle, channel); 1007 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1008 "==> nxge_txdma_hw_mode: channel %d (enable) " 1009 "rs 0x%x", channel, rs)); 1010 } else { 1011 /* 1012 * Stop the dma channel and waits for the stop done. 1013 * If the stop done bit is not set, then force 1014 * an error so TXC will stop. 1015 * All channels bound to this port need to be stopped 1016 * and reset after injecting an interrupt error. 
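 * The error is injected by setting the nack_pref bit in the TDMC
 * interrupt debug register, which faults the channel and forces
 * its stop-done state.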
1017 */ 1018 rs = npi_txdma_channel_disable(handle, channel); 1019 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1020 "==> nxge_txdma_hw_mode: channel %d (disable) " 1021 "rs 0x%x", channel, rs)); 1022 { 1023 tdmc_intr_dbg_t intr_dbg; 1024 1025 if (rs != NPI_SUCCESS) { 1026 /* Inject any error */ 1027 intr_dbg.value = 0; 1028 intr_dbg.bits.ldw.nack_pref = 1; 1029 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1030 "==> nxge_txdma_hw_mode: " 1031 "channel %d (stop failed 0x%x) " 1032 "(inject err)", rs, channel)); 1033 (void) npi_txdma_inj_int_error_set( 1034 handle, channel, &intr_dbg); 1035 rs = npi_txdma_channel_disable(handle, 1036 channel); 1037 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1038 "==> nxge_txdma_hw_mode: " 1039 "channel %d (stop again 0x%x) " 1040 "(after inject err)", 1041 rs, channel)); 1042 } 1043 } 1044 } 1045 } 1046 1047 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1048 1049 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1050 "<== nxge_txdma_hw_mode: status 0x%x", status)); 1051 1052 return (status); 1053 } 1054 1055 void 1056 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1057 { 1058 npi_handle_t handle; 1059 1060 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1061 "==> nxge_txdma_enable_channel: channel %d", channel)); 1062 1063 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1064 /* enable the transmit dma channels */ 1065 (void) npi_txdma_channel_enable(handle, channel); 1066 1067 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 1068 } 1069 1070 void 1071 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1072 { 1073 npi_handle_t handle; 1074 1075 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1076 "==> nxge_txdma_disable_channel: channel %d", channel)); 1077 1078 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1079 /* stop the transmit dma channels */ 1080 (void) npi_txdma_channel_disable(handle, channel); 1081 1082 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 1083 } 1084 1085 int 1086 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 1087 { 1088 npi_handle_t handle; 1089 tdmc_intr_dbg_t intr_dbg; 1090 int status; 1091 npi_status_t rs = NPI_SUCCESS; 1092 1093 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 1094 /* 1095 * Stop the dma channel waits for the stop done. 1096 * If the stop done bit is not set, then create 1097 * an error. 1098 */ 1099 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1100 rs = npi_txdma_channel_disable(handle, channel); 1101 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1102 if (status == NXGE_OK) { 1103 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1104 "<== nxge_txdma_stop_inj_err (channel %d): " 1105 "stopped OK", channel)); 1106 return (status); 1107 } 1108 1109 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1110 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1111 "injecting error", channel, rs)); 1112 /* Inject any error */ 1113 intr_dbg.value = 0; 1114 intr_dbg.bits.ldw.nack_pref = 1; 1115 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1116 1117 /* Stop done bit will be set as a result of error injection */ 1118 rs = npi_txdma_channel_disable(handle, channel); 1119 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1120 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 1121 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1122 "<== nxge_txdma_stop_inj_err (channel %d): " 1123 "stopped OK ", channel)); 1124 return (status); 1125 } 1126 1127 #if defined(NXGE_DEBUG) 1128 nxge_txdma_regs_dump_channels(nxgep); 1129 #endif 1130 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1131 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1132 " (injected error but still not stopped)", channel, rs)); 1133 1134 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 1135 return (status); 1136 } 1137 1138 void 1139 nxge_hw_start_tx(p_nxge_t nxgep) 1140 { 1141 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_tx")); 1142 1143 (void) nxge_txdma_hw_start(nxgep); 1144 (void) nxge_tx_mac_enable(nxgep); 1145 1146 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_tx")); 1147 } 1148 1149 /*ARGSUSED*/ 1150 void 1151 nxge_fixup_txdma_rings(p_nxge_t nxgep) 1152 { 1153 int index, ndmas; 1154 uint16_t channel; 1155 p_tx_rings_t tx_rings; 1156 1157 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 1158 1159 /* 1160 * For each transmit channel, reclaim each descriptor and 1161 * free buffers. 1162 */ 1163 tx_rings = nxgep->tx_rings; 1164 if (tx_rings == NULL) { 1165 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1166 "<== nxge_fixup_txdma_rings: NULL ring pointer")); 1167 return; 1168 } 1169 1170 ndmas = tx_rings->ndmas; 1171 if (!ndmas) { 1172 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1173 "<== nxge_fixup_txdma_rings: no channel allocated")); 1174 return; 1175 } 1176 1177 if (tx_rings->rings == NULL) { 1178 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1179 "<== nxge_fixup_txdma_rings: NULL rings pointer")); 1180 return; 1181 } 1182 1183 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_fixup_txdma_rings: " 1184 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1185 tx_rings, tx_rings->rings, ndmas)); 1186 1187 for (index = 0; index < ndmas; index++) { 1188 channel = tx_rings->rings[index]->tdc; 1189 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1190 "==> nxge_fixup_txdma_rings: channel %d", channel)); 1191 1192 nxge_txdma_fixup_channel(nxgep, tx_rings->rings[index], 1193 channel); 1194 } 1195 1196 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 1197 } 1198 1199 /*ARGSUSED*/ 1200 void 1201 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1202 { 1203 p_tx_ring_t ring_p; 1204 1205 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 1206 ring_p = nxge_txdma_get_ring(nxgep, channel); 1207 if (ring_p == NULL) { 1208 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1209 return; 1210 } 1211 1212 if (ring_p->tdc != channel) { 1213 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1214 "<== nxge_txdma_fix_channel: channel not matched " 1215 "ring tdc %d passed channel", 1216 ring_p->tdc, channel)); 1217 return; 1218 } 1219 1220 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1221 1222 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1223 } 1224 1225 /*ARGSUSED*/ 1226 void 1227 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1228 { 1229 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 1230 1231 if (ring_p == NULL) { 1232 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1233 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1234 return; 1235 } 1236 1237 if (ring_p->tdc != channel) { 1238 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1239 "<== nxge_txdma_fixup_channel: channel not matched " 1240 "ring tdc %d passed channel", 1241 ring_p->tdc, channel)); 1242 return; 1243 } 1244 1245 MUTEX_ENTER(&ring_p->lock); 1246 (void) 
nxge_txdma_reclaim(nxgep, ring_p, 0); 1247 ring_p->rd_index = 0; 1248 ring_p->wr_index = 0; 1249 ring_p->ring_head.value = 0; 1250 ring_p->ring_kick_tail.value = 0; 1251 ring_p->descs_pending = 0; 1252 MUTEX_EXIT(&ring_p->lock); 1253 1254 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 1255 } 1256 1257 /*ARGSUSED*/ 1258 void 1259 nxge_txdma_hw_kick(p_nxge_t nxgep) 1260 { 1261 int index, ndmas; 1262 uint16_t channel; 1263 p_tx_rings_t tx_rings; 1264 1265 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 1266 1267 tx_rings = nxgep->tx_rings; 1268 if (tx_rings == NULL) { 1269 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1270 "<== nxge_txdma_hw_kick: NULL ring pointer")); 1271 return; 1272 } 1273 1274 ndmas = tx_rings->ndmas; 1275 if (!ndmas) { 1276 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1277 "<== nxge_txdma_hw_kick: no channel allocated")); 1278 return; 1279 } 1280 1281 if (tx_rings->rings == NULL) { 1282 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1283 "<== nxge_txdma_hw_kick: NULL rings pointer")); 1284 return; 1285 } 1286 1287 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_kick: " 1288 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1289 tx_rings, tx_rings->rings, ndmas)); 1290 1291 for (index = 0; index < ndmas; index++) { 1292 channel = tx_rings->rings[index]->tdc; 1293 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1294 "==> nxge_txdma_hw_kick: channel %d", channel)); 1295 nxge_txdma_hw_kick_channel(nxgep, tx_rings->rings[index], 1296 channel); 1297 } 1298 1299 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 1300 } 1301 1302 /*ARGSUSED*/ 1303 void 1304 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 1305 { 1306 p_tx_ring_t ring_p; 1307 1308 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 1309 1310 ring_p = nxge_txdma_get_ring(nxgep, channel); 1311 if (ring_p == NULL) { 1312 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1313 " nxge_txdma_kick_channel")); 1314 return; 1315 } 1316 1317 if (ring_p->tdc != channel) { 1318 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1319 "<== nxge_txdma_kick_channel: channel not matched " 1320 "ring tdc %d passed channel", 1321 ring_p->tdc, channel)); 1322 return; 1323 } 1324 1325 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 1326 1327 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 1328 } 1329 1330 /*ARGSUSED*/ 1331 void 1332 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1333 { 1334 1335 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 1336 1337 if (ring_p == NULL) { 1338 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1339 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 1340 return; 1341 } 1342 1343 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 1344 } 1345 1346 /*ARGSUSED*/ 1347 void 1348 nxge_check_tx_hang(p_nxge_t nxgep) 1349 { 1350 1351 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 1352 1353 /* 1354 * Needs inputs from hardware for regs: 1355 * head index had not moved since last timeout. 1356 * packets not transmitted or stuffed registers. 
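 * Until such inputs are available, the check below relies on
 * nxge_txdma_hung(), which compares each channel's software
 * read/write indexes against the hardware head pointer and
 * wrap state.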
1357 */ 1358 if (nxge_txdma_hung(nxgep)) { 1359 nxge_fixup_hung_txdma_rings(nxgep); 1360 } 1361 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 1362 } 1363 1364 int 1365 nxge_txdma_hung(p_nxge_t nxgep) 1366 { 1367 int index, ndmas; 1368 uint16_t channel; 1369 p_tx_rings_t tx_rings; 1370 p_tx_ring_t tx_ring_p; 1371 1372 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 1373 tx_rings = nxgep->tx_rings; 1374 if (tx_rings == NULL) { 1375 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1376 "<== nxge_txdma_hung: NULL ring pointer")); 1377 return (B_FALSE); 1378 } 1379 1380 ndmas = tx_rings->ndmas; 1381 if (!ndmas) { 1382 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1383 "<== nxge_txdma_hung: no channel " 1384 "allocated")); 1385 return (B_FALSE); 1386 } 1387 1388 if (tx_rings->rings == NULL) { 1389 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1390 "<== nxge_txdma_hung: NULL rings pointer")); 1391 return (B_FALSE); 1392 } 1393 1394 for (index = 0; index < ndmas; index++) { 1395 channel = tx_rings->rings[index]->tdc; 1396 tx_ring_p = tx_rings->rings[index]; 1397 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1398 "==> nxge_txdma_hung: channel %d", channel)); 1399 if (nxge_txdma_channel_hung(nxgep, tx_ring_p, channel)) { 1400 return (B_TRUE); 1401 } 1402 } 1403 1404 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 1405 1406 return (B_FALSE); 1407 } 1408 1409 int 1410 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1411 { 1412 uint16_t head_index, tail_index; 1413 boolean_t head_wrap, tail_wrap; 1414 npi_handle_t handle; 1415 tx_ring_hdl_t tx_head; 1416 uint_t tx_rd_index; 1417 1418 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 1419 1420 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1421 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1422 "==> nxge_txdma_channel_hung: channel %d", channel)); 1423 MUTEX_ENTER(&tx_ring_p->lock); 1424 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 1425 1426 tail_index = tx_ring_p->wr_index; 1427 tail_wrap = tx_ring_p->wr_index_wrap; 1428 tx_rd_index = tx_ring_p->rd_index; 1429 MUTEX_EXIT(&tx_ring_p->lock); 1430 1431 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1432 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1433 "tail_index %d tail_wrap %d ", 1434 channel, tx_rd_index, tail_index, tail_wrap)); 1435 /* 1436 * Read the hardware maintained transmit head 1437 * and wrap around bit. 
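 * Head == tail by itself cannot distinguish an empty ring from a
 * full one; the hardware wrap bit, together with the software tail
 * wrap flag, lets the TXDMA_RING_EMPTY() and TXDMA_RING_FULL()
 * tests below tell the two cases apart.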
1438 */ 1439 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 1440 head_index = tx_head.bits.ldw.head; 1441 head_wrap = tx_head.bits.ldw.wrap; 1442 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1443 "==> nxge_txdma_channel_hung: " 1444 "tx_rd_index %d tail %d tail_wrap %d " 1445 "head %d wrap %d", 1446 tx_rd_index, tail_index, tail_wrap, 1447 head_index, head_wrap)); 1448 1449 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1450 tail_index, tail_wrap) && 1451 (head_index == tx_rd_index)) { 1452 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1453 "==> nxge_txdma_channel_hung: EMPTY")); 1454 return (B_FALSE); 1455 } 1456 1457 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1458 "==> nxge_txdma_channel_hung: Checking if ring full")); 1459 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1460 tail_wrap)) { 1461 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1462 "==> nxge_txdma_channel_hung: full")); 1463 return (B_TRUE); 1464 } 1465 1466 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 1467 1468 return (B_FALSE); 1469 } 1470 1471 /*ARGSUSED*/ 1472 void 1473 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 1474 { 1475 int index, ndmas; 1476 uint16_t channel; 1477 p_tx_rings_t tx_rings; 1478 1479 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 1480 tx_rings = nxgep->tx_rings; 1481 if (tx_rings == NULL) { 1482 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1483 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer")); 1484 return; 1485 } 1486 1487 ndmas = tx_rings->ndmas; 1488 if (!ndmas) { 1489 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1490 "<== nxge_fixup_hung_txdma_rings: no channel " 1491 "allocated")); 1492 return; 1493 } 1494 1495 if (tx_rings->rings == NULL) { 1496 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1497 "<== nxge_fixup_hung_txdma_rings: NULL rings pointer")); 1498 return; 1499 } 1500 1501 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings: " 1502 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1503 tx_rings, tx_rings->rings, ndmas)); 1504 1505 for (index = 0; index < ndmas; index++) { 1506 channel = tx_rings->rings[index]->tdc; 1507 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1508 "==> nxge_fixup_hung_txdma_rings: channel %d", 1509 channel)); 1510 1511 nxge_txdma_fixup_hung_channel(nxgep, tx_rings->rings[index], 1512 channel); 1513 } 1514 1515 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 1516 } 1517 1518 /*ARGSUSED*/ 1519 void 1520 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 1521 { 1522 p_tx_ring_t ring_p; 1523 1524 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 1525 ring_p = nxge_txdma_get_ring(nxgep, channel); 1526 if (ring_p == NULL) { 1527 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1528 "<== nxge_txdma_fix_hung_channel")); 1529 return; 1530 } 1531 1532 if (ring_p->tdc != channel) { 1533 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1534 "<== nxge_txdma_fix_hung_channel: channel not matched " 1535 "ring tdc %d passed channel", 1536 ring_p->tdc, channel)); 1537 return; 1538 } 1539 1540 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1541 1542 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 1543 } 1544 1545 /*ARGSUSED*/ 1546 void 1547 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 1548 uint16_t channel) 1549 { 1550 npi_handle_t handle; 1551 tdmc_intr_dbg_t intr_dbg; 1552 int status = NXGE_OK; 1553 1554 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 1555 1556 if (ring_p == NULL) { 1557 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1558 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1559 return; 1560 } 1561 1562 if (ring_p->tdc != channel) { 1563 
NXGE_DEBUG_MSG((nxgep, TX_CTL, 1564 "<== nxge_txdma_fixup_hung_channel: channel " 1565 "not matched " 1566 "ring tdc %d passed channel", 1567 ring_p->tdc, channel)); 1568 return; 1569 } 1570 1571 /* Reclaim descriptors */ 1572 MUTEX_ENTER(&ring_p->lock); 1573 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1574 MUTEX_EXIT(&ring_p->lock); 1575 1576 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1577 /* 1578 * Stop the dma channel and wait for the stop done. 1579 * If the stop done bit is not set, then force 1580 * an error. 1581 */ 1582 status = npi_txdma_channel_disable(handle, channel); 1583 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1584 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1585 "<== nxge_txdma_fixup_hung_channel: stopped OK " 1586 "ring tdc %d passed channel %d", 1587 ring_p->tdc, channel)); 1588 return; 1589 } 1590 1591 /* Inject any error */ 1592 intr_dbg.value = 0; 1593 intr_dbg.bits.ldw.nack_pref = 1; 1594 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1595 1596 /* Stop done bit will be set as a result of error injection */ 1597 status = npi_txdma_channel_disable(handle, channel); 1598 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1599 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1600 "<== nxge_txdma_fixup_hung_channel: stopped again " 1601 "ring tdc %d passed channel", 1602 ring_p->tdc, channel)); 1603 return; 1604 } 1605 1606 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1607 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! " 1608 "ring tdc %d passed channel", 1609 ring_p->tdc, channel)); 1610 1611 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 1612 } 1613 1614 /*ARGSUSED*/ 1615 void 1616 nxge_reclaim_rings(p_nxge_t nxgep) 1617 { 1618 int index, ndmas; 1619 uint16_t channel; 1620 p_tx_rings_t tx_rings; 1621 p_tx_ring_t tx_ring_p; 1622 1623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 1624 tx_rings = nxgep->tx_rings; 1625 if (tx_rings == NULL) { 1626 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1627 "<== nxge_reclaim_rings: NULL ring pointer")); 1628 return; 1629 } 1630 1631 ndmas = tx_rings->ndmas; 1632 if (!ndmas) { 1633 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1634 "<== nxge_reclaim_rings: no channel " 1635 "allocated")); 1636 return; 1637 } 1638 1639 if (tx_rings->rings == NULL) { 1640 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1641 "<== nxge_reclaim_rings: NULL rings pointer")); 1642 return; 1643 } 1644 1645 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings: " 1646 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1647 tx_rings, tx_rings->rings, ndmas)); 1648 1649 for (index = 0; index < ndmas; index++) { 1650 channel = tx_rings->rings[index]->tdc; 1651 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1652 "==> nxge_reclaim_rings: channel %d", 1653 channel)); 1654 tx_ring_p = tx_rings->rings[index]; 1655 MUTEX_ENTER(&tx_ring_p->lock); 1656 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, channel); 1657 MUTEX_EXIT(&tx_ring_p->lock); 1658 } 1659 1660 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 1661 } 1662 1663 void 1664 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 1665 { 1666 int index, ndmas; 1667 uint16_t channel; 1668 p_tx_rings_t tx_rings; 1669 npi_handle_t handle; 1670 1671 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 1672 1673 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1674 (void) npi_txdma_dump_fzc_regs(handle); 1675 1676 tx_rings = nxgep->tx_rings; 1677 if (tx_rings == NULL) { 1678 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1679 "<== nxge_txdma_regs_dump_channels: NULL ring")); 1680 return; 1681 } 1682 1683 ndmas = tx_rings->ndmas; 1684 if (!ndmas) { 1685 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1686 "<==
nxge_txdma_regs_dump_channels: " 1687 "no channel allocated")); 1688 return; 1689 } 1690 1691 if (tx_rings->rings == NULL) { 1692 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1693 "<== nxge_txdma_regs_dump_channels: NULL rings")); 1694 return; 1695 } 1696 1697 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_regs_dump_channels: " 1698 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1699 tx_rings, tx_rings->rings, ndmas)); 1700 1701 for (index = 0; index < ndmas; index++) { 1702 channel = tx_rings->rings[index]->tdc; 1703 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1704 "==> nxge_txdma_regs_dump_channels: channel %d", 1705 channel)); 1706 (void) npi_txdma_dump_tdc_regs(handle, channel); 1707 } 1708 1709 /* Dump TXC registers */ 1710 (void) npi_txc_dump_fzc_regs(handle); 1711 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 1712 1713 for (index = 0; index < ndmas; index++) { 1714 channel = tx_rings->rings[index]->tdc; 1715 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1716 "==> nxge_txdma_regs_dump_channels: channel %d", 1717 channel)); 1718 (void) npi_txc_dump_tdc_fzc_regs(handle, channel); 1719 } 1720 1721 for (index = 0; index < ndmas; index++) { 1722 channel = tx_rings->rings[index]->tdc; 1723 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1724 "==> nxge_txdma_regs_dump_channels: channel %d", 1725 channel)); 1726 nxge_txdma_regs_dump(nxgep, channel); 1727 } 1728 1729 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels")); 1730 1731 } 1732 1733 void 1734 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 1735 { 1736 npi_handle_t handle; 1737 tx_ring_hdl_t hdl; 1738 tx_ring_kick_t kick; 1739 tx_cs_t cs; 1740 txc_control_t control; 1741 uint32_t bitmap = 0; 1742 uint32_t burst = 0; 1743 uint32_t bytes = 0; 1744 dma_log_page_t cfg; 1745 1746 printf("\n\tfunc # %d tdc %d ", 1747 nxgep->function_num, channel); 1748 cfg.page_num = 0; 1749 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1750 (void) npi_txdma_log_page_get(handle, channel, &cfg); 1751 printf("\n\tlog page func %d valid page 0 %d", 1752 cfg.func_num, cfg.valid); 1753 cfg.page_num = 1; 1754 (void) npi_txdma_log_page_get(handle, channel, &cfg); 1755 printf("\n\tlog page func %d valid page 1 %d", 1756 cfg.func_num, cfg.valid); 1757 1758 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 1759 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 1760 printf("\n\thead value is 0x%0llx", 1761 (long long)hdl.value); 1762 printf("\n\thead index %d", hdl.bits.ldw.head); 1763 printf("\n\tkick value is 0x%0llx", 1764 (long long)kick.value); 1765 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 1766 1767 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 1768 printf("\n\tControl status is 0x%0llx", (long long)cs.value); 1769 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 1770 1771 (void) npi_txc_control(handle, OP_GET, &control); 1772 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 1773 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 1774 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 1775 1776 printf("\n\tTXC port control 0x%0llx", 1777 (long long)control.value); 1778 printf("\n\tTXC port bitmap 0x%x", bitmap); 1779 printf("\n\tTXC max burst %d", burst); 1780 printf("\n\tTXC bytes xmt %d\n", bytes); 1781 1782 { 1783 ipp_status_t status; 1784 1785 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 1786 #if defined(__i386) 1787 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value); 1788 #else 1789 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value); 1790 #endif 1791 } 1792 } 1793
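/*
 * The static mapping functions below are layered as follows:
 * nxge_map_txdma() walks the per-channel DMA pools and calls
 * nxge_map_txdma_channel() for each channel, which sets up the
 * transmit buffer ring (nxge_map_txdma_channel_buf_ring()) and then
 * the descriptor ring and mailbox (nxge_map_txdma_channel_cfg_ring()).
 */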
1794 /* 1795 * Static functions start here. 1796 */ 1797 static nxge_status_t 1798 nxge_map_txdma(p_nxge_t nxgep) 1799 { 1800 int i, ndmas; 1801 uint16_t channel; 1802 p_tx_rings_t tx_rings; 1803 p_tx_ring_t *tx_desc_rings; 1804 p_tx_mbox_areas_t tx_mbox_areas_p; 1805 p_tx_mbox_t *tx_mbox_p; 1806 p_nxge_dma_pool_t dma_buf_poolp; 1807 p_nxge_dma_pool_t dma_cntl_poolp; 1808 p_nxge_dma_common_t *dma_buf_p; 1809 p_nxge_dma_common_t *dma_cntl_p; 1810 nxge_status_t status = NXGE_OK; 1811 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1812 p_nxge_dma_common_t t_dma_buf_p; 1813 p_nxge_dma_common_t t_dma_cntl_p; 1814 #endif 1815 1816 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 1817 1818 dma_buf_poolp = nxgep->tx_buf_pool_p; 1819 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 1820 1821 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1822 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1823 "==> nxge_map_txdma: buf not allocated")); 1824 return (NXGE_ERROR); 1825 } 1826 1827 ndmas = dma_buf_poolp->ndmas; 1828 if (!ndmas) { 1829 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1830 "<== nxge_map_txdma: no dma allocated")); 1831 return (NXGE_ERROR); 1832 } 1833 1834 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 1835 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 1836 1837 tx_rings = (p_tx_rings_t) 1838 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 1839 tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC( 1840 sizeof (p_tx_ring_t) * ndmas, KM_SLEEP); 1841 1842 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1843 "tx_rings $%p tx_desc_rings $%p", 1844 tx_rings, tx_desc_rings)); 1845 1846 tx_mbox_areas_p = (p_tx_mbox_areas_t) 1847 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 1848 tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC( 1849 sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP); 1850 1851 /* 1852 * Map descriptors from the buffer pools for each dma channel. 1853 */ 1854 for (i = 0; i < ndmas; i++) { 1855 /* 1856 * Set up and prepare buffer blocks, descriptors 1857 * and mailbox. 
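 * dma_buf_p[i] supplies the transmit data buffer chunks for this
 * channel, while dma_cntl_p[i] provides the control memory from
 * which the descriptor ring and mailbox are carved.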
1858 */ 1859 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 1860 status = nxge_map_txdma_channel(nxgep, channel, 1861 (p_nxge_dma_common_t *)&dma_buf_p[i], 1862 (p_tx_ring_t *)&tx_desc_rings[i], 1863 dma_buf_poolp->num_chunks[i], 1864 (p_nxge_dma_common_t *)&dma_cntl_p[i], 1865 (p_tx_mbox_t *)&tx_mbox_p[i]); 1866 if (status != NXGE_OK) { 1867 goto nxge_map_txdma_fail1; 1868 } 1869 tx_desc_rings[i]->index = (uint16_t)i; 1870 tx_desc_rings[i]->tdc_stats = &nxgep->statsp->tdc_stats[i]; 1871 1872 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1873 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 1874 tx_desc_rings[i]->hv_set = B_FALSE; 1875 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 1876 t_dma_cntl_p = (p_nxge_dma_common_t)dma_cntl_p[i]; 1877 1878 tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp = 1879 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 1880 tx_desc_rings[i]->hv_tx_buf_ioaddr_size = 1881 (uint64_t)t_dma_buf_p->orig_alength; 1882 1883 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1884 "==> nxge_map_txdma_channel: " 1885 "hv data buf base io $%p " 1886 "size 0x%llx (%d) " 1887 "buf base io $%p " 1888 "orig vatopa base io $%p " 1889 "orig_len 0x%llx (%d)", 1890 tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp, 1891 tx_desc_rings[i]->hv_tx_buf_ioaddr_size, 1892 tx_desc_rings[i]->hv_tx_buf_ioaddr_size, 1893 t_dma_buf_p->ioaddr_pp, 1894 t_dma_buf_p->orig_vatopa, 1895 t_dma_buf_p->orig_alength, 1896 t_dma_buf_p->orig_alength)); 1897 1898 tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp = 1899 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 1900 tx_desc_rings[i]->hv_tx_cntl_ioaddr_size = 1901 (uint64_t)t_dma_cntl_p->orig_alength; 1902 1903 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1904 "==> nxge_map_txdma_channel: " 1905 "hv cntl base io $%p " 1906 "orig ioaddr_pp ($%p) " 1907 "orig vatopa ($%p) " 1908 "size 0x%llx (%d 0x%x)", 1909 tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp, 1910 t_dma_cntl_p->orig_ioaddr_pp, 1911 t_dma_cntl_p->orig_vatopa, 1912 tx_desc_rings[i]->hv_tx_cntl_ioaddr_size, 1913 t_dma_cntl_p->orig_alength, 1914 t_dma_cntl_p->orig_alength)); 1915 } 1916 #endif 1917 } 1918 1919 tx_rings->ndmas = ndmas; 1920 tx_rings->rings = tx_desc_rings; 1921 nxgep->tx_rings = tx_rings; 1922 tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p; 1923 nxgep->tx_mbox_areas_p = tx_mbox_areas_p; 1924 1925 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1926 "tx_rings $%p rings $%p", 1927 nxgep->tx_rings, nxgep->tx_rings->rings)); 1928 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1929 "tx_rings $%p tx_desc_rings $%p", 1930 nxgep->tx_rings, tx_desc_rings)); 1931 1932 goto nxge_map_txdma_exit; 1933 1934 nxge_map_txdma_fail1: 1935 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1936 "==> nxge_map_txdma: uninit tx desc " 1937 "(status 0x%x channel %d i %d)", 1938 nxgep, status, channel, i)); 1939 i--; 1940 for (; i >= 0; i--) { 1941 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 1942 nxge_unmap_txdma_channel(nxgep, channel, 1943 tx_desc_rings[i], 1944 tx_mbox_p[i]); 1945 } 1946 1947 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 1948 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 1949 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 1950 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 1951 1952 nxge_map_txdma_exit: 1953 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1954 "==> nxge_map_txdma: " 1955 "(status 0x%x channel %d)", 1956 status, channel)); 1957 1958 return (status); 1959 } 1960 1961 static void 1962 nxge_unmap_txdma(p_nxge_t nxgep) 1963 { 1964 int i, ndmas; 1965 uint8_t channel; 1966 p_tx_rings_t tx_rings; 1967 
p_tx_ring_t *tx_desc_rings; 1968 p_tx_mbox_areas_t tx_mbox_areas_p; 1969 p_tx_mbox_t *tx_mbox_p; 1970 p_nxge_dma_pool_t dma_buf_poolp; 1971 1972 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma")); 1973 1974 dma_buf_poolp = nxgep->tx_buf_pool_p; 1975 if (!dma_buf_poolp->buf_allocated) { 1976 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1977 "==> nxge_unmap_txdma: buf not allocated")); 1978 return; 1979 } 1980 1981 ndmas = dma_buf_poolp->ndmas; 1982 if (!ndmas) { 1983 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1984 "<== nxge_unmap_txdma: no dma allocated")); 1985 return; 1986 } 1987 1988 tx_rings = nxgep->tx_rings; 1989 1990 if (tx_rings == NULL) { 1991 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1992 "<== nxge_unmap_txdma: NULL ring pointer")); 1993 return; 1994 } 1995 1996 tx_desc_rings = tx_rings->rings; 1997 if (tx_desc_rings == NULL) { 1998 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1999 "<== nxge_unmap_txdma: NULL ring pointers")); 2000 return; 2001 } 2002
2003 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma: " 2004 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2005 tx_rings, tx_desc_rings, ndmas)); 2006 2007 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2008 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2009 2010 for (i = 0; i < ndmas; i++) { 2011 channel = tx_desc_rings[i]->tdc; 2012 (void) nxge_unmap_txdma_channel(nxgep, channel, 2013 (p_tx_ring_t)tx_desc_rings[i], 2014 (p_tx_mbox_t)tx_mbox_p[i]); 2015 } 2016 2017 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 2018 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 2019 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 2020 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 2021 2022 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2023 "<== nxge_unmap_txdma")); 2024 } 2025
2026 static nxge_status_t 2027 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2028 p_nxge_dma_common_t *dma_buf_p, 2029 p_tx_ring_t *tx_desc_p, 2030 uint32_t num_chunks, 2031 p_nxge_dma_common_t *dma_cntl_p, 2032 p_tx_mbox_t *tx_mbox_p) 2033 { 2034 nxge_status_t status = NXGE_OK; 2035 2036 /* 2037 * Set up and prepare buffer blocks, descriptors 2038 * and mailbox. 2039 */ 2040 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2041 "==> nxge_map_txdma_channel (channel %d)", channel)); 2042 /* 2043 * Transmit buffer blocks 2044 */ 2045 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2046 dma_buf_p, tx_desc_p, num_chunks); 2047 if (status != NXGE_OK) { 2048 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2049 "==> nxge_map_txdma_channel (channel %d): " 2050 "map buffer failed 0x%x", channel, status)); 2051 goto nxge_map_txdma_channel_exit; 2052 } 2053
2054 /* 2055 * Transmit block ring, and mailbox. 2056 */ 2057 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2058 tx_mbox_p); 2059 2060 goto nxge_map_txdma_channel_exit; 2061 2062 nxge_map_txdma_channel_fail1: 2063 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2064 "==> nxge_map_txdma_channel: unmap buf" 2065 "(status 0x%x channel %d)", 2066 status, channel)); 2067 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2068 2069 nxge_map_txdma_channel_exit: 2070 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2071 "<== nxge_map_txdma_channel: " 2072 "(status 0x%x channel %d)", 2073 status, channel)); 2074 2075 return (status); 2076 } 2077
2078 /*ARGSUSED*/ 2079 static void 2080 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2081 p_tx_ring_t tx_ring_p, 2082 p_tx_mbox_t tx_mbox_p) 2083 { 2084 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2085 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2086 /* 2087 * unmap tx block ring, and mailbox.
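 * The configuration state is torn down first (freeing the per-channel
 * mailbox structure), then the buffer ring itself (per-packet DMA
 * handles, any pending message blocks, and the ring structure).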
2088 */ 2089 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, 2090 tx_ring_p, tx_mbox_p); 2091 2092 /* unmap buffer blocks */ 2093 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, tx_ring_p); 2094 2095 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2096 } 2097 2098 /*ARGSUSED*/ 2099 static void 2100 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2101 p_nxge_dma_common_t *dma_cntl_p, 2102 p_tx_ring_t tx_ring_p, 2103 p_tx_mbox_t *tx_mbox_p) 2104 { 2105 p_tx_mbox_t mboxp; 2106 p_nxge_dma_common_t cntl_dmap; 2107 p_nxge_dma_common_t dmap; 2108 p_tx_rng_cfig_t tx_ring_cfig_p; 2109 p_tx_ring_kick_t tx_ring_kick_p; 2110 p_tx_cs_t tx_cs_p; 2111 p_tx_dma_ent_msk_t tx_evmask_p; 2112 p_txdma_mbh_t mboxh_p; 2113 p_txdma_mbl_t mboxl_p; 2114 uint64_t tx_desc_len; 2115 2116 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2117 "==> nxge_map_txdma_channel_cfg_ring")); 2118 2119 cntl_dmap = *dma_cntl_p; 2120 2121 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2122 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2123 sizeof (tx_desc_t)); 2124 /* 2125 * Zero out transmit ring descriptors. 2126 */ 2127 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2128 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2129 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2130 tx_cs_p = &(tx_ring_p->tx_cs); 2131 tx_evmask_p = &(tx_ring_p->tx_evmask); 2132 tx_ring_cfig_p->value = 0; 2133 tx_ring_kick_p->value = 0; 2134 tx_cs_p->value = 0; 2135 tx_evmask_p->value = 0; 2136 2137 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2138 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2139 dma_channel, 2140 dmap->dma_cookie.dmac_laddress)); 2141 2142 tx_ring_cfig_p->value = 0; 2143 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2144 tx_ring_cfig_p->value = 2145 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2146 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2147 2148 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2149 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2150 dma_channel, 2151 tx_ring_cfig_p->value)); 2152 2153 tx_cs_p->bits.ldw.rst = 1; 2154 2155 /* Map in mailbox */ 2156 mboxp = (p_tx_mbox_t) 2157 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2158 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2159 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2160 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2161 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2162 mboxh_p->value = mboxl_p->value = 0; 2163 2164 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2165 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2166 dmap->dma_cookie.dmac_laddress)); 2167 2168 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2169 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2170 2171 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2172 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2173 2174 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2175 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2176 dmap->dma_cookie.dmac_laddress)); 2177 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2178 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2179 "mbox $%p", 2180 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2181 tx_ring_p->page_valid.value = 0; 2182 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2183 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2184 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2185 tx_ring_p->page_hdl.value = 0; 2186 2187 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2188 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2189 2190 
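	/*
	 * Logical pages 0 and 1 are marked valid with mask, value and
	 * relocation left at zero, so any transmit buffer address falls
	 * within a valid page.  These staged values (and the max-burst
	 * default set below) are presumably written to the channel's FZC
	 * registers when the channel is started.
	 */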
tx_ring_p->max_burst.value = 0; 2191 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2192 2193 *tx_mbox_p = mboxp; 2194 2195 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2196 "<== nxge_map_txdma_channel_cfg_ring")); 2197 } 2198 2199 /*ARGSUSED*/ 2200 static void 2201 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2202 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2203 { 2204 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2205 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2206 tx_ring_p->tdc)); 2207 2208 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2209 2210 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2211 "<== nxge_unmap_txdma_channel_cfg_ring")); 2212 } 2213 2214 static nxge_status_t 2215 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2216 p_nxge_dma_common_t *dma_buf_p, 2217 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2218 { 2219 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2220 p_nxge_dma_common_t dmap; 2221 nxge_os_dma_handle_t tx_buf_dma_handle; 2222 p_tx_ring_t tx_ring_p; 2223 p_tx_msg_t tx_msg_ring; 2224 nxge_status_t status = NXGE_OK; 2225 int ddi_status = DDI_SUCCESS; 2226 int i, j, index; 2227 uint32_t size, bsize; 2228 uint32_t nblocks, nmsgs; 2229 2230 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2231 "==> nxge_map_txdma_channel_buf_ring")); 2232 2233 dma_bufp = tmp_bufp = *dma_buf_p; 2234 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2235 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2236 "chunks bufp $%p", 2237 channel, num_chunks, dma_bufp)); 2238 2239 nmsgs = 0; 2240 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2241 nmsgs += tmp_bufp->nblocks; 2242 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2243 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2244 "bufp $%p nblocks %d nmsgs %d", 2245 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2246 } 2247 if (!nmsgs) { 2248 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2249 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2250 "no msg blocks", 2251 channel)); 2252 status = NXGE_ERROR; 2253 goto nxge_map_txdma_channel_buf_ring_exit; 2254 } 2255 2256 tx_ring_p = (p_tx_ring_t) 2257 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2258 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2259 (void *)nxgep->interrupt_cookie); 2260 2261 tx_ring_p->nxgep = nxgep; 2262 tx_ring_p->serial = nxge_serialize_create(nmsgs, 2263 nxge_serial_tx, tx_ring_p); 2264 /* 2265 * Allocate transmit message rings and handles for packets 2266 * not to be copied to premapped buffers. 
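 * Each tx_msg_t gets its own DDI DMA handle so that a packet which is
 * not bcopy'd into the pre-mapped transmit buffers can instead be
 * DMA-bound directly for transmission.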
2267 */ 2268 size = nmsgs * sizeof (tx_msg_t); 2269 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2270 for (i = 0; i < nmsgs; i++) { 2271 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2272 DDI_DMA_DONTWAIT, 0, 2273 &tx_msg_ring[i].dma_handle); 2274 if (ddi_status != DDI_SUCCESS) { 2275 status |= NXGE_DDI_FAILED; 2276 break; 2277 } 2278 } 2279 if (i < nmsgs) { 2280 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2281 "Allocate handles failed.")); 2282 goto nxge_map_txdma_channel_buf_ring_fail1; 2283 } 2284 2285 tx_ring_p->tdc = channel; 2286 tx_ring_p->tx_msg_ring = tx_msg_ring; 2287 tx_ring_p->tx_ring_size = nmsgs; 2288 tx_ring_p->num_chunks = num_chunks; 2289 if (!nxge_tx_intr_thres) { 2290 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2291 } 2292 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2293 tx_ring_p->rd_index = 0; 2294 tx_ring_p->wr_index = 0; 2295 tx_ring_p->ring_head.value = 0; 2296 tx_ring_p->ring_kick_tail.value = 0; 2297 tx_ring_p->descs_pending = 0; 2298 2299 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2300 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2301 "actual tx desc max %d nmsgs %d " 2302 "(config nxge_tx_ring_size %d)", 2303 channel, tx_ring_p->tx_ring_size, nmsgs, 2304 nxge_tx_ring_size)); 2305 2306 /* 2307 * Map in buffers from the buffer pool. 2308 */ 2309 index = 0; 2310 bsize = dma_bufp->block_size; 2311 2312 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2313 "dma_bufp $%p tx_rng_p $%p " 2314 "tx_msg_rng_p $%p bsize %d", 2315 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2316 2317 tx_buf_dma_handle = dma_bufp->dma_handle; 2318 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2319 bsize = dma_bufp->block_size; 2320 nblocks = dma_bufp->nblocks; 2321 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2322 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2323 "size %d dma_bufp $%p", 2324 i, sizeof (nxge_dma_common_t), dma_bufp)); 2325 2326 for (j = 0; j < nblocks; j++) { 2327 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2328 dmap = &tx_msg_ring[index++].buf_dma; 2329 #ifdef TX_MEM_DEBUG 2330 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2331 "==> nxge_map_txdma_channel_buf_ring: j %d" 2332 "dmap $%p", i, dmap)); 2333 #endif 2334 nxge_setup_dma_common(dmap, dma_bufp, 1, 2335 bsize); 2336 } 2337 } 2338 2339 if (i < num_chunks) { 2340 status = NXGE_ERROR; 2341 goto nxge_map_txdma_channel_buf_ring_fail1; 2342 } 2343 2344 *tx_desc_p = tx_ring_p; 2345 2346 goto nxge_map_txdma_channel_buf_ring_exit; 2347 2348 nxge_map_txdma_channel_buf_ring_fail1: 2349 if (tx_ring_p->serial) { 2350 nxge_serialize_destroy(tx_ring_p->serial); 2351 tx_ring_p->serial = NULL; 2352 } 2353 2354 index--; 2355 for (; index >= 0; index--) { 2356 if (tx_msg_ring[index].dma_handle != NULL) { 2357 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2358 } 2359 } 2360 MUTEX_DESTROY(&tx_ring_p->lock); 2361 KMEM_FREE(tx_msg_ring, size); 2362 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2363 2364 status = NXGE_ERROR; 2365 2366 nxge_map_txdma_channel_buf_ring_exit: 2367 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2368 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2369 2370 return (status); 2371 } 2372 2373 /*ARGSUSED*/ 2374 static void 2375 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2376 { 2377 p_tx_msg_t tx_msg_ring; 2378 p_tx_msg_t tx_msg_p; 2379 int i; 2380 2381 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2382 "==> nxge_unmap_txdma_channel_buf_ring")); 2383 if (tx_ring_p == NULL) { 2384 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2385 "<== nxge_unmap_txdma_channel_buf_ring: NULL 
ringp")); 2386 return; 2387 } 2388 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2389 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2390 tx_ring_p->tdc)); 2391 2392 tx_msg_ring = tx_ring_p->tx_msg_ring; 2393 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2394 tx_msg_p = &tx_msg_ring[i]; 2395 if (tx_msg_p->flags.dma_type == USE_DVMA) { 2396 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2397 "entry = %d", 2398 i)); 2399 (void) dvma_unload(tx_msg_p->dvma_handle, 2400 0, -1); 2401 tx_msg_p->dvma_handle = NULL; 2402 if (tx_ring_p->dvma_wr_index == 2403 tx_ring_p->dvma_wrap_mask) { 2404 tx_ring_p->dvma_wr_index = 0; 2405 } else { 2406 tx_ring_p->dvma_wr_index++; 2407 } 2408 tx_ring_p->dvma_pending--; 2409 } else if (tx_msg_p->flags.dma_type == 2410 USE_DMA) { 2411 if (ddi_dma_unbind_handle 2412 (tx_msg_p->dma_handle)) { 2413 cmn_err(CE_WARN, "!nxge_unmap_tx_bug_ring: " 2414 "ddi_dma_unbind_handle " 2415 "failed."); 2416 } 2417 } 2418 2419 if (tx_msg_p->tx_message != NULL) { 2420 freemsg(tx_msg_p->tx_message); 2421 tx_msg_p->tx_message = NULL; 2422 } 2423 } 2424 2425 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2426 if (tx_msg_ring[i].dma_handle != NULL) { 2427 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2428 } 2429 } 2430 2431 if (tx_ring_p->serial) { 2432 nxge_serialize_destroy(tx_ring_p->serial); 2433 tx_ring_p->serial = NULL; 2434 } 2435 2436 MUTEX_DESTROY(&tx_ring_p->lock); 2437 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2438 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2439 2440 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2441 "<== nxge_unmap_txdma_channel_buf_ring")); 2442 } 2443 2444 static nxge_status_t 2445 nxge_txdma_hw_start(p_nxge_t nxgep) 2446 { 2447 int i, ndmas; 2448 uint16_t channel; 2449 p_tx_rings_t tx_rings; 2450 p_tx_ring_t *tx_desc_rings; 2451 p_tx_mbox_areas_t tx_mbox_areas_p; 2452 p_tx_mbox_t *tx_mbox_p; 2453 nxge_status_t status = NXGE_OK; 2454 2455 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2456 2457 tx_rings = nxgep->tx_rings; 2458 if (tx_rings == NULL) { 2459 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2460 "<== nxge_txdma_hw_start: NULL ring pointer")); 2461 return (NXGE_ERROR); 2462 } 2463 tx_desc_rings = tx_rings->rings; 2464 if (tx_desc_rings == NULL) { 2465 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2466 "<== nxge_txdma_hw_start: NULL ring pointers")); 2467 return (NXGE_ERROR); 2468 } 2469 2470 ndmas = tx_rings->ndmas; 2471 if (!ndmas) { 2472 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2473 "<== nxge_txdma_hw_start: no dma channel allocated")); 2474 return (NXGE_ERROR); 2475 } 2476 2477 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2478 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2479 tx_rings, tx_desc_rings, ndmas)); 2480 2481 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2482 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2483 2484 for (i = 0; i < ndmas; i++) { 2485 channel = tx_desc_rings[i]->tdc, 2486 status = nxge_txdma_start_channel(nxgep, channel, 2487 (p_tx_ring_t)tx_desc_rings[i], 2488 (p_tx_mbox_t)tx_mbox_p[i]); 2489 if (status != NXGE_OK) { 2490 goto nxge_txdma_hw_start_fail1; 2491 } 2492 } 2493 2494 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2495 "tx_rings $%p rings $%p", 2496 nxgep->tx_rings, nxgep->tx_rings->rings)); 2497 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2498 "tx_rings $%p tx_desc_rings $%p", 2499 nxgep->tx_rings, tx_desc_rings)); 2500 2501 goto nxge_txdma_hw_start_exit; 2502 2503 nxge_txdma_hw_start_fail1: 2504 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2505 "==> nxge_txdma_hw_start: disable " 2506 "(status 0x%x channel %d 
i %d)", status, channel, i)); 2507 for (; i >= 0; i--) { 2508 channel = tx_desc_rings[i]->tdc, 2509 (void) nxge_txdma_stop_channel(nxgep, channel, 2510 (p_tx_ring_t)tx_desc_rings[i], 2511 (p_tx_mbox_t)tx_mbox_p[i]); 2512 } 2513 2514 nxge_txdma_hw_start_exit: 2515 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2516 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2517 2518 return (status); 2519 } 2520 2521 static void 2522 nxge_txdma_hw_stop(p_nxge_t nxgep) 2523 { 2524 int i, ndmas; 2525 uint16_t channel; 2526 p_tx_rings_t tx_rings; 2527 p_tx_ring_t *tx_desc_rings; 2528 p_tx_mbox_areas_t tx_mbox_areas_p; 2529 p_tx_mbox_t *tx_mbox_p; 2530 2531 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop")); 2532 2533 tx_rings = nxgep->tx_rings; 2534 if (tx_rings == NULL) { 2535 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2536 "<== nxge_txdma_hw_stop: NULL ring pointer")); 2537 return; 2538 } 2539 tx_desc_rings = tx_rings->rings; 2540 if (tx_desc_rings == NULL) { 2541 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2542 "<== nxge_txdma_hw_stop: NULL ring pointers")); 2543 return; 2544 } 2545 2546 ndmas = tx_rings->ndmas; 2547 if (!ndmas) { 2548 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2549 "<== nxge_txdma_hw_stop: no dma channel allocated")); 2550 return; 2551 } 2552 2553 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2554 "tx_rings $%p tx_desc_rings $%p", 2555 tx_rings, tx_desc_rings)); 2556 2557 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2558 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2559 2560 for (i = 0; i < ndmas; i++) { 2561 channel = tx_desc_rings[i]->tdc; 2562 (void) nxge_txdma_stop_channel(nxgep, channel, 2563 (p_tx_ring_t)tx_desc_rings[i], 2564 (p_tx_mbox_t)tx_mbox_p[i]); 2565 } 2566 2567 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2568 "tx_rings $%p tx_desc_rings $%p", 2569 tx_rings, tx_desc_rings)); 2570 2571 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_hw_stop")); 2572 } 2573 2574 static nxge_status_t 2575 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2576 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2577 2578 { 2579 nxge_status_t status = NXGE_OK; 2580 2581 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2582 "==> nxge_txdma_start_channel (channel %d)", channel)); 2583 /* 2584 * TXDMA/TXC must be in stopped state. 2585 */ 2586 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2587 2588 /* 2589 * Reset TXDMA channel 2590 */ 2591 tx_ring_p->tx_cs.value = 0; 2592 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2593 status = nxge_reset_txdma_channel(nxgep, channel, 2594 tx_ring_p->tx_cs.value); 2595 if (status != NXGE_OK) { 2596 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2597 "==> nxge_txdma_start_channel (channel %d)" 2598 " reset channel failed 0x%x", channel, status)); 2599 goto nxge_txdma_start_channel_exit; 2600 } 2601 2602 /* 2603 * Initialize the TXDMA channel specific FZC control 2604 * configurations. These FZC registers are pertaining 2605 * to each TX channel (i.e. logical pages). 2606 */ 2607 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2608 tx_ring_p, tx_mbox_p); 2609 if (status != NXGE_OK) { 2610 goto nxge_txdma_start_channel_exit; 2611 } 2612 2613 /* 2614 * Initialize the event masks. 2615 */ 2616 tx_ring_p->tx_evmask.value = 0; 2617 status = nxge_init_txdma_channel_event_mask(nxgep, 2618 channel, &tx_ring_p->tx_evmask); 2619 if (status != NXGE_OK) { 2620 goto nxge_txdma_start_channel_exit; 2621 } 2622 2623 /* 2624 * Load TXDMA descriptors, buffers, mailbox, 2625 * initialise the DMA channels and 2626 * enable each DMA channel. 
2627 */ 2628 status = nxge_enable_txdma_channel(nxgep, channel, 2629 tx_ring_p, tx_mbox_p); 2630 if (status != NXGE_OK) { 2631 goto nxge_txdma_start_channel_exit; 2632 } 2633 2634 nxge_txdma_start_channel_exit: 2635 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2636 2637 return (status); 2638 } 2639 2640 /*ARGSUSED*/ 2641 static nxge_status_t 2642 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel, 2643 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2644 { 2645 int status = NXGE_OK; 2646 2647 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2648 "==> nxge_txdma_stop_channel: channel %d", channel)); 2649 2650 /* 2651 * Stop (disable) TXDMA and TXC (if stop bit is set 2652 * and STOP_N_GO bit not set, the TXDMA reset state will 2653 * not be set if reset TXDMA. 2654 */ 2655 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2656 2657 /* 2658 * Reset TXDMA channel 2659 */ 2660 tx_ring_p->tx_cs.value = 0; 2661 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2662 status = nxge_reset_txdma_channel(nxgep, channel, 2663 tx_ring_p->tx_cs.value); 2664 if (status != NXGE_OK) { 2665 goto nxge_txdma_stop_channel_exit; 2666 } 2667 2668 #ifdef HARDWARE_REQUIRED 2669 /* Set up the interrupt event masks. */ 2670 tx_ring_p->tx_evmask.value = 0; 2671 status = nxge_init_txdma_channel_event_mask(nxgep, 2672 channel, &tx_ring_p->tx_evmask); 2673 if (status != NXGE_OK) { 2674 goto nxge_txdma_stop_channel_exit; 2675 } 2676 2677 /* Initialize the DMA control and status register */ 2678 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2679 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2680 tx_ring_p->tx_cs.value); 2681 if (status != NXGE_OK) { 2682 goto nxge_txdma_stop_channel_exit; 2683 } 2684 2685 /* Disable channel */ 2686 status = nxge_disable_txdma_channel(nxgep, channel, 2687 tx_ring_p, tx_mbox_p); 2688 if (status != NXGE_OK) { 2689 goto nxge_txdma_start_channel_exit; 2690 } 2691 2692 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2693 "==> nxge_txdma_stop_channel: event done")); 2694 2695 #endif 2696 2697 nxge_txdma_stop_channel_exit: 2698 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2699 return (status); 2700 } 2701 2702 static p_tx_ring_t 2703 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 2704 { 2705 int index, ndmas; 2706 uint16_t tdc; 2707 p_tx_rings_t tx_rings; 2708 2709 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 2710 2711 tx_rings = nxgep->tx_rings; 2712 if (tx_rings == NULL) { 2713 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2714 "<== nxge_txdma_get_ring: NULL ring pointer")); 2715 return (NULL); 2716 } 2717 2718 ndmas = tx_rings->ndmas; 2719 if (!ndmas) { 2720 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2721 "<== nxge_txdma_get_ring: no channel allocated")); 2722 return (NULL); 2723 } 2724 2725 if (tx_rings->rings == NULL) { 2726 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2727 "<== nxge_txdma_get_ring: NULL rings pointer")); 2728 return (NULL); 2729 } 2730 2731 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_ring: " 2732 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2733 tx_rings, tx_rings, ndmas)); 2734 2735 for (index = 0; index < ndmas; index++) { 2736 tdc = tx_rings->rings[index]->tdc; 2737 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2738 "==> nxge_fixup_txdma_rings: channel %d", tdc)); 2739 if (channel == tdc) { 2740 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2741 "<== nxge_txdma_get_ring: tdc %d " 2742 "ring $%p", 2743 tdc, tx_rings->rings[index])); 2744 return (p_tx_ring_t)(tx_rings->rings[index]); 2745 } 2746 } 2747 2748 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring")); 2749 return (NULL); 2750 } 2751 2752 
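/*
 * nxge_txdma_get_mbox
 *
 *	Walk the transmit ring array and return the mailbox area that
 *	belongs to the given TDC channel, or NULL if the rings or mailbox
 *	areas are not set up or the channel cannot be found.  Used by the
 *	fatal-error recovery paths below, e.g. (illustrative)
 *	tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
 */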
static p_tx_mbox_t 2753 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 2754 { 2755 int index, tdc, ndmas; 2756 p_tx_rings_t tx_rings; 2757 p_tx_mbox_areas_t tx_mbox_areas_p; 2758 p_tx_mbox_t *tx_mbox_p; 2759 2760 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 2761 2762 tx_rings = nxgep->tx_rings; 2763 if (tx_rings == NULL) { 2764 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2765 "<== nxge_txdma_get_mbox: NULL ring pointer")); 2766 return (NULL); 2767 } 2768 2769 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2770 if (tx_mbox_areas_p == NULL) { 2771 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2772 "<== nxge_txdma_get_mbox: NULL mbox pointer")); 2773 return (NULL); 2774 } 2775 2776 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2777 2778 ndmas = tx_rings->ndmas; 2779 if (!ndmas) { 2780 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2781 "<== nxge_txdma_get_mbox: no channel allocated")); 2782 return (NULL); 2783 } 2784 2785 if (tx_rings->rings == NULL) { 2786 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2787 "<== nxge_txdma_get_mbox: NULL rings pointer")); 2788 return (NULL); 2789 } 2790 2791 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_mbox: " 2792 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2793 tx_rings, tx_rings, ndmas)); 2794 2795 for (index = 0; index < ndmas; index++) { 2796 tdc = tx_rings->rings[index]->tdc; 2797 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2798 "==> nxge_txdma_get_mbox: channel %d", tdc)); 2799 if (channel == tdc) { 2800 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2801 "<== nxge_txdma_get_mbox: tdc %d " 2802 "ring $%p", 2803 tdc, tx_rings->rings[index])); 2804 return (p_tx_mbox_t)(tx_mbox_p[index]); 2805 } 2806 } 2807 2808 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox")); 2809 return (NULL); 2810 } 2811 2812 /*ARGSUSED*/ 2813 static nxge_status_t 2814 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 2815 { 2816 npi_handle_t handle; 2817 npi_status_t rs; 2818 uint8_t channel; 2819 p_tx_ring_t *tx_rings; 2820 p_tx_ring_t tx_ring_p; 2821 p_nxge_tx_ring_stats_t tdc_stats; 2822 boolean_t txchan_fatal = B_FALSE; 2823 nxge_status_t status = NXGE_OK; 2824 tdmc_inj_par_err_t par_err; 2825 uint32_t value; 2826 2827 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_tx_err_evnts")); 2828 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2829 channel = ldvp->channel; 2830 2831 tx_rings = nxgep->tx_rings->rings; 2832 tx_ring_p = tx_rings[index]; 2833 tdc_stats = tx_ring_p->tdc_stats; 2834 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 2835 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 2836 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 2837 if ((rs = npi_txdma_ring_error_get(handle, channel, 2838 &tdc_stats->errlog)) != NPI_SUCCESS) 2839 return (NXGE_ERROR | rs); 2840 } 2841 2842 if (cs.bits.ldw.mbox_err) { 2843 tdc_stats->mbox_err++; 2844 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2845 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 2846 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2847 "==> nxge_tx_err_evnts(channel %d): " 2848 "fatal error: mailbox", channel)); 2849 txchan_fatal = B_TRUE; 2850 } 2851 if (cs.bits.ldw.pkt_size_err) { 2852 tdc_stats->pkt_size_err++; 2853 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2854 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 2855 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2856 "==> nxge_tx_err_evnts(channel %d): " 2857 "fatal error: pkt_size_err", channel)); 2858 txchan_fatal = B_TRUE; 2859 } 2860 if (cs.bits.ldw.tx_ring_oflow) { 2861 tdc_stats->tx_ring_oflow++; 2862 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2863 
NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 2864 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2865 "==> nxge_tx_err_evnts(channel %d): " 2866 "fatal error: tx_ring_oflow", channel)); 2867 txchan_fatal = B_TRUE; 2868 } 2869 if (cs.bits.ldw.pref_buf_par_err) { 2870 tdc_stats->pre_buf_par_err++; 2871 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2872 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 2873 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2874 "==> nxge_tx_err_evnts(channel %d): " 2875 "fatal error: pre_buf_par_err", channel)); 2876 /* Clear error injection source for parity error */ 2877 (void) npi_txdma_inj_par_error_get(handle, &value); 2878 par_err.value = value; 2879 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 2880 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 2881 txchan_fatal = B_TRUE; 2882 } 2883 if (cs.bits.ldw.nack_pref) { 2884 tdc_stats->nack_pref++; 2885 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2886 NXGE_FM_EREPORT_TDMC_NACK_PREF); 2887 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2888 "==> nxge_tx_err_evnts(channel %d): " 2889 "fatal error: nack_pref", channel)); 2890 txchan_fatal = B_TRUE; 2891 } 2892 if (cs.bits.ldw.nack_pkt_rd) { 2893 tdc_stats->nack_pkt_rd++; 2894 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2895 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 2896 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2897 "==> nxge_tx_err_evnts(channel %d): " 2898 "fatal error: nack_pkt_rd", channel)); 2899 txchan_fatal = B_TRUE; 2900 } 2901 if (cs.bits.ldw.conf_part_err) { 2902 tdc_stats->conf_part_err++; 2903 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2904 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 2905 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2906 "==> nxge_tx_err_evnts(channel %d): " 2907 "fatal error: config_partition_err", channel)); 2908 txchan_fatal = B_TRUE; 2909 } 2910 if (cs.bits.ldw.pkt_prt_err) { 2911 tdc_stats->pkt_part_err++; 2912 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2913 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 2914 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2915 "==> nxge_tx_err_evnts(channel %d): " 2916 "fatal error: pkt_prt_err", channel)); 2917 txchan_fatal = B_TRUE; 2918 } 2919 2920 /* Clear error injection source in case this is an injected error */ 2921 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 2922 2923 if (txchan_fatal) { 2924 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2925 " nxge_tx_err_evnts: " 2926 " fatal error on channel %d cs 0x%llx\n", 2927 channel, cs.value)); 2928 status = nxge_txdma_fatal_err_recover(nxgep, channel, 2929 tx_ring_p); 2930 if (status == NXGE_OK) { 2931 FM_SERVICE_RESTORED(nxgep); 2932 } 2933 } 2934 2935 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_tx_err_evnts")); 2936 2937 return (status); 2938 } 2939 2940 static nxge_status_t 2941 nxge_txdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel, 2942 p_tx_ring_t tx_ring_p) 2943 { 2944 npi_handle_t handle; 2945 npi_status_t rs = NPI_SUCCESS; 2946 p_tx_mbox_t tx_mbox_p; 2947 nxge_status_t status = NXGE_OK; 2948 2949 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 2950 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2951 "Recovering from TxDMAChannel#%d error...", channel)); 2952 2953 /* 2954 * Stop the dma channel waits for the stop done. 2955 * If the stop done bit is not set, then create 2956 * an error. 
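 *
 * The recovery sequence below is: stop the channel and wait for the
 * stop to complete, reclaim completed descriptors, reset the channel,
 * clear the kick (tail) register, re-initialize the FZC state and
 * event masks, and finally re-enable the channel.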
2957 */ 2958 2959 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2960 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 2961 MUTEX_ENTER(&tx_ring_p->lock); 2962 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 2963 if (rs != NPI_SUCCESS) { 2964 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2965 "==> nxge_txdma_fatal_err_recover (channel %d): " 2966 "stop failed ", channel)); 2967 goto fail; 2968 } 2969 2970 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 2971 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2972 2973 /* 2974 * Reset TXDMA channel 2975 */ 2976 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 2977 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 2978 NPI_SUCCESS) { 2979 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2980 "==> nxge_txdma_fatal_err_recover (channel %d)" 2981 " reset channel failed 0x%x", channel, rs)); 2982 goto fail; 2983 } 2984 2985 /* 2986 * Reset the tail (kick) register to 0. 2987 * (Hardware will not reset it. Tx overflow fatal 2988 * error if tail is not set to 0 after reset! 2989 */ 2990 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 2991 2992 /* Restart TXDMA channel */ 2993 2994 /* 2995 * Initialize the TXDMA channel specific FZC control 2996 * configurations. These FZC registers are pertaining 2997 * to each TX channel (i.e. logical pages). 2998 */ 2999 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3000 3001 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3002 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3003 tx_ring_p, tx_mbox_p); 3004 if (status != NXGE_OK) 3005 goto fail; 3006 3007 /* 3008 * Initialize the event masks. 3009 */ 3010 tx_ring_p->tx_evmask.value = 0; 3011 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3012 &tx_ring_p->tx_evmask); 3013 if (status != NXGE_OK) 3014 goto fail; 3015 3016 tx_ring_p->wr_index_wrap = B_FALSE; 3017 tx_ring_p->wr_index = 0; 3018 tx_ring_p->rd_index = 0; 3019 3020 /* 3021 * Load TXDMA descriptors, buffers, mailbox, 3022 * initialise the DMA channels and 3023 * enable each DMA channel. 3024 */ 3025 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3026 status = nxge_enable_txdma_channel(nxgep, channel, 3027 tx_ring_p, tx_mbox_p); 3028 MUTEX_EXIT(&tx_ring_p->lock); 3029 if (status != NXGE_OK) 3030 goto fail; 3031 3032 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3033 "Recovery Successful, TxDMAChannel#%d Restored", 3034 channel)); 3035 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3036 3037 return (NXGE_OK); 3038 3039 fail: 3040 MUTEX_EXIT(&tx_ring_p->lock); 3041 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3042 "nxge_txdma_fatal_err_recover (channel %d): " 3043 "failed to recover this txdma channel", channel)); 3044 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3045 3046 return (status); 3047 } 3048 3049 nxge_status_t 3050 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3051 { 3052 npi_handle_t handle; 3053 npi_status_t rs = NPI_SUCCESS; 3054 nxge_status_t status = NXGE_OK; 3055 p_tx_ring_t *tx_desc_rings; 3056 p_tx_rings_t tx_rings; 3057 p_tx_ring_t tx_ring_p; 3058 p_tx_mbox_t tx_mbox_p; 3059 int i, ndmas; 3060 uint16_t channel; 3061 3062 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3063 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3064 "Recovering from TxPort error...")); 3065 3066 /* 3067 * Stop the dma channel waits for the stop done. 3068 * If the stop done bit is not set, then create 3069 * an error. 
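 *
 * Port recovery applies the same stop/reclaim/reset/re-enable sequence
 * to every TDC.  All ring locks are taken up front and are released
 * only after every channel has been restarted (or on the failure path).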
3070 */ 3071 3072 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3073 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort stop all DMA channels...")); 3074 3075 tx_rings = nxgep->tx_rings; 3076 tx_desc_rings = tx_rings->rings; 3077 ndmas = tx_rings->ndmas; 3078 3079 for (i = 0; i < ndmas; i++) { 3080 if (tx_desc_rings[i] == NULL) { 3081 continue; 3082 } 3083 tx_ring_p = tx_rings->rings[i]; 3084 MUTEX_ENTER(&tx_ring_p->lock); 3085 } 3086
3087 for (i = 0; i < ndmas; i++) { 3088 if (tx_desc_rings[i] == NULL) { 3089 continue; 3090 } 3091 channel = tx_desc_rings[i]->tdc; 3092 tx_ring_p = tx_rings->rings[i]; 3093 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3094 if (rs != NPI_SUCCESS) { 3095 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3096 "==> nxge_tx_port_fatal_err_recover (channel %d): " 3097 "stop failed ", channel)); 3098 goto fail; 3099 } 3100 } 3101
3102 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reclaim all DMA channels...")); 3103 3104 for (i = 0; i < ndmas; i++) { 3105 if (tx_desc_rings[i] == NULL) { 3106 continue; 3107 } 3108 tx_ring_p = tx_rings->rings[i]; 3109 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3110 } 3111
3112 /* 3113 * Reset TXDMA channel 3114 */ 3115 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reset all DMA channels...")); 3116 3117 for (i = 0; i < ndmas; i++) { 3118 if (tx_desc_rings[i] == NULL) { 3119 continue; 3120 } 3121 channel = tx_desc_rings[i]->tdc; 3122 tx_ring_p = tx_rings->rings[i]; 3123 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, 3124 channel)) != NPI_SUCCESS) { 3125 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3126 "==> nxge_tx_port_fatal_err_recover (channel %d)" 3127 " reset channel failed 0x%x", channel, rs)); 3128 goto fail; 3129 } 3130
3131 /* 3132 * Reset the tail (kick) register to 0. 3133 * (Hardware will not reset it; a fatal Tx overflow 3134 * occurs if the tail is not set to 0 after reset.) 3135 */ 3136 3137 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3138 3139 } 3140
3141 /* 3142 * Initialize the TXDMA channel specific FZC control 3143 * configurations. These FZC registers are pertaining 3144 * to each TX channel (i.e. logical pages). 3145 */ 3146 3147 /* Restart TXDMA channels */ 3148 3149 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-start all DMA channels...")); 3150
3151 for (i = 0; i < ndmas; i++) { 3152 if (tx_desc_rings[i] == NULL) { 3153 continue; 3154 } 3155 channel = tx_desc_rings[i]->tdc; 3156 tx_ring_p = tx_rings->rings[i]; 3157 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3158 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3159 tx_ring_p, tx_mbox_p); 3160 if (status != NXGE_OK) 3161 goto fail; 3162 /* 3163 * Initialize the event masks. 3164 */ 3165 tx_ring_p->tx_evmask.value = 0; 3166 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3167 &tx_ring_p->tx_evmask); 3168 if (status != NXGE_OK) 3169 goto fail; 3170 3171 tx_ring_p->wr_index_wrap = B_FALSE; 3172 tx_ring_p->wr_index = 0; 3173 tx_ring_p->rd_index = 0; 3174 3175 } 3176
3177 /* 3178 * Load TXDMA descriptors, buffers, mailbox, 3179 * initialise the DMA channels and 3180 * enable each DMA channel.
3181 */ 3182 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-enable all DMA channels...")); 3183 3184 for (i = 0; i < ndmas; i++) { 3185 if (tx_desc_rings[i] == NULL) { 3186 continue; 3187 } 3188 channel = tx_desc_rings[i]->tdc; 3189 tx_ring_p = tx_rings->rings[i]; 3190 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3191 status = nxge_enable_txdma_channel(nxgep, channel, 3192 tx_ring_p, tx_mbox_p); 3193 if (status != NXGE_OK) 3194 goto fail; 3195 } 3196 3197 for (i = 0; i < ndmas; i++) { 3198 if (tx_desc_rings[i] == NULL) { 3199 continue; 3200 } 3201 tx_ring_p = tx_rings->rings[i]; 3202 MUTEX_EXIT(&tx_ring_p->lock); 3203 } 3204 3205 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3206 "Recovery Successful, TxPort Restored")); 3207 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3208 3209 return (NXGE_OK); 3210 3211 fail: 3212 for (i = 0; i < ndmas; i++) { 3213 if (tx_desc_rings[i] == NULL) { 3214 continue; 3215 } 3216 tx_ring_p = tx_rings->rings[i]; 3217 MUTEX_EXIT(&tx_ring_p->lock); 3218 } 3219 3220 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3221 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3222 "nxge_txdma_fatal_err_recover (channel %d): " 3223 "failed to recover this txdma channel")); 3224 3225 return (status); 3226 } 3227 3228 void 3229 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3230 { 3231 tdmc_intr_dbg_t tdi; 3232 tdmc_inj_par_err_t par_err; 3233 uint32_t value; 3234 npi_handle_t handle; 3235 3236 switch (err_id) { 3237 3238 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3239 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3240 /* Clear error injection source for parity error */ 3241 (void) npi_txdma_inj_par_error_get(handle, &value); 3242 par_err.value = value; 3243 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3244 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3245 3246 par_err.bits.ldw.inject_parity_error = (1 << chan); 3247 (void) npi_txdma_inj_par_error_get(handle, &value); 3248 par_err.value = value; 3249 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3250 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3251 (unsigned long long)par_err.value); 3252 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3253 break; 3254 3255 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3256 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3257 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3258 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3259 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3260 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3261 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3262 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3263 chan, &tdi.value); 3264 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3265 tdi.bits.ldw.pref_buf_par_err = 1; 3266 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3267 tdi.bits.ldw.mbox_err = 1; 3268 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3269 tdi.bits.ldw.nack_pref = 1; 3270 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3271 tdi.bits.ldw.nack_pkt_rd = 1; 3272 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3273 tdi.bits.ldw.pkt_size_err = 1; 3274 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3275 tdi.bits.ldw.tx_ring_oflow = 1; 3276 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3277 tdi.bits.ldw.conf_part_err = 1; 3278 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3279 tdi.bits.ldw.pkt_part_err = 1; 3280 #if defined(__i386) 3281 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3282 tdi.value); 3283 #else 3284 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 
3285 tdi.value); 3286 #endif 3287 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3288 chan, tdi.value); 3289 3290 break; 3291 } 3292 } 3293
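
/*
 * Usage sketch (hypothetical trigger, for illustration only): a debug or
 * test path may inject a fatal error on channel 0 with
 *
 *	nxge_txdma_inject_err(nxgep, NXGE_FM_EREPORT_TDMC_MBOX_ERR, 0);
 *
 * which sets the corresponding bit in TDMC_INTR_DBG_REG.  The resulting
 * error interrupt hands the latched control/status bits to
 * nxge_tx_err_evnts(); for fatal events such as a mailbox error that
 * routine calls nxge_txdma_fatal_err_recover(), which stops, reclaims,
 * resets and re-enables the channel as implemented above.
 */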