1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/nxge/nxge_impl.h> 29 #include <sys/nxge/nxge_txdma.h> 30 #include <sys/llc1.h> 31 32 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 33 uint32_t nxge_tx_minfree = 32; 34 uint32_t nxge_tx_intr_thres = 0; 35 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 36 uint32_t nxge_tx_tiny_pack = 1; 37 uint32_t nxge_tx_use_bcopy = 1; 38 39 extern uint32_t nxge_tx_ring_size; 40 extern uint32_t nxge_bcopy_thresh; 41 extern uint32_t nxge_dvma_thresh; 42 extern uint32_t nxge_dma_stream_thresh; 43 extern dma_method_t nxge_force_dma; 44 45 /* Device register access attributes for PIO. */ 46 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 47 /* Device descriptor access attributes for DMA. */ 48 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 49 /* Device buffer access attributes for DMA. 
*/ 50 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 51 extern ddi_dma_attr_t nxge_desc_dma_attr; 52 extern ddi_dma_attr_t nxge_tx_dma_attr; 53 54 extern int nxge_serial_tx(mblk_t *mp, void *arg); 55 56 static nxge_status_t nxge_map_txdma(p_nxge_t); 57 static void nxge_unmap_txdma(p_nxge_t); 58 59 static nxge_status_t nxge_txdma_hw_start(p_nxge_t); 60 static void nxge_txdma_hw_stop(p_nxge_t); 61 62 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 63 p_nxge_dma_common_t *, p_tx_ring_t *, 64 uint32_t, p_nxge_dma_common_t *, 65 p_tx_mbox_t *); 66 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t, 67 p_tx_ring_t, p_tx_mbox_t); 68 69 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 70 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 71 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 72 73 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 74 p_nxge_dma_common_t *, p_tx_ring_t, 75 p_tx_mbox_t *); 76 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 77 p_tx_ring_t, p_tx_mbox_t); 78 79 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 80 p_tx_ring_t, p_tx_mbox_t); 81 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t, 82 p_tx_ring_t, p_tx_mbox_t); 83 84 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 85 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 86 p_nxge_ldv_t, tx_cs_t); 87 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 88 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 89 uint16_t, p_tx_ring_t); 90 91 nxge_status_t 92 nxge_init_txdma_channels(p_nxge_t nxgep) 93 { 94 nxge_status_t status = NXGE_OK; 95 96 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_init_txdma_channels")); 97 98 status = nxge_map_txdma(nxgep); 99 if (status != NXGE_OK) { 100 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 101 "<== nxge_init_txdma_channels: status 0x%x", status)); 102 return (status); 103 } 104 105 status = nxge_txdma_hw_start(nxgep); 106 if (status != NXGE_OK) { 107 nxge_unmap_txdma(nxgep); 108 return (status); 109 } 110 111 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 112 "<== nxge_init_txdma_channels: status 0x%x", status)); 113 114 return (NXGE_OK); 115 } 116 117 void 118 nxge_uninit_txdma_channels(p_nxge_t nxgep) 119 { 120 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channels")); 121 122 nxge_txdma_hw_stop(nxgep); 123 nxge_unmap_txdma(nxgep); 124 125 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 126 "<== nxge_uninit_txdma_channels")); 127 } 128 129 void 130 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 131 uint32_t entries, uint32_t size) 132 { 133 size_t tsize; 134 *dest_p = *src_p; 135 tsize = size * entries; 136 dest_p->alength = tsize; 137 dest_p->nblocks = entries; 138 dest_p->block_size = size; 139 dest_p->offset += tsize; 140 141 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 142 src_p->alength -= tsize; 143 src_p->dma_cookie.dmac_laddress += tsize; 144 src_p->dma_cookie.dmac_size -= tsize; 145 } 146 147 nxge_status_t 148 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 149 { 150 npi_status_t rs = NPI_SUCCESS; 151 nxge_status_t status = NXGE_OK; 152 npi_handle_t handle; 153 154 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 155 156 handle = NXGE_DEV_NPI_HANDLE(nxgep); 157 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 158 rs = npi_txdma_channel_reset(handle, channel); 159 } else { 160 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 161 channel); 162 } 163 164 if (rs !=
NPI_SUCCESS) { 165 status = NXGE_ERROR | rs; 166 } 167 168 /* 169 * Reset the tail (kick) register to 0. 170 * (Hardware will not reset it. Tx overflow fatal 171 * error if tail is not set to 0 after reset!) 172 */ 173 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 174 175 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 176 return (status); 177 } 178 179 nxge_status_t 180 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 181 p_tx_dma_ent_msk_t mask_p) 182 { 183 npi_handle_t handle; 184 npi_status_t rs = NPI_SUCCESS; 185 nxge_status_t status = NXGE_OK; 186 187 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 188 "<== nxge_init_txdma_channel_event_mask")); 189 190 handle = NXGE_DEV_NPI_HANDLE(nxgep); 191 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 192 if (rs != NPI_SUCCESS) { 193 status = NXGE_ERROR | rs; 194 } 195 196 return (status); 197 } 198 199 nxge_status_t 200 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 201 uint64_t reg_data) 202 { 203 npi_handle_t handle; 204 npi_status_t rs = NPI_SUCCESS; 205 nxge_status_t status = NXGE_OK; 206 207 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 208 "<== nxge_init_txdma_channel_cntl_stat")); 209 210 handle = NXGE_DEV_NPI_HANDLE(nxgep); 211 rs = npi_txdma_control_status(handle, OP_SET, channel, 212 (p_tx_cs_t)&reg_data); 213 214 if (rs != NPI_SUCCESS) { 215 status = NXGE_ERROR | rs; 216 } 217 218 return (status); 219 } 220 221 nxge_status_t 222 nxge_enable_txdma_channel(p_nxge_t nxgep, 223 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 224 { 225 npi_handle_t handle; 226 npi_status_t rs = NPI_SUCCESS; 227 nxge_status_t status = NXGE_OK; 228 229 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel")); 230 231 handle = NXGE_DEV_NPI_HANDLE(nxgep); 232 /* 233 * Use configuration data composed at init time. 234 * Write to hardware the transmit ring configurations. 235 */ 236 rs = npi_txdma_ring_config(handle, OP_SET, channel, 237 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 238 239 if (rs != NPI_SUCCESS) { 240 return (NXGE_ERROR | rs); 241 } 242 243 /* Write to hardware the mailbox */ 244 rs = npi_txdma_mbox_config(handle, OP_SET, channel, 245 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 246 247 if (rs != NPI_SUCCESS) { 248 return (NXGE_ERROR | rs); 249 } 250 251 /* Start the DMA engine. */ 252 rs = npi_txdma_channel_init_enable(handle, channel); 253 254 if (rs != NPI_SUCCESS) { 255 return (NXGE_ERROR | rs); 256 } 257 258 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 259 260 return (status); 261 } 262 263 void 264 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 265 boolean_t l4_cksum, int pkt_len, uint8_t npads, 266 p_tx_pkt_hdr_all_t pkthdrp) 267 { 268 p_tx_pkt_header_t hdrp; 269 p_mblk_t nmp; 270 uint64_t tmp; 271 size_t mblk_len; 272 size_t iph_len; 273 size_t hdrs_size; 274 uint8_t hdrs_buf[sizeof (struct ether_header) + 275 64 + sizeof (uint32_t)]; 276 uint8_t *ip_buf; 277 uint16_t eth_type; 278 uint8_t ipproto; 279 boolean_t is_vlan = B_FALSE; 280 size_t eth_hdr_size; 281 282 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 283 284 /* 285 * Caller should zero out the headers first.
286 */ 287 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 288 289 if (fill_len) { 290 NXGE_DEBUG_MSG((NULL, TX_CTL, 291 "==> nxge_fill_tx_hdr: pkt_len %d " 292 "npads %d", pkt_len, npads)); 293 tmp = (uint64_t)pkt_len; 294 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 295 goto fill_tx_header_done; 296 } 297 298 tmp = (uint64_t)npads; 299 hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT); 300 301 /* 302 * mp is the original data packet (does not include the 303 * Neptune transmit header). 304 */ 305 nmp = mp; 306 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 307 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 308 "mp $%p b_rptr $%p len %d", 309 mp, nmp->b_rptr, mblk_len)); 310 ip_buf = NULL; 311 bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header)); 312 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 313 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 314 "ether type 0x%x", eth_type, hdrp->value)); 315 316 if (eth_type < ETHERMTU) { 317 tmp = 1ull; 318 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 319 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 320 "value 0x%llx", hdrp->value)); 321 if (*(hdrs_buf + sizeof (struct ether_header)) 322 == LLC_SNAP_SAP) { 323 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 324 sizeof (struct ether_header) + 6))); 325 NXGE_DEBUG_MSG((NULL, TX_CTL, 326 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 327 eth_type)); 328 } else { 329 goto fill_tx_header_done; 330 } 331 } else if (eth_type == VLAN_ETHERTYPE) { 332 tmp = 1ull; 333 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 334 335 eth_type = ntohs(((struct ether_vlan_header *) 336 hdrs_buf)->ether_type); 337 is_vlan = B_TRUE; 338 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 339 "value 0x%llx", hdrp->value)); 340 } 341 342 if (!is_vlan) { 343 eth_hdr_size = sizeof (struct ether_header); 344 } else { 345 eth_hdr_size = sizeof (struct ether_vlan_header); 346 } 347 348 switch (eth_type) { 349 case ETHERTYPE_IP: 350 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 351 ip_buf = nmp->b_rptr + eth_hdr_size; 352 mblk_len -= eth_hdr_size; 353 iph_len = ((*ip_buf) & 0x0f); 354 if (mblk_len > (iph_len + sizeof (uint32_t))) { 355 ip_buf = nmp->b_rptr; 356 ip_buf += eth_hdr_size; 357 } else { 358 ip_buf = NULL; 359 } 360 361 } 362 if (ip_buf == NULL) { 363 hdrs_size = 0; 364 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 365 while ((nmp) && (hdrs_size < 366 sizeof (hdrs_buf))) { 367 mblk_len = (size_t)nmp->b_wptr - 368 (size_t)nmp->b_rptr; 369 if (mblk_len >= 370 (sizeof (hdrs_buf) - hdrs_size)) 371 mblk_len = sizeof (hdrs_buf) - 372 hdrs_size; 373 bcopy(nmp->b_rptr, 374 &hdrs_buf[hdrs_size], mblk_len); 375 hdrs_size += mblk_len; 376 nmp = nmp->b_cont; 377 } 378 ip_buf = hdrs_buf; 379 ip_buf += eth_hdr_size; 380 iph_len = ((*ip_buf) & 0x0f); 381 } 382 383 ipproto = ip_buf[9]; 384 385 tmp = (uint64_t)iph_len; 386 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 387 tmp = (uint64_t)(eth_hdr_size >> 1); 388 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 389 390 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 391 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 392 "tmp 0x%x", 393 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 394 ipproto, tmp)); 395 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 396 "value 0x%llx", hdrp->value)); 397 398 break; 399 400 case ETHERTYPE_IPV6: 401 hdrs_size = 0; 402 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 403 while ((nmp) && (hdrs_size < 404 sizeof 
(hdrs_buf))) { 405 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 406 if (mblk_len >= 407 (sizeof (hdrs_buf) - hdrs_size)) 408 mblk_len = sizeof (hdrs_buf) - 409 hdrs_size; 410 bcopy(nmp->b_rptr, 411 &hdrs_buf[hdrs_size], mblk_len); 412 hdrs_size += mblk_len; 413 nmp = nmp->b_cont; 414 } 415 ip_buf = hdrs_buf; 416 ip_buf += eth_hdr_size; 417 418 tmp = 1ull; 419 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 420 421 tmp = (eth_hdr_size >> 1); 422 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 423 424 /* byte 6 is the next header protocol */ 425 ipproto = ip_buf[6]; 426 427 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 428 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 429 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 430 ipproto)); 431 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 432 "value 0x%llx", hdrp->value)); 433 434 break; 435 436 default: 437 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 438 goto fill_tx_header_done; 439 } 440 441 switch (ipproto) { 442 case IPPROTO_TCP: 443 NXGE_DEBUG_MSG((NULL, TX_CTL, 444 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 445 if (l4_cksum) { 446 tmp = 1ull; 447 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 448 NXGE_DEBUG_MSG((NULL, TX_CTL, 449 "==> nxge_tx_pkt_hdr_init: TCP CKSUM" 450 "value 0x%llx", hdrp->value)); 451 } 452 453 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 454 "value 0x%llx", hdrp->value)); 455 break; 456 457 case IPPROTO_UDP: 458 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 459 if (l4_cksum) { 460 tmp = 0x2ull; 461 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 462 } 463 NXGE_DEBUG_MSG((NULL, TX_CTL, 464 "==> nxge_tx_pkt_hdr_init: UDP" 465 "value 0x%llx", hdrp->value)); 466 break; 467 468 default: 469 goto fill_tx_header_done; 470 } 471 472 fill_tx_header_done: 473 NXGE_DEBUG_MSG((NULL, TX_CTL, 474 "==> nxge_fill_tx_hdr: pkt_len %d " 475 "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 476 477 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 478 } 479 480 /*ARGSUSED*/ 481 p_mblk_t 482 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 483 { 484 p_mblk_t newmp = NULL; 485 486 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 487 NXGE_DEBUG_MSG((NULL, TX_CTL, 488 "<== nxge_tx_pkt_header_reserve: allocb failed")); 489 return (NULL); 490 } 491 492 NXGE_DEBUG_MSG((NULL, TX_CTL, 493 "==> nxge_tx_pkt_header_reserve: get new mp")); 494 DB_TYPE(newmp) = M_DATA; 495 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 496 linkb(newmp, mp); 497 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 498 499 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 500 "b_rptr $%p b_wptr $%p", 501 newmp->b_rptr, newmp->b_wptr)); 502 503 NXGE_DEBUG_MSG((NULL, TX_CTL, 504 "<== nxge_tx_pkt_header_reserve: use new mp")); 505 506 return (newmp); 507 } 508 509 int 510 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 511 { 512 uint_t nmblks; 513 ssize_t len; 514 uint_t pkt_len; 515 p_mblk_t nmp, bmp, tmp; 516 uint8_t *b_wptr; 517 518 NXGE_DEBUG_MSG((NULL, TX_CTL, 519 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 520 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 521 522 nmp = mp; 523 bmp = mp; 524 nmblks = 0; 525 pkt_len = 0; 526 *tot_xfer_len_p = 0; 527 528 while (nmp) { 529 len = MBLKL(nmp); 530 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 531 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 532 len, pkt_len, nmblks, 533 *tot_xfer_len_p)); 534 535 if (len <= 0) { 536 bmp = 
nmp; 537 nmp = nmp->b_cont; 538 NXGE_DEBUG_MSG((NULL, TX_CTL, 539 "==> nxge_tx_pkt_nmblocks: " 540 "len (0) pkt_len %d nmblks %d", 541 pkt_len, nmblks)); 542 continue; 543 } 544 545 *tot_xfer_len_p += len; 546 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 547 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 548 len, pkt_len, nmblks, 549 *tot_xfer_len_p)); 550 551 if (len < nxge_bcopy_thresh) { 552 NXGE_DEBUG_MSG((NULL, TX_CTL, 553 "==> nxge_tx_pkt_nmblocks: " 554 "len %d (< thresh) pkt_len %d nmblks %d", 555 len, pkt_len, nmblks)); 556 if (pkt_len == 0) 557 nmblks++; 558 pkt_len += len; 559 if (pkt_len >= nxge_bcopy_thresh) { 560 pkt_len = 0; 561 len = 0; 562 nmp = bmp; 563 } 564 } else { 565 NXGE_DEBUG_MSG((NULL, TX_CTL, 566 "==> nxge_tx_pkt_nmblocks: " 567 "len %d (> thresh) pkt_len %d nmblks %d", 568 len, pkt_len, nmblks)); 569 pkt_len = 0; 570 nmblks++; 571 /* 572 * Hardware limits the transfer length to 4K. 573 * If len is more than 4K, we need to break 574 * it up to at most 2 more blocks. 575 */ 576 if (len > TX_MAX_TRANSFER_LENGTH) { 577 uint32_t nsegs; 578 579 NXGE_DEBUG_MSG((NULL, TX_CTL, 580 "==> nxge_tx_pkt_nmblocks: " 581 "len %d pkt_len %d nmblks %d nsegs %d", 582 len, pkt_len, nmblks, nsegs)); 583 nsegs = 1; 584 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 585 ++nsegs; 586 } 587 do { 588 b_wptr = nmp->b_rptr + 589 TX_MAX_TRANSFER_LENGTH; 590 nmp->b_wptr = b_wptr; 591 if ((tmp = dupb(nmp)) == NULL) { 592 return (0); 593 } 594 tmp->b_rptr = b_wptr; 595 tmp->b_wptr = nmp->b_wptr; 596 tmp->b_cont = nmp->b_cont; 597 nmp->b_cont = tmp; 598 nmblks++; 599 if (--nsegs) { 600 nmp = tmp; 601 } 602 } while (nsegs); 603 nmp = tmp; 604 } 605 } 606 607 /* 608 * Hardware limits the transmit gather pointers to 15. 609 */ 610 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 611 TX_MAX_GATHER_POINTERS) { 612 NXGE_DEBUG_MSG((NULL, TX_CTL, 613 "==> nxge_tx_pkt_nmblocks: pull msg - " 614 "len %d pkt_len %d nmblks %d", 615 len, pkt_len, nmblks)); 616 /* Pull all message blocks from b_cont */ 617 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 618 return (0); 619 } 620 freemsg(nmp->b_cont); 621 nmp->b_cont = tmp; 622 pkt_len = 0; 623 } 624 bmp = nmp; 625 nmp = nmp->b_cont; 626 } 627 628 NXGE_DEBUG_MSG((NULL, TX_CTL, 629 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 630 "nmblks %d len %d tot_xfer_len %d", 631 mp->b_rptr, mp->b_wptr, nmblks, 632 MBLKL(mp), *tot_xfer_len_p)); 633 634 return (nmblks); 635 } 636 637 boolean_t 638 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 639 { 640 boolean_t status = B_TRUE; 641 p_nxge_dma_common_t tx_desc_dma_p; 642 nxge_dma_common_t desc_area; 643 p_tx_desc_t tx_desc_ring_vp; 644 p_tx_desc_t tx_desc_p; 645 p_tx_desc_t tx_desc_pp; 646 tx_desc_t r_tx_desc; 647 p_tx_msg_t tx_msg_ring; 648 p_tx_msg_t tx_msg_p; 649 npi_handle_t handle; 650 tx_ring_hdl_t tx_head; 651 uint32_t pkt_len; 652 uint_t tx_rd_index; 653 uint16_t head_index, tail_index; 654 uint8_t tdc; 655 boolean_t head_wrap, tail_wrap; 656 p_nxge_tx_ring_stats_t tdc_stats; 657 int rc; 658 659 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 660 661 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 662 (nmblks != 0)); 663 NXGE_DEBUG_MSG((nxgep, TX_CTL, 664 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 665 tx_ring_p->descs_pending, nxge_reclaim_pending, 666 nmblks)); 667 if (!status) { 668 tx_desc_dma_p = &tx_ring_p->tdc_desc; 669 desc_area = tx_ring_p->tdc_desc; 670 handle = NXGE_DEV_NPI_HANDLE(nxgep); 671 tx_desc_ring_vp = 
tx_desc_dma_p->kaddrp; 672 tx_desc_ring_vp = 673 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 674 tx_rd_index = tx_ring_p->rd_index; 675 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 676 tx_msg_ring = tx_ring_p->tx_msg_ring; 677 tx_msg_p = &tx_msg_ring[tx_rd_index]; 678 tdc = tx_ring_p->tdc; 679 tdc_stats = tx_ring_p->tdc_stats; 680 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 681 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 682 } 683 684 tail_index = tx_ring_p->wr_index; 685 tail_wrap = tx_ring_p->wr_index_wrap; 686 687 NXGE_DEBUG_MSG((nxgep, TX_CTL, 688 "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d " 689 "tail_index %d tail_wrap %d " 690 "tx_desc_p $%p ($%p) ", 691 tdc, tx_rd_index, tail_index, tail_wrap, 692 tx_desc_p, (*(uint64_t *)tx_desc_p))); 693 /* 694 * Read the hardware maintained transmit head 695 * and wrap around bit. 696 */ 697 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 698 head_index = tx_head.bits.ldw.head; 699 head_wrap = tx_head.bits.ldw.wrap; 700 NXGE_DEBUG_MSG((nxgep, TX_CTL, 701 "==> nxge_txdma_reclaim: " 702 "tx_rd_index %d tail %d tail_wrap %d " 703 "head %d wrap %d", 704 tx_rd_index, tail_index, tail_wrap, 705 head_index, head_wrap)); 706 707 if (head_index == tail_index) { 708 if (TXDMA_RING_EMPTY(head_index, head_wrap, 709 tail_index, tail_wrap) && 710 (head_index == tx_rd_index)) { 711 NXGE_DEBUG_MSG((nxgep, TX_CTL, 712 "==> nxge_txdma_reclaim: EMPTY")); 713 return (B_TRUE); 714 } 715 716 NXGE_DEBUG_MSG((nxgep, TX_CTL, 717 "==> nxge_txdma_reclaim: Checking " 718 "if ring full")); 719 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 720 tail_wrap)) { 721 NXGE_DEBUG_MSG((nxgep, TX_CTL, 722 "==> nxge_txdma_reclaim: full")); 723 return (B_FALSE); 724 } 725 } 726 727 NXGE_DEBUG_MSG((nxgep, TX_CTL, 728 "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 729 730 tx_desc_pp = &r_tx_desc; 731 while ((tx_rd_index != head_index) && 732 (tx_ring_p->descs_pending != 0)) { 733 734 NXGE_DEBUG_MSG((nxgep, TX_CTL, 735 "==> nxge_txdma_reclaim: Checking if pending")); 736 737 NXGE_DEBUG_MSG((nxgep, TX_CTL, 738 "==> nxge_txdma_reclaim: " 739 "descs_pending %d ", 740 tx_ring_p->descs_pending)); 741 742 NXGE_DEBUG_MSG((nxgep, TX_CTL, 743 "==> nxge_txdma_reclaim: " 744 "(tx_rd_index %d head_index %d " 745 "(tx_desc_p $%p)", 746 tx_rd_index, head_index, 747 tx_desc_p)); 748 749 tx_desc_pp->value = tx_desc_p->value; 750 NXGE_DEBUG_MSG((nxgep, TX_CTL, 751 "==> nxge_txdma_reclaim: " 752 "(tx_rd_index %d head_index %d " 753 "tx_desc_p $%p (desc value 0x%llx) ", 754 tx_rd_index, head_index, 755 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 756 757 NXGE_DEBUG_MSG((nxgep, TX_CTL, 758 "==> nxge_txdma_reclaim: dump desc:")); 759 760 pkt_len = tx_desc_pp->bits.hdw.tr_len; 761 tdc_stats->obytes += pkt_len; 762 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 763 NXGE_DEBUG_MSG((nxgep, TX_CTL, 764 "==> nxge_txdma_reclaim: pkt_len %d " 765 "tdc channel %d opackets %d", 766 pkt_len, 767 tdc, 768 tdc_stats->opackets)); 769 770 if (tx_msg_p->flags.dma_type == USE_DVMA) { 771 NXGE_DEBUG_MSG((nxgep, TX_CTL, 772 "tx_desc_p = $%p " 773 "tx_desc_pp = $%p " 774 "index = %d", 775 tx_desc_p, 776 tx_desc_pp, 777 tx_ring_p->rd_index)); 778 (void) dvma_unload(tx_msg_p->dvma_handle, 779 0, -1); 780 tx_msg_p->dvma_handle = NULL; 781 if (tx_ring_p->dvma_wr_index == 782 tx_ring_p->dvma_wrap_mask) { 783 tx_ring_p->dvma_wr_index = 0; 784 } else { 785 tx_ring_p->dvma_wr_index++; 786 } 787 tx_ring_p->dvma_pending--; 788 } else if (tx_msg_p->flags.dma_type == 789 USE_DMA) { 790 
NXGE_DEBUG_MSG((nxgep, TX_CTL, 791 "==> nxge_txdma_reclaim: " 792 "USE DMA")); 793 if (rc = ddi_dma_unbind_handle 794 (tx_msg_p->dma_handle)) { 795 cmn_err(CE_WARN, "!nxge_reclaim: " 796 "ddi_dma_unbind_handle " 797 "failed. status %d", rc); 798 } 799 } 800 NXGE_DEBUG_MSG((nxgep, TX_CTL, 801 "==> nxge_txdma_reclaim: count packets")); 802 /* 803 * count a chained packet only once. 804 */ 805 if (tx_msg_p->tx_message != NULL) { 806 freemsg(tx_msg_p->tx_message); 807 tx_msg_p->tx_message = NULL; 808 } 809 810 tx_msg_p->flags.dma_type = USE_NONE; 811 tx_rd_index = tx_ring_p->rd_index; 812 tx_rd_index = (tx_rd_index + 1) & 813 tx_ring_p->tx_wrap_mask; 814 tx_ring_p->rd_index = tx_rd_index; 815 tx_ring_p->descs_pending--; 816 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 817 tx_msg_p = &tx_msg_ring[tx_rd_index]; 818 } 819 820 status = (nmblks <= (tx_ring_p->tx_ring_size - 821 tx_ring_p->descs_pending - 822 TX_FULL_MARK)); 823 if (status) { 824 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 825 } 826 } else { 827 status = (nmblks <= 828 (tx_ring_p->tx_ring_size - 829 tx_ring_p->descs_pending - 830 TX_FULL_MARK)); 831 } 832 833 NXGE_DEBUG_MSG((nxgep, TX_CTL, 834 "<== nxge_txdma_reclaim status = 0x%08x", status)); 835 836 return (status); 837 } 838 839 uint_t 840 nxge_tx_intr(void *arg1, void *arg2) 841 { 842 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 843 p_nxge_t nxgep = (p_nxge_t)arg2; 844 p_nxge_ldg_t ldgp; 845 uint8_t channel; 846 uint32_t vindex; 847 npi_handle_t handle; 848 tx_cs_t cs; 849 p_tx_ring_t *tx_rings; 850 p_tx_ring_t tx_ring_p; 851 npi_status_t rs = NPI_SUCCESS; 852 uint_t serviced = DDI_INTR_UNCLAIMED; 853 nxge_status_t status = NXGE_OK; 854 855 if (ldvp == NULL) { 856 NXGE_DEBUG_MSG((NULL, INT_CTL, 857 "<== nxge_tx_intr: nxgep $%p ldvp $%p", 858 nxgep, ldvp)); 859 return (DDI_INTR_UNCLAIMED); 860 } 861 862 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 863 nxgep = ldvp->nxgep; 864 } 865 NXGE_DEBUG_MSG((nxgep, INT_CTL, 866 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 867 nxgep, ldvp)); 868 /* 869 * This interrupt handler is for a specific 870 * transmit dma channel. 871 */ 872 handle = NXGE_DEV_NPI_HANDLE(nxgep); 873 /* Get the control and status for this channel. */ 874 channel = ldvp->channel; 875 ldgp = ldvp->ldgp; 876 NXGE_DEBUG_MSG((nxgep, INT_CTL, 877 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 878 "channel %d", 879 nxgep, ldvp, channel)); 880 881 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 882 vindex = ldvp->vdma_index; 883 NXGE_DEBUG_MSG((nxgep, INT_CTL, 884 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 885 channel, vindex, rs)); 886 if (!rs && cs.bits.ldw.mk) { 887 NXGE_DEBUG_MSG((nxgep, INT_CTL, 888 "==> nxge_tx_intr:channel %d ring index %d " 889 "status 0x%08x (mk bit set)", 890 channel, vindex, rs)); 891 tx_rings = nxgep->tx_rings->rings; 892 tx_ring_p = tx_rings[vindex]; 893 NXGE_DEBUG_MSG((nxgep, INT_CTL, 894 "==> nxge_tx_intr:channel %d ring index %d " 895 "status 0x%08x (mk bit set, calling reclaim)", 896 channel, vindex, rs)); 897 898 MUTEX_ENTER(&tx_ring_p->lock); 899 (void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0); 900 MUTEX_EXIT(&tx_ring_p->lock); 901 mac_tx_update(nxgep->mach); 902 } 903 904 /* 905 * Process other transmit control and status. 906 * Check the ldv state. 907 */ 908 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 909 /* 910 * Rearm this logical group if this is a single device 911 * group. 
912 */ 913 if (ldgp->nldvs == 1) { 914 NXGE_DEBUG_MSG((nxgep, INT_CTL, 915 "==> nxge_tx_intr: rearm")); 916 if (status == NXGE_OK) { 917 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 918 B_TRUE, ldgp->ldg_timer); 919 } 920 } 921 922 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 923 serviced = DDI_INTR_CLAIMED; 924 return (serviced); 925 } 926 927 void 928 nxge_txdma_stop(p_nxge_t nxgep) 929 { 930 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 931 932 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 933 934 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 935 } 936 937 void 938 nxge_txdma_stop_start(p_nxge_t nxgep) 939 { 940 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 941 942 (void) nxge_txdma_stop(nxgep); 943 944 (void) nxge_fixup_txdma_rings(nxgep); 945 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 946 (void) nxge_tx_mac_enable(nxgep); 947 (void) nxge_txdma_hw_kick(nxgep); 948 949 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 950 } 951 952 nxge_status_t 953 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 954 { 955 int i, ndmas; 956 uint16_t channel; 957 p_tx_rings_t tx_rings; 958 p_tx_ring_t *tx_desc_rings; 959 npi_handle_t handle; 960 npi_status_t rs = NPI_SUCCESS; 961 nxge_status_t status = NXGE_OK; 962 963 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 964 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 965 966 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 967 NXGE_DEBUG_MSG((nxgep, TX_CTL, 968 "<== nxge_txdma_mode: not initialized")); 969 return (NXGE_ERROR); 970 } 971 972 tx_rings = nxgep->tx_rings; 973 if (tx_rings == NULL) { 974 NXGE_DEBUG_MSG((nxgep, TX_CTL, 975 "<== nxge_txdma_hw_mode: NULL global ring pointer")); 976 return (NXGE_ERROR); 977 } 978 979 tx_desc_rings = tx_rings->rings; 980 if (tx_desc_rings == NULL) { 981 NXGE_DEBUG_MSG((nxgep, TX_CTL, 982 "<== nxge_txdma_hw_mode: NULL rings pointer")); 983 return (NXGE_ERROR); 984 } 985 986 ndmas = tx_rings->ndmas; 987 if (!ndmas) { 988 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 989 "<== nxge_txdma_hw_mode: no dma channel allocated")); 990 return (NXGE_ERROR); 991 } 992 993 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_mode: " 994 "tx_rings $%p tx_desc_rings $%p ndmas %d", 995 tx_rings, tx_desc_rings, ndmas)); 996 997 handle = NXGE_DEV_NPI_HANDLE(nxgep); 998 for (i = 0; i < ndmas; i++) { 999 if (tx_desc_rings[i] == NULL) { 1000 continue; 1001 } 1002 channel = tx_desc_rings[i]->tdc; 1003 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1004 "==> nxge_txdma_hw_mode: channel %d", channel)); 1005 if (enable) { 1006 rs = npi_txdma_channel_enable(handle, channel); 1007 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1008 "==> nxge_txdma_hw_mode: channel %d (enable) " 1009 "rs 0x%x", channel, rs)); 1010 } else { 1011 /* 1012 * Stop the dma channel and waits for the stop done. 1013 * If the stop done bit is not set, then force 1014 * an error so TXC will stop. 1015 * All channels bound to this port need to be stopped 1016 * and reset after injecting an interrupt error. 
1017 */ 1018 rs = npi_txdma_channel_disable(handle, channel); 1019 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1020 "==> nxge_txdma_hw_mode: channel %d (disable) " 1021 "rs 0x%x", channel, rs)); 1022 { 1023 tdmc_intr_dbg_t intr_dbg; 1024 1025 if (rs != NPI_SUCCESS) { 1026 /* Inject any error */ 1027 intr_dbg.value = 0; 1028 intr_dbg.bits.ldw.nack_pref = 1; 1029 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1030 "==> nxge_txdma_hw_mode: " 1031 "channel %d (stop failed 0x%x) " 1032 "(inject err)", rs, channel)); 1033 (void) npi_txdma_inj_int_error_set( 1034 handle, channel, &intr_dbg); 1035 rs = npi_txdma_channel_disable(handle, 1036 channel); 1037 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1038 "==> nxge_txdma_hw_mode: " 1039 "channel %d (stop again 0x%x) " 1040 "(after inject err)", 1041 rs, channel)); 1042 } 1043 } 1044 } 1045 } 1046 1047 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1048 1049 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1050 "<== nxge_txdma_hw_mode: status 0x%x", status)); 1051 1052 return (status); 1053 } 1054 1055 void 1056 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1057 { 1058 npi_handle_t handle; 1059 1060 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1061 "==> nxge_txdma_enable_channel: channel %d", channel)); 1062 1063 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1064 /* enable the transmit dma channels */ 1065 (void) npi_txdma_channel_enable(handle, channel); 1066 1067 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 1068 } 1069 1070 void 1071 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1072 { 1073 npi_handle_t handle; 1074 1075 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1076 "==> nxge_txdma_disable_channel: channel %d", channel)); 1077 1078 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1079 /* stop the transmit dma channels */ 1080 (void) npi_txdma_channel_disable(handle, channel); 1081 1082 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 1083 } 1084 1085 int 1086 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 1087 { 1088 npi_handle_t handle; 1089 tdmc_intr_dbg_t intr_dbg; 1090 int status; 1091 npi_status_t rs = NPI_SUCCESS; 1092 1093 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 1094 /* 1095 * Stop the dma channel waits for the stop done. 1096 * If the stop done bit is not set, then create 1097 * an error. 1098 */ 1099 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1100 rs = npi_txdma_channel_disable(handle, channel); 1101 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1102 if (status == NXGE_OK) { 1103 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1104 "<== nxge_txdma_stop_inj_err (channel %d): " 1105 "stopped OK", channel)); 1106 return (status); 1107 } 1108 1109 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1110 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1111 "injecting error", channel, rs)); 1112 /* Inject any error */ 1113 intr_dbg.value = 0; 1114 intr_dbg.bits.ldw.nack_pref = 1; 1115 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1116 1117 /* Stop done bit will be set as a result of error injection */ 1118 rs = npi_txdma_channel_disable(handle, channel); 1119 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1120 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 1121 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1122 "<== nxge_txdma_stop_inj_err (channel %d): " 1123 "stopped OK ", channel)); 1124 return (status); 1125 } 1126 1127 #if defined(NXGE_DEBUG) 1128 nxge_txdma_regs_dump_channels(nxgep); 1129 #endif 1130 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1131 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1132 " (injected error but still not stopped)", channel, rs)); 1133 1134 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 1135 return (status); 1136 } 1137 1138 void 1139 nxge_hw_start_tx(p_nxge_t nxgep) 1140 { 1141 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_tx")); 1142 1143 (void) nxge_txdma_hw_start(nxgep); 1144 (void) nxge_tx_mac_enable(nxgep); 1145 1146 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_tx")); 1147 } 1148 1149 /*ARGSUSED*/ 1150 void 1151 nxge_fixup_txdma_rings(p_nxge_t nxgep) 1152 { 1153 int index, ndmas; 1154 uint16_t channel; 1155 p_tx_rings_t tx_rings; 1156 1157 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 1158 1159 /* 1160 * For each transmit channel, reclaim each descriptor and 1161 * free buffers. 1162 */ 1163 tx_rings = nxgep->tx_rings; 1164 if (tx_rings == NULL) { 1165 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1166 "<== nxge_fixup_txdma_rings: NULL ring pointer")); 1167 return; 1168 } 1169 1170 ndmas = tx_rings->ndmas; 1171 if (!ndmas) { 1172 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1173 "<== nxge_fixup_txdma_rings: no channel allocated")); 1174 return; 1175 } 1176 1177 if (tx_rings->rings == NULL) { 1178 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1179 "<== nxge_fixup_txdma_rings: NULL rings pointer")); 1180 return; 1181 } 1182 1183 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_fixup_txdma_rings: " 1184 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1185 tx_rings, tx_rings->rings, ndmas)); 1186 1187 for (index = 0; index < ndmas; index++) { 1188 channel = tx_rings->rings[index]->tdc; 1189 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1190 "==> nxge_fixup_txdma_rings: channel %d", channel)); 1191 1192 nxge_txdma_fixup_channel(nxgep, tx_rings->rings[index], 1193 channel); 1194 } 1195 1196 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 1197 } 1198 1199 /*ARGSUSED*/ 1200 void 1201 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1202 { 1203 p_tx_ring_t ring_p; 1204 1205 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 1206 ring_p = nxge_txdma_get_ring(nxgep, channel); 1207 if (ring_p == NULL) { 1208 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1209 return; 1210 } 1211 1212 if (ring_p->tdc != channel) { 1213 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1214 "<== nxge_txdma_fix_channel: channel not matched " 1215 "ring tdc %d passed channel", 1216 ring_p->tdc, channel)); 1217 return; 1218 } 1219 1220 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1221 1222 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1223 } 1224 1225 /*ARGSUSED*/ 1226 void 1227 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1228 { 1229 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 1230 1231 if (ring_p == NULL) { 1232 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1233 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1234 return; 1235 } 1236 1237 if (ring_p->tdc != channel) { 1238 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1239 "<== nxge_txdma_fixup_channel: channel not matched " 1240 "ring tdc %d passed channel", 1241 ring_p->tdc, channel)); 1242 return; 1243 } 1244 1245 MUTEX_ENTER(&ring_p->lock); 1246 (void) 
nxge_txdma_reclaim(nxgep, ring_p, 0); 1247 ring_p->rd_index = 0; 1248 ring_p->wr_index = 0; 1249 ring_p->ring_head.value = 0; 1250 ring_p->ring_kick_tail.value = 0; 1251 ring_p->descs_pending = 0; 1252 MUTEX_EXIT(&ring_p->lock); 1253 1254 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 1255 } 1256 1257 /*ARGSUSED*/ 1258 void 1259 nxge_txdma_hw_kick(p_nxge_t nxgep) 1260 { 1261 int index, ndmas; 1262 uint16_t channel; 1263 p_tx_rings_t tx_rings; 1264 1265 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 1266 1267 tx_rings = nxgep->tx_rings; 1268 if (tx_rings == NULL) { 1269 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1270 "<== nxge_txdma_hw_kick: NULL ring pointer")); 1271 return; 1272 } 1273 1274 ndmas = tx_rings->ndmas; 1275 if (!ndmas) { 1276 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1277 "<== nxge_txdma_hw_kick: no channel allocated")); 1278 return; 1279 } 1280 1281 if (tx_rings->rings == NULL) { 1282 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1283 "<== nxge_txdma_hw_kick: NULL rings pointer")); 1284 return; 1285 } 1286 1287 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_kick: " 1288 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1289 tx_rings, tx_rings->rings, ndmas)); 1290 1291 for (index = 0; index < ndmas; index++) { 1292 channel = tx_rings->rings[index]->tdc; 1293 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1294 "==> nxge_txdma_hw_kick: channel %d", channel)); 1295 nxge_txdma_hw_kick_channel(nxgep, tx_rings->rings[index], 1296 channel); 1297 } 1298 1299 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 1300 } 1301 1302 /*ARGSUSED*/ 1303 void 1304 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 1305 { 1306 p_tx_ring_t ring_p; 1307 1308 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 1309 1310 ring_p = nxge_txdma_get_ring(nxgep, channel); 1311 if (ring_p == NULL) { 1312 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1313 " nxge_txdma_kick_channel")); 1314 return; 1315 } 1316 1317 if (ring_p->tdc != channel) { 1318 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1319 "<== nxge_txdma_kick_channel: channel not matched " 1320 "ring tdc %d passed channel", 1321 ring_p->tdc, channel)); 1322 return; 1323 } 1324 1325 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 1326 1327 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 1328 } 1329 1330 /*ARGSUSED*/ 1331 void 1332 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1333 { 1334 1335 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 1336 1337 if (ring_p == NULL) { 1338 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1339 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 1340 return; 1341 } 1342 1343 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 1344 } 1345 1346 /*ARGSUSED*/ 1347 void 1348 nxge_check_tx_hang(p_nxge_t nxgep) 1349 { 1350 1351 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 1352 1353 /* 1354 * Needs inputs from hardware for regs: 1355 * head index had not moved since last timeout. 1356 * packets not transmitted or stuffed registers. 
1357 */ 1358 if (nxge_txdma_hung(nxgep)) { 1359 nxge_fixup_hung_txdma_rings(nxgep); 1360 } 1361 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 1362 } 1363 1364 int 1365 nxge_txdma_hung(p_nxge_t nxgep) 1366 { 1367 int index, ndmas; 1368 uint16_t channel; 1369 p_tx_rings_t tx_rings; 1370 p_tx_ring_t tx_ring_p; 1371 1372 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 1373 tx_rings = nxgep->tx_rings; 1374 if (tx_rings == NULL) { 1375 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1376 "<== nxge_txdma_hung: NULL ring pointer")); 1377 return (B_FALSE); 1378 } 1379 1380 ndmas = tx_rings->ndmas; 1381 if (!ndmas) { 1382 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1383 "<== nxge_txdma_hung: no channel " 1384 "allocated")); 1385 return (B_FALSE); 1386 } 1387 1388 if (tx_rings->rings == NULL) { 1389 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1390 "<== nxge_txdma_hung: NULL rings pointer")); 1391 return (B_FALSE); 1392 } 1393 1394 for (index = 0; index < ndmas; index++) { 1395 channel = tx_rings->rings[index]->tdc; 1396 tx_ring_p = tx_rings->rings[index]; 1397 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1398 "==> nxge_txdma_hung: channel %d", channel)); 1399 if (nxge_txdma_channel_hung(nxgep, tx_ring_p, channel)) { 1400 return (B_TRUE); 1401 } 1402 } 1403 1404 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 1405 1406 return (B_FALSE); 1407 } 1408 1409 int 1410 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1411 { 1412 uint16_t head_index, tail_index; 1413 boolean_t head_wrap, tail_wrap; 1414 npi_handle_t handle; 1415 tx_ring_hdl_t tx_head; 1416 uint_t tx_rd_index; 1417 1418 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 1419 1420 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1421 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1422 "==> nxge_txdma_channel_hung: channel %d", channel)); 1423 MUTEX_ENTER(&tx_ring_p->lock); 1424 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 1425 1426 tail_index = tx_ring_p->wr_index; 1427 tail_wrap = tx_ring_p->wr_index_wrap; 1428 tx_rd_index = tx_ring_p->rd_index; 1429 MUTEX_EXIT(&tx_ring_p->lock); 1430 1431 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1432 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1433 "tail_index %d tail_wrap %d ", 1434 channel, tx_rd_index, tail_index, tail_wrap)); 1435 /* 1436 * Read the hardware maintained transmit head 1437 * and wrap around bit. 
1438 */ 1439 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 1440 head_index = tx_head.bits.ldw.head; 1441 head_wrap = tx_head.bits.ldw.wrap; 1442 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1443 "==> nxge_txdma_channel_hung: " 1444 "tx_rd_index %d tail %d tail_wrap %d " 1445 "head %d wrap %d", 1446 tx_rd_index, tail_index, tail_wrap, 1447 head_index, head_wrap)); 1448 1449 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1450 tail_index, tail_wrap) && 1451 (head_index == tx_rd_index)) { 1452 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1453 "==> nxge_txdma_channel_hung: EMPTY")); 1454 return (B_FALSE); 1455 } 1456 1457 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1458 "==> nxge_txdma_channel_hung: Checking if ring full")); 1459 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1460 tail_wrap)) { 1461 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1462 "==> nxge_txdma_channel_hung: full")); 1463 return (B_TRUE); 1464 } 1465 1466 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 1467 1468 return (B_FALSE); 1469 } 1470 1471 /*ARGSUSED*/ 1472 void 1473 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 1474 { 1475 int index, ndmas; 1476 uint16_t channel; 1477 p_tx_rings_t tx_rings; 1478 1479 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 1480 tx_rings = nxgep->tx_rings; 1481 if (tx_rings == NULL) { 1482 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1483 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer")); 1484 return; 1485 } 1486 1487 ndmas = tx_rings->ndmas; 1488 if (!ndmas) { 1489 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1490 "<== nxge_fixup_hung_txdma_rings: no channel " 1491 "allocated")); 1492 return; 1493 } 1494 1495 if (tx_rings->rings == NULL) { 1496 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1497 "<== nxge_fixup_hung_txdma_rings: NULL rings pointer")); 1498 return; 1499 } 1500 1501 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings: " 1502 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1503 tx_rings, tx_rings->rings, ndmas)); 1504 1505 for (index = 0; index < ndmas; index++) { 1506 channel = tx_rings->rings[index]->tdc; 1507 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1508 "==> nxge_fixup_hung_txdma_rings: channel %d", 1509 channel)); 1510 1511 nxge_txdma_fixup_hung_channel(nxgep, tx_rings->rings[index], 1512 channel); 1513 } 1514 1515 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 1516 } 1517 1518 /*ARGSUSED*/ 1519 void 1520 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 1521 { 1522 p_tx_ring_t ring_p; 1523 1524 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 1525 ring_p = nxge_txdma_get_ring(nxgep, channel); 1526 if (ring_p == NULL) { 1527 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1528 "<== nxge_txdma_fix_hung_channel")); 1529 return; 1530 } 1531 1532 if (ring_p->tdc != channel) { 1533 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1534 "<== nxge_txdma_fix_hung_channel: channel not matched " 1535 "ring tdc %d passed channel", 1536 ring_p->tdc, channel)); 1537 return; 1538 } 1539 1540 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1541 1542 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 1543 } 1544 1545 /*ARGSUSED*/ 1546 void 1547 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 1548 uint16_t channel) 1549 { 1550 npi_handle_t handle; 1551 tdmc_intr_dbg_t intr_dbg; 1552 int status = NXGE_OK; 1553 1554 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 1555 1556 if (ring_p == NULL) { 1557 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1558 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1559 return; 1560 } 1561 1562 if (ring_p->tdc != channel) { 1563 
NXGE_DEBUG_MSG((nxgep, TX_CTL, 1564 "<== nxge_txdma_fixup_hung_channel: channel " 1565 "not matched " 1566 "ring tdc %d passed channel", 1567 ring_p->tdc, channel)); 1568 return; 1569 } 1570 1571 /* Reclaim descriptors */ 1572 MUTEX_ENTER(&ring_p->lock); 1573 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1574 MUTEX_EXIT(&ring_p->lock); 1575 1576 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1577 /* 1578 * Stop the dma channel waits for the stop done. 1579 * If the stop done bit is not set, then force 1580 * an error. 1581 */ 1582 status = npi_txdma_channel_disable(handle, channel); 1583 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1584 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1585 "<== nxge_txdma_fixup_hung_channel: stopped OK " 1586 "ring tdc %d passed channel %d", 1587 ring_p->tdc, channel)); 1588 return; 1589 } 1590 1591 /* Inject any error */ 1592 intr_dbg.value = 0; 1593 intr_dbg.bits.ldw.nack_pref = 1; 1594 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1595 1596 /* Stop done bit will be set as a result of error injection */ 1597 status = npi_txdma_channel_disable(handle, channel); 1598 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1599 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1600 "<== nxge_txdma_fixup_hung_channel: stopped again " 1601 "ring tdc %d passed channel", 1602 ring_p->tdc, channel)); 1603 return; 1604 } 1605 1606 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1607 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! " 1608 "ring tdc %d passed channel", 1609 ring_p->tdc, channel)); 1610 1611 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 1612 } 1613 1614 /*ARGSUSED*/ 1615 void 1616 nxge_reclaim_rings(p_nxge_t nxgep) 1617 { 1618 int index, ndmas; 1619 uint16_t channel; 1620 p_tx_rings_t tx_rings; 1621 p_tx_ring_t tx_ring_p; 1622 1623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 1624 tx_rings = nxgep->tx_rings; 1625 if (tx_rings == NULL) { 1626 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1627 "<== nxge_reclaim_rings: NULL ring pointer")); 1628 return; 1629 } 1630 1631 ndmas = tx_rings->ndmas; 1632 if (!ndmas) { 1633 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1634 "<== nxge_reclaim_rings: no channel " 1635 "allocated")); 1636 return; 1637 } 1638 1639 if (tx_rings->rings == NULL) { 1640 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1641 "<== nxge_reclaim_rings: NULL rings pointer")); 1642 return; 1643 } 1644 1645 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings: " 1646 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1647 tx_rings, tx_rings->rings, ndmas)); 1648 1649 for (index = 0; index < ndmas; index++) { 1650 channel = tx_rings->rings[index]->tdc; 1651 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1652 "==> nxge_reclaim_rings: channel %d", 1653 channel)); 1654 tx_ring_p = tx_rings->rings[index]; 1655 MUTEX_ENTER(&tx_ring_p->lock); 1656 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, channel); 1657 MUTEX_EXIT(&tx_ring_p->lock); 1658 } 1659 1660 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 1661 } 1662 1663 void 1664 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 1665 { 1666 int index, ndmas; 1667 uint16_t channel; 1668 p_tx_rings_t tx_rings; 1669 npi_handle_t handle; 1670 1671 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_txdma_regs_dump_channels")); 1672 1673 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1674 (void) npi_txdma_dump_fzc_regs(handle); 1675 1676 tx_rings = nxgep->tx_rings; 1677 if (tx_rings == NULL) { 1678 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1679 "<== nxge_txdma_regs_dump_channels: NULL ring")); 1680 return; 1681 } 1682 1683 ndmas = tx_rings->ndmas; 1684 if (!ndmas) { 1685 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1686 "<==
nxge_txdma_regs_dump_channels: " 1687 "no channel allocated")); 1688 return; 1689 } 1690 1691 if (tx_rings->rings == NULL) { 1692 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1693 "<== nxge_txdma_regs_dump_channels: NULL rings")); 1694 return; 1695 } 1696 1697 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_regs_dump_channels: " 1698 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1699 tx_rings, tx_rings->rings, ndmas)); 1700 1701 for (index = 0; index < ndmas; index++) { 1702 channel = tx_rings->rings[index]->tdc; 1703 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1704 "==> nxge_txdma_regs_dump_channels: channel %d", 1705 channel)); 1706 (void) npi_txdma_dump_tdc_regs(handle, channel); 1707 } 1708 1709 /* Dump TXC registers */ 1710 (void) npi_txc_dump_fzc_regs(handle); 1711 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 1712 1713 for (index = 0; index < ndmas; index++) { 1714 channel = tx_rings->rings[index]->tdc; 1715 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1716 "==> nxge_txdma_regs_dump_channels: channel %d", 1717 channel)); 1718 (void) npi_txc_dump_tdc_fzc_regs(handle, channel); 1719 } 1720 1721 for (index = 0; index < ndmas; index++) { 1722 channel = tx_rings->rings[index]->tdc; 1723 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1724 "==> nxge_txdma_regs_dump_channels: channel %d", 1725 channel)); 1726 nxge_txdma_regs_dump(nxgep, channel); 1727 } 1728 1729 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels")); 1730 1731 } 1732 1733 void 1734 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 1735 { 1736 npi_handle_t handle; 1737 tx_ring_hdl_t hdl; 1738 tx_ring_kick_t kick; 1739 tx_cs_t cs; 1740 txc_control_t control; 1741 uint32_t bitmap = 0; 1742 uint32_t burst = 0; 1743 uint32_t bytes = 0; 1744 dma_log_page_t cfg; 1745 1746 printf("\n\tfunc # %d tdc %d ", 1747 nxgep->function_num, channel); 1748 cfg.page_num = 0; 1749 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1750 (void) npi_txdma_log_page_get(handle, channel, &cfg); 1751 printf("\n\tlog page func %d valid page 0 %d", 1752 cfg.func_num, cfg.valid); 1753 cfg.page_num = 1; 1754 (void) npi_txdma_log_page_get(handle, channel, &cfg); 1755 printf("\n\tlog page func %d valid page 1 %d", 1756 cfg.func_num, cfg.valid); 1757 1758 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 1759 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 1760 printf("\n\thead value is 0x%0llx", 1761 (long long)hdl.value); 1762 printf("\n\thead index %d", hdl.bits.ldw.head); 1763 printf("\n\tkick value is 0x%0llx", 1764 (long long)kick.value); 1765 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 1766 1767 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 1768 printf("\n\tControl status is 0x%0llx", (long long)cs.value); 1769 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 1770 1771 (void) npi_txc_control(handle, OP_GET, &control); 1772 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 1773 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 1774 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 1775 1776 printf("\n\tTXC port control 0x%0llx", 1777 (long long)control.value); 1778 printf("\n\tTXC port bitmap 0x%x", bitmap); 1779 printf("\n\tTXC max burst %d", burst); 1780 printf("\n\tTXC bytes xmt %d\n", bytes); 1781 1782 { 1783 ipp_status_t status; 1784 1785 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 1786 printf("\n\tIPP status 0x%llx\n", (unsigned long long)status.value); 1787 } 1788 } 1789 1790 /* 1791 * Static functions start here.
1792 */ 1793 static nxge_status_t 1794 nxge_map_txdma(p_nxge_t nxgep) 1795 { 1796 int i, ndmas; 1797 uint16_t channel; 1798 p_tx_rings_t tx_rings; 1799 p_tx_ring_t *tx_desc_rings; 1800 p_tx_mbox_areas_t tx_mbox_areas_p; 1801 p_tx_mbox_t *tx_mbox_p; 1802 p_nxge_dma_pool_t dma_buf_poolp; 1803 p_nxge_dma_pool_t dma_cntl_poolp; 1804 p_nxge_dma_common_t *dma_buf_p; 1805 p_nxge_dma_common_t *dma_cntl_p; 1806 nxge_status_t status = NXGE_OK; 1807 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1808 p_nxge_dma_common_t t_dma_buf_p; 1809 p_nxge_dma_common_t t_dma_cntl_p; 1810 #endif 1811 1812 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 1813 1814 dma_buf_poolp = nxgep->tx_buf_pool_p; 1815 dma_cntl_poolp = nxgep->tx_cntl_pool_p; 1816 1817 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1818 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1819 "==> nxge_map_txdma: buf not allocated")); 1820 return (NXGE_ERROR); 1821 } 1822 1823 ndmas = dma_buf_poolp->ndmas; 1824 if (!ndmas) { 1825 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1826 "<== nxge_map_txdma: no dma allocated")); 1827 return (NXGE_ERROR); 1828 } 1829 1830 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 1831 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 1832 1833 tx_rings = (p_tx_rings_t) 1834 KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 1835 tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC( 1836 sizeof (p_tx_ring_t) * ndmas, KM_SLEEP); 1837 1838 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1839 "tx_rings $%p tx_desc_rings $%p", 1840 tx_rings, tx_desc_rings)); 1841 1842 tx_mbox_areas_p = (p_tx_mbox_areas_t) 1843 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 1844 tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC( 1845 sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP); 1846 1847 /* 1848 * Map descriptors from the buffer pools for each dma channel. 1849 */ 1850 for (i = 0; i < ndmas; i++) { 1851 /* 1852 * Set up and prepare buffer blocks, descriptors 1853 * and mailbox. 
1854 */ 1855 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 1856 status = nxge_map_txdma_channel(nxgep, channel, 1857 (p_nxge_dma_common_t *)&dma_buf_p[i], 1858 (p_tx_ring_t *)&tx_desc_rings[i], 1859 dma_buf_poolp->num_chunks[i], 1860 (p_nxge_dma_common_t *)&dma_cntl_p[i], 1861 (p_tx_mbox_t *)&tx_mbox_p[i]); 1862 if (status != NXGE_OK) { 1863 goto nxge_map_txdma_fail1; 1864 } 1865 tx_desc_rings[i]->index = (uint16_t)i; 1866 tx_desc_rings[i]->tdc_stats = &nxgep->statsp->tdc_stats[i]; 1867 1868 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 1869 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 1870 tx_desc_rings[i]->hv_set = B_FALSE; 1871 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 1872 t_dma_cntl_p = (p_nxge_dma_common_t)dma_cntl_p[i]; 1873 1874 tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp = 1875 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 1876 tx_desc_rings[i]->hv_tx_buf_ioaddr_size = 1877 (uint64_t)t_dma_buf_p->orig_alength; 1878 1879 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1880 "==> nxge_map_txdma_channel: " 1881 "hv data buf base io $%p " 1882 "size 0x%llx (%d) " 1883 "buf base io $%p " 1884 "orig vatopa base io $%p " 1885 "orig_len 0x%llx (%d)", 1886 tx_desc_rings[i]->hv_tx_buf_base_ioaddr_pp, 1887 tx_desc_rings[i]->hv_tx_buf_ioaddr_size, 1888 tx_desc_rings[i]->hv_tx_buf_ioaddr_size, 1889 t_dma_buf_p->ioaddr_pp, 1890 t_dma_buf_p->orig_vatopa, 1891 t_dma_buf_p->orig_alength, 1892 t_dma_buf_p->orig_alength)); 1893 1894 tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp = 1895 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 1896 tx_desc_rings[i]->hv_tx_cntl_ioaddr_size = 1897 (uint64_t)t_dma_cntl_p->orig_alength; 1898 1899 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1900 "==> nxge_map_txdma_channel: " 1901 "hv cntl base io $%p " 1902 "orig ioaddr_pp ($%p) " 1903 "orig vatopa ($%p) " 1904 "size 0x%llx (%d 0x%x)", 1905 tx_desc_rings[i]->hv_tx_cntl_base_ioaddr_pp, 1906 t_dma_cntl_p->orig_ioaddr_pp, 1907 t_dma_cntl_p->orig_vatopa, 1908 tx_desc_rings[i]->hv_tx_cntl_ioaddr_size, 1909 t_dma_cntl_p->orig_alength, 1910 t_dma_cntl_p->orig_alength)); 1911 } 1912 #endif 1913 } 1914 1915 tx_rings->ndmas = ndmas; 1916 tx_rings->rings = tx_desc_rings; 1917 nxgep->tx_rings = tx_rings; 1918 tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p; 1919 nxgep->tx_mbox_areas_p = tx_mbox_areas_p; 1920 1921 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1922 "tx_rings $%p rings $%p", 1923 nxgep->tx_rings, nxgep->tx_rings->rings)); 1924 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 1925 "tx_rings $%p tx_desc_rings $%p", 1926 nxgep->tx_rings, tx_desc_rings)); 1927 1928 goto nxge_map_txdma_exit; 1929 1930 nxge_map_txdma_fail1: 1931 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1932 "==> nxge_map_txdma: uninit tx desc " 1933 "(status 0x%x channel %d i %d)", 1934 nxgep, status, channel, i)); 1935 i--; 1936 for (; i >= 0; i--) { 1937 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 1938 nxge_unmap_txdma_channel(nxgep, channel, 1939 tx_desc_rings[i], 1940 tx_mbox_p[i]); 1941 } 1942 1943 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 1944 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 1945 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 1946 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 1947 1948 nxge_map_txdma_exit: 1949 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1950 "==> nxge_map_txdma: " 1951 "(status 0x%x channel %d)", 1952 status, channel)); 1953 1954 return (status); 1955 } 1956 1957 static void 1958 nxge_unmap_txdma(p_nxge_t nxgep) 1959 { 1960 int i, ndmas; 1961 uint8_t channel; 1962 p_tx_rings_t tx_rings; 1963 
p_tx_ring_t *tx_desc_rings; 1964 p_tx_mbox_areas_t tx_mbox_areas_p; 1965 p_tx_mbox_t *tx_mbox_p; 1966 p_nxge_dma_pool_t dma_buf_poolp; 1967 1968 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma")); 1969 1970 dma_buf_poolp = nxgep->tx_buf_pool_p; 1971 if (!dma_buf_poolp->buf_allocated) { 1972 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1973 "==> nxge_unmap_txdma: buf not allocated")); 1974 return; 1975 } 1976 1977 ndmas = dma_buf_poolp->ndmas; 1978 if (!ndmas) { 1979 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1980 "<== nxge_unmap_txdma: no dma allocated")); 1981 return; 1982 } 1983 1984 tx_rings = nxgep->tx_rings; 1986 if (tx_rings == NULL) { 1987 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1988 "<== nxge_unmap_txdma: NULL ring pointer")); 1989 return; 1990 } 1991 1992 tx_desc_rings = tx_rings->rings; 1993 if (tx_desc_rings == NULL) { 1994 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1995 "<== nxge_unmap_txdma: NULL ring pointers")); 1996 return; 1997 } 1998 1999 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_unmap_txdma: " 2000 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2001 tx_rings, tx_desc_rings, ndmas)); 2002 2003 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2004 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2005 2006 for (i = 0; i < ndmas; i++) { 2007 channel = tx_desc_rings[i]->tdc; 2008 (void) nxge_unmap_txdma_channel(nxgep, channel, 2009 (p_tx_ring_t)tx_desc_rings[i], 2010 (p_tx_mbox_t)tx_mbox_p[i]); 2011 } 2012 2013 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 2014 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 2015 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 2016 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 2017 2018 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2019 "<== nxge_unmap_txdma")); 2020 } 2021 2022 static nxge_status_t 2023 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2024 p_nxge_dma_common_t *dma_buf_p, 2025 p_tx_ring_t *tx_desc_p, 2026 uint32_t num_chunks, 2027 p_nxge_dma_common_t *dma_cntl_p, 2028 p_tx_mbox_t *tx_mbox_p) 2029 { 2030 int status = NXGE_OK; 2031 2032 /* 2033 * Set up and prepare buffer blocks, descriptors 2034 * and mailbox. 2035 */ 2036 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2037 "==> nxge_map_txdma_channel (channel %d)", channel)); 2038 /* 2039 * Transmit buffer blocks 2040 */ 2041 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2042 dma_buf_p, tx_desc_p, num_chunks); 2043 if (status != NXGE_OK) { 2044 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2045 "==> nxge_map_txdma_channel (channel %d): " 2046 "map buffer failed 0x%x", channel, status)); 2047 goto nxge_map_txdma_channel_exit; 2048 } 2049 2050 /* 2051 * Transmit block ring, and mailbox. 2052 */ 2053 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2054 tx_mbox_p); 2055 2056 goto nxge_map_txdma_channel_exit; 2057 2058 nxge_map_txdma_channel_fail1: 2059 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2060 "==> nxge_map_txdma_channel: unmap buf " 2061 "(status 0x%x channel %d)", 2062 status, channel)); 2063 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2064 2065 nxge_map_txdma_channel_exit: 2066 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2067 "<== nxge_map_txdma_channel: " 2068 "(status 0x%x channel %d)", 2069 status, channel)); 2070 2071 return (status); 2072 } 2073 2074 /*ARGSUSED*/ 2075 static void 2076 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2077 p_tx_ring_t tx_ring_p, 2078 p_tx_mbox_t tx_mbox_p) 2079 { 2080 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2081 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2082 /* 2083 * unmap tx block ring, and mailbox.
2084 */ 2085 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, 2086 tx_ring_p, tx_mbox_p); 2087 2088 /* unmap buffer blocks */ 2089 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, tx_ring_p); 2090 2091 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2092 } 2093 2094 /*ARGSUSED*/ 2095 static void 2096 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2097 p_nxge_dma_common_t *dma_cntl_p, 2098 p_tx_ring_t tx_ring_p, 2099 p_tx_mbox_t *tx_mbox_p) 2100 { 2101 p_tx_mbox_t mboxp; 2102 p_nxge_dma_common_t cntl_dmap; 2103 p_nxge_dma_common_t dmap; 2104 p_tx_rng_cfig_t tx_ring_cfig_p; 2105 p_tx_ring_kick_t tx_ring_kick_p; 2106 p_tx_cs_t tx_cs_p; 2107 p_tx_dma_ent_msk_t tx_evmask_p; 2108 p_txdma_mbh_t mboxh_p; 2109 p_txdma_mbl_t mboxl_p; 2110 uint64_t tx_desc_len; 2111 2112 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2113 "==> nxge_map_txdma_channel_cfg_ring")); 2114 2115 cntl_dmap = *dma_cntl_p; 2116 2117 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2118 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2119 sizeof (tx_desc_t)); 2120 /* 2121 * Zero out transmit ring descriptors. 2122 */ 2123 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2124 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2125 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2126 tx_cs_p = &(tx_ring_p->tx_cs); 2127 tx_evmask_p = &(tx_ring_p->tx_evmask); 2128 tx_ring_cfig_p->value = 0; 2129 tx_ring_kick_p->value = 0; 2130 tx_cs_p->value = 0; 2131 tx_evmask_p->value = 0; 2132 2133 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2134 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2135 dma_channel, 2136 dmap->dma_cookie.dmac_laddress)); 2137 2138 tx_ring_cfig_p->value = 0; 2139 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2140 tx_ring_cfig_p->value = 2141 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2142 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2143 2144 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2145 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2146 dma_channel, 2147 tx_ring_cfig_p->value)); 2148 2149 tx_cs_p->bits.ldw.rst = 1; 2150 2151 /* Map in mailbox */ 2152 mboxp = (p_tx_mbox_t) 2153 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2154 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2155 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2156 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2157 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2158 mboxh_p->value = mboxl_p->value = 0; 2159 2160 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2161 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2162 dmap->dma_cookie.dmac_laddress)); 2163 2164 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2165 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2166 2167 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2168 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2169 2170 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2171 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2172 dmap->dma_cookie.dmac_laddress)); 2173 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2174 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2175 "mbox $%p", 2176 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2177 tx_ring_p->page_valid.value = 0; 2178 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2179 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2180 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2181 tx_ring_p->page_hdl.value = 0; 2182 2183 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2184 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2185 2186 
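/* Both logical pages are marked valid here with zeroed masks, values and relocations; the channel-specific FZC setup is expected to program the actual logical-page registers from these soft-state fields later. */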
tx_ring_p->max_burst.value = 0; 2187 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2188 2189 *tx_mbox_p = mboxp; 2190 2191 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2192 "<== nxge_map_txdma_channel_cfg_ring")); 2193 } 2194 2195 /*ARGSUSED*/ 2196 static void 2197 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2198 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2199 { 2200 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2201 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2202 tx_ring_p->tdc)); 2203 2204 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2205 2206 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2207 "<== nxge_unmap_txdma_channel_cfg_ring")); 2208 } 2209 2210 static nxge_status_t 2211 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2212 p_nxge_dma_common_t *dma_buf_p, 2213 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2214 { 2215 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2216 p_nxge_dma_common_t dmap; 2217 nxge_os_dma_handle_t tx_buf_dma_handle; 2218 p_tx_ring_t tx_ring_p; 2219 p_tx_msg_t tx_msg_ring; 2220 nxge_status_t status = NXGE_OK; 2221 int ddi_status = DDI_SUCCESS; 2222 int i, j, index; 2223 uint32_t size, bsize; 2224 uint32_t nblocks, nmsgs; 2225 2226 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2227 "==> nxge_map_txdma_channel_buf_ring")); 2228 2229 dma_bufp = tmp_bufp = *dma_buf_p; 2230 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2231 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2232 "chunks bufp $%p", 2233 channel, num_chunks, dma_bufp)); 2234 2235 nmsgs = 0; 2236 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2237 nmsgs += tmp_bufp->nblocks; 2238 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2239 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2240 "bufp $%p nblocks %d nmsgs %d", 2241 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2242 } 2243 if (!nmsgs) { 2244 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2245 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2246 "no msg blocks", 2247 channel)); 2248 status = NXGE_ERROR; 2249 goto nxge_map_txdma_channel_buf_ring_exit; 2250 } 2251 2252 tx_ring_p = (p_tx_ring_t) 2253 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2254 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2255 (void *)nxgep->interrupt_cookie); 2256 2257 tx_ring_p->nxgep = nxgep; 2258 tx_ring_p->serial = nxge_serialize_create(nmsgs, 2259 nxge_serial_tx, tx_ring_p); 2260 /* 2261 * Allocate transmit message rings and handles for packets 2262 * not to be copied to premapped buffers. 
2263 */ 2264 size = nmsgs * sizeof (tx_msg_t); 2265 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2266 for (i = 0; i < nmsgs; i++) { 2267 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2268 DDI_DMA_DONTWAIT, 0, 2269 &tx_msg_ring[i].dma_handle); 2270 if (ddi_status != DDI_SUCCESS) { 2271 status |= NXGE_DDI_FAILED; 2272 break; 2273 } 2274 } 2275 if (i < nmsgs) { 2276 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "Allocate handles failed.")); 2277 goto nxge_map_txdma_channel_buf_ring_fail1; 2278 } 2279 2280 tx_ring_p->tdc = channel; 2281 tx_ring_p->tx_msg_ring = tx_msg_ring; 2282 tx_ring_p->tx_ring_size = nmsgs; 2283 tx_ring_p->num_chunks = num_chunks; 2284 if (!nxge_tx_intr_thres) { 2285 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2286 } 2287 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2288 tx_ring_p->rd_index = 0; 2289 tx_ring_p->wr_index = 0; 2290 tx_ring_p->ring_head.value = 0; 2291 tx_ring_p->ring_kick_tail.value = 0; 2292 tx_ring_p->descs_pending = 0; 2293 2294 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2295 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2296 "actual tx desc max %d nmsgs %d " 2297 "(config nxge_tx_ring_size %d)", 2298 channel, tx_ring_p->tx_ring_size, nmsgs, 2299 nxge_tx_ring_size)); 2300 2301 /* 2302 * Map in buffers from the buffer pool. 2303 */ 2304 index = 0; 2305 bsize = dma_bufp->block_size; 2306 2307 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2308 "dma_bufp $%p tx_rng_p $%p " 2309 "tx_msg_rng_p $%p bsize %d", 2310 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2311 2312 tx_buf_dma_handle = dma_bufp->dma_handle; 2313 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2314 bsize = dma_bufp->block_size; 2315 nblocks = dma_bufp->nblocks; 2316 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2317 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2318 "size %d dma_bufp $%p", 2319 i, sizeof (nxge_dma_common_t), dma_bufp)); 2320 2321 for (j = 0; j < nblocks; j++) { 2322 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2323 dmap = &tx_msg_ring[index++].buf_dma; 2324 #ifdef TX_MEM_DEBUG 2325 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2326 "==> nxge_map_txdma_channel_buf_ring: j %d " 2327 "dmap $%p", j, dmap)); 2328 #endif 2329 nxge_setup_dma_common(dmap, dma_bufp, 1, 2330 bsize); 2331 } 2332 } 2333 2334 if (i < num_chunks) { 2335 goto nxge_map_txdma_channel_buf_ring_fail1; 2336 } 2337 2338 *tx_desc_p = tx_ring_p; 2339 2340 goto nxge_map_txdma_channel_buf_ring_exit; 2341 2342 nxge_map_txdma_channel_buf_ring_fail1: 2343 if (tx_ring_p->serial) { 2344 nxge_serialize_destroy(tx_ring_p->serial); 2345 tx_ring_p->serial = NULL; 2346 } 2347 2348 index--; 2349 for (; index >= 0; index--) { 2350 if (tx_msg_ring[index].dma_handle != NULL) { 2351 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2352 } 2353 } 2354 MUTEX_DESTROY(&tx_ring_p->lock); 2355 KMEM_FREE(tx_msg_ring, size); 2356 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2357 2358 nxge_map_txdma_channel_buf_ring_exit: 2359 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2360 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2361 2362 return (status); 2363 } 2364 2365 /*ARGSUSED*/ 2366 static void 2367 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2368 { 2369 p_tx_msg_t tx_msg_ring; 2370 p_tx_msg_t tx_msg_p; 2371 int i; 2372 2373 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2374 "==> nxge_unmap_txdma_channel_buf_ring")); 2375 if (tx_ring_p == NULL) { 2376 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2377 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 2378 return; 2379 } 2380
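/* Per-entry cleanup: unload any DVMA mapping, unbind any bound DMA handle, and free any mblk still attached to each ring entry before the handles and the ring itself are freed. */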
NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2381 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2382 tx_ring_p->tdc)); 2383 2384 tx_msg_ring = tx_ring_p->tx_msg_ring; 2385 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2386 tx_msg_p = &tx_msg_ring[i]; 2387 if (tx_msg_p->flags.dma_type == USE_DVMA) { 2388 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2389 "entry = %d", 2390 i)); 2391 (void) dvma_unload(tx_msg_p->dvma_handle, 2392 0, -1); 2393 tx_msg_p->dvma_handle = NULL; 2394 if (tx_ring_p->dvma_wr_index == 2395 tx_ring_p->dvma_wrap_mask) { 2396 tx_ring_p->dvma_wr_index = 0; 2397 } else { 2398 tx_ring_p->dvma_wr_index++; 2399 } 2400 tx_ring_p->dvma_pending--; 2401 } else if (tx_msg_p->flags.dma_type == 2402 USE_DMA) { 2403 if (ddi_dma_unbind_handle 2404 (tx_msg_p->dma_handle)) { 2405 cmn_err(CE_WARN, "!nxge_unmap_txdma_channel_buf_ring: " 2406 "ddi_dma_unbind_handle " 2407 "failed."); 2408 } 2409 } 2410 2411 if (tx_msg_p->tx_message != NULL) { 2412 freemsg(tx_msg_p->tx_message); 2413 tx_msg_p->tx_message = NULL; 2414 } 2415 } 2416 2417 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2418 if (tx_msg_ring[i].dma_handle != NULL) { 2419 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2420 } 2421 } 2422 2423 if (tx_ring_p->serial) { 2424 nxge_serialize_destroy(tx_ring_p->serial); 2425 tx_ring_p->serial = NULL; 2426 } 2427 2428 MUTEX_DESTROY(&tx_ring_p->lock); 2429 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2430 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2431 2432 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2433 "<== nxge_unmap_txdma_channel_buf_ring")); 2434 } 2435 2436 static nxge_status_t 2437 nxge_txdma_hw_start(p_nxge_t nxgep) 2438 { 2439 int i, ndmas; 2440 uint16_t channel; 2441 p_tx_rings_t tx_rings; 2442 p_tx_ring_t *tx_desc_rings; 2443 p_tx_mbox_areas_t tx_mbox_areas_p; 2444 p_tx_mbox_t *tx_mbox_p; 2445 nxge_status_t status = NXGE_OK; 2446 2447 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2448 2449 tx_rings = nxgep->tx_rings; 2450 if (tx_rings == NULL) { 2451 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2452 "<== nxge_txdma_hw_start: NULL ring pointer")); 2453 return (NXGE_ERROR); 2454 } 2455 tx_desc_rings = tx_rings->rings; 2456 if (tx_desc_rings == NULL) { 2457 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2458 "<== nxge_txdma_hw_start: NULL ring pointers")); 2459 return (NXGE_ERROR); 2460 } 2461 2462 ndmas = tx_rings->ndmas; 2463 if (!ndmas) { 2464 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2465 "<== nxge_txdma_hw_start: no dma channel allocated")); 2466 return (NXGE_ERROR); 2467 } 2468 2469 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2470 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2471 tx_rings, tx_desc_rings, ndmas)); 2472 2473 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2474 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2475 2476 for (i = 0; i < ndmas; i++) { 2477 channel = tx_desc_rings[i]->tdc; 2478 status = nxge_txdma_start_channel(nxgep, channel, 2479 (p_tx_ring_t)tx_desc_rings[i], 2480 (p_tx_mbox_t)tx_mbox_p[i]); 2481 if (status != NXGE_OK) { 2482 goto nxge_txdma_hw_start_fail1; 2483 } 2484 } 2485 2486 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2487 "tx_rings $%p rings $%p", 2488 nxgep->tx_rings, nxgep->tx_rings->rings)); 2489 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2490 "tx_rings $%p tx_desc_rings $%p", 2491 nxgep->tx_rings, tx_desc_rings)); 2492 2493 goto nxge_txdma_hw_start_exit; 2494 2495 nxge_txdma_hw_start_fail1: 2496 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2497 "==> nxge_txdma_hw_start: disable " 2498 "(status 0x%x channel %d i %d)", status, channel, i)); 2499
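/* Unwind: stop every channel started so far, including the one that just failed to start. */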
for (; i >= 0; i--) { 2500 channel = tx_desc_rings[i]->tdc, 2501 (void) nxge_txdma_stop_channel(nxgep, channel, 2502 (p_tx_ring_t)tx_desc_rings[i], 2503 (p_tx_mbox_t)tx_mbox_p[i]); 2504 } 2505 2506 nxge_txdma_hw_start_exit: 2507 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2508 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2509 2510 return (status); 2511 } 2512 2513 static void 2514 nxge_txdma_hw_stop(p_nxge_t nxgep) 2515 { 2516 int i, ndmas; 2517 uint16_t channel; 2518 p_tx_rings_t tx_rings; 2519 p_tx_ring_t *tx_desc_rings; 2520 p_tx_mbox_areas_t tx_mbox_areas_p; 2521 p_tx_mbox_t *tx_mbox_p; 2522 2523 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop")); 2524 2525 tx_rings = nxgep->tx_rings; 2526 if (tx_rings == NULL) { 2527 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2528 "<== nxge_txdma_hw_stop: NULL ring pointer")); 2529 return; 2530 } 2531 tx_desc_rings = tx_rings->rings; 2532 if (tx_desc_rings == NULL) { 2533 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2534 "<== nxge_txdma_hw_stop: NULL ring pointers")); 2535 return; 2536 } 2537 2538 ndmas = tx_rings->ndmas; 2539 if (!ndmas) { 2540 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2541 "<== nxge_txdma_hw_stop: no dma channel allocated")); 2542 return; 2543 } 2544 2545 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2546 "tx_rings $%p tx_desc_rings $%p", 2547 tx_rings, tx_desc_rings)); 2548 2549 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2550 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2551 2552 for (i = 0; i < ndmas; i++) { 2553 channel = tx_desc_rings[i]->tdc; 2554 (void) nxge_txdma_stop_channel(nxgep, channel, 2555 (p_tx_ring_t)tx_desc_rings[i], 2556 (p_tx_mbox_t)tx_mbox_p[i]); 2557 } 2558 2559 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_stop: " 2560 "tx_rings $%p tx_desc_rings $%p", 2561 tx_rings, tx_desc_rings)); 2562 2563 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_hw_stop")); 2564 } 2565 2566 static nxge_status_t 2567 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2568 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2569 2570 { 2571 nxge_status_t status = NXGE_OK; 2572 2573 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2574 "==> nxge_txdma_start_channel (channel %d)", channel)); 2575 /* 2576 * TXDMA/TXC must be in stopped state. 2577 */ 2578 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2579 2580 /* 2581 * Reset TXDMA channel 2582 */ 2583 tx_ring_p->tx_cs.value = 0; 2584 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2585 status = nxge_reset_txdma_channel(nxgep, channel, 2586 tx_ring_p->tx_cs.value); 2587 if (status != NXGE_OK) { 2588 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2589 "==> nxge_txdma_start_channel (channel %d)" 2590 " reset channel failed 0x%x", channel, status)); 2591 goto nxge_txdma_start_channel_exit; 2592 } 2593 2594 /* 2595 * Initialize the TXDMA channel specific FZC control 2596 * configurations. These FZC registers are pertaining 2597 * to each TX channel (i.e. logical pages). 2598 */ 2599 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2600 tx_ring_p, tx_mbox_p); 2601 if (status != NXGE_OK) { 2602 goto nxge_txdma_start_channel_exit; 2603 } 2604 2605 /* 2606 * Initialize the event masks. 2607 */ 2608 tx_ring_p->tx_evmask.value = 0; 2609 status = nxge_init_txdma_channel_event_mask(nxgep, 2610 channel, &tx_ring_p->tx_evmask); 2611 if (status != NXGE_OK) { 2612 goto nxge_txdma_start_channel_exit; 2613 } 2614 2615 /* 2616 * Load TXDMA descriptors, buffers, mailbox, 2617 * initialise the DMA channels and 2618 * enable each DMA channel. 
2619 */ 2620 status = nxge_enable_txdma_channel(nxgep, channel, 2621 tx_ring_p, tx_mbox_p); 2622 if (status != NXGE_OK) { 2623 goto nxge_txdma_start_channel_exit; 2624 } 2625 2626 nxge_txdma_start_channel_exit: 2627 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2628 2629 return (status); 2630 } 2631 2632 /*ARGSUSED*/ 2633 static nxge_status_t 2634 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel, 2635 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2636 { 2637 int status = NXGE_OK; 2638 2639 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2640 "==> nxge_txdma_stop_channel: channel %d", channel)); 2641 2642 /* 2643 * Stop (disable) the TXDMA channel and TXC. If the stop bit is 2644 * set but the STOP_N_GO bit is not, the TXDMA reset state will 2645 * not be set when the channel is subsequently reset. 2646 */ 2647 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2648 2649 /* 2650 * Reset TXDMA channel 2651 */ 2652 tx_ring_p->tx_cs.value = 0; 2653 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2654 status = nxge_reset_txdma_channel(nxgep, channel, 2655 tx_ring_p->tx_cs.value); 2656 if (status != NXGE_OK) { 2657 goto nxge_txdma_stop_channel_exit; 2658 } 2659 2660 #ifdef HARDWARE_REQUIRED 2661 /* Set up the interrupt event masks. */ 2662 tx_ring_p->tx_evmask.value = 0; 2663 status = nxge_init_txdma_channel_event_mask(nxgep, 2664 channel, &tx_ring_p->tx_evmask); 2665 if (status != NXGE_OK) { 2666 goto nxge_txdma_stop_channel_exit; 2667 } 2668 2669 /* Initialize the DMA control and status register */ 2670 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2671 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2672 tx_ring_p->tx_cs.value); 2673 if (status != NXGE_OK) { 2674 goto nxge_txdma_stop_channel_exit; 2675 } 2676 2677 /* Disable channel */ 2678 status = nxge_disable_txdma_channel(nxgep, channel, 2679 tx_ring_p, tx_mbox_p); 2680 if (status != NXGE_OK) { 2681 goto nxge_txdma_stop_channel_exit; 2682 } 2683 2684 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2685 "==> nxge_txdma_stop_channel: event done")); 2686 2687 #endif 2688 2689 nxge_txdma_stop_channel_exit: 2690 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2691 return (status); 2692 } 2693 2694 static p_tx_ring_t 2695 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 2696 { 2697 int index, ndmas; 2698 uint16_t tdc; 2699 p_tx_rings_t tx_rings; 2700 2701 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 2702 2703 tx_rings = nxgep->tx_rings; 2704 if (tx_rings == NULL) { 2705 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2706 "<== nxge_txdma_get_ring: NULL ring pointer")); 2707 return (NULL); 2708 } 2709 2710 ndmas = tx_rings->ndmas; 2711 if (!ndmas) { 2712 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2713 "<== nxge_txdma_get_ring: no channel allocated")); 2714 return (NULL); 2715 } 2716 2717 if (tx_rings->rings == NULL) { 2718 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2719 "<== nxge_txdma_get_ring: NULL rings pointer")); 2720 return (NULL); 2721 } 2722 2723 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_ring: " 2724 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2725 tx_rings, tx_rings->rings, ndmas)); 2726 2727 for (index = 0; index < ndmas; index++) { 2728 tdc = tx_rings->rings[index]->tdc; 2729 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2730 "==> nxge_txdma_get_ring: channel %d", tdc)); 2731 if (channel == tdc) { 2732 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2733 "<== nxge_txdma_get_ring: tdc %d " 2734 "ring $%p", 2735 tdc, tx_rings->rings[index])); 2736 return (p_tx_ring_t)(tx_rings->rings[index]); 2737 } 2738 } 2739 2740 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring")); 2741 return (NULL); 2742 } 2743 2744
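/* nxge_txdma_get_mbox: walk the ring list to find the TX mailbox area that belongs to the given TDC channel; returns NULL if the rings or mailbox areas are not set up or the channel is not found. */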
static p_tx_mbox_t 2745 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 2746 { 2747 int index, tdc, ndmas; 2748 p_tx_rings_t tx_rings; 2749 p_tx_mbox_areas_t tx_mbox_areas_p; 2750 p_tx_mbox_t *tx_mbox_p; 2751 2752 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 2753 2754 tx_rings = nxgep->tx_rings; 2755 if (tx_rings == NULL) { 2756 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2757 "<== nxge_txdma_get_mbox: NULL ring pointer")); 2758 return (NULL); 2759 } 2760 2761 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2762 if (tx_mbox_areas_p == NULL) { 2763 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2764 "<== nxge_txdma_get_mbox: NULL mbox pointer")); 2765 return (NULL); 2766 } 2767 2768 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2769 2770 ndmas = tx_rings->ndmas; 2771 if (!ndmas) { 2772 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2773 "<== nxge_txdma_get_mbox: no channel allocated")); 2774 return (NULL); 2775 } 2776 2777 if (tx_rings->rings == NULL) { 2778 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2779 "<== nxge_txdma_get_mbox: NULL rings pointer")); 2780 return (NULL); 2781 } 2782 2783 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_get_mbox: " 2784 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2785 tx_rings, tx_rings, ndmas)); 2786 2787 for (index = 0; index < ndmas; index++) { 2788 tdc = tx_rings->rings[index]->tdc; 2789 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2790 "==> nxge_txdma_get_mbox: channel %d", tdc)); 2791 if (channel == tdc) { 2792 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2793 "<== nxge_txdma_get_mbox: tdc %d " 2794 "ring $%p", 2795 tdc, tx_rings->rings[index])); 2796 return (p_tx_mbox_t)(tx_mbox_p[index]); 2797 } 2798 } 2799 2800 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox")); 2801 return (NULL); 2802 } 2803 2804 /*ARGSUSED*/ 2805 static nxge_status_t 2806 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 2807 { 2808 npi_handle_t handle; 2809 npi_status_t rs; 2810 uint8_t channel; 2811 p_tx_ring_t *tx_rings; 2812 p_tx_ring_t tx_ring_p; 2813 p_nxge_tx_ring_stats_t tdc_stats; 2814 boolean_t txchan_fatal = B_FALSE; 2815 nxge_status_t status = NXGE_OK; 2816 tdmc_inj_par_err_t par_err; 2817 uint32_t value; 2818 2819 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_tx_err_evnts")); 2820 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2821 channel = ldvp->channel; 2822 2823 tx_rings = nxgep->tx_rings->rings; 2824 tx_ring_p = tx_rings[index]; 2825 tdc_stats = tx_ring_p->tdc_stats; 2826 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 2827 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 2828 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 2829 if ((rs = npi_txdma_ring_error_get(handle, channel, 2830 &tdc_stats->errlog)) != NPI_SUCCESS) 2831 return (NXGE_ERROR | rs); 2832 } 2833 2834 if (cs.bits.ldw.mbox_err) { 2835 tdc_stats->mbox_err++; 2836 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2837 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 2838 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2839 "==> nxge_tx_err_evnts(channel %d): " 2840 "fatal error: mailbox", channel)); 2841 txchan_fatal = B_TRUE; 2842 } 2843 if (cs.bits.ldw.pkt_size_err) { 2844 tdc_stats->pkt_size_err++; 2845 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2846 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 2847 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2848 "==> nxge_tx_err_evnts(channel %d): " 2849 "fatal error: pkt_size_err", channel)); 2850 txchan_fatal = B_TRUE; 2851 } 2852 if (cs.bits.ldw.tx_ring_oflow) { 2853 tdc_stats->tx_ring_oflow++; 2854 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2855 
NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 2856 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2857 "==> nxge_tx_err_evnts(channel %d): " 2858 "fatal error: tx_ring_oflow", channel)); 2859 txchan_fatal = B_TRUE; 2860 } 2861 if (cs.bits.ldw.pref_buf_par_err) { 2862 tdc_stats->pre_buf_par_err++; 2863 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2864 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 2865 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2866 "==> nxge_tx_err_evnts(channel %d): " 2867 "fatal error: pre_buf_par_err", channel)); 2868 /* Clear error injection source for parity error */ 2869 (void) npi_txdma_inj_par_error_get(handle, &value); 2870 par_err.value = value; 2871 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 2872 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 2873 txchan_fatal = B_TRUE; 2874 } 2875 if (cs.bits.ldw.nack_pref) { 2876 tdc_stats->nack_pref++; 2877 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2878 NXGE_FM_EREPORT_TDMC_NACK_PREF); 2879 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2880 "==> nxge_tx_err_evnts(channel %d): " 2881 "fatal error: nack_pref", channel)); 2882 txchan_fatal = B_TRUE; 2883 } 2884 if (cs.bits.ldw.nack_pkt_rd) { 2885 tdc_stats->nack_pkt_rd++; 2886 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2887 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 2888 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2889 "==> nxge_tx_err_evnts(channel %d): " 2890 "fatal error: nack_pkt_rd", channel)); 2891 txchan_fatal = B_TRUE; 2892 } 2893 if (cs.bits.ldw.conf_part_err) { 2894 tdc_stats->conf_part_err++; 2895 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2896 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 2897 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2898 "==> nxge_tx_err_evnts(channel %d): " 2899 "fatal error: config_partition_err", channel)); 2900 txchan_fatal = B_TRUE; 2901 } 2902 if (cs.bits.ldw.pkt_prt_err) { 2903 tdc_stats->pkt_part_err++; 2904 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 2905 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 2906 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2907 "==> nxge_tx_err_evnts(channel %d): " 2908 "fatal error: pkt_prt_err", channel)); 2909 txchan_fatal = B_TRUE; 2910 } 2911 2912 /* Clear error injection source in case this is an injected error */ 2913 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 2914 2915 if (txchan_fatal) { 2916 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2917 " nxge_tx_err_evnts: " 2918 " fatal error on channel %d cs 0x%llx\n", 2919 channel, cs.value)); 2920 status = nxge_txdma_fatal_err_recover(nxgep, channel, 2921 tx_ring_p); 2922 if (status == NXGE_OK) { 2923 FM_SERVICE_RESTORED(nxgep); 2924 } 2925 } 2926 2927 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_tx_err_evnts")); 2928 2929 return (status); 2930 } 2931 2932 static nxge_status_t 2933 nxge_txdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel, 2934 p_tx_ring_t tx_ring_p) 2935 { 2936 npi_handle_t handle; 2937 npi_status_t rs = NPI_SUCCESS; 2938 p_tx_mbox_t tx_mbox_p; 2939 nxge_status_t status = NXGE_OK; 2940 2941 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 2942 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2943 "Recovering from TxDMAChannel#%d error...", channel)); 2944 2945 /* 2946 * Stop the dma channel waits for the stop done. 2947 * If the stop done bit is not set, then create 2948 * an error. 
2949 */ 2950 2951 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2952 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 2953 MUTEX_ENTER(&tx_ring_p->lock); 2954 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 2955 if (rs != NPI_SUCCESS) { 2956 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2957 "==> nxge_txdma_fatal_err_recover (channel %d): " 2958 "stop failed ", channel)); 2959 goto fail; 2960 } 2961 2962 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 2963 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2964 2965 /* 2966 * Reset TXDMA channel 2967 */ 2968 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 2969 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 2970 NPI_SUCCESS) { 2971 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2972 "==> nxge_txdma_fatal_err_recover (channel %d)" 2973 " reset channel failed 0x%x", channel, rs)); 2974 goto fail; 2975 } 2976 2977 /* 2978 * Reset the tail (kick) register to 0. 2979 * (Hardware will not reset it. Tx overflow fatal 2980 * error if tail is not set to 0 after reset! 2981 */ 2982 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 2983 2984 /* Restart TXDMA channel */ 2985 2986 /* 2987 * Initialize the TXDMA channel specific FZC control 2988 * configurations. These FZC registers are pertaining 2989 * to each TX channel (i.e. logical pages). 2990 */ 2991 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 2992 2993 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 2994 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2995 tx_ring_p, tx_mbox_p); 2996 if (status != NXGE_OK) 2997 goto fail; 2998 2999 /* 3000 * Initialize the event masks. 3001 */ 3002 tx_ring_p->tx_evmask.value = 0; 3003 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3004 &tx_ring_p->tx_evmask); 3005 if (status != NXGE_OK) 3006 goto fail; 3007 3008 tx_ring_p->wr_index_wrap = B_FALSE; 3009 tx_ring_p->wr_index = 0; 3010 tx_ring_p->rd_index = 0; 3011 3012 /* 3013 * Load TXDMA descriptors, buffers, mailbox, 3014 * initialise the DMA channels and 3015 * enable each DMA channel. 3016 */ 3017 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3018 status = nxge_enable_txdma_channel(nxgep, channel, 3019 tx_ring_p, tx_mbox_p); 3020 MUTEX_EXIT(&tx_ring_p->lock); 3021 if (status != NXGE_OK) 3022 goto fail; 3023 3024 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3025 "Recovery Successful, TxDMAChannel#%d Restored", 3026 channel)); 3027 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3028 3029 return (NXGE_OK); 3030 3031 fail: 3032 MUTEX_EXIT(&tx_ring_p->lock); 3033 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3034 "nxge_txdma_fatal_err_recover (channel %d): " 3035 "failed to recover this txdma channel", channel)); 3036 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3037 3038 return (status); 3039 } 3040 3041 nxge_status_t 3042 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3043 { 3044 npi_handle_t handle; 3045 npi_status_t rs = NPI_SUCCESS; 3046 nxge_status_t status = NXGE_OK; 3047 p_tx_ring_t *tx_desc_rings; 3048 p_tx_rings_t tx_rings; 3049 p_tx_ring_t tx_ring_p; 3050 p_tx_mbox_t tx_mbox_p; 3051 int i, ndmas; 3052 uint16_t channel; 3053 3054 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3055 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3056 "Recovering from TxPort error...")); 3057 3058 /* 3059 * Stop the dma channel waits for the stop done. 3060 * If the stop done bit is not set, then create 3061 * an error. 
3062 */ 3063 3064 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3065 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort stop all DMA channels...")); 3066 3067 tx_rings = nxgep->tx_rings; 3068 tx_desc_rings = tx_rings->rings; 3069 ndmas = tx_rings->ndmas; 3070 3071 for (i = 0; i < ndmas; i++) { 3072 if (tx_desc_rings[i] == NULL) { 3073 continue; 3074 } 3075 tx_ring_p = tx_rings->rings[i]; 3076 MUTEX_ENTER(&tx_ring_p->lock); 3077 } 3078 3079 for (i = 0; i < ndmas; i++) { 3080 if (tx_desc_rings[i] == NULL) { 3081 continue; 3082 } 3083 channel = tx_desc_rings[i]->tdc; 3084 tx_ring_p = tx_rings->rings[i]; 3085 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3086 if (rs != NPI_SUCCESS) { 3087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3088 "==> nxge_tx_port_fatal_err_recover (channel %d): " 3089 "stop failed ", channel)); 3090 goto fail; 3091 } 3092 } 3093 3094 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reclaim all DMA channels...")); 3095 3096 for (i = 0; i < ndmas; i++) { 3097 if (tx_desc_rings[i] == NULL) { 3098 continue; 3099 } 3100 tx_ring_p = tx_rings->rings[i]; 3101 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3102 } 3103 3104 /* 3105 * Reset TXDMA channel 3106 */ 3107 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort reset all DMA channels...")); 3108 3109 for (i = 0; i < ndmas; i++) { 3110 if (tx_desc_rings[i] == NULL) { 3111 continue; 3112 } 3113 channel = tx_desc_rings[i]->tdc; 3114 tx_ring_p = tx_rings->rings[i]; 3115 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, 3116 channel)) != NPI_SUCCESS) { 3117 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3118 "==> nxge_tx_port_fatal_err_recover (channel %d)" 3119 " reset channel failed 0x%x", channel, rs)); 3120 goto fail; 3121 } 3122 3123 /* 3124 * Reset the tail (kick) register to 0. 3125 * (Hardware will not reset it; a Tx overflow fatal 3126 * error results if the tail is not 0 after reset.) 3127 */ 3128 3129 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3130 3131 } 3132 3133 /* 3134 * Initialize the TXDMA channel specific FZC control 3135 * configurations. These FZC registers are pertaining 3136 * to each TX channel (i.e. logical pages). 3137 */ 3138 3139 /* Restart TXDMA channels */ 3140 3141 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-start all DMA channels...")); 3142 3143 for (i = 0; i < ndmas; i++) { 3144 if (tx_desc_rings[i] == NULL) { 3145 continue; 3146 } 3147 channel = tx_desc_rings[i]->tdc; 3148 tx_ring_p = tx_rings->rings[i]; 3149 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3150 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3151 tx_ring_p, tx_mbox_p); 3152 if (status != NXGE_OK) 3153 goto fail; 3154 3155 /* 3156 * Initialize the event masks. 3157 */ 3158 tx_ring_p->tx_evmask.value = 0; 3159 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3160 &tx_ring_p->tx_evmask); 3161 if (status != NXGE_OK) 3162 goto fail; 3163 3164 tx_ring_p->wr_index_wrap = B_FALSE; 3165 tx_ring_p->wr_index = 0; 3166 tx_ring_p->rd_index = 0; 3167 } 3168 3169 /* 3170 * Load TXDMA descriptors, buffers, mailbox, 3171 * initialise the DMA channels and 3172 * enable each DMA channel.
3173 */ 3174 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxPort re-enable all DMA channels...")); 3175 3176 for (i = 0; i < ndmas; i++) { 3177 if (tx_desc_rings[i] == NULL) { 3178 continue; 3179 } 3180 channel = tx_desc_rings[i]->tdc; 3181 tx_ring_p = tx_rings->rings[i]; 3182 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3183 status = nxge_enable_txdma_channel(nxgep, channel, 3184 tx_ring_p, tx_mbox_p); 3185 if (status != NXGE_OK) 3186 goto fail; 3187 } 3188 3189 for (i = 0; i < ndmas; i++) { 3190 if (tx_desc_rings[i] == NULL) { 3191 continue; 3192 } 3193 tx_ring_p = tx_rings->rings[i]; 3194 MUTEX_EXIT(&tx_ring_p->lock); 3195 } 3196 3197 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3198 "Recovery Successful, TxPort Restored")); 3199 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3200 3201 return (NXGE_OK); 3202 3203 fail: 3204 for (i = 0; i < ndmas; i++) { 3205 if (tx_desc_rings[i] == NULL) { 3206 continue; 3207 } 3208 tx_ring_p = tx_rings->rings[i]; 3209 MUTEX_EXIT(&tx_ring_p->lock); 3210 } 3211 3212 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3213 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3214 "nxge_tx_port_fatal_err_recover: " 3215 "failed to recover the tx port")); 3216 3217 return (status); 3218 } 3219 3220 void 3221 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3222 { 3223 tdmc_intr_dbg_t tdi; 3224 tdmc_inj_par_err_t par_err; 3225 uint32_t value; 3226 npi_handle_t handle; 3227 3228 switch (err_id) { 3229 3230 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3231 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3232 /* Clear error injection source for parity error */ 3233 (void) npi_txdma_inj_par_error_get(handle, &value); 3234 par_err.value = value; 3235 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3236 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3237 3238 /* Set the error injection source for this channel */ 3239 (void) npi_txdma_inj_par_error_get(handle, &value); 3240 par_err.value = value; 3241 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3242 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3243 (unsigned long long)par_err.value); 3244 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3245 break; 3246 3247 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3248 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3249 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3250 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3251 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3252 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3253 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3254 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3255 chan, &tdi.value); 3256 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3257 tdi.bits.ldw.pref_buf_par_err = 1; 3258 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3259 tdi.bits.ldw.mbox_err = 1; 3260 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3261 tdi.bits.ldw.nack_pref = 1; 3262 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3263 tdi.bits.ldw.nack_pkt_rd = 1; 3264 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3265 tdi.bits.ldw.pkt_size_err = 1; 3266 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3267 tdi.bits.ldw.tx_ring_oflow = 1; 3268 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3269 tdi.bits.ldw.conf_part_err = 1; 3270 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3271 tdi.bits.ldw.pkt_part_err = 1; 3272 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3273 (unsigned long long)tdi.value); 3274 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3275 chan, tdi.value); 3276 3277 break;
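/* No default case: err_id values that do not map to a TDMC injection source are silently ignored. */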
3278 } 3279 } 3280