1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <hxge_impl.h> 29 #include <hxge_txdma.h> 30 #include <sys/llc1.h> 31 32 uint32_t hxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 33 uint32_t hxge_tx_minfree = 32; 34 uint32_t hxge_tx_intr_thres = 0; 35 uint32_t hxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 36 uint32_t hxge_tx_tiny_pack = 1; 37 uint32_t hxge_tx_use_bcopy = 1; 38 39 extern uint32_t hxge_tx_ring_size; 40 extern uint32_t hxge_bcopy_thresh; 41 extern uint32_t hxge_dvma_thresh; 42 extern uint32_t hxge_dma_stream_thresh; 43 extern dma_method_t hxge_force_dma; 44 45 /* Device register access attributes for PIO. */ 46 extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr; 47 48 /* Device descriptor access attributes for DMA. */ 49 extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr; 50 51 /* Device buffer access attributes for DMA. 
*/ 52 extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr; 53 extern ddi_dma_attr_t hxge_desc_dma_attr; 54 extern ddi_dma_attr_t hxge_tx_dma_attr; 55 56 static hxge_status_t hxge_map_txdma(p_hxge_t hxgep); 57 static void hxge_unmap_txdma(p_hxge_t hxgep); 58 static hxge_status_t hxge_txdma_hw_start(p_hxge_t hxgep); 59 static void hxge_txdma_hw_stop(p_hxge_t hxgep); 60 61 static hxge_status_t hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel, 62 p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p, 63 uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p, 64 p_tx_mbox_t *tx_mbox_p); 65 static void hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel, 66 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p); 67 static hxge_status_t hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t, 68 p_hxge_dma_common_t *, p_tx_ring_t *, uint32_t); 69 static void hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, 70 p_tx_ring_t tx_ring_p); 71 static void hxge_map_txdma_channel_cfg_ring(p_hxge_t, uint16_t, 72 p_hxge_dma_common_t *, p_tx_ring_t, p_tx_mbox_t *); 73 static void hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep, 74 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p); 75 static hxge_status_t hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel, 76 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p); 77 static hxge_status_t hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel, 78 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p); 79 static p_tx_ring_t hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel); 80 static hxge_status_t hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, 81 p_hxge_ldv_t ldvp, tdc_stat_t cs); 82 static p_tx_mbox_t hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel); 83 static hxge_status_t hxge_txdma_fatal_err_recover(p_hxge_t hxgep, 84 uint16_t channel, p_tx_ring_t tx_ring_p); 85 static hxge_status_t hxge_tx_port_fatal_err_recover(p_hxge_t hxgep); 86 87 hxge_status_t 88 hxge_init_txdma_channels(p_hxge_t hxgep) 89 { 90 hxge_status_t status = HXGE_OK; 91 block_reset_t reset_reg; 92 93 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_init_txdma_channels")); 94 95 /* 96 * Reset the TDC block from the PEU to clean up any unknown configuration. 97 * This may have resulted from a previous reboot.
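* Resetting at the block level (rather than per channel) is intended to clear any stale TDC state left behind by a previous driver instance; the short delay that follows the BLOCK_RESET write gives the reset time to complete before the channels are mapped and started.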
98 */ 99 reset_reg.value = 0; 100 reset_reg.bits.tdc_rst = 1; 101 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 102 103 HXGE_DELAY(1000); 104 105 status = hxge_map_txdma(hxgep); 106 if (status != HXGE_OK) { 107 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 108 "<== hxge_init_txdma_channels: status 0x%x", status)); 109 return (status); 110 } 111 112 status = hxge_txdma_hw_start(hxgep); 113 if (status != HXGE_OK) { 114 hxge_unmap_txdma(hxgep); 115 return (status); 116 } 117 118 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 119 "<== hxge_init_txdma_channels: status 0x%x", status)); 120 121 return (HXGE_OK); 122 } 123 124 void 125 hxge_uninit_txdma_channels(p_hxge_t hxgep) 126 { 127 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_uninit_txdma_channels")); 128 129 hxge_txdma_hw_stop(hxgep); 130 hxge_unmap_txdma(hxgep); 131 132 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_uninit_txdma_channels")); 133 } 134 135 void 136 hxge_setup_dma_common(p_hxge_dma_common_t dest_p, p_hxge_dma_common_t src_p, 137 uint32_t entries, uint32_t size) 138 { 139 size_t tsize; 140 *dest_p = *src_p; 141 tsize = size * entries; 142 dest_p->alength = tsize; 143 dest_p->nblocks = entries; 144 dest_p->block_size = size; 145 dest_p->offset += tsize; 146 147 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 148 src_p->alength -= tsize; 149 src_p->dma_cookie.dmac_laddress += tsize; 150 src_p->dma_cookie.dmac_size -= tsize; 151 } 152 153 hxge_status_t 154 hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel, uint64_t reg_data) 155 { 156 hpi_status_t rs = HPI_SUCCESS; 157 hxge_status_t status = HXGE_OK; 158 hpi_handle_t handle; 159 160 HXGE_DEBUG_MSG((hxgep, TX_CTL, " ==> hxge_reset_txdma_channel")); 161 162 handle = HXGE_DEV_HPI_HANDLE(hxgep); 163 if ((reg_data & TDC_TDR_RST_MASK) == TDC_TDR_RST_MASK) { 164 rs = hpi_txdma_channel_reset(handle, channel); 165 } else { 166 rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel); 167 } 168 169 if (rs != HPI_SUCCESS) { 170 status = HXGE_ERROR | rs; 171 } 172 173 /* 174 * Reset the tail (kick) register to 0. (Hardware will not reset it; a Tx 175 * overflow fatal error results if the tail is not set to 0 after reset.) 176 */ 177 TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0); 178 179 HXGE_DEBUG_MSG((hxgep, TX_CTL, " <== hxge_reset_txdma_channel")); 180 181 return (status); 182 } 183 184 hxge_status_t 185 hxge_init_txdma_channel_event_mask(p_hxge_t hxgep, uint16_t channel, 186 tdc_int_mask_t *mask_p) 187 { 188 hpi_handle_t handle; 189 hpi_status_t rs = HPI_SUCCESS; 190 hxge_status_t status = HXGE_OK; 191 192 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 193 "==> hxge_init_txdma_channel_event_mask")); 194 195 handle = HXGE_DEV_HPI_HANDLE(hxgep); 196 197 /* 198 * Mask off tx_rng_oflow since it is a false alarm. The driver 199 * ensures that it does not overflow the hardware and checks the 200 * hardware status. 201 */ 202 mask_p->bits.tx_rng_oflow = 1; 203 rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p); 204 if (rs != HPI_SUCCESS) { 205 status = HXGE_ERROR | rs; 206 } 207 208 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 209 "<== hxge_init_txdma_channel_event_mask")); 210 return (status); 211 } 212 213 hxge_status_t 214 hxge_enable_txdma_channel(p_hxge_t hxgep, 215 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 216 { 217 hpi_handle_t handle; 218 hpi_status_t rs = HPI_SUCCESS; 219 hxge_status_t status = HXGE_OK; 220 221 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_enable_txdma_channel")); 222 223 handle = HXGE_DEV_HPI_HANDLE(hxgep); 224 /* 225 * Use configuration data composed at init time.
Write to hardware the 226 * transmit ring configurations. 227 */ 228 rs = hpi_txdma_ring_config(handle, OP_SET, channel, 229 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 230 231 if (rs != HPI_SUCCESS) { 232 return (HXGE_ERROR | rs); 233 } 234 235 /* Write to hardware the mailbox */ 236 rs = hpi_txdma_mbox_config(handle, OP_SET, channel, 237 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 238 239 if (rs != HPI_SUCCESS) { 240 return (HXGE_ERROR | rs); 241 } 242 243 /* Start the DMA engine. */ 244 rs = hpi_txdma_channel_init_enable(handle, channel); 245 if (rs != HPI_SUCCESS) { 246 return (HXGE_ERROR | rs); 247 } 248 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_enable_txdma_channel")); 249 return (status); 250 } 251 252 void 253 hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum, 254 int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp) 255 { 256 p_tx_pkt_header_t hdrp; 257 p_mblk_t nmp; 258 uint64_t tmp; 259 size_t mblk_len; 260 size_t iph_len; 261 size_t hdrs_size; 262 uint8_t *ip_buf; 263 uint16_t eth_type; 264 uint8_t ipproto; 265 boolean_t is_vlan = B_FALSE; 266 size_t eth_hdr_size; 267 uint8_t hdrs_buf[sizeof (struct ether_header) + 64 + sizeof (uint32_t)]; 268 269 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: mp $%p", mp)); 270 271 /* 272 * Caller should zero out the headers first. 273 */ 274 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 275 276 if (fill_len) { 277 HXGE_DEBUG_MSG((NULL, TX_CTL, 278 "==> hxge_fill_tx_hdr: pkt_len %d npads %d", 279 pkt_len, npads)); 280 tmp = (uint64_t)pkt_len; 281 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 282 283 goto fill_tx_header_done; 284 } 285 tmp = (uint64_t)npads; 286 hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT); 287 288 /* 289 * mp is the original data packet (does not include the Neptune 290 * transmit header). 
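* The internal transmit header is accumulated in hdrp->value: when fill_len is set only the total transfer length is recorded; otherwise the pad count is stored and, for IP packets, the L3 start offset, IP header length and L4 checksum type (TCP or UDP) are derived from the packet headers parsed below.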
291 */ 292 nmp = mp; 293 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 294 HXGE_DEBUG_MSG((NULL, TX_CTL, 295 "==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d", 296 mp, nmp->b_rptr, mblk_len)); 297 ip_buf = NULL; 298 bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header)); 299 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 300 HXGE_DEBUG_MSG((NULL, TX_CTL, 301 "==> : hxge_fill_tx_hdr: (value 0x%llx) ether type 0x%x", 302 eth_type, hdrp->value)); 303 304 if (eth_type < ETHERMTU) { 305 tmp = 1ull; 306 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 307 HXGE_DEBUG_MSG((NULL, TX_CTL, 308 "==> hxge_tx_pkt_hdr_init: LLC value 0x%llx", hdrp->value)); 309 if (*(hdrs_buf + sizeof (struct ether_header)) == 310 LLC_SNAP_SAP) { 311 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 312 sizeof (struct ether_header) + 6))); 313 HXGE_DEBUG_MSG((NULL, TX_CTL, 314 "==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x", 315 eth_type)); 316 } else { 317 goto fill_tx_header_done; 318 } 319 } else if (eth_type == VLAN_ETHERTYPE) { 320 tmp = 1ull; 321 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 322 323 eth_type = ntohs(((struct ether_vlan_header *) 324 hdrs_buf)->ether_type); 325 is_vlan = B_TRUE; 326 HXGE_DEBUG_MSG((NULL, TX_CTL, 327 "==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx", 328 hdrp->value)); 329 } 330 if (!is_vlan) { 331 eth_hdr_size = sizeof (struct ether_header); 332 } else { 333 eth_hdr_size = sizeof (struct ether_vlan_header); 334 } 335 336 switch (eth_type) { 337 case ETHERTYPE_IP: 338 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 339 ip_buf = nmp->b_rptr + eth_hdr_size; 340 mblk_len -= eth_hdr_size; 341 iph_len = ((*ip_buf) & 0x0f); 342 if (mblk_len > (iph_len + sizeof (uint32_t))) { 343 ip_buf = nmp->b_rptr; 344 ip_buf += eth_hdr_size; 345 } else { 346 ip_buf = NULL; 347 } 348 } 349 if (ip_buf == NULL) { 350 hdrs_size = 0; 351 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 352 while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) { 353 mblk_len = (size_t)nmp->b_wptr - 354 (size_t)nmp->b_rptr; 355 if (mblk_len >= 356 (sizeof (hdrs_buf) - hdrs_size)) 357 mblk_len = sizeof (hdrs_buf) - 358 hdrs_size; 359 bcopy(nmp->b_rptr, 360 &hdrs_buf[hdrs_size], mblk_len); 361 hdrs_size += mblk_len; 362 nmp = nmp->b_cont; 363 } 364 ip_buf = hdrs_buf; 365 ip_buf += eth_hdr_size; 366 iph_len = ((*ip_buf) & 0x0f); 367 } 368 ipproto = ip_buf[9]; 369 370 tmp = (uint64_t)iph_len; 371 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 372 tmp = (uint64_t)(eth_hdr_size >> 1); 373 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 374 375 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv4 " 376 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 377 "tmp 0x%x", iph_len, hdrp->bits.l3start, eth_hdr_size, 378 ipproto, tmp)); 379 HXGE_DEBUG_MSG((NULL, TX_CTL, 380 "==> hxge_tx_pkt_hdr_init: IP value 0x%llx", hdrp->value)); 381 break; 382 383 case ETHERTYPE_IPV6: 384 hdrs_size = 0; 385 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 386 while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) { 387 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 388 if (mblk_len >= (sizeof (hdrs_buf) - hdrs_size)) 389 mblk_len = sizeof (hdrs_buf) - hdrs_size; 390 bcopy(nmp->b_rptr, &hdrs_buf[hdrs_size], mblk_len); 391 hdrs_size += mblk_len; 392 nmp = nmp->b_cont; 393 } 394 ip_buf = hdrs_buf; 395 ip_buf += eth_hdr_size; 396 397 tmp = 1ull; 398 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 399 400 tmp = (eth_hdr_size >> 1); 401 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 402 403 /* byte 6 is the next 
header protocol */ 404 ipproto = ip_buf[6]; 405 406 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv6 " 407 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 408 iph_len, hdrp->bits.l3start, eth_hdr_size, ipproto)); 409 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_hdr_init: IPv6 " 410 "value 0x%llx", hdrp->value)); 411 break; 412 413 default: 414 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: non-IP")); 415 goto fill_tx_header_done; 416 } 417 418 switch (ipproto) { 419 case IPPROTO_TCP: 420 HXGE_DEBUG_MSG((NULL, TX_CTL, 421 "==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 422 if (l4_cksum) { 423 tmp = 1ull; 424 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 425 HXGE_DEBUG_MSG((NULL, TX_CTL, 426 "==> hxge_tx_pkt_hdr_init: TCP CKSUM" 427 "value 0x%llx", hdrp->value)); 428 } 429 HXGE_DEBUG_MSG((NULL, TX_CTL, 430 "==> hxge_tx_pkt_hdr_init: TCP value 0x%llx", hdrp->value)); 431 break; 432 433 case IPPROTO_UDP: 434 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: UDP")); 435 if (l4_cksum) { 436 tmp = 0x2ull; 437 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 438 } 439 HXGE_DEBUG_MSG((NULL, TX_CTL, 440 "==> hxge_tx_pkt_hdr_init: UDP value 0x%llx", 441 hdrp->value)); 442 break; 443 444 default: 445 goto fill_tx_header_done; 446 } 447 448 fill_tx_header_done: 449 HXGE_DEBUG_MSG((NULL, TX_CTL, 450 "==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx", 451 pkt_len, npads, hdrp->value)); 452 HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_fill_tx_hdr")); 453 } 454 455 /*ARGSUSED*/ 456 p_mblk_t 457 hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 458 { 459 p_mblk_t newmp = NULL; 460 461 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 462 HXGE_DEBUG_MSG((NULL, TX_CTL, 463 "<== hxge_tx_pkt_header_reserve: allocb failed")); 464 return (NULL); 465 } 466 HXGE_DEBUG_MSG((NULL, TX_CTL, 467 "==> hxge_tx_pkt_header_reserve: get new mp")); 468 DB_TYPE(newmp) = M_DATA; 469 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 470 linkb(newmp, mp); 471 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 472 473 HXGE_DEBUG_MSG((NULL, TX_CTL, 474 "==>hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p", 475 newmp->b_rptr, newmp->b_wptr)); 476 HXGE_DEBUG_MSG((NULL, TX_CTL, 477 "<== hxge_tx_pkt_header_reserve: use new mp")); 478 return (newmp); 479 } 480 481 int 482 hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 483 { 484 uint_t nmblks; 485 ssize_t len; 486 uint_t pkt_len; 487 p_mblk_t nmp, bmp, tmp; 488 uint8_t *b_wptr; 489 490 HXGE_DEBUG_MSG((NULL, TX_CTL, 491 "==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d", 492 mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 493 494 nmp = mp; 495 bmp = mp; 496 nmblks = 0; 497 pkt_len = 0; 498 *tot_xfer_len_p = 0; 499 500 while (nmp) { 501 len = MBLKL(nmp); 502 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: " 503 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 504 len, pkt_len, nmblks, *tot_xfer_len_p)); 505 506 if (len <= 0) { 507 bmp = nmp; 508 nmp = nmp->b_cont; 509 HXGE_DEBUG_MSG((NULL, TX_CTL, 510 "==> hxge_tx_pkt_nmblocks:" 511 " len (0) pkt_len %d nmblks %d", pkt_len, nmblks)); 512 continue; 513 } 514 *tot_xfer_len_p += len; 515 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: " 516 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 517 len, pkt_len, nmblks, *tot_xfer_len_p)); 518 519 if (len < hxge_bcopy_thresh) { 520 HXGE_DEBUG_MSG((NULL, TX_CTL, 521 "==> hxge_tx_pkt_nmblocks: " 522 "len %d (< thresh) pkt_len %d nmblks %d", 523 len, pkt_len, nmblks)); 524 if (pkt_len == 0) 525 nmblks++; 526 pkt_len += len; 527 
if (pkt_len >= hxge_bcopy_thresh) { 528 pkt_len = 0; 529 len = 0; 530 nmp = bmp; 531 } 532 } else { 533 HXGE_DEBUG_MSG((NULL, TX_CTL, 534 "==> hxge_tx_pkt_nmblocks: " 535 "len %d (> thresh) pkt_len %d nmblks %d", 536 len, pkt_len, nmblks)); 537 pkt_len = 0; 538 nmblks++; 539 /* 540 * Hardware limits the transfer length to 4K. If len is 541 * more than 4K, we need to break it up to at most 2 542 * more blocks. 543 */ 544 if (len > TX_MAX_TRANSFER_LENGTH) { 545 uint32_t nsegs; 546 547 HXGE_DEBUG_MSG((NULL, TX_CTL, 548 "==> hxge_tx_pkt_nmblocks: " 549 "len %d pkt_len %d nmblks %d nsegs %d", 550 len, pkt_len, nmblks, nsegs)); 551 nsegs = 1; 552 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 553 ++nsegs; 554 } 555 do { 556 b_wptr = nmp->b_rptr + 557 TX_MAX_TRANSFER_LENGTH; 558 nmp->b_wptr = b_wptr; 559 if ((tmp = dupb(nmp)) == NULL) { 560 return (0); 561 } 562 tmp->b_rptr = b_wptr; 563 tmp->b_wptr = nmp->b_wptr; 564 tmp->b_cont = nmp->b_cont; 565 nmp->b_cont = tmp; 566 nmblks++; 567 if (--nsegs) { 568 nmp = tmp; 569 } 570 } while (nsegs); 571 nmp = tmp; 572 } 573 } 574 575 /* 576 * Hardware limits the transmit gather pointers to 15. 577 */ 578 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 579 TX_MAX_GATHER_POINTERS) { 580 HXGE_DEBUG_MSG((NULL, TX_CTL, 581 "==> hxge_tx_pkt_nmblocks: pull msg - " 582 "len %d pkt_len %d nmblks %d", 583 len, pkt_len, nmblks)); 584 /* Pull all message blocks from b_cont */ 585 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 586 return (0); 587 } 588 freemsg(nmp->b_cont); 589 nmp->b_cont = tmp; 590 pkt_len = 0; 591 } 592 bmp = nmp; 593 nmp = nmp->b_cont; 594 } 595 596 HXGE_DEBUG_MSG((NULL, TX_CTL, 597 "<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 598 "nmblks %d len %d tot_xfer_len %d", 599 mp->b_rptr, mp->b_wptr, nmblks, MBLKL(mp), *tot_xfer_len_p)); 600 return (nmblks); 601 } 602 603 boolean_t 604 hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks) 605 { 606 boolean_t status = B_TRUE; 607 p_hxge_dma_common_t tx_desc_dma_p; 608 hxge_dma_common_t desc_area; 609 p_tx_desc_t tx_desc_ring_vp; 610 p_tx_desc_t tx_desc_p; 611 p_tx_desc_t tx_desc_pp; 612 tx_desc_t r_tx_desc; 613 p_tx_msg_t tx_msg_ring; 614 p_tx_msg_t tx_msg_p; 615 hpi_handle_t handle; 616 tdc_tdr_head_t tx_head; 617 uint32_t pkt_len; 618 uint_t tx_rd_index; 619 uint16_t head_index, tail_index; 620 uint8_t tdc; 621 boolean_t head_wrap, tail_wrap; 622 p_hxge_tx_ring_stats_t tdc_stats; 623 tdc_byte_cnt_t byte_cnt; 624 tdc_tdr_qlen_t qlen; 625 int rc; 626 627 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_reclaim")); 628 629 status = ((tx_ring_p->descs_pending < hxge_reclaim_pending) && 630 (nmblks != 0)); 631 HXGE_DEBUG_MSG((hxgep, TX_CTL, 632 "==> hxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 633 tx_ring_p->descs_pending, hxge_reclaim_pending, nmblks)); 634 635 if (!status) { 636 tx_desc_dma_p = &tx_ring_p->tdc_desc; 637 desc_area = tx_ring_p->tdc_desc; 638 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 639 tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 640 tx_rd_index = tx_ring_p->rd_index; 641 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 642 tx_msg_ring = tx_ring_p->tx_msg_ring; 643 tx_msg_p = &tx_msg_ring[tx_rd_index]; 644 tdc = tx_ring_p->tdc; 645 tdc_stats = tx_ring_p->tdc_stats; 646 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 647 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 648 } 649 tail_index = tx_ring_p->wr_index; 650 tail_wrap = tx_ring_p->wr_index_wrap; 651 652 /* 653 * tdc_byte_cnt reg can be used to get bytes transmitted. 
It 654 * includes padding too in case of runt packets. 655 */ 656 handle = HXGE_DEV_HPI_HANDLE(hxgep); 657 TXDMA_REG_READ64(handle, TDC_BYTE_CNT, tdc, &byte_cnt.value); 658 tdc_stats->obytes_with_pad += byte_cnt.bits.byte_count; 659 660 HXGE_DEBUG_MSG((hxgep, TX_CTL, 661 "==> hxge_txdma_reclaim: tdc %d tx_rd_index %d " 662 "tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ", 663 tdc, tx_rd_index, tail_index, tail_wrap, 664 tx_desc_p, (*(uint64_t *)tx_desc_p))); 665 666 /* 667 * Read the hardware maintained transmit head and wrap around 668 * bit. 669 */ 670 TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc, &tx_head.value); 671 head_index = tx_head.bits.head; 672 head_wrap = tx_head.bits.wrap; 673 HXGE_DEBUG_MSG((hxgep, TX_CTL, 674 "==> hxge_txdma_reclaim: " 675 "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d", 676 tx_rd_index, tail_index, tail_wrap, head_index, head_wrap)); 677 678 /* 679 * For debug only. This can be used to verify the qlen and make 680 * sure the hardware is wrapping the Tdr correctly. 681 */ 682 TXDMA_REG_READ64(handle, TDC_TDR_QLEN, tdc, &qlen.value); 683 HXGE_DEBUG_MSG((hxgep, TX_CTL, 684 "==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d", 685 qlen.bits.tdr_qlen, qlen.bits.tdr_pref_qlen)); 686 687 if (head_index == tail_index) { 688 if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, 689 tail_wrap) && (head_index == tx_rd_index)) { 690 HXGE_DEBUG_MSG((hxgep, TX_CTL, 691 "==> hxge_txdma_reclaim: EMPTY")); 692 return (B_TRUE); 693 } 694 HXGE_DEBUG_MSG((hxgep, TX_CTL, 695 "==> hxge_txdma_reclaim: Checking if ring full")); 696 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 697 tail_wrap)) { 698 HXGE_DEBUG_MSG((hxgep, TX_CTL, 699 "==> hxge_txdma_reclaim: full")); 700 return (B_FALSE); 701 } 702 } 703 HXGE_DEBUG_MSG((hxgep, TX_CTL, 704 "==> hxge_txdma_reclaim: tx_rd_index and head_index")); 705 706 /* XXXX: limit the # of reclaims */ 707 tx_desc_pp = &r_tx_desc; 708 while ((tx_rd_index != head_index) && 709 (tx_ring_p->descs_pending != 0)) { 710 HXGE_DEBUG_MSG((hxgep, TX_CTL, 711 "==> hxge_txdma_reclaim: Checking if pending")); 712 HXGE_DEBUG_MSG((hxgep, TX_CTL, 713 "==> hxge_txdma_reclaim: descs_pending %d ", 714 tx_ring_p->descs_pending)); 715 HXGE_DEBUG_MSG((hxgep, TX_CTL, 716 "==> hxge_txdma_reclaim: " 717 "(tx_rd_index %d head_index %d (tx_desc_p $%p)", 718 tx_rd_index, head_index, tx_desc_p)); 719 720 tx_desc_pp->value = tx_desc_p->value; 721 HXGE_DEBUG_MSG((hxgep, TX_CTL, 722 "==> hxge_txdma_reclaim: " 723 "(tx_rd_index %d head_index %d " 724 "tx_desc_p $%p (desc value 0x%llx) ", 725 tx_rd_index, head_index, 726 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 727 HXGE_DEBUG_MSG((hxgep, TX_CTL, 728 "==> hxge_txdma_reclaim: dump desc:")); 729 730 /* 731 * tdc_byte_cnt reg can be used to get bytes 732 * transmitted 733 */ 734 pkt_len = tx_desc_pp->bits.tr_len; 735 tdc_stats->obytes += pkt_len; 736 tdc_stats->opackets += tx_desc_pp->bits.sop; 737 HXGE_DEBUG_MSG((hxgep, TX_CTL, 738 "==> hxge_txdma_reclaim: pkt_len %d " 739 "tdc channel %d opackets %d", 740 pkt_len, tdc, tdc_stats->opackets)); 741 742 if (tx_msg_p->flags.dma_type == USE_DVMA) { 743 HXGE_DEBUG_MSG((hxgep, TX_CTL, 744 "tx_desc_p = $%p tx_desc_pp = $%p " 745 "index = %d", 746 tx_desc_p, tx_desc_pp, 747 tx_ring_p->rd_index)); 748 (void) dvma_unload(tx_msg_p->dvma_handle, 749 0, -1); 750 tx_msg_p->dvma_handle = NULL; 751 if (tx_ring_p->dvma_wr_index == 752 tx_ring_p->dvma_wrap_mask) { 753 tx_ring_p->dvma_wr_index = 0; 754 } else { 755 tx_ring_p->dvma_wr_index++; 756 } 757 tx_ring_p->dvma_pending--; 758 } 
else if (tx_msg_p->flags.dma_type == USE_DMA) { 759 HXGE_DEBUG_MSG((hxgep, TX_CTL, 760 "==> hxge_txdma_reclaim: USE DMA")); 761 if (rc = ddi_dma_unbind_handle 762 (tx_msg_p->dma_handle)) { 763 cmn_err(CE_WARN, "hxge_reclaim: " 764 "ddi_dma_unbind_handle " 765 "failed. status %d", rc); 766 } 767 } 768 769 HXGE_DEBUG_MSG((hxgep, TX_CTL, 770 "==> hxge_txdma_reclaim: count packets")); 771 772 /* 773 * count a chained packet only once. 774 */ 775 if (tx_msg_p->tx_message != NULL) { 776 freemsg(tx_msg_p->tx_message); 777 tx_msg_p->tx_message = NULL; 778 } 779 tx_msg_p->flags.dma_type = USE_NONE; 780 tx_rd_index = tx_ring_p->rd_index; 781 tx_rd_index = (tx_rd_index + 1) & 782 tx_ring_p->tx_wrap_mask; 783 tx_ring_p->rd_index = tx_rd_index; 784 tx_ring_p->descs_pending--; 785 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 786 tx_msg_p = &tx_msg_ring[tx_rd_index]; 787 } 788 789 status = (nmblks <= (tx_ring_p->tx_ring_size - 790 tx_ring_p->descs_pending - TX_FULL_MARK)); 791 if (status) { 792 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 793 } 794 } else { 795 status = (nmblks <= (tx_ring_p->tx_ring_size - 796 tx_ring_p->descs_pending - TX_FULL_MARK)); 797 } 798 799 HXGE_DEBUG_MSG((hxgep, TX_CTL, 800 "<== hxge_txdma_reclaim status = 0x%08x", status)); 801 return (status); 802 } 803 804 uint_t 805 hxge_tx_intr(caddr_t arg1, caddr_t arg2) 806 { 807 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 808 p_hxge_t hxgep = (p_hxge_t)arg2; 809 p_hxge_ldg_t ldgp; 810 uint8_t channel; 811 uint32_t vindex; 812 hpi_handle_t handle; 813 tdc_stat_t cs; 814 p_tx_ring_t *tx_rings; 815 p_tx_ring_t tx_ring_p; 816 hpi_status_t rs = HPI_SUCCESS; 817 uint_t serviced = DDI_INTR_UNCLAIMED; 818 hxge_status_t status = HXGE_OK; 819 820 if (ldvp == NULL) { 821 HXGE_DEBUG_MSG((NULL, INT_CTL, 822 "<== hxge_tx_intr: hxgep $%p ldvp $%p", hxgep, ldvp)); 823 return (DDI_INTR_UNCLAIMED); 824 } 825 826 if (arg2 == NULL || (void *) ldvp->hxgep != arg2) { 827 hxgep = ldvp->hxgep; 828 } 829 830 HXGE_DEBUG_MSG((hxgep, INT_CTL, 831 "==> hxge_tx_intr: hxgep(arg2) $%p ldvp(arg1) $%p", hxgep, ldvp)); 832 833 /* 834 * This interrupt handler is for a specific transmit dma channel. 835 */ 836 handle = HXGE_DEV_HPI_HANDLE(hxgep); 837 838 /* Get the control and status for this channel. */ 839 channel = ldvp->channel; 840 ldgp = ldvp->ldgp; 841 HXGE_DEBUG_MSG((hxgep, INT_CTL, 842 "==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d", 843 hxgep, ldvp, channel)); 844 845 rs = hpi_txdma_control_status(handle, OP_GET, channel, &cs); 846 vindex = ldvp->vdma_index; 847 HXGE_DEBUG_MSG((hxgep, INT_CTL, 848 "==> hxge_tx_intr:channel %d ring index %d status 0x%08x", 849 channel, vindex, rs)); 850 851 if (!rs && cs.bits.marked) { 852 HXGE_DEBUG_MSG((hxgep, INT_CTL, 853 "==> hxge_tx_intr:channel %d ring index %d " 854 "status 0x%08x (marked bit set)", channel, vindex, rs)); 855 tx_rings = hxgep->tx_rings->rings; 856 tx_ring_p = tx_rings[vindex]; 857 HXGE_DEBUG_MSG((hxgep, INT_CTL, 858 "==> hxge_tx_intr:channel %d ring index %d " 859 "status 0x%08x (marked bit set, calling reclaim)", 860 channel, vindex, rs)); 861 862 MUTEX_ENTER(&tx_ring_p->lock); 863 (void) hxge_txdma_reclaim(hxgep, tx_rings[vindex], 0); 864 MUTEX_EXIT(&tx_ring_p->lock); 865 mac_tx_update(hxgep->mach); 866 } 867 868 /* 869 * Process other transmit control and status. Check the ldv state. 
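* hxge_tx_err_evnts() examines the error bits in the control/status word and, for fatal conditions, may initiate channel or port level recovery; the status value is then written back below to clear the bits that were set.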
870 */ 871 status = hxge_tx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs); 872 873 /* Clear the error bits */ 874 RXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value); 875 876 /* 877 * Rearm this logical group if this is a single device group. 878 */ 879 if (ldgp->nldvs == 1) { 880 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_tx_intr: rearm")); 881 if (status == HXGE_OK) { 882 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, 883 B_TRUE, ldgp->ldg_timer); 884 } 885 } 886 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_tx_intr")); 887 serviced = DDI_INTR_CLAIMED; 888 return (serviced); 889 } 890 891 void 892 hxge_txdma_stop(p_hxge_t hxgep) 893 { 894 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop")); 895 896 (void) hxge_tx_vmac_disable(hxgep); 897 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP); 898 899 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop")); 900 } 901 902 hxge_status_t 903 hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable) 904 { 905 int i, ndmas; 906 uint16_t channel; 907 p_tx_rings_t tx_rings; 908 p_tx_ring_t *tx_desc_rings; 909 hpi_handle_t handle; 910 hpi_status_t rs = HPI_SUCCESS; 911 hxge_status_t status = HXGE_OK; 912 913 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 914 "==> hxge_txdma_hw_mode: enable mode %d", enable)); 915 916 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 917 HXGE_DEBUG_MSG((hxgep, TX_CTL, 918 "<== hxge_txdma_mode: not initialized")); 919 return (HXGE_ERROR); 920 } 921 tx_rings = hxgep->tx_rings; 922 if (tx_rings == NULL) { 923 HXGE_DEBUG_MSG((hxgep, TX_CTL, 924 "<== hxge_txdma_hw_mode: NULL global ring pointer")); 925 return (HXGE_ERROR); 926 } 927 tx_desc_rings = tx_rings->rings; 928 if (tx_desc_rings == NULL) { 929 HXGE_DEBUG_MSG((hxgep, TX_CTL, 930 "<== hxge_txdma_hw_mode: NULL rings pointer")); 931 return (HXGE_ERROR); 932 } 933 ndmas = tx_rings->ndmas; 934 if (!ndmas) { 935 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 936 "<== hxge_txdma_hw_mode: no dma channel allocated")); 937 return (HXGE_ERROR); 938 } 939 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_mode: " 940 "tx_rings $%p tx_desc_rings $%p ndmas %d", 941 tx_rings, tx_desc_rings, ndmas)); 942 943 handle = HXGE_DEV_HPI_HANDLE(hxgep); 944 for (i = 0; i < ndmas; i++) { 945 if (tx_desc_rings[i] == NULL) { 946 continue; 947 } 948 channel = tx_desc_rings[i]->tdc; 949 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 950 "==> hxge_txdma_hw_mode: channel %d", channel)); 951 if (enable) { 952 rs = hpi_txdma_channel_enable(handle, channel); 953 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 954 "==> hxge_txdma_hw_mode: channel %d (enable) " 955 "rs 0x%x", channel, rs)); 956 } else { 957 /* 958 * Stop the dma channel and waits for the stop done. If 959 * the stop done bit is not set, then force an error so 960 * TXC will stop. All channels bound to this port need 961 * to be stopped and reset after injecting an interrupt 962 * error. 963 */ 964 rs = hpi_txdma_channel_disable(handle, channel); 965 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 966 "==> hxge_txdma_hw_mode: channel %d (disable) " 967 "rs 0x%x", channel, rs)); 968 } 969 } 970 971 status = ((rs == HPI_SUCCESS) ? 
HXGE_OK : HXGE_ERROR | rs); 972 973 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 974 "<== hxge_txdma_hw_mode: status 0x%x", status)); 975 976 return (status); 977 } 978 979 void 980 hxge_txdma_enable_channel(p_hxge_t hxgep, uint16_t channel) 981 { 982 hpi_handle_t handle; 983 984 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 985 "==> hxge_txdma_enable_channel: channel %d", channel)); 986 987 handle = HXGE_DEV_HPI_HANDLE(hxgep); 988 /* enable the transmit dma channels */ 989 (void) hpi_txdma_channel_enable(handle, channel); 990 991 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_txdma_enable_channel")); 992 } 993 994 void 995 hxge_txdma_disable_channel(p_hxge_t hxgep, uint16_t channel) 996 { 997 hpi_handle_t handle; 998 999 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1000 "==> hxge_txdma_disable_channel: channel %d", channel)); 1001 1002 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1003 /* stop the transmit dma channels */ 1004 (void) hpi_txdma_channel_disable(handle, channel); 1005 1006 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_disable_channel")); 1007 } 1008 1009 int 1010 hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel) 1011 { 1012 hpi_handle_t handle; 1013 int status; 1014 hpi_status_t rs = HPI_SUCCESS; 1015 1016 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop_inj_err")); 1017 1018 /* 1019 * Stop the dma channel and wait for the stop done. If the stop done bit 1020 * is not set, then create an error. 1021 */ 1022 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1023 rs = hpi_txdma_channel_disable(handle, channel); 1024 status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs); 1025 if (status == HXGE_OK) { 1026 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1027 "<== hxge_txdma_stop_inj_err (channel %d): " 1028 "stopped OK", channel)); 1029 return (status); 1030 } 1031 1032 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1033 "==> hxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1034 " (injected error but still not stopped)", channel, rs)); 1035 1036 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop_inj_err")); 1037 1038 return (status); 1039 } 1040 1041 /*ARGSUSED*/ 1042 void 1043 hxge_fixup_txdma_rings(p_hxge_t hxgep) 1044 { 1045 int index, ndmas; 1046 uint16_t channel; 1047 p_tx_rings_t tx_rings; 1048 1049 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_txdma_rings")); 1050 1051 /* 1052 * For each transmit channel, reclaim each descriptor and free buffers.
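* hxge_txdma_fixup_channel() reclaims whatever the hardware has already transmitted and then zeroes the software read/write indices, the cached head value and the kick (tail) value so each ring is left in a clean state.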
1053 */ 1054 tx_rings = hxgep->tx_rings; 1055 if (tx_rings == NULL) { 1056 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1057 "<== hxge_fixup_txdma_rings: NULL ring pointer")); 1058 return; 1059 } 1060 1061 ndmas = tx_rings->ndmas; 1062 if (!ndmas) { 1063 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1064 "<== hxge_fixup_txdma_rings: no channel allocated")); 1065 return; 1066 } 1067 1068 if (tx_rings->rings == NULL) { 1069 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1070 "<== hxge_fixup_txdma_rings: NULL rings pointer")); 1071 return; 1072 } 1073 1074 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_fixup_txdma_rings: " 1075 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1076 tx_rings, tx_rings->rings, ndmas)); 1077 1078 for (index = 0; index < ndmas; index++) { 1079 channel = tx_rings->rings[index]->tdc; 1080 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1081 "==> hxge_fixup_txdma_rings: channel %d", channel)); 1082 hxge_txdma_fixup_channel(hxgep, tx_rings->rings[index], 1083 channel); 1084 } 1085 1086 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_txdma_rings")); 1087 } 1088 1089 /*ARGSUSED*/ 1090 void 1091 hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel) 1092 { 1093 p_tx_ring_t ring_p; 1094 1095 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_channel")); 1096 1097 ring_p = hxge_txdma_get_ring(hxgep, channel); 1098 if (ring_p == NULL) { 1099 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel")); 1100 return; 1101 } 1102 1103 if (ring_p->tdc != channel) { 1104 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1105 "<== hxge_txdma_fix_channel: channel not matched " 1106 "ring tdc %d passed channel", ring_p->tdc, channel)); 1107 return; 1108 } 1109 1110 hxge_txdma_fixup_channel(hxgep, ring_p, channel); 1111 1112 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel")); 1113 } 1114 1115 /*ARGSUSED*/ 1116 void 1117 hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel) 1118 { 1119 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_channel")); 1120 1121 if (ring_p == NULL) { 1122 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1123 "<== hxge_txdma_fixup_channel: NULL ring pointer")); 1124 return; 1125 } 1126 if (ring_p->tdc != channel) { 1127 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1128 "<== hxge_txdma_fixup_channel: channel not matched " 1129 "ring tdc %d passed channel", ring_p->tdc, channel)); 1130 return; 1131 } 1132 MUTEX_ENTER(&ring_p->lock); 1133 (void) hxge_txdma_reclaim(hxgep, ring_p, 0); 1134 1135 ring_p->rd_index = 0; 1136 ring_p->wr_index = 0; 1137 ring_p->ring_head.value = 0; 1138 ring_p->ring_kick_tail.value = 0; 1139 ring_p->descs_pending = 0; 1140 MUTEX_EXIT(&ring_p->lock); 1141 1142 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_channel")); 1143 } 1144 1145 /*ARGSUSED*/ 1146 void 1147 hxge_txdma_hw_kick(p_hxge_t hxgep) 1148 { 1149 int index, ndmas; 1150 uint16_t channel; 1151 p_tx_rings_t tx_rings; 1152 1153 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick")); 1154 1155 tx_rings = hxgep->tx_rings; 1156 if (tx_rings == NULL) { 1157 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1158 "<== hxge_txdma_hw_kick: NULL ring pointer")); 1159 return; 1160 } 1161 ndmas = tx_rings->ndmas; 1162 if (!ndmas) { 1163 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1164 "<== hxge_txdma_hw_kick: no channel allocated")); 1165 return; 1166 } 1167 if (tx_rings->rings == NULL) { 1168 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1169 "<== hxge_txdma_hw_kick: NULL rings pointer")); 1170 return; 1171 } 1172 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_kick: " 1173 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1174 tx_rings, tx_rings->rings, ndmas)); 1175 1176 for (index = 0; index < 
ndmas; index++) { 1177 channel = tx_rings->rings[index]->tdc; 1178 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1179 "==> hxge_txdma_hw_kick: channel %d", channel)); 1180 hxge_txdma_hw_kick_channel(hxgep, tx_rings->rings[index], 1181 channel); 1182 } 1183 1184 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick")); 1185 } 1186 1187 /*ARGSUSED*/ 1188 void 1189 hxge_txdma_kick_channel(p_hxge_t hxgep, uint16_t channel) 1190 { 1191 p_tx_ring_t ring_p; 1192 1193 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_kick_channel")); 1194 1195 ring_p = hxge_txdma_get_ring(hxgep, channel); 1196 if (ring_p == NULL) { 1197 HXGE_DEBUG_MSG((hxgep, TX_CTL, " hxge_txdma_kick_channel")); 1198 return; 1199 } 1200 1201 if (ring_p->tdc != channel) { 1202 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1203 "<== hxge_txdma_kick_channel: channel not matched " 1204 "ring tdc %d passed channel", ring_p->tdc, channel)); 1205 return; 1206 } 1207 1208 hxge_txdma_hw_kick_channel(hxgep, ring_p, channel); 1209 1210 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel")); 1211 } 1212 1213 /*ARGSUSED*/ 1214 void 1215 hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel) 1216 { 1217 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick_channel")); 1218 1219 if (ring_p == NULL) { 1220 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1221 "<== hxge_txdma_hw_kick_channel: NULL ring pointer")); 1222 return; 1223 } 1224 1225 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick_channel")); 1226 } 1227 1228 /*ARGSUSED*/ 1229 void 1230 hxge_check_tx_hang(p_hxge_t hxgep) 1231 { 1232 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_check_tx_hang")); 1233 1234 /* 1235 * Needs inputs from hardware for regs: head index had not moved since 1236 * last timeout. packets not transmitted or stuffed registers. 1237 */ 1238 if (hxge_txdma_hung(hxgep)) { 1239 hxge_fixup_hung_txdma_rings(hxgep); 1240 } 1241 1242 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_check_tx_hang")); 1243 } 1244 1245 int 1246 hxge_txdma_hung(p_hxge_t hxgep) 1247 { 1248 int index, ndmas; 1249 uint16_t channel; 1250 p_tx_rings_t tx_rings; 1251 p_tx_ring_t tx_ring_p; 1252 1253 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hung")); 1254 1255 tx_rings = hxgep->tx_rings; 1256 if (tx_rings == NULL) { 1257 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1258 "<== hxge_txdma_hung: NULL ring pointer")); 1259 return (B_FALSE); 1260 } 1261 1262 ndmas = tx_rings->ndmas; 1263 if (!ndmas) { 1264 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1265 "<== hxge_txdma_hung: no channel allocated")); 1266 return (B_FALSE); 1267 } 1268 1269 if (tx_rings->rings == NULL) { 1270 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1271 "<== hxge_txdma_hung: NULL rings pointer")); 1272 return (B_FALSE); 1273 } 1274 1275 for (index = 0; index < ndmas; index++) { 1276 channel = tx_rings->rings[index]->tdc; 1277 tx_ring_p = tx_rings->rings[index]; 1278 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1279 "==> hxge_txdma_hung: channel %d", channel)); 1280 if (hxge_txdma_channel_hung(hxgep, tx_ring_p, channel)) { 1281 return (B_TRUE); 1282 } 1283 } 1284 1285 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hung")); 1286 1287 return (B_FALSE); 1288 } 1289 1290 int 1291 hxge_txdma_channel_hung(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1292 { 1293 uint16_t head_index, tail_index; 1294 boolean_t head_wrap, tail_wrap; 1295 hpi_handle_t handle; 1296 tdc_tdr_head_t tx_head; 1297 uint_t tx_rd_index; 1298 1299 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung")); 1300 1301 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1302 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1303 "==> hxge_txdma_channel_hung: 
channel %d", channel)); 1304 MUTEX_ENTER(&tx_ring_p->lock); 1305 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0); 1306 1307 tail_index = tx_ring_p->wr_index; 1308 tail_wrap = tx_ring_p->wr_index_wrap; 1309 tx_rd_index = tx_ring_p->rd_index; 1310 MUTEX_EXIT(&tx_ring_p->lock); 1311 1312 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1313 "==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1314 "tail_index %d tail_wrap %d ", 1315 channel, tx_rd_index, tail_index, tail_wrap)); 1316 /* 1317 * Read the hardware maintained transmit head and wrap around bit. 1318 */ 1319 (void) hpi_txdma_ring_head_get(handle, channel, &tx_head); 1320 head_index = tx_head.bits.head; 1321 head_wrap = tx_head.bits.wrap; 1322 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung: " 1323 "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d", 1324 tx_rd_index, tail_index, tail_wrap, head_index, head_wrap)); 1325 1326 if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, tail_wrap) && 1327 (head_index == tx_rd_index)) { 1328 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1329 "==> hxge_txdma_channel_hung: EMPTY")); 1330 return (B_FALSE); 1331 } 1332 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1333 "==> hxge_txdma_channel_hung: Checking if ring full")); 1334 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, tail_wrap)) { 1335 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1336 "==> hxge_txdma_channel_hung: full")); 1337 return (B_TRUE); 1338 } 1339 1340 /* If not full, check with hardware to see if it is hung */ 1341 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_channel_hung")); 1342 1343 return (B_FALSE); 1344 } 1345 1346 /*ARGSUSED*/ 1347 void 1348 hxge_fixup_hung_txdma_rings(p_hxge_t hxgep) 1349 { 1350 int index, ndmas; 1351 uint16_t channel; 1352 p_tx_rings_t tx_rings; 1353 1354 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings")); 1355 tx_rings = hxgep->tx_rings; 1356 if (tx_rings == NULL) { 1357 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1358 "<== hxge_fixup_hung_txdma_rings: NULL ring pointer")); 1359 return; 1360 } 1361 ndmas = tx_rings->ndmas; 1362 if (!ndmas) { 1363 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1364 "<== hxge_fixup_hung_txdma_rings: no channel allocated")); 1365 return; 1366 } 1367 if (tx_rings->rings == NULL) { 1368 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1369 "<== hxge_fixup_hung_txdma_rings: NULL rings pointer")); 1370 return; 1371 } 1372 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings: " 1373 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1374 tx_rings, tx_rings->rings, ndmas)); 1375 1376 for (index = 0; index < ndmas; index++) { 1377 channel = tx_rings->rings[index]->tdc; 1378 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1379 "==> hxge_fixup_hung_txdma_rings: channel %d", channel)); 1380 hxge_txdma_fixup_hung_channel(hxgep, tx_rings->rings[index], 1381 channel); 1382 } 1383 1384 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_hung_txdma_rings")); 1385 } 1386 1387 /*ARGSUSED*/ 1388 void 1389 hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel) 1390 { 1391 p_tx_ring_t ring_p; 1392 1393 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_hung_channel")); 1394 ring_p = hxge_txdma_get_ring(hxgep, channel); 1395 if (ring_p == NULL) { 1396 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1397 "<== hxge_txdma_fix_hung_channel")); 1398 return; 1399 } 1400 if (ring_p->tdc != channel) { 1401 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1402 "<== hxge_txdma_fix_hung_channel: channel not matched " 1403 "ring tdc %d passed channel", ring_p->tdc, channel)); 1404 return; 1405 } 1406 hxge_txdma_fixup_channel(hxgep, ring_p, channel); 1407 1408 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== 
hxge_txdma_fix_hung_channel")); 1409 } 1410 1411 /*ARGSUSED*/ 1412 void 1413 hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, 1414 uint16_t channel) 1415 { 1416 hpi_handle_t handle; 1417 int status = HXGE_OK; 1418 1419 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_hung_channel")); 1420 1421 if (ring_p == NULL) { 1422 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1423 "<== hxge_txdma_fixup_hung_channel: NULL ring pointer")); 1424 return; 1425 } 1426 if (ring_p->tdc != channel) { 1427 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1428 "<== hxge_txdma_fixup_hung_channel: channel " 1429 "not matched ring tdc %d passed channel", 1430 ring_p->tdc, channel)); 1431 return; 1432 } 1433 /* Reclaim descriptors */ 1434 MUTEX_ENTER(&ring_p->lock); 1435 (void) hxge_txdma_reclaim(hxgep, ring_p, 0); 1436 MUTEX_EXIT(&ring_p->lock); 1437 1438 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1439 /* 1440 * Stop the dma channel waits for the stop done. If the stop done bit 1441 * is not set, then force an error. 1442 */ 1443 status = hpi_txdma_channel_disable(handle, channel); 1444 if (!(status & HPI_TXDMA_STOP_FAILED)) { 1445 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1446 "<== hxge_txdma_fixup_hung_channel: stopped OK " 1447 "ring tdc %d passed channel %d", ring_p->tdc, channel)); 1448 return; 1449 } 1450 /* Stop done bit will be set as a result of error injection */ 1451 status = hpi_txdma_channel_disable(handle, channel); 1452 if (!(status & HPI_TXDMA_STOP_FAILED)) { 1453 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1454 "<== hxge_txdma_fixup_hung_channel: stopped again" 1455 "ring tdc %d passed channel", ring_p->tdc, channel)); 1456 return; 1457 } 1458 1459 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1460 "<== hxge_txdma_fixup_hung_channel: stop done still not set!! " 1461 "ring tdc %d passed channel", ring_p->tdc, channel)); 1462 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_hung_channel")); 1463 } 1464 1465 /*ARGSUSED*/ 1466 void 1467 hxge_reclaim_rings(p_hxge_t hxgep) 1468 { 1469 int index, ndmas; 1470 uint16_t channel; 1471 p_tx_rings_t tx_rings; 1472 p_tx_ring_t tx_ring_p; 1473 1474 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_ring")); 1475 tx_rings = hxgep->tx_rings; 1476 if (tx_rings == NULL) { 1477 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1478 "<== hxge_reclain_rimgs: NULL ring pointer")); 1479 return; 1480 } 1481 ndmas = tx_rings->ndmas; 1482 if (!ndmas) { 1483 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1484 "<== hxge_reclain_rimgs: no channel allocated")); 1485 return; 1486 } 1487 if (tx_rings->rings == NULL) { 1488 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1489 "<== hxge_reclain_rimgs: NULL rings pointer")); 1490 return; 1491 } 1492 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclain_rimgs: " 1493 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1494 tx_rings, tx_rings->rings, ndmas)); 1495 1496 for (index = 0; index < ndmas; index++) { 1497 channel = tx_rings->rings[index]->tdc; 1498 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> reclain_rimgs: channel %d", 1499 channel)); 1500 tx_ring_p = tx_rings->rings[index]; 1501 MUTEX_ENTER(&tx_ring_p->lock); 1502 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, channel); 1503 MUTEX_EXIT(&tx_ring_p->lock); 1504 } 1505 1506 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reclaim_rings")); 1507 } 1508 1509 /* 1510 * Static functions start here. 
1511 */ 1512 static hxge_status_t 1513 hxge_map_txdma(p_hxge_t hxgep) 1514 { 1515 int i, ndmas; 1516 uint16_t channel; 1517 p_tx_rings_t tx_rings; 1518 p_tx_ring_t *tx_desc_rings; 1519 p_tx_mbox_areas_t tx_mbox_areas_p; 1520 p_tx_mbox_t *tx_mbox_p; 1521 p_hxge_dma_pool_t dma_buf_poolp; 1522 p_hxge_dma_pool_t dma_cntl_poolp; 1523 p_hxge_dma_common_t *dma_buf_p; 1524 p_hxge_dma_common_t *dma_cntl_p; 1525 hxge_status_t status = HXGE_OK; 1526 1527 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma")); 1528 1529 dma_buf_poolp = hxgep->tx_buf_pool_p; 1530 dma_cntl_poolp = hxgep->tx_cntl_pool_p; 1531 1532 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1533 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1534 "==> hxge_map_txdma: buf not allocated")); 1535 return (HXGE_ERROR); 1536 } 1537 ndmas = dma_buf_poolp->ndmas; 1538 if (!ndmas) { 1539 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1540 "<== hxge_map_txdma: no dma allocated")); 1541 return (HXGE_ERROR); 1542 } 1543 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 1544 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 1545 1546 tx_rings = (p_tx_rings_t)KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 1547 tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC( 1548 sizeof (p_tx_ring_t) * ndmas, KM_SLEEP); 1549 1550 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: " 1551 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 1552 1553 tx_mbox_areas_p = (p_tx_mbox_areas_t) 1554 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 1555 tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC( 1556 sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP); 1557 1558 /* 1559 * Map descriptors from the buffer pools for each dma channel. 1560 */ 1561 for (i = 0; i < ndmas; i++) { 1562 /* 1563 * Set up and prepare buffer blocks, descriptors and mailbox. 1564 */ 1565 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 1566 status = hxge_map_txdma_channel(hxgep, channel, 1567 (p_hxge_dma_common_t *)&dma_buf_p[i], 1568 (p_tx_ring_t *)&tx_desc_rings[i], 1569 dma_buf_poolp->num_chunks[i], 1570 (p_hxge_dma_common_t *)&dma_cntl_p[i], 1571 (p_tx_mbox_t *)&tx_mbox_p[i]); 1572 if (status != HXGE_OK) { 1573 goto hxge_map_txdma_fail1; 1574 } 1575 tx_desc_rings[i]->index = (uint16_t)i; 1576 tx_desc_rings[i]->tdc_stats = &hxgep->statsp->tdc_stats[i]; 1577 } 1578 1579 tx_rings->ndmas = ndmas; 1580 tx_rings->rings = tx_desc_rings; 1581 hxgep->tx_rings = tx_rings; 1582 tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p; 1583 hxgep->tx_mbox_areas_p = tx_mbox_areas_p; 1584 1585 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: " 1586 "tx_rings $%p rings $%p", hxgep->tx_rings, hxgep->tx_rings->rings)); 1587 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: " 1588 "tx_rings $%p tx_desc_rings $%p", 1589 hxgep->tx_rings, tx_desc_rings)); 1590 1591 goto hxge_map_txdma_exit; 1592 1593 hxge_map_txdma_fail1: 1594 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1595 "==> hxge_map_txdma: uninit tx desc " 1596 "(status 0x%x channel %d i %d)", hxgep, status, channel, i)); 1597 i--; 1598 for (; i >= 0; i--) { 1599 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 1600 hxge_unmap_txdma_channel(hxgep, channel, tx_desc_rings[i], 1601 tx_mbox_p[i]); 1602 } 1603 1604 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 1605 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 1606 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 1607 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 1608 1609 hxge_map_txdma_exit: 1610 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1611 "==> hxge_map_txdma: (status 0x%x channel %d)", status, channel)); 1612 1613 return 
(status); 1614 } 1615 1616 static void 1617 hxge_unmap_txdma(p_hxge_t hxgep) 1618 { 1619 int i, ndmas; 1620 uint8_t channel; 1621 p_tx_rings_t tx_rings; 1622 p_tx_ring_t *tx_desc_rings; 1623 p_tx_mbox_areas_t tx_mbox_areas_p; 1624 p_tx_mbox_t *tx_mbox_p; 1625 p_hxge_dma_pool_t dma_buf_poolp; 1626 1627 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma")); 1628 1629 dma_buf_poolp = hxgep->tx_buf_pool_p; 1630 if (!dma_buf_poolp->buf_allocated) { 1631 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1632 "==> hxge_unmap_txdma: buf not allocated")); 1633 return; 1634 } 1635 ndmas = dma_buf_poolp->ndmas; 1636 if (!ndmas) { 1637 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1638 "<== hxge_unmap_txdma: no dma allocated")); 1639 return; 1640 } 1641 tx_rings = hxgep->tx_rings; 1642 1643 if (tx_rings == NULL) { 1644 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1645 "<== hxge_unmap_txdma: NULL ring pointer")); 1646 return; 1647 } 1648 tx_desc_rings = tx_rings->rings; 1649 if (tx_desc_rings == NULL) { 1650 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1651 "<== hxge_unmap_txdma: NULL ring pointers")); 1652 return; 1653 } 1654 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma: " 1655 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1656 tx_rings, tx_desc_rings, ndmas)); 1657 1658 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 1659 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 1660 1661 for (i = 0; i < ndmas; i++) { 1662 channel = tx_desc_rings[i]->tdc; 1663 (void) hxge_unmap_txdma_channel(hxgep, channel, 1664 (p_tx_ring_t)tx_desc_rings[i], 1665 (p_tx_mbox_t)tx_mbox_p[i]); 1666 } 1667 1668 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 1669 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 1670 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 1671 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 1672 1673 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma")); 1674 } 1675 1676 static hxge_status_t 1677 hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel, 1678 p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p, 1679 uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p, 1680 p_tx_mbox_t *tx_mbox_p) 1681 { 1682 int status = HXGE_OK; 1683 1684 /* 1685 * Set up and prepare buffer blocks, descriptors and mailbox. 1686 */ 1687 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1688 "==> hxge_map_txdma_channel (channel %d)", channel)); 1689 1690 /* 1691 * Transmit buffer blocks 1692 */ 1693 status = hxge_map_txdma_channel_buf_ring(hxgep, channel, 1694 dma_buf_p, tx_desc_p, num_chunks); 1695 if (status != HXGE_OK) { 1696 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1697 "==> hxge_map_txdma_channel (channel %d): " 1698 "map buffer failed 0x%x", channel, status)); 1699 goto hxge_map_txdma_channel_exit; 1700 } 1701 /* 1702 * Transmit block ring, and mailbox.
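* hxge_map_txdma_channel_cfg_ring() below carves the descriptor ring and the mailbox out of the per-channel control DMA area and precomputes the register images (TDR configuration, kick, event mask and mailbox address) that are written to the hardware when the channel is enabled. The TDR length field is kept in units of 32 descriptors (the low five bits are dropped), so, for example, a 1024-entry ring would be programmed as 1024 >> 5 = 32.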
1703 */ 1704 hxge_map_txdma_channel_cfg_ring(hxgep, channel, dma_cntl_p, *tx_desc_p, 1705 tx_mbox_p); 1706 1707 goto hxge_map_txdma_channel_exit; 1708 1709 hxge_map_txdma_channel_fail1: 1710 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1711 "==> hxge_map_txdma_channel: unmap buf" 1712 "(status 0x%x channel %d)", status, channel)); 1713 hxge_unmap_txdma_channel_buf_ring(hxgep, *tx_desc_p); 1714 1715 hxge_map_txdma_channel_exit: 1716 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1717 "<== hxge_map_txdma_channel: (status 0x%x channel %d)", 1718 status, channel)); 1719 1720 return (status); 1721 } 1722 1723 /*ARGSUSED*/ 1724 static void 1725 hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel, 1726 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 1727 { 1728 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1729 "==> hxge_unmap_txdma_channel (channel %d)", channel)); 1730 1731 /* unmap tx block ring, and mailbox. */ 1732 (void) hxge_unmap_txdma_channel_cfg_ring(hxgep, tx_ring_p, tx_mbox_p); 1733 1734 /* unmap buffer blocks */ 1735 (void) hxge_unmap_txdma_channel_buf_ring(hxgep, tx_ring_p); 1736 1737 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma_channel")); 1738 } 1739 1740 /*ARGSUSED*/ 1741 static void 1742 hxge_map_txdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel, 1743 p_hxge_dma_common_t *dma_cntl_p, p_tx_ring_t tx_ring_p, 1744 p_tx_mbox_t *tx_mbox_p) 1745 { 1746 p_tx_mbox_t mboxp; 1747 p_hxge_dma_common_t cntl_dmap; 1748 p_hxge_dma_common_t dmap; 1749 tdc_tdr_cfg_t *tx_ring_cfig_p; 1750 tdc_tdr_kick_t *tx_ring_kick_p; 1751 tdc_tdr_cfg_t *tx_cs_p; 1752 tdc_int_mask_t *tx_evmask_p; 1753 tdc_mbh_t *mboxh_p; 1754 tdc_mbl_t *mboxl_p; 1755 uint64_t tx_desc_len; 1756 1757 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1758 "==> hxge_map_txdma_channel_cfg_ring")); 1759 1760 cntl_dmap = *dma_cntl_p; 1761 1762 dmap = (p_hxge_dma_common_t)&tx_ring_p->tdc_desc; 1763 hxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 1764 sizeof (tx_desc_t)); 1765 1766 /* 1767 * Zero out transmit ring descriptors. 
1768 */ 1769 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1770 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 1771 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 1772 tx_cs_p = &(tx_ring_p->tx_cs); 1773 tx_evmask_p = &(tx_ring_p->tx_evmask); 1774 tx_ring_cfig_p->value = 0; 1775 tx_ring_kick_p->value = 0; 1776 tx_cs_p->value = 0; 1777 tx_evmask_p->value = 0; 1778 1779 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1780 "==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p", 1781 dma_channel, dmap->dma_cookie.dmac_laddress)); 1782 1783 tx_ring_cfig_p->value = 0; 1784 1785 /* Hydra len is 11 bits and the lower 5 bits are 0s */ 1786 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 5); 1787 tx_ring_cfig_p->value = 1788 (dmap->dma_cookie.dmac_laddress & TDC_TDR_CFG_ADDR_MASK) | 1789 (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT); 1790 1791 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1792 "==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 1793 dma_channel, tx_ring_cfig_p->value)); 1794 1795 tx_cs_p->bits.reset = 1; 1796 1797 /* Map in mailbox */ 1798 mboxp = (p_tx_mbox_t)KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 1799 dmap = (p_hxge_dma_common_t)&mboxp->tx_mbox; 1800 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 1801 mboxh_p = (tdc_mbh_t *)&tx_ring_p->tx_mbox_mbh; 1802 mboxl_p = (tdc_mbl_t *)&tx_ring_p->tx_mbox_mbl; 1803 mboxh_p->value = mboxl_p->value = 0; 1804 1805 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1806 "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 1807 dmap->dma_cookie.dmac_laddress)); 1808 1809 mboxh_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 1810 TDC_MBH_ADDR_SHIFT) & TDC_MBH_MASK); 1811 mboxl_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress & 1812 TDC_MBL_MASK) >> TDC_MBL_SHIFT); 1813 1814 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1815 "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 1816 dmap->dma_cookie.dmac_laddress)); 1817 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1818 "==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p", 1819 mboxh_p->bits.mbaddr, mboxl_p->bits.mbaddr)); 1820 1821 /* 1822 * Set page valid and no mask 1823 */ 1824 tx_ring_p->page_hdl.value = 0; 1825 1826 *tx_mbox_p = mboxp; 1827 1828 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1829 "<== hxge_map_txdma_channel_cfg_ring")); 1830 } 1831 1832 /*ARGSUSED*/ 1833 static void 1834 hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep, 1835 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 1836 { 1837 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1838 "==> hxge_unmap_txdma_channel_cfg_ring: channel %d", 1839 tx_ring_p->tdc)); 1840 1841 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 1842 1843 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1844 "<== hxge_unmap_txdma_channel_cfg_ring")); 1845 } 1846 1847 static hxge_status_t 1848 hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel, 1849 p_hxge_dma_common_t *dma_buf_p, 1850 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 1851 { 1852 p_hxge_dma_common_t dma_bufp, tmp_bufp; 1853 p_hxge_dma_common_t dmap; 1854 hxge_os_dma_handle_t tx_buf_dma_handle; 1855 p_tx_ring_t tx_ring_p; 1856 p_tx_msg_t tx_msg_ring; 1857 hxge_status_t status = HXGE_OK; 1858 int ddi_status = DDI_SUCCESS; 1859 int i, j, index; 1860 uint32_t size, bsize; 1861 uint32_t nblocks, nmsgs; 1862 1863 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1864 "==> hxge_map_txdma_channel_buf_ring")); 1865 1866 dma_bufp = tmp_bufp = *dma_buf_p; 1867 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1868 " hxge_map_txdma_channel_buf_ring: channel %d to map %d " 1869 "chunks bufp $%p", channel, num_chunks, dma_bufp)); 1870 1871 nmsgs = 0; 1872 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 1873 
nmsgs += tmp_bufp->nblocks; 1874 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1875 "==> hxge_map_txdma_channel_buf_ring: channel %d " 1876 "bufp $%p nblocks %d nmsgs %d", 1877 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 1878 } 1879 if (!nmsgs) { 1880 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1881 "<== hxge_map_txdma_channel_buf_ring: channel %d " 1882 "no msg blocks", channel)); 1883 status = HXGE_ERROR; 1884 1885 goto hxge_map_txdma_channel_buf_ring_exit; 1886 } 1887 tx_ring_p = (p_tx_ring_t)KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 1888 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 1889 (void *) hxgep->interrupt_cookie); 1890 /* 1891 * Allocate transmit message rings and handles for packets not to be 1892 * copied to premapped buffers. 1893 */ 1894 size = nmsgs * sizeof (tx_msg_t); 1895 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 1896 for (i = 0; i < nmsgs; i++) { 1897 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr, 1898 DDI_DMA_DONTWAIT, 0, &tx_msg_ring[i].dma_handle); 1899 if (ddi_status != DDI_SUCCESS) { 1900 status |= HXGE_DDI_FAILED; 1901 break; 1902 } 1903 } 1904 1905 if (i < nmsgs) { 1906 HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, 1907 "Allocate handles failed.")); 1908 1909 goto hxge_map_txdma_channel_buf_ring_fail1; 1910 } 1911 tx_ring_p->tdc = channel; 1912 tx_ring_p->tx_msg_ring = tx_msg_ring; 1913 tx_ring_p->tx_ring_size = nmsgs; 1914 tx_ring_p->num_chunks = num_chunks; 1915 if (!hxge_tx_intr_thres) { 1916 hxge_tx_intr_thres = tx_ring_p->tx_ring_size / 4; 1917 } 1918 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 1919 tx_ring_p->rd_index = 0; 1920 tx_ring_p->wr_index = 0; 1921 tx_ring_p->ring_head.value = 0; 1922 tx_ring_p->ring_kick_tail.value = 0; 1923 tx_ring_p->descs_pending = 0; 1924 1925 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1926 "==> hxge_map_txdma_channel_buf_ring: channel %d " 1927 "actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)", 1928 channel, tx_ring_p->tx_ring_size, nmsgs, hxge_tx_ring_size)); 1929 1930 /* 1931 * Map in buffers from the buffer pool. 
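	 * Each chunk provides nblocks blocks of block_size bytes; a
	 * tx_msg_ring[] entry is set up for every block via
	 * hxge_setup_dma_common().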
1932 */ 1933 index = 0; 1934 bsize = dma_bufp->block_size; 1935 1936 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma_channel_buf_ring: " 1937 "dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d", 1938 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 1939 1940 tx_buf_dma_handle = dma_bufp->dma_handle; 1941 for (i = 0; i < num_chunks; i++, dma_bufp++) { 1942 bsize = dma_bufp->block_size; 1943 nblocks = dma_bufp->nblocks; 1944 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1945 "==> hxge_map_txdma_channel_buf_ring: dma chunk %d " 1946 "size %d dma_bufp $%p", 1947 i, sizeof (hxge_dma_common_t), dma_bufp)); 1948 1949 for (j = 0; j < nblocks; j++) { 1950 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 1951 dmap = &tx_msg_ring[index++].buf_dma; 1952 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1953 "==> hxge_map_txdma_channel_buf_ring: j %d" 1954 "dmap $%p", i, dmap)); 1955 hxge_setup_dma_common(dmap, dma_bufp, 1, bsize); 1956 } 1957 } 1958 1959 if (i < num_chunks) { 1960 status = HXGE_ERROR; 1961 1962 goto hxge_map_txdma_channel_buf_ring_fail1; 1963 } 1964 1965 *tx_desc_p = tx_ring_p; 1966 1967 goto hxge_map_txdma_channel_buf_ring_exit; 1968 1969 hxge_map_txdma_channel_buf_ring_fail1: 1970 index--; 1971 for (; index >= 0; index--) { 1972 if (tx_msg_ring[index].dma_handle != NULL) { 1973 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 1974 } 1975 } 1976 MUTEX_DESTROY(&tx_ring_p->lock); 1977 KMEM_FREE(tx_msg_ring, size); 1978 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 1979 1980 status = HXGE_ERROR; 1981 1982 hxge_map_txdma_channel_buf_ring_exit: 1983 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1984 "<== hxge_map_txdma_channel_buf_ring status 0x%x", status)); 1985 1986 return (status); 1987 } 1988 1989 /*ARGSUSED*/ 1990 static void 1991 hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, p_tx_ring_t tx_ring_p) 1992 { 1993 p_tx_msg_t tx_msg_ring; 1994 p_tx_msg_t tx_msg_p; 1995 int i; 1996 1997 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1998 "==> hxge_unmap_txdma_channel_buf_ring")); 1999 if (tx_ring_p == NULL) { 2000 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2001 "<== hxge_unmap_txdma_channel_buf_ring: NULL ringp")); 2002 return; 2003 } 2004 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2005 "==> hxge_unmap_txdma_channel_buf_ring: channel %d", 2006 tx_ring_p->tdc)); 2007 2008 tx_msg_ring = tx_ring_p->tx_msg_ring; 2009 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2010 tx_msg_p = &tx_msg_ring[i]; 2011 if (tx_msg_p->flags.dma_type == USE_DVMA) { 2012 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "entry = %d", i)); 2013 (void) dvma_unload(tx_msg_p->dvma_handle, 0, -1); 2014 tx_msg_p->dvma_handle = NULL; 2015 if (tx_ring_p->dvma_wr_index == 2016 tx_ring_p->dvma_wrap_mask) { 2017 tx_ring_p->dvma_wr_index = 0; 2018 } else { 2019 tx_ring_p->dvma_wr_index++; 2020 } 2021 tx_ring_p->dvma_pending--; 2022 } else if (tx_msg_p->flags.dma_type == USE_DMA) { 2023 if (ddi_dma_unbind_handle(tx_msg_p->dma_handle)) { 2024 cmn_err(CE_WARN, "hxge_unmap_tx_bug_ring: " 2025 "ddi_dma_unbind_handle failed."); 2026 } 2027 } 2028 if (tx_msg_p->tx_message != NULL) { 2029 freemsg(tx_msg_p->tx_message); 2030 tx_msg_p->tx_message = NULL; 2031 } 2032 } 2033 2034 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2035 if (tx_msg_ring[i].dma_handle != NULL) { 2036 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2037 } 2038 } 2039 2040 MUTEX_DESTROY(&tx_ring_p->lock); 2041 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2042 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2043 2044 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2045 "<== hxge_unmap_txdma_channel_buf_ring")); 2046 } 2047 2048 static 
hxge_status_t 2049 hxge_txdma_hw_start(p_hxge_t hxgep) 2050 { 2051 int i, ndmas; 2052 uint16_t channel; 2053 p_tx_rings_t tx_rings; 2054 p_tx_ring_t *tx_desc_rings; 2055 p_tx_mbox_areas_t tx_mbox_areas_p; 2056 p_tx_mbox_t *tx_mbox_p; 2057 hxge_status_t status = HXGE_OK; 2058 uint64_t tmp; 2059 2060 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start")); 2061 2062 /* 2063 * Initialize REORD Table 1. Disable VMAC 2. Reset the FIFO Err Stat. 2064 * 3. Scrub memory and check for errors. 2065 */ 2066 (void) hxge_tx_vmac_disable(hxgep); 2067 2068 /* 2069 * Clear the error status 2070 */ 2071 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7); 2072 2073 /* 2074 * Scrub the rtab memory for the TDC and reset the TDC. 2075 */ 2076 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, 0x0ULL); 2077 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, 0x0ULL); 2078 2079 for (i = 0; i < 256; i++) { 2080 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, 2081 (uint64_t)i); 2082 2083 /* 2084 * Write the command register with an indirect read instruction 2085 */ 2086 tmp = (0x1ULL << 30) | i; 2087 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp); 2088 2089 /* 2090 * Wait for status done 2091 */ 2092 tmp = 0; 2093 do { 2094 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, 2095 &tmp); 2096 } while (((tmp >> 31) & 0x1ULL) == 0x0); 2097 } 2098 2099 for (i = 0; i < 256; i++) { 2100 /* 2101 * Write the command register with an indirect read instruction 2102 */ 2103 tmp = (0x1ULL << 30) | i; 2104 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp); 2105 2106 /* 2107 * Wait for status done 2108 */ 2109 tmp = 0; 2110 do { 2111 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, 2112 &tmp); 2113 } while (((tmp >> 31) & 0x1ULL) == 0x0); 2114 2115 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, &tmp); 2116 if (0x1ff00ULL != (0x1ffffULL & tmp)) { 2117 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl " 2118 "unexpected data (hi), entry: %x, value: 0x%0llx\n", 2119 i, (unsigned long long)tmp)); 2120 status = HXGE_ERROR; 2121 } 2122 2123 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, &tmp); 2124 if (tmp != 0) { 2125 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl " 2126 "unexpected data (lo), entry: %x\n", i)); 2127 status = HXGE_ERROR; 2128 } 2129 2130 HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp); 2131 if (tmp != 0) { 2132 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl " 2133 "parity error, entry: %x, val 0x%llx\n", 2134 i, (unsigned long long)tmp)); 2135 status = HXGE_ERROR; 2136 } 2137 2138 HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp); 2139 if (tmp != 0) { 2140 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl " 2141 "parity error, entry: %x\n", i)); 2142 status = HXGE_ERROR; 2143 } 2144 } 2145 2146 if (status != HXGE_OK) 2147 goto hxge_txdma_hw_start_exit; 2148 2149 /* 2150 * Reset FIFO Error Status for the TDC and enable FIFO error events. 2151 */ 2152 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7); 2153 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_MASK, 0x0); 2154 2155 /* 2156 * Initialize the Transmit DMAs. 
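	 * One hxge_txdma_start_channel() call is made per configured TDC;
	 * if any channel fails to start, the channels already started are
	 * stopped again in the error path below.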
2157 */ 2158 tx_rings = hxgep->tx_rings; 2159 if (tx_rings == NULL) { 2160 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2161 "<== hxge_txdma_hw_start: NULL ring pointer")); 2162 return (HXGE_ERROR); 2163 } 2164 2165 tx_rings->dma_to_reenable = 0; 2166 2167 tx_desc_rings = tx_rings->rings; 2168 if (tx_desc_rings == NULL) { 2169 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2170 "<== hxge_txdma_hw_start: NULL ring pointers")); 2171 return (HXGE_ERROR); 2172 } 2173 ndmas = tx_rings->ndmas; 2174 if (!ndmas) { 2175 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2176 "<== hxge_txdma_hw_start: no dma channel allocated")); 2177 return (HXGE_ERROR); 2178 } 2179 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start: " 2180 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2181 tx_rings, tx_desc_rings, ndmas)); 2182 2183 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 2184 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2185 2186 /* 2187 * Init the DMAs. 2188 */ 2189 for (i = 0; i < ndmas; i++) { 2190 channel = tx_desc_rings[i]->tdc; 2191 status = hxge_txdma_start_channel(hxgep, channel, 2192 (p_tx_ring_t)tx_desc_rings[i], 2193 (p_tx_mbox_t)tx_mbox_p[i]); 2194 if (status != HXGE_OK) { 2195 goto hxge_txdma_hw_start_fail1; 2196 } 2197 } 2198 2199 (void) hxge_tx_vmac_enable(hxgep); 2200 2201 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2202 "==> hxge_txdma_hw_start: tx_rings $%p rings $%p", 2203 hxgep->tx_rings, hxgep->tx_rings->rings)); 2204 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2205 "==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p", 2206 hxgep->tx_rings, tx_desc_rings)); 2207 2208 goto hxge_txdma_hw_start_exit; 2209 2210 hxge_txdma_hw_start_fail1: 2211 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2212 "==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)", 2213 status, channel, i)); 2214 2215 for (; i >= 0; i--) { 2216 channel = tx_desc_rings[i]->tdc, 2217 (void) hxge_txdma_stop_channel(hxgep, channel, 2218 (p_tx_ring_t)tx_desc_rings[i], 2219 (p_tx_mbox_t)tx_mbox_p[i]); 2220 } 2221 2222 hxge_txdma_hw_start_exit: 2223 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2224 "==> hxge_txdma_hw_start: (status 0x%x)", status)); 2225 2226 return (status); 2227 } 2228 2229 static void 2230 hxge_txdma_hw_stop(p_hxge_t hxgep) 2231 { 2232 int i, ndmas; 2233 uint16_t channel; 2234 p_tx_rings_t tx_rings; 2235 p_tx_ring_t *tx_desc_rings; 2236 p_tx_mbox_areas_t tx_mbox_areas_p; 2237 p_tx_mbox_t *tx_mbox_p; 2238 2239 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop")); 2240 2241 tx_rings = hxgep->tx_rings; 2242 if (tx_rings == NULL) { 2243 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2244 "<== hxge_txdma_hw_stop: NULL ring pointer")); 2245 return; 2246 } 2247 2248 tx_desc_rings = tx_rings->rings; 2249 if (tx_desc_rings == NULL) { 2250 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2251 "<== hxge_txdma_hw_stop: NULL ring pointers")); 2252 return; 2253 } 2254 2255 ndmas = tx_rings->ndmas; 2256 if (!ndmas) { 2257 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2258 "<== hxge_txdma_hw_stop: no dma channel allocated")); 2259 return; 2260 } 2261 2262 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: " 2263 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2264 2265 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 2266 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2267 2268 for (i = 0; i < ndmas; i++) { 2269 channel = tx_desc_rings[i]->tdc; 2270 (void) hxge_txdma_stop_channel(hxgep, channel, 2271 (p_tx_ring_t)tx_desc_rings[i], 2272 (p_tx_mbox_t)tx_mbox_p[i]); 2273 } 2274 2275 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: " 2276 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2277 HXGE_DEBUG_MSG((hxgep, 
	    MEM3_CTL, "<== hxge_txdma_hw_stop"));
}

static hxge_status_t
hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_start_channel (channel %d)", channel));

	/*
	 * TXDMA/TXC must be in stopped state.
	 */
	(void) hxge_txdma_stop_inj_err(hxgep, channel);

	/*
	 * Reset TXDMA channel
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.reset = 1;
	status = hxge_reset_txdma_channel(hxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_start_channel (channel %d)"
		    " reset channel failed 0x%x", channel, status));

		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel specific FZC control configurations.
	 * These FZC registers pertain to each TX channel (i.e. logical
	 * pages).
	 */
	status = hxge_init_fzc_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = hxge_init_txdma_channel_event_mask(hxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Load TXDMA descriptors, buffers, mailbox, initialize the DMA
	 * channels and enable each DMA channel.
	 */
	status = hxge_enable_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

hxge_txdma_start_channel_exit:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_start_channel"));

	return (status);
}

/*ARGSUSED*/
static hxge_status_t
hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	int status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_stop_channel: channel %d", channel));

	/*
	 * Stop (disable) TXDMA and TXC. If the stop bit is set but the
	 * STOP_N_GO bit is not, the TXDMA reset state will not be reached
	 * when the channel is subsequently reset.
2358 */ 2359 (void) hxge_txdma_stop_inj_err(hxgep, channel); 2360 2361 /* 2362 * Reset TXDMA channel 2363 */ 2364 tx_ring_p->tx_cs.value = 0; 2365 tx_ring_p->tx_cs.bits.reset = 1; 2366 status = hxge_reset_txdma_channel(hxgep, channel, 2367 tx_ring_p->tx_cs.value); 2368 if (status != HXGE_OK) { 2369 goto hxge_txdma_stop_channel_exit; 2370 } 2371 2372 hxge_txdma_stop_channel_exit: 2373 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_stop_channel")); 2374 2375 return (status); 2376 } 2377 2378 static p_tx_ring_t 2379 hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel) 2380 { 2381 int index, ndmas; 2382 uint16_t tdc; 2383 p_tx_rings_t tx_rings; 2384 2385 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_ring")); 2386 2387 tx_rings = hxgep->tx_rings; 2388 if (tx_rings == NULL) { 2389 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2390 "<== hxge_txdma_get_ring: NULL ring pointer")); 2391 return (NULL); 2392 } 2393 ndmas = tx_rings->ndmas; 2394 if (!ndmas) { 2395 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2396 "<== hxge_txdma_get_ring: no channel allocated")); 2397 return (NULL); 2398 } 2399 if (tx_rings->rings == NULL) { 2400 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2401 "<== hxge_txdma_get_ring: NULL rings pointer")); 2402 return (NULL); 2403 } 2404 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_ring: " 2405 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2406 tx_rings, tx_rings, ndmas)); 2407 2408 for (index = 0; index < ndmas; index++) { 2409 tdc = tx_rings->rings[index]->tdc; 2410 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2411 "==> hxge_fixup_txdma_rings: channel %d", tdc)); 2412 if (channel == tdc) { 2413 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2414 "<== hxge_txdma_get_ring: tdc %d ring $%p", 2415 tdc, tx_rings->rings[index])); 2416 return (p_tx_ring_t)(tx_rings->rings[index]); 2417 } 2418 } 2419 2420 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_ring")); 2421 2422 return (NULL); 2423 } 2424 2425 static p_tx_mbox_t 2426 hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel) 2427 { 2428 int index, tdc, ndmas; 2429 p_tx_rings_t tx_rings; 2430 p_tx_mbox_areas_t tx_mbox_areas_p; 2431 p_tx_mbox_t *tx_mbox_p; 2432 2433 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_mbox")); 2434 2435 tx_rings = hxgep->tx_rings; 2436 if (tx_rings == NULL) { 2437 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2438 "<== hxge_txdma_get_mbox: NULL ring pointer")); 2439 return (NULL); 2440 } 2441 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 2442 if (tx_mbox_areas_p == NULL) { 2443 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2444 "<== hxge_txdma_get_mbox: NULL mbox pointer")); 2445 return (NULL); 2446 } 2447 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2448 2449 ndmas = tx_rings->ndmas; 2450 if (!ndmas) { 2451 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2452 "<== hxge_txdma_get_mbox: no channel allocated")); 2453 return (NULL); 2454 } 2455 if (tx_rings->rings == NULL) { 2456 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2457 "<== hxge_txdma_get_mbox: NULL rings pointer")); 2458 return (NULL); 2459 } 2460 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_mbox: " 2461 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2462 tx_rings, tx_rings, ndmas)); 2463 2464 for (index = 0; index < ndmas; index++) { 2465 tdc = tx_rings->rings[index]->tdc; 2466 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2467 "==> hxge_txdma_get_mbox: channel %d", tdc)); 2468 if (channel == tdc) { 2469 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2470 "<== hxge_txdma_get_mbox: tdc %d ring $%p", 2471 tdc, tx_rings->rings[index])); 2472 return (p_tx_mbox_t)(tx_mbox_p[index]); 2473 } 2474 } 2475 2476 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_mbox")); 2477 2478 return 
(NULL); 2479 } 2480 2481 /*ARGSUSED*/ 2482 static hxge_status_t 2483 hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp, 2484 tdc_stat_t cs) 2485 { 2486 hpi_handle_t handle; 2487 uint8_t channel; 2488 p_tx_ring_t *tx_rings; 2489 p_tx_ring_t tx_ring_p; 2490 p_hxge_tx_ring_stats_t tdc_stats; 2491 boolean_t txchan_fatal = B_FALSE; 2492 hxge_status_t status = HXGE_OK; 2493 tdc_drop_cnt_t drop_cnt; 2494 2495 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_tx_err_evnts")); 2496 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2497 channel = ldvp->channel; 2498 2499 tx_rings = hxgep->tx_rings->rings; 2500 tx_ring_p = tx_rings[index]; 2501 tdc_stats = tx_ring_p->tdc_stats; 2502 2503 /* Get the error counts if any */ 2504 TXDMA_REG_READ64(handle, TDC_DROP_CNT, channel, &drop_cnt.value); 2505 tdc_stats->count_hdr_size_err += drop_cnt.bits.hdr_size_error_count; 2506 tdc_stats->count_runt += drop_cnt.bits.runt_count; 2507 tdc_stats->count_abort += drop_cnt.bits.abort_count; 2508 2509 if (cs.bits.peu_resp_err) { 2510 tdc_stats->peu_resp_err++; 2511 HXGE_FM_REPORT_ERROR(hxgep, channel, 2512 HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR); 2513 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2514 "==> hxge_tx_err_evnts(channel %d): " 2515 "fatal error: peu_resp_err", channel)); 2516 txchan_fatal = B_TRUE; 2517 } 2518 2519 if (cs.bits.pkt_size_hdr_err) { 2520 tdc_stats->pkt_size_hdr_err++; 2521 HXGE_FM_REPORT_ERROR(hxgep, channel, 2522 HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR); 2523 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2524 "==> hxge_tx_err_evnts(channel %d): " 2525 "fatal error: pkt_size_hdr_err", channel)); 2526 txchan_fatal = B_TRUE; 2527 } 2528 2529 if (cs.bits.runt_pkt_drop_err) { 2530 tdc_stats->runt_pkt_drop_err++; 2531 HXGE_FM_REPORT_ERROR(hxgep, channel, 2532 HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR); 2533 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2534 "==> hxge_tx_err_evnts(channel %d): " 2535 "fatal error: runt_pkt_drop_err", channel)); 2536 txchan_fatal = B_TRUE; 2537 } 2538 2539 if (cs.bits.pkt_size_err) { 2540 tdc_stats->pkt_size_err++; 2541 HXGE_FM_REPORT_ERROR(hxgep, channel, 2542 HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 2543 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2544 "==> hxge_tx_err_evnts(channel %d): " 2545 "fatal error: pkt_size_err", channel)); 2546 txchan_fatal = B_TRUE; 2547 } 2548 2549 if (cs.bits.tx_rng_oflow) { 2550 tdc_stats->tx_rng_oflow++; 2551 if (tdc_stats->tx_rng_oflow) 2552 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2553 "==> hxge_tx_err_evnts(channel %d): " 2554 "fatal error: tx_rng_oflow", channel)); 2555 } 2556 2557 if (cs.bits.pref_par_err) { 2558 tdc_stats->pref_par_err++; 2559 2560 /* Get the address of parity error read data */ 2561 TXDMA_REG_READ64(hxgep->hpi_handle, TDC_PREF_PAR_LOG, 2562 channel, &tdc_stats->errlog.value); 2563 2564 HXGE_FM_REPORT_ERROR(hxgep, channel, 2565 HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR); 2566 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2567 "==> hxge_tx_err_evnts(channel %d): " 2568 "fatal error: pref_par_err", channel)); 2569 txchan_fatal = B_TRUE; 2570 } 2571 2572 if (cs.bits.tdr_pref_cpl_to) { 2573 tdc_stats->tdr_pref_cpl_to++; 2574 HXGE_FM_REPORT_ERROR(hxgep, channel, 2575 HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO); 2576 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2577 "==> hxge_tx_err_evnts(channel %d): " 2578 "fatal error: config_partition_err", channel)); 2579 txchan_fatal = B_TRUE; 2580 } 2581 2582 if (cs.bits.pkt_cpl_to) { 2583 tdc_stats->pkt_cpl_to++; 2584 HXGE_FM_REPORT_ERROR(hxgep, channel, 2585 HXGE_FM_EREPORT_TDMC_PKT_CPL_TO); 2586 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2587 "==> 
hxge_tx_err_evnts(channel %d): " 2588 "fatal error: pkt_cpl_to", channel)); 2589 txchan_fatal = B_TRUE; 2590 } 2591 2592 if (cs.bits.invalid_sop) { 2593 tdc_stats->invalid_sop++; 2594 HXGE_FM_REPORT_ERROR(hxgep, channel, 2595 HXGE_FM_EREPORT_TDMC_INVALID_SOP); 2596 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2597 "==> hxge_tx_err_evnts(channel %d): " 2598 "fatal error: invalid_sop", channel)); 2599 txchan_fatal = B_TRUE; 2600 } 2601 2602 if (cs.bits.unexpected_sop) { 2603 tdc_stats->unexpected_sop++; 2604 HXGE_FM_REPORT_ERROR(hxgep, channel, 2605 HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP); 2606 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2607 "==> hxge_tx_err_evnts(channel %d): " 2608 "fatal error: unexpected_sop", channel)); 2609 txchan_fatal = B_TRUE; 2610 } 2611 2612 /* Clear error injection source in case this is an injected error */ 2613 TXDMA_REG_WRITE64(hxgep->hpi_handle, TDC_STAT_INT_DBG, channel, 0); 2614 2615 if (txchan_fatal) { 2616 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2617 " hxge_tx_err_evnts: " 2618 " fatal error on channel %d cs 0x%llx\n", 2619 channel, cs.value)); 2620 status = hxge_txdma_fatal_err_recover(hxgep, channel, 2621 tx_ring_p); 2622 if (status == HXGE_OK) { 2623 FM_SERVICE_RESTORED(hxgep); 2624 } 2625 } 2626 2627 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_tx_err_evnts")); 2628 2629 return (status); 2630 } 2631 2632 static hxge_status_t 2633 hxge_txdma_wait_for_qst(p_hxge_t hxgep, int channel) 2634 { 2635 hpi_status_t rs; 2636 hxge_status_t status = HXGE_OK; 2637 hpi_handle_t handle; 2638 2639 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2640 2641 /* 2642 * Wait for QST state of the DMA. 2643 */ 2644 rs = hpi_txdma_control_stop_wait(handle, channel); 2645 if (rs != HPI_SUCCESS) 2646 status = HXGE_ERROR; 2647 2648 return (status); 2649 } 2650 2651 static hxge_status_t 2652 hxge_txdma_handle_rtab_error(p_hxge_t hxgep) 2653 { 2654 hxge_status_t status = HXGE_OK; 2655 int ndmas, i; 2656 uint16_t chnl; 2657 2658 ndmas = hxgep->tx_rings->ndmas; 2659 2660 /* 2661 * Make sure each DMA is in the QST state. 2662 */ 2663 for (i = 0; i < ndmas; i++) { 2664 status = hxge_txdma_wait_for_qst(hxgep, i); 2665 if (status != HXGE_OK) 2666 goto hxge_txdma_handle_rtab_error_exit; 2667 } 2668 2669 /* 2670 * Enable the DMAs. 2671 */ 2672 for (i = 0; i < ndmas; i++) { 2673 chnl = (hxgep->tx_rings->dma_to_reenable + i) % ndmas; 2674 hxge_txdma_enable_channel(hxgep, chnl); 2675 } 2676 2677 hxgep->tx_rings->dma_to_reenable = 2678 (hxgep->tx_rings->dma_to_reenable + 1) % ndmas; 2679 2680 hxge_txdma_handle_rtab_error_exit: 2681 return (status); 2682 } 2683 2684 hxge_status_t 2685 hxge_txdma_handle_sys_errors(p_hxge_t hxgep) 2686 { 2687 hpi_handle_t handle; 2688 hxge_status_t status = HXGE_OK; 2689 tdc_fifo_err_stat_t fifo_stat; 2690 hxge_tdc_sys_stats_t *tdc_sys_stats; 2691 2692 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_handle_sys_errors")); 2693 2694 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2695 2696 /* 2697 * The FIFO is shared by all channels. 2698 * Get the status of Reorder Buffer and Reorder Table Buffer Errors 2699 */ 2700 HXGE_REG_RD64(handle, TDC_FIFO_ERR_STAT, &fifo_stat.value); 2701 2702 /* 2703 * Clear the error bits. Note that writing a 1 clears the bit. Writing 2704 * a 0 does nothing. 
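	 * Writing back the value just read therefore acknowledges exactly
	 * the error bits observed in this pass without disturbing any bits
	 * that may set afterwards.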
2705 */ 2706 HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, fifo_stat.value); 2707 2708 tdc_sys_stats = &hxgep->statsp->tdc_sys_stats; 2709 if (fifo_stat.bits.reord_tbl_par_err) { 2710 tdc_sys_stats->reord_tbl_par_err++; 2711 status = hxge_txdma_handle_rtab_error(hxgep); 2712 } 2713 2714 if (fifo_stat.bits.reord_buf_ded_err) { 2715 tdc_sys_stats->reord_buf_ded_err++; 2716 HXGE_FM_REPORT_ERROR(hxgep, NULL, 2717 HXGE_FM_EREPORT_TDMC_REORD_BUF_DED); 2718 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2719 "==> hxge_txdma_handle_sys_errors: " 2720 "fatal error: reord_buf_ded_err")); 2721 } 2722 2723 if (fifo_stat.bits.reord_buf_sec_err) { 2724 tdc_sys_stats->reord_buf_sec_err++; 2725 if (tdc_sys_stats->reord_buf_sec_err == 1) 2726 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2727 "==> hxge_txdma_handle_sys_errors: " 2728 "reord_buf_sec_err")); 2729 } 2730 2731 if (fifo_stat.bits.reord_buf_ded_err) { 2732 status = hxge_tx_port_fatal_err_recover(hxgep); 2733 if (status == HXGE_OK) { 2734 FM_SERVICE_RESTORED(hxgep); 2735 } 2736 } 2737 2738 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_handle_sys_errors")); 2739 2740 return (status); 2741 } 2742 2743 static hxge_status_t 2744 hxge_txdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel, 2745 p_tx_ring_t tx_ring_p) 2746 { 2747 hpi_handle_t handle; 2748 hpi_status_t rs = HPI_SUCCESS; 2749 p_tx_mbox_t tx_mbox_p; 2750 hxge_status_t status = HXGE_OK; 2751 2752 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover")); 2753 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2754 "Recovering from TxDMAChannel#%d error...", channel)); 2755 2756 /* 2757 * Stop the dma channel waits for the stop done. If the stop done bit 2758 * is not set, then create an error. 2759 */ 2760 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2761 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping txdma channel(%d)", 2762 channel)); 2763 MUTEX_ENTER(&tx_ring_p->lock); 2764 rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel); 2765 if (rs != HPI_SUCCESS) { 2766 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2767 "==> hxge_txdma_fatal_err_recover (channel %d): " 2768 "stop failed ", channel)); 2769 2770 goto fail; 2771 } 2772 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming txdma channel(%d)", 2773 channel)); 2774 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0); 2775 2776 /* 2777 * Reset TXDMA channel 2778 */ 2779 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "resetting txdma channel(%d)", 2780 channel)); 2781 if ((rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 2782 HPI_SUCCESS) { 2783 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2784 "==> hxge_txdma_fatal_err_recover (channel %d)" 2785 " reset channel failed 0x%x", channel, rs)); 2786 2787 goto fail; 2788 } 2789 /* 2790 * Reset the tail (kick) register to 0. (Hardware will not reset it. Tx 2791 * overflow fatal error if tail is not set to 0 after reset! 2792 */ 2793 TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0); 2794 2795 /* 2796 * Restart TXDMA channel 2797 * 2798 * Initialize the TXDMA channel specific FZC control configurations. 2799 * These FZC registers are pertaining to each TX channel (i.e. logical 2800 * pages). 2801 */ 2802 tx_mbox_p = hxge_txdma_get_mbox(hxgep, channel); 2803 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "restarting txdma channel(%d)", 2804 channel)); 2805 status = hxge_init_fzc_txdma_channel(hxgep, channel, 2806 tx_ring_p, tx_mbox_p); 2807 if (status != HXGE_OK) 2808 goto fail; 2809 2810 /* 2811 * Initialize the event masks. 
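	 * (an all-zero mask is presumably intended to leave all TDC events
	 * unmasked for this channel)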
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = hxge_init_txdma_channel_event_mask(hxgep, channel,
	    &tx_ring_p->tx_evmask);
	if (status != HXGE_OK)
		goto fail;

	tx_ring_p->wr_index_wrap = B_FALSE;
	tx_ring_p->wr_index = 0;
	tx_ring_p->rd_index = 0;

	/*
	 * Load TXDMA descriptors, buffers, mailbox, initialize the DMA
	 * channels and enable each DMA channel.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "enabling txdma channel(%d)",
	    channel));
	status = hxge_enable_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	MUTEX_EXIT(&tx_ring_p->lock);
	if (status != HXGE_OK)
		goto fail;

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxDMAChannel#%d Restored", channel));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_txdma_fatal_err_recover"));

	return (HXGE_OK);

fail:
	MUTEX_EXIT(&tx_ring_p->lock);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "hxge_txdma_fatal_err_recover (channel %d): "
	    "failed to recover this txdma channel", channel));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));

	return (status);
}

static hxge_status_t
hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)
{
	hpi_handle_t handle;
	hpi_status_t rs = HPI_SUCCESS;
	hxge_status_t status = HXGE_OK;
	p_tx_ring_t *tx_desc_rings;
	p_tx_rings_t tx_rings;
	p_tx_ring_t tx_ring_p;
	int i, ndmas;
	uint16_t channel;
	block_reset_t reset_reg;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "==> hxge_tx_port_fatal_err_recover"));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovering from TxPort error..."));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/* Reset TDC block from PEU for this fatal error */
	reset_reg.value = 0;
	reset_reg.bits.tdc_rst = 1;
	HXGE_REG_WR32(handle, BLOCK_RESET, reset_reg.value);

	HXGE_DELAY(1000);

	/*
	 * Stop each DMA channel and wait for the stop-done indication. If
	 * the stop done bit is not set, flag an error.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping all DMA channels..."));

	tx_rings = hxgep->tx_rings;
	tx_desc_rings = tx_rings->rings;
	ndmas = tx_rings->ndmas;

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_ENTER(&tx_ring_p->lock);
	}

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		channel = tx_desc_rings[i]->tdc;
		tx_ring_p = tx_rings->rings[i];
		rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_tx_port_fatal_err_recover (channel %d): "
			    "stop failed ", channel));

			goto fail;
		}
	}

	/*
	 * Do reclaim on all of the DMAs.
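	 * hxge_txdma_reclaim() is expected to free any mblks and DMA
	 * bindings still attached to descriptors the hardware has already
	 * completed, so the rings restart empty.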
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming all DMA channels..."));
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
	}

	/* Restart the TDC */
	if ((status = hxge_txdma_hw_start(hxgep)) != HXGE_OK)
		goto fail;

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxPort Restored"));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "<== hxge_tx_port_fatal_err_recover"));
	return (HXGE_OK);

fail:
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "<== hxge_tx_port_fatal_err_recover: "
	    "failed to recover the tx port"));

	return (status);
}