/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_txdma.h>
#include <sys/llc1.h>

uint32_t hxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t hxge_tx_minfree = 64;
uint32_t hxge_tx_intr_thres = 0;
uint32_t hxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t hxge_tx_tiny_pack = 1;
uint32_t hxge_tx_use_bcopy = 1;

extern uint32_t hxge_tx_ring_size;
extern uint32_t hxge_bcopy_thresh;
extern uint32_t hxge_dvma_thresh;
extern uint32_t hxge_dma_stream_thresh;
extern dma_method_t hxge_force_dma;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr;

/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr;

/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t hxge_desc_dma_attr;
extern ddi_dma_attr_t hxge_tx_dma_attr;

static hxge_status_t hxge_map_txdma(p_hxge_t hxgep);
static void hxge_unmap_txdma(p_hxge_t hxgep);
static hxge_status_t hxge_txdma_hw_start(p_hxge_t hxgep);
static void hxge_txdma_hw_stop(p_hxge_t hxgep);

static hxge_status_t hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p,
    uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
    p_tx_mbox_t *tx_mbox_p);
static void hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t,
    p_hxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p);
static void hxge_map_txdma_channel_cfg_ring(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, p_tx_ring_t, p_tx_mbox_t *);
static void hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static hxge_status_t hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p);
static p_tx_ring_t hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel);
static hxge_status_t hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index,
    p_hxge_ldv_t ldvp, tdc_stat_t cs);
static p_tx_mbox_t hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel);
static hxge_status_t hxge_txdma_fatal_err_recover(p_hxge_t hxgep,
    uint16_t channel, p_tx_ring_t tx_ring_p);
static hxge_status_t hxge_tx_port_fatal_err_recover(p_hxge_t hxgep);

hxge_status_t
hxge_init_txdma_channels(p_hxge_t hxgep)
{
	hxge_status_t	status = HXGE_OK;
	block_reset_t	reset_reg;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_init_txdma_channels"));

	/*
	 * Reset the TDC block from the PEU to clean up any unknown
	 * configuration.  This may have resulted from a previous reboot.
	 */
	reset_reg.value = 0;
	reset_reg.bits.tdc_rst = 1;
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);

	HXGE_DELAY(1000);

	status = hxge_map_txdma(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_init_txdma_channels: status 0x%x", status));
		return (status);
	}

	status = hxge_txdma_hw_start(hxgep);
	if (status != HXGE_OK) {
		hxge_unmap_txdma(hxgep);
		return (status);
	}

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_init_txdma_channels: status 0x%x", status));

	return (HXGE_OK);
}

void
hxge_uninit_txdma_channels(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_uninit_txdma_channels"));

	hxge_txdma_hw_stop(hxgep);
	hxge_unmap_txdma(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_uninit_txdma_channels"));
}

void
hxge_setup_dma_common(p_hxge_dma_common_t dest_p, p_hxge_dma_common_t src_p,
    uint32_t entries, uint32_t size)
{
	size_t	tsize;

	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}
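
/*
 * Illustrative sketch (not compiled): how callers carve fixed-size areas
 * out of one control DMA block with hxge_setup_dma_common().  Each call
 * hands the front "entries * size" bytes of the source area to the
 * destination and advances the source past them, mirroring the
 * descriptor-ring and mailbox setup done later in
 * hxge_map_txdma_channel_cfg_ring().  The local names here are
 * hypothetical, not driver code.
 */
#if 0
	hxge_dma_common_t	cntl_area;	/* pre-allocated control block */
	hxge_dma_common_t	desc_area, mbox_area;

	/* carve tx_ring_size descriptors, then one mailbox, off the front */
	hxge_setup_dma_common(&desc_area, &cntl_area, tx_ring_size,
	    sizeof (tx_desc_t));
	hxge_setup_dma_common(&mbox_area, &cntl_area, 1,
	    sizeof (txdma_mailbox_t));
#endif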

hxge_status_t
hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel, uint64_t reg_data)
{
	hpi_status_t	rs = HPI_SUCCESS;
	hxge_status_t	status = HXGE_OK;
	hpi_handle_t	handle;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, " ==> hxge_reset_txdma_channel"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	if ((reg_data & TDC_TDR_RST_MASK) == TDC_TDR_RST_MASK) {
		rs = hpi_txdma_channel_reset(handle, channel);
	} else {
		rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel);
	}

	if (rs != HPI_SUCCESS) {
		status = HXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.  (Hardware will not reset it;
	 * a Tx overflow fatal error results if the tail is not 0 after
	 * reset.)
	 */
	TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);

	HXGE_DEBUG_MSG((hxgep, TX_CTL, " <== hxge_reset_txdma_channel"));

	return (status);
}

hxge_status_t
hxge_init_txdma_channel_event_mask(p_hxge_t hxgep, uint16_t channel,
    tdc_int_mask_t *mask_p)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_init_txdma_channel_event_mask"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * Mask off tx_rng_oflow since it is a false alarm.  The driver
	 * ensures it does not overflow the hardware and checks the
	 * hardware status.
	 */
	mask_p->bits.tx_rng_oflow = 1;
	rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != HPI_SUCCESS) {
		status = HXGE_ERROR | rs;
	}

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_init_txdma_channel_event_mask"));
	return (status);
}

hxge_status_t
hxge_enable_txdma_channel(p_hxge_t hxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_enable_txdma_channel"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * Use the configuration data composed at init time.  Write the
	 * transmit ring configuration to the hardware.
	 */
	rs = hpi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/* Write the mailbox configuration to the hardware. */
	rs = hpi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/* Start the DMA engine. */
	rs = hpi_txdma_channel_init_enable(handle, channel);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_enable_txdma_channel"));
	return (status);
}

void
hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum,
    int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp)
{
	p_tx_pkt_header_t	hdrp;
	p_mblk_t		nmp;
	uint64_t		tmp;
	size_t			mblk_len;
	size_t			iph_len;
	size_t			hdrs_size;
	uint8_t			*ip_buf;
	uint16_t		eth_type;
	uint8_t			ipproto;
	boolean_t		is_vlan = B_FALSE;
	size_t			eth_hdr_size;
	uint8_t			hdrs_buf[sizeof (struct ether_header) + 64 +
	    sizeof (uint32_t)];

	HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * The caller should have zeroed out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_fill_tx_hdr: pkt_len %d npads %d",
		    pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);

		goto fill_tx_header_done;
	}
	tmp = (uint64_t)npads;
	hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (it does not include the Neptune
	 * transmit header).
	 */
	nmp = mp;
	mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, mblk_len));
	ip_buf = NULL;
	bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header));
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> hxge_fill_tx_hdr: (value 0x%llx) ether type 0x%x",
	    hdrp->value, eth_type));

	if (eth_type < ETHERMTU) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: LLC value 0x%llx",
		    hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header)) ==
		    LLC_SNAP_SAP) {
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx",
		    hdrp->value));
	}
	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}
		}
		if (ip_buf == NULL) {
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}
		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x "
		    "tmp 0x%x", iph_len, hdrp->bits.l3start, eth_hdr_size,
		    ipproto, tmp));
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: IP value 0x%llx",
		    hdrp->value));
		break;

	case ETHERTYPE_IPV6:
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >= (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) - hdrs_size;
			bcopy(nmp->b_rptr, &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* Byte 6 of the IPv6 header is the next-header protocol. */
		ipproto = ip_buf[6];

		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.l3start, eth_hdr_size, ipproto));
		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));
		break;

	default:
		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			tmp = 1ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_tx_pkt_hdr_init: TCP CKSUM "
			    "value 0x%llx", hdrp->value));
		}
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: TCP value 0x%llx",
		    hdrp->value));
		break;

	case IPPROTO_UDP:
		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			tmp = 0x2ull;
			hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT);
		}
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> hxge_tx_pkt_hdr_init: UDP value 0x%llx",
		    hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx",
	    pkt_len, npads, hdrp->value));
	HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_fill_tx_hdr"));
}
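
/*
 * Illustrative sketch (not compiled): a hypothetical send path would
 * reserve room for the transmit header with hxge_tx_pkt_header_reserve()
 * (defined below) and then let hxge_fill_tx_hdr() parse the frame and
 * fill it in.  The local variable names are assumptions, not driver code.
 */
#if 0
	uint8_t		npads = 0;
	p_mblk_t	hmp;

	if ((hmp = hxge_tx_pkt_header_reserve(mp, &npads)) != NULL) {
		p_tx_pkt_hdr_all_t pkthdrp;

		pkthdrp = (p_tx_pkt_hdr_all_t)hmp->b_rptr;
		bzero(pkthdrp, TX_PKT_HEADER_SIZE);	/* caller zeroes it */
		hxge_fill_tx_hdr(mp, B_FALSE, B_TRUE, pkt_len, npads,
		    pkthdrp);
	}
#endif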

/*ARGSUSED*/
p_mblk_t
hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		HXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== hxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}
	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> hxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==>hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));
	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== hxge_tx_pkt_header_reserve: use new mp"));
	return (newmp);
}

int
hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t		nmblks;
	ssize_t		len;
	uint_t		pkt_len;
	p_mblk_t	nmp, bmp, tmp;
	uint8_t		*b_wptr;

	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d",
	    mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks, *tot_xfer_len_p));

		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_tx_pkt_nmblocks:"
			    " len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}
		*tot_xfer_len_p += len;
		HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks, *tot_xfer_len_p));

		if (len < hxge_bcopy_thresh) {
			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= hxge_bcopy_thresh) {
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_tx_pkt_nmblocks: "
			    "len %d (> thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.  If len
			 * is more than 4K, we need to break it up into at
			 * most two more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t nsegs;

				nsegs = 1;
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				HXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> hxge_tx_pkt_nmblocks: "
				    "len %d pkt_len %d nmblks %d nsegs %d",
				    len, pkt_len, nmblks, nsegs));
				do {
					b_wptr = nmp->b_rptr +
					    TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the number of transmit gather pointers
		 * to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			HXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> hxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	HXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks, MBLKL(mp), *tot_xfer_len_p));
	return (nmblks);
}
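
/*
 * Worked example (not compiled) of the 4K split above, assuming
 * TX_MAX_TRANSFER_LENGTH is 4096: a 9000-byte mblk gets nsegs = 2 extra
 * dupb() blocks because 9000 % 8192 is non-zero, the intended split
 * being 4096 + 4096 + 808 bytes, i.e. three gather pointers for the one
 * mblk.
 */
#if 0
	ssize_t		len = 9000;
	uint32_t	nsegs = 1;

	if (len % (TX_MAX_TRANSFER_LENGTH * 2))	/* 9000 % 8192 = 808 */
		++nsegs;			/* nsegs = 2 */
#endif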

boolean_t
hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t		status = B_TRUE;
	p_hxge_dma_common_t	tx_desc_dma_p;
	hxge_dma_common_t	desc_area;
	p_tx_desc_t		tx_desc_ring_vp;
	p_tx_desc_t		tx_desc_p;
	p_tx_desc_t		tx_desc_pp;
	tx_desc_t		r_tx_desc;
	p_tx_msg_t		tx_msg_ring;
	p_tx_msg_t		tx_msg_p;
	hpi_handle_t		handle;
	tdc_tdr_head_t		tx_head;
	uint32_t		pkt_len;
	uint_t			tx_rd_index;
	uint16_t		head_index, tail_index;
	uint8_t			tdc;
	boolean_t		head_wrap, tail_wrap;
	p_hxge_tx_ring_stats_t	tdc_stats;
	tdc_byte_cnt_t		byte_cnt;
	tdc_tdr_qlen_t		qlen;
	int			rc;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_reclaim"));

	status = ((tx_ring_p->descs_pending < hxge_reclaim_pending) &&
	    (nmblks != 0));
	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, hxge_reclaim_pending, nmblks));

	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}
		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		/*
		 * The tdc_byte_cnt register can be used to get the bytes
		 * transmitted; it includes padding in the case of runt
		 * packets.
		 */
		handle = HXGE_DEV_HPI_HANDLE(hxgep);
		TXDMA_REG_READ64(handle, TDC_BYTE_CNT, tdc, &byte_cnt.value);
		tdc_stats->obytes_with_pad += byte_cnt.bits.byte_count;

		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));

		/*
		 * Read the hardware-maintained transmit head index and
		 * wrap-around bit.
		 */
		TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc, &tx_head.value);
		head_index = tx_head.bits.head;
		head_wrap = tx_head.bits.wrap;
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap, head_index,
		    head_wrap));

		/*
		 * For debug only.  This can be used to verify the qlen and
		 * make sure the hardware is wrapping the TDR correctly.
		 */
		TXDMA_REG_READ64(handle, TDC_TDR_QLEN, tdc, &qlen.value);
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d",
		    qlen.bits.tdr_qlen, qlen.bits.tdr_pref_qlen));

		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_txdma_reclaim: Checking if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap,
			    tail_index, tail_wrap)) {
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_txdma_reclaim: tx_rd_index and head_index"));

		/* XXXX: limit the # of reclaims */
		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_txdma_reclaim: Checking if pending"));
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_txdma_reclaim: descs_pending %d ",
			    tx_ring_p->descs_pending));
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d (tx_desc_p $%p)",
			    tx_rd_index, head_index, tx_desc_p));

			tx_desc_pp->value = tx_desc_p->value;
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_txdma_reclaim: dump desc:"));

			/*
			 * The descriptor transfer-length field gives the
			 * bytes transmitted for this descriptor.
			 */
			pkt_len = tx_desc_pp->bits.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.sop;
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len, tdc, tdc_stats->opackets));

			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "tx_desc_p = $%p tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p, tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type == USE_DMA) {
				HXGE_DEBUG_MSG((hxgep, TX_CTL,
				    "==> hxge_txdma_reclaim: USE DMA"));
				if ((rc = ddi_dma_unbind_handle(
				    tx_msg_p->dma_handle)) != 0) {
					cmn_err(CE_WARN, "hxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}

			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "==> hxge_txdma_reclaim: count packets"));

			/*
			 * Count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}
			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		status = (nmblks <= (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending - TX_FULL_MARK));
		if (status) {
			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <= (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending - TX_FULL_MARK));
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "<== hxge_txdma_reclaim status = 0x%08x", status));
	return (status);
}
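
/*
 * Presumed semantics (not compiled) of the TXDMA_RING_EMPTY/_FULL tests
 * used above: with a head index, a tail index and one wrap bit each,
 * equal indices mean an empty ring when the wrap bits agree and a full
 * ring when they differ.  A sketch of what the macros are expected to
 * reduce to; the real definitions live in the hxge headers.
 */
#if 0
#define	TXDMA_RING_EMPTY(h, hw, t, tw)	(((h) == (t)) && ((hw) == (tw)))
#define	TXDMA_RING_FULL(h, hw, t, tw)	(((h) == (t)) && ((hw) != (tw)))
#endif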

uint_t
hxge_tx_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t	ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t	hxgep = (p_hxge_t)arg2;
	p_hxge_ldg_t	ldgp;
	uint8_t		channel;
	uint32_t	vindex;
	hpi_handle_t	handle;
	tdc_stat_t	cs;
	p_tx_ring_t	*tx_rings;
	p_tx_ring_t	tx_ring_p;
	hpi_status_t	rs = HPI_SUCCESS;
	uint_t		serviced = DDI_INTR_UNCLAIMED;
	hxge_status_t	status = HXGE_OK;

	if (ldvp == NULL) {
		HXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== hxge_tx_intr: hxgep $%p ldvp $%p", hxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	if (arg2 == NULL || (void *)ldvp->hxgep != arg2) {
		hxgep = ldvp->hxgep;
	}

	/*
	 * If the interface is not started, just swallow the interrupt
	 * and don't rearm the logical device.
	 */
	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED)
		return (DDI_INTR_CLAIMED);

	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_tx_intr: hxgep(arg2) $%p ldvp(arg1) $%p",
	    hxgep, ldvp));

	/*
	 * This interrupt handler is for a specific transmit dma channel.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d",
	    hxgep, ldvp, channel));

	rs = hpi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	HXGE_DEBUG_MSG((hxgep, INT_CTL,
	    "==> hxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));

	if (!rs && cs.bits.marked) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (marked bit set)", channel, vindex, rs));
		tx_rings = hxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		HXGE_DEBUG_MSG((hxgep, INT_CTL,
		    "==> hxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (marked bit set, calling reclaim)",
		    channel, vindex, rs));

		MUTEX_ENTER(&tx_ring_p->lock);
		(void) hxge_txdma_reclaim(hxgep, tx_rings[vindex], 0);
		MUTEX_EXIT(&tx_ring_p->lock);
		mac_tx_update(hxgep->mach);
	}

	/*
	 * Process other transmit control and status.  Check the ldv state.
	 */
	status = hxge_tx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);

	/* Clear the error bits */
	RXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value);

	/*
	 * Rearm this logical group if this is a single device group.
	 */
	if (ldgp->nldvs == 1) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_tx_intr: rearm"));
		if (status == HXGE_OK) {
			(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
			    B_TRUE, ldgp->ldg_timer);
		}
	}
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

void
hxge_txdma_stop(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop"));

	(void) hxge_tx_vmac_disable(hxgep);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop"));
}

hxge_status_t
hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
{
	int		i, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;
	p_tx_ring_t	*tx_desc_rings;
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_hw_mode: enable mode %d", enable));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_mode: not initialized"));
		return (HXGE_ERROR);
	}
	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_mode: NULL global ring pointer"));
		return (HXGE_ERROR);
	}
	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_mode: NULL rings pointer"));
		return (HXGE_ERROR);
	}
	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_txdma_hw_mode: no dma channel allocated"));
		return (HXGE_ERROR);
	}
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_mode: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_desc_rings, ndmas));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		channel = tx_desc_rings[i]->tdc;
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "==> hxge_txdma_hw_mode: channel %d", channel));
		if (enable) {
			rs = hpi_txdma_channel_enable(handle, channel);
			HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
			    "==> hxge_txdma_hw_mode: channel %d (enable) "
			    "rs 0x%x", channel, rs));
		} else {
			/*
			 * Stop the DMA channel and wait for the stop-done
			 * bit.  If the stop-done bit is not set, force an
			 * error so the TXC will stop.  All channels bound
			 * to this port need to be stopped and reset after
			 * injecting an interrupt error.
			 */
			rs = hpi_txdma_channel_disable(handle, channel);
			HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
			    "==> hxge_txdma_hw_mode: channel %d (disable) "
			    "rs 0x%x", channel, rs));
		}
	}

	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_txdma_hw_mode: status 0x%x", status));

	return (status);
}

void
hxge_txdma_enable_channel(p_hxge_t hxgep, uint16_t channel)
{
	hpi_handle_t handle;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_txdma_enable_channel: channel %d", channel));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	/* enable the transmit dma channels */
	(void) hpi_txdma_channel_enable(handle, channel);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_txdma_enable_channel"));
}

void
hxge_txdma_disable_channel(p_hxge_t hxgep, uint16_t channel)
{
	hpi_handle_t handle;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_txdma_disable_channel: channel %d", channel));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	/* stop the transmit dma channels */
	(void) hpi_txdma_channel_disable(handle, channel);

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_disable_channel"));
}

int
hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel)
{
	hpi_handle_t	handle;
	int		status;
	hpi_status_t	rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop_inj_err"));

	/*
	 * Stop the DMA channel and wait for the stop-done bit.  If the
	 * stop-done bit is not set, then inject an error.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	rs = hpi_txdma_channel_disable(handle, channel);
	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
	if (status == HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK", channel));
		return (status);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "==> hxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    " (injected error but still not stopped)", channel, rs));

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop_inj_err"));

	return (status);
}

/*ARGSUSED*/
void
hxge_fixup_txdma_rings(p_hxge_t hxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_txdma_rings"));

	/*
	 * For each transmit channel, reclaim each descriptor and free
	 * the buffers.
	 */
	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_fixup_txdma_rings: NULL ring pointer"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_fixup_txdma_rings: no channel allocated"));
		return;
	}

	if (tx_rings->rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_fixup_txdma_rings: NULL rings pointer"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_fixup_txdma_rings: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "==> hxge_fixup_txdma_rings: channel %d", channel));
		hxge_txdma_fixup_channel(hxgep, tx_rings->rings[index],
		    channel);
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_txdma_rings"));
}

/*ARGSUSED*/
void
hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_channel"));

	ring_p = hxge_txdma_get_ring(hxgep, channel);
	if (ring_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_fix_channel: channel not matched "
		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
		return;
	}

	hxge_txdma_fixup_channel(hxgep, ring_p, channel);

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel)
{
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_channel"));

	if (ring_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}
	if (ring_p->tdc != channel) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_fixup_channel: channel not matched "
		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
		return;
	}
	MUTEX_ENTER(&ring_p->lock);
	(void) hxge_txdma_reclaim(hxgep, ring_p, 0);

	ring_p->rd_index = 0;
	ring_p->wr_index = 0;
	ring_p->ring_head.value = 0;
	ring_p->ring_kick_tail.value = 0;
	ring_p->descs_pending = 0;
	MUTEX_EXIT(&ring_p->lock);

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_hw_kick(p_hxge_t hxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick"));

	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_kick: NULL ring pointer"));
		return;
	}
	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_kick: no channel allocated"));
		return;
	}
	if (tx_rings->rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_kick: NULL rings pointer"));
		return;
	}
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_kick: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "==> hxge_txdma_hw_kick: channel %d", channel));
		hxge_txdma_hw_kick_channel(hxgep, tx_rings->rings[index],
		    channel);
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick"));
}

/*ARGSUSED*/
void
hxge_txdma_kick_channel(p_hxge_t hxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_kick_channel"));

	ring_p = hxge_txdma_get_ring(hxgep, channel);
	if (ring_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_kick_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_kick_channel: channel not matched "
		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
		return;
	}

	hxge_txdma_hw_kick_channel(hxgep, ring_p, channel);

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel"));
}

/*ARGSUSED*/
void
hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
    uint16_t channel)
{
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick_channel"));

	if (ring_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_kick_channel: NULL ring pointer"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick_channel"));
}

/*ARGSUSED*/
void
hxge_check_tx_hang(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_check_tx_hang"));

	/*
	 * Needs input from the hardware registers: a head index that has
	 * not moved since the last timeout, and packets not transmitted
	 * or stuck in the hardware.
	 */
	if (hxge_txdma_hung(hxgep)) {
		hxge_fixup_hung_txdma_rings(hxgep);
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_check_tx_hang"));
}

int
hxge_txdma_hung(p_hxge_t hxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;
	p_tx_ring_t	tx_ring_p;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hung"));

	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hung: NULL ring pointer"));
		return (B_FALSE);
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hung: no channel allocated"));
		return (B_FALSE);
	}

	if (tx_rings->rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hung: NULL rings pointer"));
		return (B_FALSE);
	}

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		tx_ring_p = tx_rings->rings[index];
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_txdma_hung: channel %d", channel));
		if (hxge_txdma_channel_hung(hxgep, tx_ring_p, channel)) {
			return (B_TRUE);
		}
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hung"));

	return (B_FALSE);
}

int
hxge_txdma_channel_hung(p_hxge_t hxgep, p_tx_ring_t tx_ring_p,
    uint16_t channel)
{
	uint16_t	head_index, tail_index;
	boolean_t	head_wrap, tail_wrap;
	hpi_handle_t	handle;
	tdc_tdr_head_t	tx_head;
	uint_t		tx_rd_index;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "==> hxge_txdma_channel_hung: channel %d", channel));
channel %d", channel)); 1309 MUTEX_ENTER(&tx_ring_p->lock); 1310 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0); 1311 1312 tail_index = tx_ring_p->wr_index; 1313 tail_wrap = tx_ring_p->wr_index_wrap; 1314 tx_rd_index = tx_ring_p->rd_index; 1315 MUTEX_EXIT(&tx_ring_p->lock); 1316 1317 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1318 "==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1319 "tail_index %d tail_wrap %d ", 1320 channel, tx_rd_index, tail_index, tail_wrap)); 1321 /* 1322 * Read the hardware maintained transmit head and wrap around bit. 1323 */ 1324 (void) hpi_txdma_ring_head_get(handle, channel, &tx_head); 1325 head_index = tx_head.bits.head; 1326 head_wrap = tx_head.bits.wrap; 1327 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung: " 1328 "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d", 1329 tx_rd_index, tail_index, tail_wrap, head_index, head_wrap)); 1330 1331 if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, tail_wrap) && 1332 (head_index == tx_rd_index)) { 1333 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1334 "==> hxge_txdma_channel_hung: EMPTY")); 1335 return (B_FALSE); 1336 } 1337 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1338 "==> hxge_txdma_channel_hung: Checking if ring full")); 1339 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, tail_wrap)) { 1340 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1341 "==> hxge_txdma_channel_hung: full")); 1342 return (B_TRUE); 1343 } 1344 1345 /* If not full, check with hardware to see if it is hung */ 1346 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_channel_hung")); 1347 1348 return (B_FALSE); 1349 } 1350 1351 /*ARGSUSED*/ 1352 void 1353 hxge_fixup_hung_txdma_rings(p_hxge_t hxgep) 1354 { 1355 int index, ndmas; 1356 uint16_t channel; 1357 p_tx_rings_t tx_rings; 1358 1359 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings")); 1360 tx_rings = hxgep->tx_rings; 1361 if (tx_rings == NULL) { 1362 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1363 "<== hxge_fixup_hung_txdma_rings: NULL ring pointer")); 1364 return; 1365 } 1366 ndmas = tx_rings->ndmas; 1367 if (!ndmas) { 1368 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1369 "<== hxge_fixup_hung_txdma_rings: no channel allocated")); 1370 return; 1371 } 1372 if (tx_rings->rings == NULL) { 1373 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1374 "<== hxge_fixup_hung_txdma_rings: NULL rings pointer")); 1375 return; 1376 } 1377 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings: " 1378 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1379 tx_rings, tx_rings->rings, ndmas)); 1380 1381 for (index = 0; index < ndmas; index++) { 1382 channel = tx_rings->rings[index]->tdc; 1383 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1384 "==> hxge_fixup_hung_txdma_rings: channel %d", channel)); 1385 hxge_txdma_fixup_hung_channel(hxgep, tx_rings->rings[index], 1386 channel); 1387 } 1388 1389 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_hung_txdma_rings")); 1390 } 1391 1392 /*ARGSUSED*/ 1393 void 1394 hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel) 1395 { 1396 p_tx_ring_t ring_p; 1397 1398 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_hung_channel")); 1399 ring_p = hxge_txdma_get_ring(hxgep, channel); 1400 if (ring_p == NULL) { 1401 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1402 "<== hxge_txdma_fix_hung_channel")); 1403 return; 1404 } 1405 if (ring_p->tdc != channel) { 1406 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1407 "<== hxge_txdma_fix_hung_channel: channel not matched " 1408 "ring tdc %d passed channel", ring_p->tdc, channel)); 1409 return; 1410 } 1411 hxge_txdma_fixup_channel(hxgep, ring_p, channel); 1412 1413 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== 

/*ARGSUSED*/
void
hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p,
    uint16_t channel)
{
	hpi_handle_t	handle;
	int		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_hung_channel"));

	if (ring_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_fixup_hung_channel: NULL ring pointer"));
		return;
	}
	if (ring_p->tdc != channel) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_fixup_hung_channel: channel "
		    "not matched ring tdc %d passed channel %d",
		    ring_p->tdc, channel));
		return;
	}
	/* Reclaim descriptors */
	MUTEX_ENTER(&ring_p->lock);
	(void) hxge_txdma_reclaim(hxgep, ring_p, 0);
	MUTEX_EXIT(&ring_p->lock);

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	/*
	 * Stop the DMA channel and wait for the stop-done bit.  If the
	 * stop-done bit is not set, force an error.
	 */
	status = hpi_txdma_channel_disable(handle, channel);
	if (!(status & HPI_TXDMA_STOP_FAILED)) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_fixup_hung_channel: stopped OK "
		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
		return;
	}
	/* The stop-done bit will be set as a result of error injection. */
	status = hpi_txdma_channel_disable(handle, channel);
	if (!(status & HPI_TXDMA_STOP_FAILED)) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_fixup_hung_channel: stopped again "
		    "ring tdc %d passed channel %d", ring_p->tdc, channel));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL,
	    "<== hxge_txdma_fixup_hung_channel: stop done still not set!! "
	    "ring tdc %d passed channel %d", ring_p->tdc, channel));
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_hung_channel"));
}

/*ARGSUSED*/
void
hxge_reclaim_rings(p_hxge_t hxgep)
{
	int		index, ndmas;
	uint16_t	channel;
	p_tx_rings_t	tx_rings;
	p_tx_ring_t	tx_ring_p;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings"));
	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_reclaim_rings: NULL ring pointer"));
		return;
	}
	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_reclaim_rings: no channel allocated"));
		return;
	}
	if (tx_rings->rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_reclaim_rings: NULL rings pointer"));
		return;
	}
	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_rings: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		channel = tx_rings->rings[index]->tdc;
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_reclaim_rings: channel %d", channel));
		tx_ring_p = tx_rings->rings[index];
		MUTEX_ENTER(&tx_ring_p->lock);
		(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reclaim_rings"));
}

/*
 * Static functions start here.
 */
static hxge_status_t
hxge_map_txdma(p_hxge_t hxgep)
{
	int			i, ndmas;
	uint16_t		channel;
	p_tx_rings_t		tx_rings;
	p_tx_ring_t		*tx_desc_rings;
	p_tx_mbox_areas_t	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;
	p_hxge_dma_pool_t	dma_buf_poolp;
	p_hxge_dma_pool_t	dma_cntl_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_common_t	*dma_cntl_p;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma"));

	dma_buf_poolp = hxgep->tx_buf_pool_p;
	dma_cntl_poolp = hxgep->tx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "==> hxge_map_txdma: buf not allocated"));
		return (HXGE_ERROR);
	}
	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_map_txdma: no dma allocated"));
		return (HXGE_ERROR);
	}
	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;

	tx_rings = (p_tx_rings_t)KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP);
	tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC(
	    sizeof (p_tx_ring_t) * ndmas, KM_SLEEP);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));

	tx_mbox_areas_p = (p_tx_mbox_areas_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP);
	tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC(
	    sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP);

	/*
	 * Map descriptors from the buffer pools for each dma channel.
	 */
	for (i = 0; i < ndmas; i++) {
		/*
		 * Set up and prepare buffer blocks, descriptors and mailbox.
		 */
		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
		status = hxge_map_txdma_channel(hxgep, channel,
		    (p_hxge_dma_common_t *)&dma_buf_p[i],
		    (p_tx_ring_t *)&tx_desc_rings[i],
		    dma_buf_poolp->num_chunks[i],
		    (p_hxge_dma_common_t *)&dma_cntl_p[i],
		    (p_tx_mbox_t *)&tx_mbox_p[i]);
		if (status != HXGE_OK) {
			goto hxge_map_txdma_fail1;
		}
		tx_desc_rings[i]->index = (uint16_t)i;
		tx_desc_rings[i]->tdc_stats = &hxgep->statsp->tdc_stats[i];
	}

	tx_rings->ndmas = ndmas;
	tx_rings->rings = tx_desc_rings;
	hxgep->tx_rings = tx_rings;
	tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p;
	hxgep->tx_mbox_areas_p = tx_mbox_areas_p;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
	    "tx_rings $%p rings $%p", hxgep->tx_rings,
	    hxgep->tx_rings->rings));
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: "
	    "tx_rings $%p tx_desc_rings $%p",
	    hxgep->tx_rings, tx_desc_rings));

	goto hxge_map_txdma_exit;

hxge_map_txdma_fail1:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma: uninit tx desc "
	    "(status 0x%x channel %d i %d)", status, channel, i));
	i--;
	for (; i >= 0; i--) {
		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
		hxge_unmap_txdma_channel(hxgep, channel, tx_desc_rings[i],
		    tx_mbox_p[i]);
	}

	KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas);
	KMEM_FREE(tx_rings, sizeof (tx_rings_t));
	KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas);
	KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t));

hxge_map_txdma_exit:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma: (status 0x%x channel %d)",
	    status, channel));

	return (status);
}
(status); 1619 } 1620 1621 static void 1622 hxge_unmap_txdma(p_hxge_t hxgep) 1623 { 1624 int i, ndmas; 1625 uint8_t channel; 1626 p_tx_rings_t tx_rings; 1627 p_tx_ring_t *tx_desc_rings; 1628 p_tx_mbox_areas_t tx_mbox_areas_p; 1629 p_tx_mbox_t *tx_mbox_p; 1630 p_hxge_dma_pool_t dma_buf_poolp; 1631 1632 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma")); 1633 1634 dma_buf_poolp = hxgep->tx_buf_pool_p; 1635 if (!dma_buf_poolp->buf_allocated) { 1636 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1637 "==> hxge_unmap_txdma: buf not allocated")); 1638 return; 1639 } 1640 ndmas = dma_buf_poolp->ndmas; 1641 if (!ndmas) { 1642 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1643 "<== hxge_unmap_txdma: no dma allocated")); 1644 return; 1645 } 1646 tx_rings = hxgep->tx_rings; 1647 tx_desc_rings = tx_rings->rings; 1648 if (tx_rings == NULL) { 1649 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1650 "<== hxge_unmap_txdma: NULL ring pointer")); 1651 return; 1652 } 1653 tx_desc_rings = tx_rings->rings; 1654 if (tx_desc_rings == NULL) { 1655 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1656 "<== hxge_unmap_txdma: NULL ring pointers")); 1657 return; 1658 } 1659 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma: " 1660 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1661 tx_rings, tx_desc_rings, ndmas)); 1662 1663 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 1664 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 1665 1666 for (i = 0; i < ndmas; i++) { 1667 channel = tx_desc_rings[i]->tdc; 1668 (void) hxge_unmap_txdma_channel(hxgep, channel, 1669 (p_tx_ring_t)tx_desc_rings[i], 1670 (p_tx_mbox_t)tx_mbox_p[i]); 1671 } 1672 1673 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 1674 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 1675 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 1676 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 1677 1678 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma")); 1679 } 1680 1681 static hxge_status_t 1682 hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel, 1683 p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p, 1684 uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p, 1685 p_tx_mbox_t *tx_mbox_p) 1686 { 1687 int status = HXGE_OK; 1688 1689 /* 1690 * Set up and prepare buffer blocks, descriptors and mailbox. 1691 */ 1692 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1693 "==> hxge_map_txdma_channel (channel %d)", channel)); 1694 1695 /* 1696 * Transmit buffer blocks 1697 */ 1698 status = hxge_map_txdma_channel_buf_ring(hxgep, channel, 1699 dma_buf_p, tx_desc_p, num_chunks); 1700 if (status != HXGE_OK) { 1701 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1702 "==> hxge_map_txdma_channel (channel %d): " 1703 "map buffer failed 0x%x", channel, status)); 1704 goto hxge_map_txdma_channel_exit; 1705 } 1706 /* 1707 * Transmit block ring, and mailbox. 
	hxge_map_txdma_channel_cfg_ring(hxgep, channel, dma_cntl_p,
	    *tx_desc_p, tx_mbox_p);

	goto hxge_map_txdma_channel_exit;

hxge_map_txdma_channel_fail1:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel: unmap buf"
	    "(status 0x%x channel %d)", status, channel));
	hxge_unmap_txdma_channel_buf_ring(hxgep, *tx_desc_p);

hxge_map_txdma_channel_exit:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_map_txdma_channel: (status 0x%x channel %d)",
	    status, channel));

	return (status);
}

/*ARGSUSED*/
static void
hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_unmap_txdma_channel (channel %d)", channel));

	/* unmap tx block ring, and mailbox. */
	(void) hxge_unmap_txdma_channel_cfg_ring(hxgep, tx_ring_p, tx_mbox_p);

	/* unmap buffer blocks */
	(void) hxge_unmap_txdma_channel_buf_ring(hxgep, tx_ring_p);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma_channel"));
}

/*ARGSUSED*/
static void
hxge_map_txdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
    p_hxge_dma_common_t *dma_cntl_p, p_tx_ring_t tx_ring_p,
    p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t		mboxp;
	p_hxge_dma_common_t	cntl_dmap;
	p_hxge_dma_common_t	dmap;
	tdc_tdr_cfg_t		*tx_ring_cfig_p;
	tdc_tdr_kick_t		*tx_ring_kick_p;
	tdc_tdr_cfg_t		*tx_cs_p;
	tdc_int_mask_t		*tx_evmask_p;
	tdc_mbh_t		*mboxh_p;
	tdc_mbl_t		*mboxl_p;
	uint64_t		tx_desc_len;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	dmap = (p_hxge_dma_common_t)&tx_ring_p->tdc_desc;
	hxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
	    sizeof (tx_desc_t));

	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p",
	    dma_channel, dmap->dma_cookie.dmac_laddress));

	/* The Hydra length field is 11 bits and the lower 5 bits are 0s. */
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 5);
	tx_ring_cfig_p->value =
	    (dmap->dma_cookie.dmac_laddress & TDC_TDR_CFG_ADDR_MASK) |
	    (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
	    dma_channel, tx_ring_cfig_p->value));

	tx_cs_p->bits.reset = 1;

	/* Map in the mailbox. */
	mboxp = (p_tx_mbox_t)KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_hxge_dma_common_t)&mboxp->tx_mbox;
	hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (tdc_mbh_t *)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (tdc_mbl_t *)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));

	mboxh_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
	    TDC_MBH_ADDR_SHIFT) & TDC_MBH_MASK);
	mboxl_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress &
	    TDC_MBL_MASK) >> TDC_MBL_SHIFT);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p",
	    mboxh_p->bits.mbaddr, mboxl_p->bits.mbaddr));

	/*
	 * Set page valid and no mask.
	 */
	tx_ring_p->page_hdl.value = 0;

	*tx_mbox_p = mboxp;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_map_txdma_channel_cfg_ring"));
}
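
/*
 * Worked example (not compiled) of the TDR configuration encoding above,
 * assuming a ring of 2048 descriptors: the length field holds
 * 2048 >> 5 = 64, i.e. the ring size in 32-descriptor units, packed
 * alongside the masked DMA address of the descriptor area.  The
 * desc_dma_addr name below is hypothetical.
 */
#if 0
	uint64_t	tx_desc_len = (uint64_t)(2048 >> 5);	/* 64 */
	uint64_t	cfg = (desc_dma_addr & TDC_TDR_CFG_ADDR_MASK) |
	    (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT);
#endif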
		nmsgs += tmp_bufp->nblocks;
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "==> hxge_map_txdma_channel_buf_ring: channel %d "
		    "bufp $%p nblocks %d nmsgs %d",
		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
	}
	if (!nmsgs) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_map_txdma_channel_buf_ring: channel %d "
		    "no msg blocks", channel));
		status = HXGE_ERROR;

		goto hxge_map_txdma_channel_buf_ring_exit;
	}
	tx_ring_p = (p_tx_ring_t)KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
	MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
	    (void *)hxgep->interrupt_cookie);

	/*
	 * Allocate the transmit message ring and a DMA handle for each
	 * packet that will not be copied into the premapped buffers.
	 */
	size = nmsgs * sizeof (tx_msg_t);
	tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
	for (i = 0; i < nmsgs; i++) {
		ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
		    DDI_DMA_DONTWAIT, 0, &tx_msg_ring[i].dma_handle);
		if (ddi_status != DDI_SUCCESS) {
			status |= HXGE_DDI_FAILED;
			break;
		}
	}

	if (i < nmsgs) {
		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL,
		    "Failed to allocate DMA handles."));

		goto hxge_map_txdma_channel_buf_ring_fail1;
	}
	tx_ring_p->tdc = channel;
	tx_ring_p->tx_msg_ring = tx_msg_ring;
	tx_ring_p->tx_ring_size = nmsgs;
	tx_ring_p->num_chunks = num_chunks;
	if (!hxge_tx_intr_thres) {
		hxge_tx_intr_thres = tx_ring_p->tx_ring_size / 4;
	}
	tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
	tx_ring_p->rd_index = 0;
	tx_ring_p->wr_index = 0;
	tx_ring_p->ring_head.value = 0;
	tx_ring_p->ring_kick_tail.value = 0;
	tx_ring_p->descs_pending = 0;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_map_txdma_channel_buf_ring: channel %d "
	    "actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)",
	    channel, tx_ring_p->tx_ring_size, nmsgs, hxge_tx_ring_size));

	/*
	 * Map in buffers from the buffer pool.
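	 * Each premapped chunk provides nblocks blocks of block_size
	 * bytes; every tx_msg_t entry below is bound to exactly one of
	 * those blocks via hxge_setup_dma_common().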
	 */
	index = 0;
	bsize = dma_bufp->block_size;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma_channel_buf_ring: "
	    "dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d",
	    dma_bufp, tx_ring_p, tx_msg_ring, bsize));

	for (i = 0; i < num_chunks; i++, dma_bufp++) {
		bsize = dma_bufp->block_size;
		nblocks = dma_bufp->nblocks;
		tx_buf_dma_handle = dma_bufp->dma_handle;
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "==> hxge_map_txdma_channel_buf_ring: dma chunk %d "
		    "size %d dma_bufp $%p",
		    i, sizeof (hxge_dma_common_t), dma_bufp));

		for (j = 0; j < nblocks; j++) {
			tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
			tx_msg_ring[index].offset_index = j;
			dmap = &tx_msg_ring[index++].buf_dma;
			HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
			    "==> hxge_map_txdma_channel_buf_ring: j %d "
			    "dmap $%p", j, dmap));
			hxge_setup_dma_common(dmap, dma_bufp, 1, bsize);
		}
	}

	if (i < num_chunks) {
		status = HXGE_ERROR;

		goto hxge_map_txdma_channel_buf_ring_fail1;
	}

	*tx_desc_p = tx_ring_p;

	goto hxge_map_txdma_channel_buf_ring_exit;

hxge_map_txdma_channel_buf_ring_fail1:
	/*
	 * Free every DMA handle that was successfully allocated; entries
	 * that were never reached are still NULL from KMEM_ZALLOC.
	 */
	for (index = 0; index < nmsgs; index++) {
		if (tx_msg_ring[index].dma_handle != NULL) {
			ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
		}
	}
	MUTEX_DESTROY(&tx_ring_p->lock);
	KMEM_FREE(tx_msg_ring, size);
	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));

	status = HXGE_ERROR;

hxge_map_txdma_channel_buf_ring_exit:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_map_txdma_channel_buf_ring status 0x%x", status));

	return (status);
}

/*ARGSUSED*/
static void
hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, p_tx_ring_t tx_ring_p)
{
	p_tx_msg_t	tx_msg_ring;
	p_tx_msg_t	tx_msg_p;
	int		i;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_unmap_txdma_channel_buf_ring"));
	if (tx_ring_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_unmap_txdma_channel_buf_ring: NULL ringp"));
		return;
	}
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_unmap_txdma_channel_buf_ring: channel %d",
	    tx_ring_p->tdc));

	tx_msg_ring = tx_ring_p->tx_msg_ring;
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		tx_msg_p = &tx_msg_ring[i];
		if (tx_msg_p->flags.dma_type == USE_DVMA) {
			HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "entry = %d", i));
			(void) dvma_unload(tx_msg_p->dvma_handle, 0, -1);
			tx_msg_p->dvma_handle = NULL;
			if (tx_ring_p->dvma_wr_index ==
			    tx_ring_p->dvma_wrap_mask) {
				tx_ring_p->dvma_wr_index = 0;
			} else {
				tx_ring_p->dvma_wr_index++;
			}
			tx_ring_p->dvma_pending--;
		} else if (tx_msg_p->flags.dma_type == USE_DMA) {
			if (ddi_dma_unbind_handle(tx_msg_p->dma_handle)) {
				cmn_err(CE_WARN,
				    "hxge_unmap_txdma_channel_buf_ring: "
				    "ddi_dma_unbind_handle failed.");
			}
		}
		if (tx_msg_p->tx_message != NULL) {
			freemsg(tx_msg_p->tx_message);
			tx_msg_p->tx_message = NULL;
		}
	}

	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		if (tx_msg_ring[i].dma_handle != NULL) {
			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
		}
	}

	MUTEX_DESTROY(&tx_ring_p->lock);
	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_unmap_txdma_channel_buf_ring"));
}

static hxge_status_t
hxge_txdma_hw_start(p_hxge_t hxgep)
{
	int			i, ndmas;
	uint16_t		channel;
	p_tx_rings_t		tx_rings;
	p_tx_ring_t		*tx_desc_rings;
	p_tx_mbox_areas_t	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;
	hxge_status_t		status = HXGE_OK;
	uint64_t		tmp;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start"));

	/*
	 * Initialize the REORD table: 1. Disable the VMAC.  2. Reset the
	 * FIFO error status.  3. Scrub the memory and check for errors.
	 */
	(void) hxge_tx_vmac_disable(hxgep);

	/*
	 * Clear the error status.
	 */
	HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);

	/*
	 * Scrub the rtab memory for the TDC and reset the TDC.
	 */
	HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, 0x0ULL);
	HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, 0x0ULL);

	for (i = 0; i < 256; i++) {
		HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
		    (uint64_t)i);

		/*
		 * Write the command register with an indirect read
		 * instruction.
		 */
		tmp = (0x1ULL << 30) | i;
		HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);

		/*
		 * Wait for status done.
		 */
		tmp = 0;
		do {
			HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
			    &tmp);
		} while (((tmp >> 31) & 0x1ULL) == 0x0);
	}

	for (i = 0; i < 256; i++) {
		/*
		 * Write the command register with an indirect read
		 * instruction.
		 */
		tmp = (0x1ULL << 30) | i;
		HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp);

		/*
		 * Wait for status done.
		 */
		tmp = 0;
		do {
			HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD,
			    &tmp);
		} while (((tmp >> 31) & 0x1ULL) == 0x0);

		HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, &tmp);
		if (0x1ff00ULL != (0x1ffffULL & tmp)) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
			    "unexpected data (hi), entry: %x, value: 0x%0llx\n",
			    i, (unsigned long long)tmp));
			status = HXGE_ERROR;
		}

		HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, &tmp);
		if (tmp != 0) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
			    "unexpected data (lo), entry: %x\n", i));
			status = HXGE_ERROR;
		}

		HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp);
		if (tmp != 0) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl "
			    "parity error, entry: %x, val 0x%llx\n",
			    i, (unsigned long long)tmp));
			status = HXGE_ERROR;
		}
	}

	if (status != HXGE_OK)
		goto hxge_txdma_hw_start_exit;

	/*
	 * Reset the FIFO error status for the TDC and enable FIFO error
	 * events.
	 */
	HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7);
	HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_MASK, 0x0);

	/*
	 * Initialize the Transmit DMAs.
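	 * Each channel is brought up by hxge_txdma_start_channel() below;
	 * if any channel fails to start, every channel started so far is
	 * stopped again before returning.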
	 */
	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_start: NULL ring pointer"));
		return (HXGE_ERROR);
	}

	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_start: NULL ring pointers"));
		return (HXGE_ERROR);
	}
	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_start: no dma channel allocated"));
		return (HXGE_ERROR);
	}
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_desc_rings, ndmas));

	tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;

	/*
	 * Init the DMAs.
	 */
	for (i = 0; i < ndmas; i++) {
		channel = tx_desc_rings[i]->tdc;
		status = hxge_txdma_start_channel(hxgep, channel,
		    (p_tx_ring_t)tx_desc_rings[i],
		    (p_tx_mbox_t)tx_mbox_p[i]);
		if (status != HXGE_OK) {
			goto hxge_txdma_hw_start_fail1;
		}
	}

	(void) hxge_tx_vmac_enable(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_hw_start: tx_rings $%p rings $%p",
	    hxgep->tx_rings, hxgep->tx_rings->rings));
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p",
	    hxgep->tx_rings, tx_desc_rings));

	goto hxge_txdma_hw_start_exit;

hxge_txdma_hw_start_fail1:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)",
	    status, channel, i));

	for (; i >= 0; i--) {
		channel = tx_desc_rings[i]->tdc;
		(void) hxge_txdma_stop_channel(hxgep, channel,
		    (p_tx_ring_t)tx_desc_rings[i],
		    (p_tx_mbox_t)tx_mbox_p[i]);
	}

hxge_txdma_hw_start_exit:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "<== hxge_txdma_hw_start: (status 0x%x)", status));

	return (status);
}

static void
hxge_txdma_hw_stop(p_hxge_t hxgep)
{
	int			i, ndmas;
	uint16_t		channel;
	p_tx_rings_t		tx_rings;
	p_tx_ring_t		*tx_desc_rings;
	p_tx_mbox_areas_t	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop"));

	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_stop: NULL ring pointer"));
		return;
	}

	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_stop: NULL ring pointers"));
		return;
	}

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_hw_stop: no dma channel allocated"));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));

	tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;

	for (i = 0; i < ndmas; i++) {
		channel = tx_desc_rings[i]->tdc;
		(void) hxge_txdma_stop_channel(hxgep, channel,
		    (p_tx_ring_t)tx_desc_rings[i],
		    (p_tx_mbox_t)tx_mbox_p[i]);
	}

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: "
	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_hw_stop"));
}
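
/*
 * hxge_txdma_start_channel
 *
 * Bring a single TDC online: make sure the channel is stopped, reset it,
 * program the per-channel FZC configuration, initialize the event masks
 * and finally enable the DMA engine.
 */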
static hxge_status_t
hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_start_channel (channel %d)", channel));

	/*
	 * TXDMA/TXC must be in the stopped state.
	 */
	(void) hxge_txdma_stop_inj_err(hxgep, channel);

	/*
	 * Reset the TXDMA channel.
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.reset = 1;
	status = hxge_reset_txdma_channel(hxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_start_channel (channel %d)"
		    " reset channel failed 0x%x", channel, status));

		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel-specific FZC control configurations.
	 * These FZC registers pertain to each TX channel (i.e. its logical
	 * pages).
	 */
	status = hxge_init_fzc_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = hxge_init_txdma_channel_event_mask(hxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Load the TXDMA descriptors, buffers and mailbox, then initialize
	 * and enable the DMA channel.
	 */
	status = hxge_enable_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

hxge_txdma_start_channel_exit:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_start_channel"));

	return (status);
}

/*ARGSUSED*/
static hxge_status_t
hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_stop_channel: channel %d", channel));

	/*
	 * Stop (disable) the TXDMA channel.  Note that if the stop bit is
	 * set while the STOP_N_GO bit is not, a subsequent reset will not
	 * put the channel into the reset state.
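	 * The channel is therefore stopped via hxge_txdma_stop_inj_err()
	 * before the reset below is issued.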
	 */
	(void) hxge_txdma_stop_inj_err(hxgep, channel);

	/*
	 * Reset the TXDMA channel.
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.reset = 1;
	status = hxge_reset_txdma_channel(hxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != HXGE_OK) {
		goto hxge_txdma_stop_channel_exit;
	}

hxge_txdma_stop_channel_exit:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_stop_channel"));

	return (status);
}

static p_tx_ring_t
hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel)
{
	int		index, ndmas;
	uint16_t	tdc;
	p_tx_rings_t	tx_rings;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_ring"));

	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_get_ring: NULL ring pointer"));
		return (NULL);
	}
	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_get_ring: no channel allocated"));
		return (NULL);
	}
	if (tx_rings->rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, TX_CTL,
		    "<== hxge_txdma_get_ring: NULL rings pointer"));
		return (NULL);
	}
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_ring: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		tdc = tx_rings->rings[index]->tdc;
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "==> hxge_txdma_get_ring: channel %d", tdc));
		if (channel == tdc) {
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "<== hxge_txdma_get_ring: tdc %d ring $%p",
			    tdc, tx_rings->rings[index]));
			return (p_tx_ring_t)(tx_rings->rings[index]);
		}
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_ring"));

	return (NULL);
}

static p_tx_mbox_t
hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel)
{
	int			index, tdc, ndmas;
	p_tx_rings_t		tx_rings;
	p_tx_mbox_areas_t	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_mbox"));

	tx_rings = hxgep->tx_rings;
	if (tx_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_txdma_get_mbox: NULL ring pointer"));
		return (NULL);
	}
	tx_mbox_areas_p = hxgep->tx_mbox_areas_p;
	if (tx_mbox_areas_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_txdma_get_mbox: NULL mbox pointer"));
		return (NULL);
	}
	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;

	ndmas = tx_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_txdma_get_mbox: no channel allocated"));
		return (NULL);
	}
	if (tx_rings->rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_txdma_get_mbox: NULL rings pointer"));
		return (NULL);
	}
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_mbox: "
	    "tx_rings $%p tx_desc_rings $%p ndmas %d",
	    tx_rings, tx_rings->rings, ndmas));

	for (index = 0; index < ndmas; index++) {
		tdc = tx_rings->rings[index]->tdc;
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "==> hxge_txdma_get_mbox: channel %d", tdc));
		if (channel == tdc) {
			HXGE_DEBUG_MSG((hxgep, TX_CTL,
			    "<== hxge_txdma_get_mbox: tdc %d ring $%p",
			    tdc, tx_rings->rings[index]));
			return (p_tx_mbox_t)(tx_mbox_p[index]);
		}
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_mbox"));

	return (NULL);
}

/*ARGSUSED*/
static hxge_status_t
hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
    tdc_stat_t cs)
{
	hpi_handle_t		handle;
	uint8_t			channel;
	p_tx_ring_t		*tx_rings;
	p_tx_ring_t		tx_ring_p;
	p_hxge_tx_ring_stats_t	tdc_stats;
	boolean_t		txchan_fatal = B_FALSE;
	hxge_status_t		status = HXGE_OK;
	tdc_drop_cnt_t		drop_cnt;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_tx_err_evnts"));
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	channel = ldvp->channel;

	tx_rings = hxgep->tx_rings->rings;
	tx_ring_p = tx_rings[index];
	tdc_stats = tx_ring_p->tdc_stats;

	/* Accumulate the drop counters, if any. */
	TXDMA_REG_READ64(handle, TDC_DROP_CNT, channel, &drop_cnt.value);
	tdc_stats->count_hdr_size_err += drop_cnt.bits.hdr_size_error_count;
	tdc_stats->count_runt += drop_cnt.bits.runt_count;
	tdc_stats->count_abort += drop_cnt.bits.abort_count;

	if (cs.bits.peu_resp_err) {
		tdc_stats->peu_resp_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: peu_resp_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.pkt_size_hdr_err) {
		tdc_stats->pkt_size_hdr_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_size_hdr_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.runt_pkt_drop_err) {
		tdc_stats->runt_pkt_drop_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: runt_pkt_drop_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.pkt_size_err) {
		tdc_stats->pkt_size_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_size_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.tx_rng_oflow) {
		tdc_stats->tx_rng_oflow++;
		/* Log only the first occurrence. */
		if (tdc_stats->tx_rng_oflow == 1)
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_tx_err_evnts(channel %d): "
			    "fatal error: tx_rng_oflow", channel));
	}

	if (cs.bits.pref_par_err) {
		tdc_stats->pref_par_err++;

		/* Get the address of the parity error read data. */
		TXDMA_REG_READ64(handle, TDC_PREF_PAR_LOG,
		    channel, &tdc_stats->errlog.value);

		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: pref_par_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.tdr_pref_cpl_to) {
		tdc_stats->tdr_pref_cpl_to++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: tdr_pref_cpl_to", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.pkt_cpl_to) {
		tdc_stats->pkt_cpl_to++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PKT_CPL_TO);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_cpl_to", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.invalid_sop) {
		tdc_stats->invalid_sop++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_INVALID_SOP);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: invalid_sop", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.unexpected_sop) {
		tdc_stats->unexpected_sop++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: unexpected_sop", channel));
		txchan_fatal = B_TRUE;
	}

	/* Clear the error injection source in case this was injected. */
	TXDMA_REG_WRITE64(handle, TDC_STAT_INT_DBG, channel, 0);

	if (txchan_fatal) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_tx_err_evnts: "
		    "fatal error on channel %d cs 0x%llx\n",
		    channel, cs.value));
		status = hxge_txdma_fatal_err_recover(hxgep, channel,
		    tx_ring_p);
		if (status == HXGE_OK) {
			FM_SERVICE_RESTORED(hxgep);
		}
	}

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_tx_err_evnts"));

	return (status);
}

hxge_status_t
hxge_txdma_handle_sys_errors(p_hxge_t hxgep)
{
	hpi_handle_t		handle;
	hxge_status_t		status = HXGE_OK;
	tdc_fifo_err_stat_t	fifo_stat;
	hxge_tdc_sys_stats_t	*tdc_sys_stats;

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_handle_sys_errors"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * The FIFO is shared by all channels.  Get the status of the
	 * reorder buffer and reorder table buffer errors.
	 */
	HXGE_REG_RD64(handle, TDC_FIFO_ERR_STAT, &fifo_stat.value);

	/*
	 * Clear the error bits.  Note that writing a 1 clears a bit and
	 * writing a 0 does nothing.
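	 * Writing back the value just read therefore acknowledges exactly
	 * the error bits that were observed, without clobbering errors
	 * that arrive after the read.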
	 */
	HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, fifo_stat.value);

	tdc_sys_stats = &hxgep->statsp->tdc_sys_stats;
	if (fifo_stat.bits.reord_tbl_par_err) {
		tdc_sys_stats->reord_tbl_par_err++;
		HXGE_FM_REPORT_ERROR(hxgep, NULL,
		    HXGE_FM_EREPORT_TDMC_REORD_TBL_PAR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_handle_sys_errors: fatal error: "
		    "reord_tbl_par_err"));
	}

	if (fifo_stat.bits.reord_buf_ded_err) {
		tdc_sys_stats->reord_buf_ded_err++;
		HXGE_FM_REPORT_ERROR(hxgep, NULL,
		    HXGE_FM_EREPORT_TDMC_REORD_BUF_DED);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_handle_sys_errors: "
		    "fatal error: reord_buf_ded_err"));
	}

	if (fifo_stat.bits.reord_buf_sec_err) {
		tdc_sys_stats->reord_buf_sec_err++;
		/* Single-bit errors are corrected; log only the first. */
		if (tdc_sys_stats->reord_buf_sec_err == 1)
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_txdma_handle_sys_errors: "
			    "reord_buf_sec_err"));
	}

	if (fifo_stat.bits.reord_tbl_par_err ||
	    fifo_stat.bits.reord_buf_ded_err) {
		status = hxge_tx_port_fatal_err_recover(hxgep);
		if (status == HXGE_OK) {
			FM_SERVICE_RESTORED(hxgep);
		}
	}

	HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_handle_sys_errors"));

	return (status);
}

static hxge_status_t
hxge_txdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	p_tx_mbox_t	tx_mbox_p;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover"));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovering from TxDMAChannel#%d error...", channel));

	/*
	 * Stop the DMA channel and wait for the stop-done indication.  If
	 * the stop-done bit does not set, flag an error.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping txdma channel(%d)",
	    channel));
	MUTEX_ENTER(&tx_ring_p->lock);
	rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
	if (rs != HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_fatal_err_recover (channel %d): "
		    "stop failed ", channel));

		goto fail;
	}
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming txdma channel(%d)",
	    channel));
	(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);

	/*
	 * Reset the TXDMA channel.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "resetting txdma channel(%d)",
	    channel));
	if ((rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
	    HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_fatal_err_recover (channel %d)"
		    " reset channel failed 0x%x", channel, rs));

		goto fail;
	}

	/*
	 * Reset the tail (kick) register to 0.  The hardware will not reset
	 * it, and a TX ring overflow fatal error results if the tail is not
	 * zero after a reset.
	 */
	TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0);

	/*
	 * Restart the TXDMA channel.
	 *
	 * Initialize the TXDMA channel-specific FZC control configurations.
	 * These FZC registers pertain to each TX channel (i.e. its logical
	 * pages).
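	 * This mirrors the normal hxge_txdma_start_channel() sequence:
	 * FZC configuration, event mask initialization, then channel
	 * enable.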
	 */
	tx_mbox_p = hxge_txdma_get_mbox(hxgep, channel);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "restarting txdma channel(%d)",
	    channel));
	status = hxge_init_fzc_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK)
		goto fail;

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = hxge_init_txdma_channel_event_mask(hxgep, channel,
	    &tx_ring_p->tx_evmask);
	if (status != HXGE_OK)
		goto fail;

	tx_ring_p->wr_index_wrap = B_FALSE;
	tx_ring_p->wr_index = 0;
	tx_ring_p->rd_index = 0;

	/*
	 * Load the TXDMA descriptors, buffers and mailbox, then initialize
	 * and enable the DMA channel.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "enabling txdma channel(%d)",
	    channel));
	status = hxge_enable_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	MUTEX_EXIT(&tx_ring_p->lock);
	if (status != HXGE_OK)
		goto fail;

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxDMAChannel#%d Restored", channel));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_txdma_fatal_err_recover"));

	return (HXGE_OK);

fail:
	MUTEX_EXIT(&tx_ring_p->lock);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "hxge_txdma_fatal_err_recover (channel %d): "
	    "failed to recover this txdma channel", channel));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));

	return (status);
}

static hxge_status_t
hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	hxge_status_t	status = HXGE_OK;
	p_tx_ring_t	*tx_desc_rings;
	p_tx_rings_t	tx_rings;
	p_tx_ring_t	tx_ring_p;
	int		i, ndmas;
	uint16_t	channel;
	block_reset_t	reset_reg;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "==> hxge_tx_port_fatal_err_recover"));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovering from TxPort error..."));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/* Reset the TDC block from the PEU for this fatal error. */
	reset_reg.value = 0;
	reset_reg.bits.tdc_rst = 1;
	HXGE_REG_WR32(handle, BLOCK_RESET, reset_reg.value);

	HXGE_DELAY(1000);

	/*
	 * Stop each DMA channel and wait for the stop-done indication.  If
	 * the stop-done bit does not set, flag an error.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping all DMA channels..."));

	tx_rings = hxgep->tx_rings;
	tx_desc_rings = tx_rings->rings;
	ndmas = tx_rings->ndmas;

	/* Take every ring lock before touching the channels. */
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_ENTER(&tx_ring_p->lock);
	}

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		channel = tx_desc_rings[i]->tdc;
		tx_ring_p = tx_rings->rings[i];
		rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_tx_port_fatal_err_recover "
			    "(channel %d): stop failed ", channel));

			goto fail;
		}
	}

	/*
	 * Do a reclaim on all of the DMAs.
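	 * hxge_txdma_reclaim() hands back any descriptors that the
	 * hardware has already completed, so each ring is drained before
	 * the TDC block is restarted.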
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming all DMA channels..."));
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
	}

	/* Restart the TDC. */
	if ((status = hxge_txdma_hw_start(hxgep)) != HXGE_OK)
		goto fail;

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxPort Restored"));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "<== hxge_tx_port_fatal_err_recover"));
	return (HXGE_OK);

fail:
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "<== hxge_tx_port_fatal_err_recover: "
	    "failed to recover the TDC block"));

	return (status);
}