1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <hxge_impl.h> 27 #include <hxge_txdma.h> 28 #include <sys/llc1.h> 29 30 uint32_t hxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 31 uint32_t hxge_tx_minfree = 32; 32 uint32_t hxge_tx_intr_thres = 0; 33 uint32_t hxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 34 uint32_t hxge_tx_tiny_pack = 1; 35 uint32_t hxge_tx_use_bcopy = 1; 36 37 extern uint32_t hxge_tx_ring_size; 38 extern uint32_t hxge_bcopy_thresh; 39 extern uint32_t hxge_dvma_thresh; 40 extern uint32_t hxge_dma_stream_thresh; 41 extern dma_method_t hxge_force_dma; 42 43 /* Device register access attributes for PIO. */ 44 extern ddi_device_acc_attr_t hxge_dev_reg_acc_attr; 45 46 /* Device descriptor access attributes for DMA. */ 47 extern ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr; 48 49 /* Device buffer access attributes for DMA. 
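 * Note: the access attributes and DMA attributes declared below
 * (hxge_desc_dma_attr, hxge_tx_dma_attr) are defined elsewhere in the
 * driver; hxge_tx_dma_attr is the one used in this file when allocating
 * per-packet DMA handles in hxge_map_txdma_channel_buf_ring().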
*/ 50 extern ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr; 51 extern ddi_dma_attr_t hxge_desc_dma_attr; 52 extern ddi_dma_attr_t hxge_tx_dma_attr; 53 54 static hxge_status_t hxge_map_txdma(p_hxge_t hxgep); 55 static void hxge_unmap_txdma(p_hxge_t hxgep); 56 static hxge_status_t hxge_txdma_hw_start(p_hxge_t hxgep); 57 static void hxge_txdma_hw_stop(p_hxge_t hxgep); 58 59 static hxge_status_t hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel, 60 p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p, 61 uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p, 62 p_tx_mbox_t *tx_mbox_p); 63 static void hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel, 64 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p); 65 static hxge_status_t hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t, 66 p_hxge_dma_common_t *, p_tx_ring_t *, uint32_t); 67 static void hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, 68 p_tx_ring_t tx_ring_p); 69 static void hxge_map_txdma_channel_cfg_ring(p_hxge_t, uint16_t, 70 p_hxge_dma_common_t *, p_tx_ring_t, p_tx_mbox_t *); 71 static void hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep, 72 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p); 73 static hxge_status_t hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel, 74 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p); 75 static hxge_status_t hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel, 76 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p); 77 static p_tx_ring_t hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel); 78 static hxge_status_t hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, 79 p_hxge_ldv_t ldvp, tdc_stat_t cs); 80 static p_tx_mbox_t hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel); 81 static hxge_status_t hxge_txdma_fatal_err_recover(p_hxge_t hxgep, 82 uint16_t channel, p_tx_ring_t tx_ring_p); 83 static hxge_status_t hxge_tx_port_fatal_err_recover(p_hxge_t hxgep); 84 85 hxge_status_t 86 hxge_init_txdma_channels(p_hxge_t hxgep) 87 { 88 hxge_status_t status = HXGE_OK; 89 block_reset_t reset_reg; 90 91 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_init_txdma_channels")); 92 93 /* 94 * Reset TDC block from PEU to cleanup any unknown configuration. 95 * This may be resulted from previous reboot. 
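 * The reset is performed by setting the tdc_rst bit in the BLOCK_RESET
 * register and waiting briefly for it to take effect; only then are the
 * transmit DMA channels mapped and started.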
96 */ 97 reset_reg.value = 0; 98 reset_reg.bits.tdc_rst = 1; 99 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 100 101 HXGE_DELAY(1000); 102 103 status = hxge_map_txdma(hxgep); 104 if (status != HXGE_OK) { 105 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 106 "<== hxge_init_txdma_channels: status 0x%x", status)); 107 return (status); 108 } 109 110 status = hxge_txdma_hw_start(hxgep); 111 if (status != HXGE_OK) { 112 hxge_unmap_txdma(hxgep); 113 return (status); 114 } 115 116 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 117 "<== hxge_init_txdma_channels: status 0x%x", status)); 118 119 return (HXGE_OK); 120 } 121 122 void 123 hxge_uninit_txdma_channels(p_hxge_t hxgep) 124 { 125 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_uninit_txdma_channels")); 126 127 hxge_txdma_hw_stop(hxgep); 128 hxge_unmap_txdma(hxgep); 129 130 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_uinit_txdma_channels")); 131 } 132 133 void 134 hxge_setup_dma_common(p_hxge_dma_common_t dest_p, p_hxge_dma_common_t src_p, 135 uint32_t entries, uint32_t size) 136 { 137 size_t tsize; 138 *dest_p = *src_p; 139 tsize = size * entries; 140 dest_p->alength = tsize; 141 dest_p->nblocks = entries; 142 dest_p->block_size = size; 143 dest_p->offset += tsize; 144 145 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 146 src_p->alength -= tsize; 147 src_p->dma_cookie.dmac_laddress += tsize; 148 src_p->dma_cookie.dmac_size -= tsize; 149 } 150 151 hxge_status_t 152 hxge_reset_txdma_channel(p_hxge_t hxgep, uint16_t channel, uint64_t reg_data) 153 { 154 hpi_status_t rs = HPI_SUCCESS; 155 hxge_status_t status = HXGE_OK; 156 hpi_handle_t handle; 157 158 HXGE_DEBUG_MSG((hxgep, TX_CTL, " ==> hxge_reset_txdma_channel")); 159 160 handle = HXGE_DEV_HPI_HANDLE(hxgep); 161 if ((reg_data & TDC_TDR_RST_MASK) == TDC_TDR_RST_MASK) { 162 rs = hpi_txdma_channel_reset(handle, channel); 163 } else { 164 rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel); 165 } 166 167 if (rs != HPI_SUCCESS) { 168 status = HXGE_ERROR | rs; 169 } 170 171 /* 172 * Reset the tail (kick) register to 0. (Hardware will not reset it. Tx 173 * overflow fatal error if tail is not set to 0 after reset! 174 */ 175 TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0); 176 177 HXGE_DEBUG_MSG((hxgep, TX_CTL, " <== hxge_reset_txdma_channel")); 178 179 return (status); 180 } 181 182 hxge_status_t 183 hxge_init_txdma_channel_event_mask(p_hxge_t hxgep, uint16_t channel, 184 tdc_int_mask_t *mask_p) 185 { 186 hpi_handle_t handle; 187 hpi_status_t rs = HPI_SUCCESS; 188 hxge_status_t status = HXGE_OK; 189 190 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 191 "<== hxge_init_txdma_channel_event_mask")); 192 193 handle = HXGE_DEV_HPI_HANDLE(hxgep); 194 195 /* 196 * Mask off tx_rng_oflow since it is a false alarm. The driver 197 * ensures not over flowing the hardware and check the hardware 198 * status. 199 */ 200 mask_p->bits.tx_rng_oflow = 1; 201 rs = hpi_txdma_event_mask(handle, OP_SET, channel, mask_p); 202 if (rs != HPI_SUCCESS) { 203 status = HXGE_ERROR | rs; 204 } 205 206 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 207 "==> hxge_init_txdma_channel_event_mask")); 208 return (status); 209 } 210 211 hxge_status_t 212 hxge_enable_txdma_channel(p_hxge_t hxgep, 213 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 214 { 215 hpi_handle_t handle; 216 hpi_status_t rs = HPI_SUCCESS; 217 hxge_status_t status = HXGE_OK; 218 219 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_enable_txdma_channel")); 220 221 handle = HXGE_DEV_HPI_HANDLE(hxgep); 222 /* 223 * Use configuration data composed at init time. 
Write to hardware the 224 * transmit ring configurations. 225 */ 226 rs = hpi_txdma_ring_config(handle, OP_SET, channel, 227 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 228 229 if (rs != HPI_SUCCESS) { 230 return (HXGE_ERROR | rs); 231 } 232 233 /* Write to hardware the mailbox */ 234 rs = hpi_txdma_mbox_config(handle, OP_SET, channel, 235 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 236 237 if (rs != HPI_SUCCESS) { 238 return (HXGE_ERROR | rs); 239 } 240 241 /* Start the DMA engine. */ 242 rs = hpi_txdma_channel_init_enable(handle, channel); 243 if (rs != HPI_SUCCESS) { 244 return (HXGE_ERROR | rs); 245 } 246 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_enable_txdma_channel")); 247 return (status); 248 } 249 250 void 251 hxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, boolean_t l4_cksum, 252 int pkt_len, uint8_t npads, p_tx_pkt_hdr_all_t pkthdrp) 253 { 254 p_tx_pkt_header_t hdrp; 255 p_mblk_t nmp; 256 uint64_t tmp; 257 size_t mblk_len; 258 size_t iph_len; 259 size_t hdrs_size; 260 uint8_t *ip_buf; 261 uint16_t eth_type; 262 uint8_t ipproto; 263 boolean_t is_vlan = B_FALSE; 264 size_t eth_hdr_size; 265 uint8_t hdrs_buf[sizeof (struct ether_header) + 64 + sizeof (uint32_t)]; 266 267 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: mp $%p", mp)); 268 269 /* 270 * Caller should zero out the headers first. 271 */ 272 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 273 274 if (fill_len) { 275 HXGE_DEBUG_MSG((NULL, TX_CTL, 276 "==> hxge_fill_tx_hdr: pkt_len %d npads %d", 277 pkt_len, npads)); 278 tmp = (uint64_t)pkt_len; 279 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 280 281 goto fill_tx_header_done; 282 } 283 tmp = (uint64_t)npads; 284 hdrp->value |= (tmp << TX_PKT_HEADER_PAD_SHIFT); 285 286 /* 287 * mp is the original data packet (does not include the Neptune 288 * transmit header). 
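 * The code below peeks at the Ethernet (and optional VLAN) header in the
 * first mblk to find the ethertype, then locates the IP or IPv6 header so
 * that the L3START, IHL and checksum (PKT_TYPE) fields of the internal
 * transmit header can be filled in.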
289 */ 290 nmp = mp; 291 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 292 HXGE_DEBUG_MSG((NULL, TX_CTL, 293 "==> hxge_fill_tx_hdr: mp $%p b_rptr $%p len %d", 294 mp, nmp->b_rptr, mblk_len)); 295 ip_buf = NULL; 296 bcopy(nmp->b_rptr, &hdrs_buf[0], sizeof (struct ether_vlan_header)); 297 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 298 HXGE_DEBUG_MSG((NULL, TX_CTL, 299 "==> : hxge_fill_tx_hdr: (value 0x%llx) ether type 0x%x", 300 eth_type, hdrp->value)); 301 302 if (eth_type < ETHERMTU) { 303 tmp = 1ull; 304 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 305 HXGE_DEBUG_MSG((NULL, TX_CTL, 306 "==> hxge_tx_pkt_hdr_init: LLC value 0x%llx", hdrp->value)); 307 if (*(hdrs_buf + sizeof (struct ether_header)) == 308 LLC_SNAP_SAP) { 309 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 310 sizeof (struct ether_header) + 6))); 311 HXGE_DEBUG_MSG((NULL, TX_CTL, 312 "==> hxge_tx_pkt_hdr_init: LLC ether type 0x%x", 313 eth_type)); 314 } else { 315 goto fill_tx_header_done; 316 } 317 } else if (eth_type == VLAN_ETHERTYPE) { 318 tmp = 1ull; 319 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 320 321 eth_type = ntohs(((struct ether_vlan_header *) 322 hdrs_buf)->ether_type); 323 is_vlan = B_TRUE; 324 HXGE_DEBUG_MSG((NULL, TX_CTL, 325 "==> hxge_tx_pkt_hdr_init: VLAN value 0x%llx", 326 hdrp->value)); 327 } 328 if (!is_vlan) { 329 eth_hdr_size = sizeof (struct ether_header); 330 } else { 331 eth_hdr_size = sizeof (struct ether_vlan_header); 332 } 333 334 switch (eth_type) { 335 case ETHERTYPE_IP: 336 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 337 ip_buf = nmp->b_rptr + eth_hdr_size; 338 mblk_len -= eth_hdr_size; 339 iph_len = ((*ip_buf) & 0x0f); 340 if (mblk_len > (iph_len + sizeof (uint32_t))) { 341 ip_buf = nmp->b_rptr; 342 ip_buf += eth_hdr_size; 343 } else { 344 ip_buf = NULL; 345 } 346 } 347 if (ip_buf == NULL) { 348 hdrs_size = 0; 349 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 350 while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) { 351 mblk_len = (size_t)nmp->b_wptr - 352 (size_t)nmp->b_rptr; 353 if (mblk_len >= 354 (sizeof (hdrs_buf) - hdrs_size)) 355 mblk_len = sizeof (hdrs_buf) - 356 hdrs_size; 357 bcopy(nmp->b_rptr, 358 &hdrs_buf[hdrs_size], mblk_len); 359 hdrs_size += mblk_len; 360 nmp = nmp->b_cont; 361 } 362 ip_buf = hdrs_buf; 363 ip_buf += eth_hdr_size; 364 iph_len = ((*ip_buf) & 0x0f); 365 } 366 ipproto = ip_buf[9]; 367 368 tmp = (uint64_t)iph_len; 369 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 370 tmp = (uint64_t)(eth_hdr_size >> 1); 371 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 372 373 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv4 " 374 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 375 "tmp 0x%x", iph_len, hdrp->bits.l3start, eth_hdr_size, 376 ipproto, tmp)); 377 HXGE_DEBUG_MSG((NULL, TX_CTL, 378 "==> hxge_tx_pkt_hdr_init: IP value 0x%llx", hdrp->value)); 379 break; 380 381 case ETHERTYPE_IPV6: 382 hdrs_size = 0; 383 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 384 while ((nmp) && (hdrs_size < sizeof (hdrs_buf))) { 385 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 386 if (mblk_len >= (sizeof (hdrs_buf) - hdrs_size)) 387 mblk_len = sizeof (hdrs_buf) - hdrs_size; 388 bcopy(nmp->b_rptr, &hdrs_buf[hdrs_size], mblk_len); 389 hdrs_size += mblk_len; 390 nmp = nmp->b_cont; 391 } 392 ip_buf = hdrs_buf; 393 ip_buf += eth_hdr_size; 394 395 tmp = 1ull; 396 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 397 398 tmp = (eth_hdr_size >> 1); 399 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 400 401 /* byte 6 is the next 
header protocol */ 402 ipproto = ip_buf[6]; 403 404 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: IPv6 " 405 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 406 iph_len, hdrp->bits.l3start, eth_hdr_size, ipproto)); 407 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_hdr_init: IPv6 " 408 "value 0x%llx", hdrp->value)); 409 break; 410 411 default: 412 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: non-IP")); 413 goto fill_tx_header_done; 414 } 415 416 switch (ipproto) { 417 case IPPROTO_TCP: 418 HXGE_DEBUG_MSG((NULL, TX_CTL, 419 "==> hxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 420 if (l4_cksum) { 421 tmp = 1ull; 422 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 423 HXGE_DEBUG_MSG((NULL, TX_CTL, 424 "==> hxge_tx_pkt_hdr_init: TCP CKSUM" 425 "value 0x%llx", hdrp->value)); 426 } 427 HXGE_DEBUG_MSG((NULL, TX_CTL, 428 "==> hxge_tx_pkt_hdr_init: TCP value 0x%llx", hdrp->value)); 429 break; 430 431 case IPPROTO_UDP: 432 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_fill_tx_hdr: UDP")); 433 if (l4_cksum) { 434 tmp = 0x2ull; 435 hdrp->value |= (tmp << TX_PKT_HEADER_PKT_TYPE_SHIFT); 436 } 437 HXGE_DEBUG_MSG((NULL, TX_CTL, 438 "==> hxge_tx_pkt_hdr_init: UDP value 0x%llx", 439 hdrp->value)); 440 break; 441 442 default: 443 goto fill_tx_header_done; 444 } 445 446 fill_tx_header_done: 447 HXGE_DEBUG_MSG((NULL, TX_CTL, 448 "==> hxge_fill_tx_hdr: pkt_len %d npads %d value 0x%llx", 449 pkt_len, npads, hdrp->value)); 450 HXGE_DEBUG_MSG((NULL, TX_CTL, "<== hxge_fill_tx_hdr")); 451 } 452 453 /*ARGSUSED*/ 454 p_mblk_t 455 hxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 456 { 457 p_mblk_t newmp = NULL; 458 459 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 460 HXGE_DEBUG_MSG((NULL, TX_CTL, 461 "<== hxge_tx_pkt_header_reserve: allocb failed")); 462 return (NULL); 463 } 464 HXGE_DEBUG_MSG((NULL, TX_CTL, 465 "==> hxge_tx_pkt_header_reserve: get new mp")); 466 DB_TYPE(newmp) = M_DATA; 467 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 468 linkb(newmp, mp); 469 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 470 471 HXGE_DEBUG_MSG((NULL, TX_CTL, 472 "==>hxge_tx_pkt_header_reserve: b_rptr $%p b_wptr $%p", 473 newmp->b_rptr, newmp->b_wptr)); 474 HXGE_DEBUG_MSG((NULL, TX_CTL, 475 "<== hxge_tx_pkt_header_reserve: use new mp")); 476 return (newmp); 477 } 478 479 int 480 hxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 481 { 482 uint_t nmblks; 483 ssize_t len; 484 uint_t pkt_len; 485 p_mblk_t nmp, bmp, tmp; 486 uint8_t *b_wptr; 487 488 HXGE_DEBUG_MSG((NULL, TX_CTL, 489 "==> hxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p len %d", 490 mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 491 492 nmp = mp; 493 bmp = mp; 494 nmblks = 0; 495 pkt_len = 0; 496 *tot_xfer_len_p = 0; 497 498 while (nmp) { 499 len = MBLKL(nmp); 500 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: " 501 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 502 len, pkt_len, nmblks, *tot_xfer_len_p)); 503 504 if (len <= 0) { 505 bmp = nmp; 506 nmp = nmp->b_cont; 507 HXGE_DEBUG_MSG((NULL, TX_CTL, 508 "==> hxge_tx_pkt_nmblocks:" 509 " len (0) pkt_len %d nmblks %d", pkt_len, nmblks)); 510 continue; 511 } 512 *tot_xfer_len_p += len; 513 HXGE_DEBUG_MSG((NULL, TX_CTL, "==> hxge_tx_pkt_nmblocks: " 514 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 515 len, pkt_len, nmblks, *tot_xfer_len_p)); 516 517 if (len < hxge_bcopy_thresh) { 518 HXGE_DEBUG_MSG((NULL, TX_CTL, 519 "==> hxge_tx_pkt_nmblocks: " 520 "len %d (< thresh) pkt_len %d nmblks %d", 521 len, pkt_len, nmblks)); 522 if (pkt_len == 0) 523 nmblks++; 524 pkt_len += len; 525 
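			/*
			 * Fragments shorter than hxge_bcopy_thresh are
			 * coalesced into a single bcopy'd gather pointer,
			 * so only the first fragment of a group bumps
			 * nmblks.  Once the accumulated length reaches
			 * the threshold, the group is closed and the
			 * count restarts with the next small fragment.
			 */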
if (pkt_len >= hxge_bcopy_thresh) { 526 pkt_len = 0; 527 len = 0; 528 nmp = bmp; 529 } 530 } else { 531 HXGE_DEBUG_MSG((NULL, TX_CTL, 532 "==> hxge_tx_pkt_nmblocks: " 533 "len %d (> thresh) pkt_len %d nmblks %d", 534 len, pkt_len, nmblks)); 535 pkt_len = 0; 536 nmblks++; 537 /* 538 * Hardware limits the transfer length to 4K. If len is 539 * more than 4K, we need to break it up to at most 2 540 * more blocks. 541 */ 542 if (len > TX_MAX_TRANSFER_LENGTH) { 543 uint32_t nsegs; 544 545 HXGE_DEBUG_MSG((NULL, TX_CTL, 546 "==> hxge_tx_pkt_nmblocks: " 547 "len %d pkt_len %d nmblks %d nsegs %d", 548 len, pkt_len, nmblks, nsegs)); 549 nsegs = 1; 550 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 551 ++nsegs; 552 } 553 do { 554 b_wptr = nmp->b_rptr + 555 TX_MAX_TRANSFER_LENGTH; 556 nmp->b_wptr = b_wptr; 557 if ((tmp = dupb(nmp)) == NULL) { 558 return (0); 559 } 560 tmp->b_rptr = b_wptr; 561 tmp->b_wptr = nmp->b_wptr; 562 tmp->b_cont = nmp->b_cont; 563 nmp->b_cont = tmp; 564 nmblks++; 565 if (--nsegs) { 566 nmp = tmp; 567 } 568 } while (nsegs); 569 nmp = tmp; 570 } 571 } 572 573 /* 574 * Hardware limits the transmit gather pointers to 15. 575 */ 576 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 577 TX_MAX_GATHER_POINTERS) { 578 HXGE_DEBUG_MSG((NULL, TX_CTL, 579 "==> hxge_tx_pkt_nmblocks: pull msg - " 580 "len %d pkt_len %d nmblks %d", 581 len, pkt_len, nmblks)); 582 /* Pull all message blocks from b_cont */ 583 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 584 return (0); 585 } 586 freemsg(nmp->b_cont); 587 nmp->b_cont = tmp; 588 pkt_len = 0; 589 } 590 bmp = nmp; 591 nmp = nmp->b_cont; 592 } 593 594 HXGE_DEBUG_MSG((NULL, TX_CTL, 595 "<== hxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 596 "nmblks %d len %d tot_xfer_len %d", 597 mp->b_rptr, mp->b_wptr, nmblks, MBLKL(mp), *tot_xfer_len_p)); 598 return (nmblks); 599 } 600 601 boolean_t 602 hxge_txdma_reclaim(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, int nmblks) 603 { 604 boolean_t status = B_TRUE; 605 p_hxge_dma_common_t tx_desc_dma_p; 606 hxge_dma_common_t desc_area; 607 p_tx_desc_t tx_desc_ring_vp; 608 p_tx_desc_t tx_desc_p; 609 p_tx_desc_t tx_desc_pp; 610 tx_desc_t r_tx_desc; 611 p_tx_msg_t tx_msg_ring; 612 p_tx_msg_t tx_msg_p; 613 hpi_handle_t handle; 614 tdc_tdr_head_t tx_head; 615 uint32_t pkt_len; 616 uint_t tx_rd_index; 617 uint16_t head_index, tail_index; 618 uint8_t tdc; 619 boolean_t head_wrap, tail_wrap; 620 p_hxge_tx_ring_stats_t tdc_stats; 621 tdc_byte_cnt_t byte_cnt; 622 tdc_tdr_qlen_t qlen; 623 int rc; 624 625 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_reclaim")); 626 627 status = ((tx_ring_p->descs_pending < hxge_reclaim_pending) && 628 (nmblks != 0)); 629 HXGE_DEBUG_MSG((hxgep, TX_CTL, 630 "==> hxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 631 tx_ring_p->descs_pending, hxge_reclaim_pending, nmblks)); 632 633 if (!status) { 634 tx_desc_dma_p = &tx_ring_p->tdc_desc; 635 desc_area = tx_ring_p->tdc_desc; 636 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 637 tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 638 tx_rd_index = tx_ring_p->rd_index; 639 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 640 tx_msg_ring = tx_ring_p->tx_msg_ring; 641 tx_msg_p = &tx_msg_ring[tx_rd_index]; 642 tdc = tx_ring_p->tdc; 643 tdc_stats = tx_ring_p->tdc_stats; 644 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 645 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 646 } 647 tail_index = tx_ring_p->wr_index; 648 tail_wrap = tx_ring_p->wr_index_wrap; 649 650 /* 651 * tdc_byte_cnt reg can be used to get bytes transmitted. 
It 652 * includes padding too in case of runt packets. 653 */ 654 handle = HXGE_DEV_HPI_HANDLE(hxgep); 655 TXDMA_REG_READ64(handle, TDC_BYTE_CNT, tdc, &byte_cnt.value); 656 tdc_stats->obytes_with_pad += byte_cnt.bits.byte_count; 657 658 HXGE_DEBUG_MSG((hxgep, TX_CTL, 659 "==> hxge_txdma_reclaim: tdc %d tx_rd_index %d " 660 "tail_index %d tail_wrap %d tx_desc_p $%p ($%p) ", 661 tdc, tx_rd_index, tail_index, tail_wrap, 662 tx_desc_p, (*(uint64_t *)tx_desc_p))); 663 664 /* 665 * Read the hardware maintained transmit head and wrap around 666 * bit. 667 */ 668 TXDMA_REG_READ64(handle, TDC_TDR_HEAD, tdc, &tx_head.value); 669 head_index = tx_head.bits.head; 670 head_wrap = tx_head.bits.wrap; 671 HXGE_DEBUG_MSG((hxgep, TX_CTL, 672 "==> hxge_txdma_reclaim: " 673 "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d", 674 tx_rd_index, tail_index, tail_wrap, head_index, head_wrap)); 675 676 /* 677 * For debug only. This can be used to verify the qlen and make 678 * sure the hardware is wrapping the Tdr correctly. 679 */ 680 TXDMA_REG_READ64(handle, TDC_TDR_QLEN, tdc, &qlen.value); 681 HXGE_DEBUG_MSG((hxgep, TX_CTL, 682 "==> hxge_txdma_reclaim: tdr_qlen %d tdr_pref_qlen %d", 683 qlen.bits.tdr_qlen, qlen.bits.tdr_pref_qlen)); 684 685 if (head_index == tail_index) { 686 if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, 687 tail_wrap) && (head_index == tx_rd_index)) { 688 HXGE_DEBUG_MSG((hxgep, TX_CTL, 689 "==> hxge_txdma_reclaim: EMPTY")); 690 return (B_TRUE); 691 } 692 HXGE_DEBUG_MSG((hxgep, TX_CTL, 693 "==> hxge_txdma_reclaim: Checking if ring full")); 694 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 695 tail_wrap)) { 696 HXGE_DEBUG_MSG((hxgep, TX_CTL, 697 "==> hxge_txdma_reclaim: full")); 698 return (B_FALSE); 699 } 700 } 701 HXGE_DEBUG_MSG((hxgep, TX_CTL, 702 "==> hxge_txdma_reclaim: tx_rd_index and head_index")); 703 704 /* XXXX: limit the # of reclaims */ 705 tx_desc_pp = &r_tx_desc; 706 while ((tx_rd_index != head_index) && 707 (tx_ring_p->descs_pending != 0)) { 708 HXGE_DEBUG_MSG((hxgep, TX_CTL, 709 "==> hxge_txdma_reclaim: Checking if pending")); 710 HXGE_DEBUG_MSG((hxgep, TX_CTL, 711 "==> hxge_txdma_reclaim: descs_pending %d ", 712 tx_ring_p->descs_pending)); 713 HXGE_DEBUG_MSG((hxgep, TX_CTL, 714 "==> hxge_txdma_reclaim: " 715 "(tx_rd_index %d head_index %d (tx_desc_p $%p)", 716 tx_rd_index, head_index, tx_desc_p)); 717 718 tx_desc_pp->value = tx_desc_p->value; 719 HXGE_DEBUG_MSG((hxgep, TX_CTL, 720 "==> hxge_txdma_reclaim: " 721 "(tx_rd_index %d head_index %d " 722 "tx_desc_p $%p (desc value 0x%llx) ", 723 tx_rd_index, head_index, 724 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 725 HXGE_DEBUG_MSG((hxgep, TX_CTL, 726 "==> hxge_txdma_reclaim: dump desc:")); 727 728 /* 729 * tdc_byte_cnt reg can be used to get bytes 730 * transmitted 731 */ 732 pkt_len = tx_desc_pp->bits.tr_len; 733 tdc_stats->obytes += pkt_len; 734 tdc_stats->opackets += tx_desc_pp->bits.sop; 735 HXGE_DEBUG_MSG((hxgep, TX_CTL, 736 "==> hxge_txdma_reclaim: pkt_len %d " 737 "tdc channel %d opackets %d", 738 pkt_len, tdc, tdc_stats->opackets)); 739 740 if (tx_msg_p->flags.dma_type == USE_DVMA) { 741 HXGE_DEBUG_MSG((hxgep, TX_CTL, 742 "tx_desc_p = $%p tx_desc_pp = $%p " 743 "index = %d", 744 tx_desc_p, tx_desc_pp, 745 tx_ring_p->rd_index)); 746 (void) dvma_unload(tx_msg_p->dvma_handle, 747 0, -1); 748 tx_msg_p->dvma_handle = NULL; 749 if (tx_ring_p->dvma_wr_index == 750 tx_ring_p->dvma_wrap_mask) { 751 tx_ring_p->dvma_wr_index = 0; 752 } else { 753 tx_ring_p->dvma_wr_index++; 754 } 755 tx_ring_p->dvma_pending--; 756 } 
else if (tx_msg_p->flags.dma_type == USE_DMA) { 757 HXGE_DEBUG_MSG((hxgep, TX_CTL, 758 "==> hxge_txdma_reclaim: USE DMA")); 759 if (rc = ddi_dma_unbind_handle 760 (tx_msg_p->dma_handle)) { 761 cmn_err(CE_WARN, "hxge_reclaim: " 762 "ddi_dma_unbind_handle " 763 "failed. status %d", rc); 764 } 765 } 766 767 HXGE_DEBUG_MSG((hxgep, TX_CTL, 768 "==> hxge_txdma_reclaim: count packets")); 769 770 /* 771 * count a chained packet only once. 772 */ 773 if (tx_msg_p->tx_message != NULL) { 774 freemsg(tx_msg_p->tx_message); 775 tx_msg_p->tx_message = NULL; 776 } 777 tx_msg_p->flags.dma_type = USE_NONE; 778 tx_rd_index = tx_ring_p->rd_index; 779 tx_rd_index = (tx_rd_index + 1) & 780 tx_ring_p->tx_wrap_mask; 781 tx_ring_p->rd_index = tx_rd_index; 782 tx_ring_p->descs_pending--; 783 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 784 tx_msg_p = &tx_msg_ring[tx_rd_index]; 785 } 786 787 status = (nmblks <= (tx_ring_p->tx_ring_size - 788 tx_ring_p->descs_pending - TX_FULL_MARK)); 789 if (status) { 790 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 791 } 792 } else { 793 status = (nmblks <= (tx_ring_p->tx_ring_size - 794 tx_ring_p->descs_pending - TX_FULL_MARK)); 795 } 796 797 HXGE_DEBUG_MSG((hxgep, TX_CTL, 798 "<== hxge_txdma_reclaim status = 0x%08x", status)); 799 return (status); 800 } 801 802 uint_t 803 hxge_tx_intr(caddr_t arg1, caddr_t arg2) 804 { 805 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 806 p_hxge_t hxgep = (p_hxge_t)arg2; 807 p_hxge_ldg_t ldgp; 808 uint8_t channel; 809 uint32_t vindex; 810 hpi_handle_t handle; 811 tdc_stat_t cs; 812 p_tx_ring_t *tx_rings; 813 p_tx_ring_t tx_ring_p; 814 hpi_status_t rs = HPI_SUCCESS; 815 uint_t serviced = DDI_INTR_UNCLAIMED; 816 hxge_status_t status = HXGE_OK; 817 818 if (ldvp == NULL) { 819 HXGE_DEBUG_MSG((NULL, INT_CTL, 820 "<== hxge_tx_intr: hxgep $%p ldvp $%p", hxgep, ldvp)); 821 return (DDI_INTR_UNCLAIMED); 822 } 823 824 if (arg2 == NULL || (void *) ldvp->hxgep != arg2) { 825 hxgep = ldvp->hxgep; 826 } 827 828 /* 829 * If the interface is not started, just swallow the interrupt 830 * and don't rearm the logical device. 831 */ 832 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) 833 return (DDI_INTR_CLAIMED); 834 835 HXGE_DEBUG_MSG((hxgep, INT_CTL, 836 "==> hxge_tx_intr: hxgep(arg2) $%p ldvp(arg1) $%p", hxgep, ldvp)); 837 838 /* 839 * This interrupt handler is for a specific transmit dma channel. 840 */ 841 handle = HXGE_DEV_HPI_HANDLE(hxgep); 842 843 /* Get the control and status for this channel. */ 844 channel = ldvp->channel; 845 ldgp = ldvp->ldgp; 846 HXGE_DEBUG_MSG((hxgep, INT_CTL, 847 "==> hxge_tx_intr: hxgep $%p ldvp (ldvp) $%p channel %d", 848 hxgep, ldvp, channel)); 849 850 rs = hpi_txdma_control_status(handle, OP_GET, channel, &cs); 851 vindex = ldvp->vdma_index; 852 HXGE_DEBUG_MSG((hxgep, INT_CTL, 853 "==> hxge_tx_intr:channel %d ring index %d status 0x%08x", 854 channel, vindex, rs)); 855 856 if (!rs && cs.bits.marked) { 857 HXGE_DEBUG_MSG((hxgep, INT_CTL, 858 "==> hxge_tx_intr:channel %d ring index %d " 859 "status 0x%08x (marked bit set)", channel, vindex, rs)); 860 tx_rings = hxgep->tx_rings->rings; 861 tx_ring_p = tx_rings[vindex]; 862 HXGE_DEBUG_MSG((hxgep, INT_CTL, 863 "==> hxge_tx_intr:channel %d ring index %d " 864 "status 0x%08x (marked bit set, calling reclaim)", 865 channel, vindex, rs)); 866 867 MUTEX_ENTER(&tx_ring_p->lock); 868 (void) hxge_txdma_reclaim(hxgep, tx_rings[vindex], 0); 869 MUTEX_EXIT(&tx_ring_p->lock); 870 mac_tx_update(hxgep->mach); 871 } 872 873 /* 874 * Process other transmit control and status. Check the ldv state. 
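 * hxge_tx_err_evnts() examines the error bits in the control/status word;
 * the value is then written back to TDC_STAT to clear those bits, and the
 * logical device group is rearmed only when no fatal error was detected
 * and the group contains a single device.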
875 */ 876 status = hxge_tx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs); 877 878 /* Clear the error bits */ 879 RXDMA_REG_WRITE64(handle, TDC_STAT, channel, cs.value); 880 881 /* 882 * Rearm this logical group if this is a single device group. 883 */ 884 if (ldgp->nldvs == 1) { 885 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_tx_intr: rearm")); 886 if (status == HXGE_OK) { 887 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, 888 B_TRUE, ldgp->ldg_timer); 889 } 890 } 891 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_tx_intr")); 892 serviced = DDI_INTR_CLAIMED; 893 return (serviced); 894 } 895 896 void 897 hxge_txdma_stop(p_hxge_t hxgep) 898 { 899 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop")); 900 901 (void) hxge_tx_vmac_disable(hxgep); 902 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP); 903 904 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop")); 905 } 906 907 hxge_status_t 908 hxge_txdma_hw_mode(p_hxge_t hxgep, boolean_t enable) 909 { 910 int i, ndmas; 911 uint16_t channel; 912 p_tx_rings_t tx_rings; 913 p_tx_ring_t *tx_desc_rings; 914 hpi_handle_t handle; 915 hpi_status_t rs = HPI_SUCCESS; 916 hxge_status_t status = HXGE_OK; 917 918 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 919 "==> hxge_txdma_hw_mode: enable mode %d", enable)); 920 921 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 922 HXGE_DEBUG_MSG((hxgep, TX_CTL, 923 "<== hxge_txdma_mode: not initialized")); 924 return (HXGE_ERROR); 925 } 926 tx_rings = hxgep->tx_rings; 927 if (tx_rings == NULL) { 928 HXGE_DEBUG_MSG((hxgep, TX_CTL, 929 "<== hxge_txdma_hw_mode: NULL global ring pointer")); 930 return (HXGE_ERROR); 931 } 932 tx_desc_rings = tx_rings->rings; 933 if (tx_desc_rings == NULL) { 934 HXGE_DEBUG_MSG((hxgep, TX_CTL, 935 "<== hxge_txdma_hw_mode: NULL rings pointer")); 936 return (HXGE_ERROR); 937 } 938 ndmas = tx_rings->ndmas; 939 if (!ndmas) { 940 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 941 "<== hxge_txdma_hw_mode: no dma channel allocated")); 942 return (HXGE_ERROR); 943 } 944 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_mode: " 945 "tx_rings $%p tx_desc_rings $%p ndmas %d", 946 tx_rings, tx_desc_rings, ndmas)); 947 948 handle = HXGE_DEV_HPI_HANDLE(hxgep); 949 for (i = 0; i < ndmas; i++) { 950 if (tx_desc_rings[i] == NULL) { 951 continue; 952 } 953 channel = tx_desc_rings[i]->tdc; 954 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 955 "==> hxge_txdma_hw_mode: channel %d", channel)); 956 if (enable) { 957 rs = hpi_txdma_channel_enable(handle, channel); 958 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 959 "==> hxge_txdma_hw_mode: channel %d (enable) " 960 "rs 0x%x", channel, rs)); 961 } else { 962 /* 963 * Stop the dma channel and waits for the stop done. If 964 * the stop done bit is not set, then force an error so 965 * TXC will stop. All channels bound to this port need 966 * to be stopped and reset after injecting an interrupt 967 * error. 968 */ 969 rs = hpi_txdma_channel_disable(handle, channel); 970 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 971 "==> hxge_txdma_hw_mode: channel %d (disable) " 972 "rs 0x%x", channel, rs)); 973 } 974 } 975 976 status = ((rs == HPI_SUCCESS) ? 
HXGE_OK : HXGE_ERROR | rs); 977 978 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 979 "<== hxge_txdma_hw_mode: status 0x%x", status)); 980 981 return (status); 982 } 983 984 void 985 hxge_txdma_enable_channel(p_hxge_t hxgep, uint16_t channel) 986 { 987 hpi_handle_t handle; 988 989 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 990 "==> hxge_txdma_enable_channel: channel %d", channel)); 991 992 handle = HXGE_DEV_HPI_HANDLE(hxgep); 993 /* enable the transmit dma channels */ 994 (void) hpi_txdma_channel_enable(handle, channel); 995 996 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_txdma_enable_channel")); 997 } 998 999 void 1000 hxge_txdma_disable_channel(p_hxge_t hxgep, uint16_t channel) 1001 { 1002 hpi_handle_t handle; 1003 1004 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 1005 "==> hxge_txdma_disable_channel: channel %d", channel)); 1006 1007 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1008 /* stop the transmit dma channels */ 1009 (void) hpi_txdma_channel_disable(handle, channel); 1010 1011 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_disable_channel")); 1012 } 1013 1014 int 1015 hxge_txdma_stop_inj_err(p_hxge_t hxgep, int channel) 1016 { 1017 hpi_handle_t handle; 1018 int status; 1019 hpi_status_t rs = HPI_SUCCESS; 1020 1021 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_stop_inj_err")); 1022 1023 /* 1024 * Stop the dma channel waits for the stop done. If the stop done bit 1025 * is not set, then create an error. 1026 */ 1027 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1028 rs = hpi_txdma_channel_disable(handle, channel); 1029 status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs); 1030 if (status == HXGE_OK) { 1031 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1032 "<== hxge_txdma_stop_inj_err (channel %d): " 1033 "stopped OK", channel)); 1034 return (status); 1035 } 1036 1037 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1038 "==> hxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1039 " (injected error but still not stopped)", channel, rs)); 1040 1041 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_stop_inj_err")); 1042 1043 return (status); 1044 } 1045 1046 /*ARGSUSED*/ 1047 void 1048 hxge_fixup_txdma_rings(p_hxge_t hxgep) 1049 { 1050 int index, ndmas; 1051 uint16_t channel; 1052 p_tx_rings_t tx_rings; 1053 1054 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_txdma_rings")); 1055 1056 /* 1057 * For each transmit channel, reclaim each descriptor and free buffers. 
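 * The per-channel work is done in hxge_txdma_fixup_channel(), which
 * reclaims outstanding descriptors and then resets the ring's read and
 * write indexes and its head/kick shadow values.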
1058 */ 1059 tx_rings = hxgep->tx_rings; 1060 if (tx_rings == NULL) { 1061 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1062 "<== hxge_fixup_txdma_rings: NULL ring pointer")); 1063 return; 1064 } 1065 1066 ndmas = tx_rings->ndmas; 1067 if (!ndmas) { 1068 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1069 "<== hxge_fixup_txdma_rings: no channel allocated")); 1070 return; 1071 } 1072 1073 if (tx_rings->rings == NULL) { 1074 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1075 "<== hxge_fixup_txdma_rings: NULL rings pointer")); 1076 return; 1077 } 1078 1079 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_fixup_txdma_rings: " 1080 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1081 tx_rings, tx_rings->rings, ndmas)); 1082 1083 for (index = 0; index < ndmas; index++) { 1084 channel = tx_rings->rings[index]->tdc; 1085 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1086 "==> hxge_fixup_txdma_rings: channel %d", channel)); 1087 hxge_txdma_fixup_channel(hxgep, tx_rings->rings[index], 1088 channel); 1089 } 1090 1091 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_txdma_rings")); 1092 } 1093 1094 /*ARGSUSED*/ 1095 void 1096 hxge_txdma_fix_channel(p_hxge_t hxgep, uint16_t channel) 1097 { 1098 p_tx_ring_t ring_p; 1099 1100 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_channel")); 1101 1102 ring_p = hxge_txdma_get_ring(hxgep, channel); 1103 if (ring_p == NULL) { 1104 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel")); 1105 return; 1106 } 1107 1108 if (ring_p->tdc != channel) { 1109 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1110 "<== hxge_txdma_fix_channel: channel not matched " 1111 "ring tdc %d passed channel", ring_p->tdc, channel)); 1112 return; 1113 } 1114 1115 hxge_txdma_fixup_channel(hxgep, ring_p, channel); 1116 1117 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fix_channel")); 1118 } 1119 1120 /*ARGSUSED*/ 1121 void 1122 hxge_txdma_fixup_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel) 1123 { 1124 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_channel")); 1125 1126 if (ring_p == NULL) { 1127 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1128 "<== hxge_txdma_fixup_channel: NULL ring pointer")); 1129 return; 1130 } 1131 if (ring_p->tdc != channel) { 1132 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1133 "<== hxge_txdma_fixup_channel: channel not matched " 1134 "ring tdc %d passed channel", ring_p->tdc, channel)); 1135 return; 1136 } 1137 MUTEX_ENTER(&ring_p->lock); 1138 (void) hxge_txdma_reclaim(hxgep, ring_p, 0); 1139 1140 ring_p->rd_index = 0; 1141 ring_p->wr_index = 0; 1142 ring_p->ring_head.value = 0; 1143 ring_p->ring_kick_tail.value = 0; 1144 ring_p->descs_pending = 0; 1145 MUTEX_EXIT(&ring_p->lock); 1146 1147 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_channel")); 1148 } 1149 1150 /*ARGSUSED*/ 1151 void 1152 hxge_txdma_hw_kick(p_hxge_t hxgep) 1153 { 1154 int index, ndmas; 1155 uint16_t channel; 1156 p_tx_rings_t tx_rings; 1157 1158 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick")); 1159 1160 tx_rings = hxgep->tx_rings; 1161 if (tx_rings == NULL) { 1162 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1163 "<== hxge_txdma_hw_kick: NULL ring pointer")); 1164 return; 1165 } 1166 ndmas = tx_rings->ndmas; 1167 if (!ndmas) { 1168 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1169 "<== hxge_txdma_hw_kick: no channel allocated")); 1170 return; 1171 } 1172 if (tx_rings->rings == NULL) { 1173 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1174 "<== hxge_txdma_hw_kick: NULL rings pointer")); 1175 return; 1176 } 1177 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_kick: " 1178 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1179 tx_rings, tx_rings->rings, ndmas)); 1180 1181 for (index = 0; index < 
ndmas; index++) { 1182 channel = tx_rings->rings[index]->tdc; 1183 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1184 "==> hxge_txdma_hw_kick: channel %d", channel)); 1185 hxge_txdma_hw_kick_channel(hxgep, tx_rings->rings[index], 1186 channel); 1187 } 1188 1189 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick")); 1190 } 1191 1192 /*ARGSUSED*/ 1193 void 1194 hxge_txdma_kick_channel(p_hxge_t hxgep, uint16_t channel) 1195 { 1196 p_tx_ring_t ring_p; 1197 1198 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_kick_channel")); 1199 1200 ring_p = hxge_txdma_get_ring(hxgep, channel); 1201 if (ring_p == NULL) { 1202 HXGE_DEBUG_MSG((hxgep, TX_CTL, " hxge_txdma_kick_channel")); 1203 return; 1204 } 1205 1206 if (ring_p->tdc != channel) { 1207 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1208 "<== hxge_txdma_kick_channel: channel not matched " 1209 "ring tdc %d passed channel", ring_p->tdc, channel)); 1210 return; 1211 } 1212 1213 hxge_txdma_hw_kick_channel(hxgep, ring_p, channel); 1214 1215 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_kick_channel")); 1216 } 1217 1218 /*ARGSUSED*/ 1219 void 1220 hxge_txdma_hw_kick_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, uint16_t channel) 1221 { 1222 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hw_kick_channel")); 1223 1224 if (ring_p == NULL) { 1225 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1226 "<== hxge_txdma_hw_kick_channel: NULL ring pointer")); 1227 return; 1228 } 1229 1230 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hw_kick_channel")); 1231 } 1232 1233 /*ARGSUSED*/ 1234 void 1235 hxge_check_tx_hang(p_hxge_t hxgep) 1236 { 1237 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_check_tx_hang")); 1238 1239 /* 1240 * Needs inputs from hardware for regs: head index had not moved since 1241 * last timeout. packets not transmitted or stuffed registers. 1242 */ 1243 if (hxge_txdma_hung(hxgep)) { 1244 hxge_fixup_hung_txdma_rings(hxgep); 1245 } 1246 1247 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_check_tx_hang")); 1248 } 1249 1250 int 1251 hxge_txdma_hung(p_hxge_t hxgep) 1252 { 1253 int index, ndmas; 1254 uint16_t channel; 1255 p_tx_rings_t tx_rings; 1256 p_tx_ring_t tx_ring_p; 1257 1258 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_hung")); 1259 1260 tx_rings = hxgep->tx_rings; 1261 if (tx_rings == NULL) { 1262 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1263 "<== hxge_txdma_hung: NULL ring pointer")); 1264 return (B_FALSE); 1265 } 1266 1267 ndmas = tx_rings->ndmas; 1268 if (!ndmas) { 1269 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1270 "<== hxge_txdma_hung: no channel allocated")); 1271 return (B_FALSE); 1272 } 1273 1274 if (tx_rings->rings == NULL) { 1275 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1276 "<== hxge_txdma_hung: NULL rings pointer")); 1277 return (B_FALSE); 1278 } 1279 1280 for (index = 0; index < ndmas; index++) { 1281 channel = tx_rings->rings[index]->tdc; 1282 tx_ring_p = tx_rings->rings[index]; 1283 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1284 "==> hxge_txdma_hung: channel %d", channel)); 1285 if (hxge_txdma_channel_hung(hxgep, tx_ring_p, channel)) { 1286 return (B_TRUE); 1287 } 1288 } 1289 1290 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_hung")); 1291 1292 return (B_FALSE); 1293 } 1294 1295 int 1296 hxge_txdma_channel_hung(p_hxge_t hxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 1297 { 1298 uint16_t head_index, tail_index; 1299 boolean_t head_wrap, tail_wrap; 1300 hpi_handle_t handle; 1301 tdc_tdr_head_t tx_head; 1302 uint_t tx_rd_index; 1303 1304 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung")); 1305 1306 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1307 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1308 "==> hxge_txdma_channel_hung: 
channel %d", channel)); 1309 MUTEX_ENTER(&tx_ring_p->lock); 1310 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0); 1311 1312 tail_index = tx_ring_p->wr_index; 1313 tail_wrap = tx_ring_p->wr_index_wrap; 1314 tx_rd_index = tx_ring_p->rd_index; 1315 MUTEX_EXIT(&tx_ring_p->lock); 1316 1317 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1318 "==> hxge_txdma_channel_hung: tdc %d tx_rd_index %d " 1319 "tail_index %d tail_wrap %d ", 1320 channel, tx_rd_index, tail_index, tail_wrap)); 1321 /* 1322 * Read the hardware maintained transmit head and wrap around bit. 1323 */ 1324 (void) hpi_txdma_ring_head_get(handle, channel, &tx_head); 1325 head_index = tx_head.bits.head; 1326 head_wrap = tx_head.bits.wrap; 1327 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_channel_hung: " 1328 "tx_rd_index %d tail %d tail_wrap %d head %d wrap %d", 1329 tx_rd_index, tail_index, tail_wrap, head_index, head_wrap)); 1330 1331 if (TXDMA_RING_EMPTY(head_index, head_wrap, tail_index, tail_wrap) && 1332 (head_index == tx_rd_index)) { 1333 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1334 "==> hxge_txdma_channel_hung: EMPTY")); 1335 return (B_FALSE); 1336 } 1337 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1338 "==> hxge_txdma_channel_hung: Checking if ring full")); 1339 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, tail_wrap)) { 1340 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1341 "==> hxge_txdma_channel_hung: full")); 1342 return (B_TRUE); 1343 } 1344 1345 /* If not full, check with hardware to see if it is hung */ 1346 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_channel_hung")); 1347 1348 return (B_FALSE); 1349 } 1350 1351 /*ARGSUSED*/ 1352 void 1353 hxge_fixup_hung_txdma_rings(p_hxge_t hxgep) 1354 { 1355 int index, ndmas; 1356 uint16_t channel; 1357 p_tx_rings_t tx_rings; 1358 1359 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings")); 1360 tx_rings = hxgep->tx_rings; 1361 if (tx_rings == NULL) { 1362 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1363 "<== hxge_fixup_hung_txdma_rings: NULL ring pointer")); 1364 return; 1365 } 1366 ndmas = tx_rings->ndmas; 1367 if (!ndmas) { 1368 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1369 "<== hxge_fixup_hung_txdma_rings: no channel allocated")); 1370 return; 1371 } 1372 if (tx_rings->rings == NULL) { 1373 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1374 "<== hxge_fixup_hung_txdma_rings: NULL rings pointer")); 1375 return; 1376 } 1377 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_fixup_hung_txdma_rings: " 1378 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1379 tx_rings, tx_rings->rings, ndmas)); 1380 1381 for (index = 0; index < ndmas; index++) { 1382 channel = tx_rings->rings[index]->tdc; 1383 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1384 "==> hxge_fixup_hung_txdma_rings: channel %d", channel)); 1385 hxge_txdma_fixup_hung_channel(hxgep, tx_rings->rings[index], 1386 channel); 1387 } 1388 1389 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_fixup_hung_txdma_rings")); 1390 } 1391 1392 /*ARGSUSED*/ 1393 void 1394 hxge_txdma_fix_hung_channel(p_hxge_t hxgep, uint16_t channel) 1395 { 1396 p_tx_ring_t ring_p; 1397 1398 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fix_hung_channel")); 1399 ring_p = hxge_txdma_get_ring(hxgep, channel); 1400 if (ring_p == NULL) { 1401 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1402 "<== hxge_txdma_fix_hung_channel")); 1403 return; 1404 } 1405 if (ring_p->tdc != channel) { 1406 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1407 "<== hxge_txdma_fix_hung_channel: channel not matched " 1408 "ring tdc %d passed channel", ring_p->tdc, channel)); 1409 return; 1410 } 1411 hxge_txdma_fixup_channel(hxgep, ring_p, channel); 1412 1413 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== 
hxge_txdma_fix_hung_channel")); 1414 } 1415 1416 /*ARGSUSED*/ 1417 void 1418 hxge_txdma_fixup_hung_channel(p_hxge_t hxgep, p_tx_ring_t ring_p, 1419 uint16_t channel) 1420 { 1421 hpi_handle_t handle; 1422 int status = HXGE_OK; 1423 1424 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_fixup_hung_channel")); 1425 1426 if (ring_p == NULL) { 1427 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1428 "<== hxge_txdma_fixup_hung_channel: NULL ring pointer")); 1429 return; 1430 } 1431 if (ring_p->tdc != channel) { 1432 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1433 "<== hxge_txdma_fixup_hung_channel: channel " 1434 "not matched ring tdc %d passed channel", 1435 ring_p->tdc, channel)); 1436 return; 1437 } 1438 /* Reclaim descriptors */ 1439 MUTEX_ENTER(&ring_p->lock); 1440 (void) hxge_txdma_reclaim(hxgep, ring_p, 0); 1441 MUTEX_EXIT(&ring_p->lock); 1442 1443 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1444 /* 1445 * Stop the dma channel waits for the stop done. If the stop done bit 1446 * is not set, then force an error. 1447 */ 1448 status = hpi_txdma_channel_disable(handle, channel); 1449 if (!(status & HPI_TXDMA_STOP_FAILED)) { 1450 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1451 "<== hxge_txdma_fixup_hung_channel: stopped OK " 1452 "ring tdc %d passed channel %d", ring_p->tdc, channel)); 1453 return; 1454 } 1455 /* Stop done bit will be set as a result of error injection */ 1456 status = hpi_txdma_channel_disable(handle, channel); 1457 if (!(status & HPI_TXDMA_STOP_FAILED)) { 1458 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1459 "<== hxge_txdma_fixup_hung_channel: stopped again" 1460 "ring tdc %d passed channel", ring_p->tdc, channel)); 1461 return; 1462 } 1463 1464 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1465 "<== hxge_txdma_fixup_hung_channel: stop done still not set!! " 1466 "ring tdc %d passed channel", ring_p->tdc, channel)); 1467 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_fixup_hung_channel")); 1468 } 1469 1470 /*ARGSUSED*/ 1471 void 1472 hxge_reclaim_rings(p_hxge_t hxgep) 1473 { 1474 int index, ndmas; 1475 uint16_t channel; 1476 p_tx_rings_t tx_rings; 1477 p_tx_ring_t tx_ring_p; 1478 1479 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclaim_ring")); 1480 tx_rings = hxgep->tx_rings; 1481 if (tx_rings == NULL) { 1482 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1483 "<== hxge_reclain_rimgs: NULL ring pointer")); 1484 return; 1485 } 1486 ndmas = tx_rings->ndmas; 1487 if (!ndmas) { 1488 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1489 "<== hxge_reclain_rimgs: no channel allocated")); 1490 return; 1491 } 1492 if (tx_rings->rings == NULL) { 1493 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1494 "<== hxge_reclain_rimgs: NULL rings pointer")); 1495 return; 1496 } 1497 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_reclain_rimgs: " 1498 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1499 tx_rings, tx_rings->rings, ndmas)); 1500 1501 for (index = 0; index < ndmas; index++) { 1502 channel = tx_rings->rings[index]->tdc; 1503 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> reclain_rimgs: channel %d", 1504 channel)); 1505 tx_ring_p = tx_rings->rings[index]; 1506 MUTEX_ENTER(&tx_ring_p->lock); 1507 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, channel); 1508 MUTEX_EXIT(&tx_ring_p->lock); 1509 } 1510 1511 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_reclaim_rings")); 1512 } 1513 1514 /* 1515 * Static functions start here. 
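 * The functions below are the per-channel mapping/unmapping and
 * start/stop helpers used by hxge_init_txdma_channels() and
 * hxge_uninit_txdma_channels() above.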
1516 */ 1517 static hxge_status_t 1518 hxge_map_txdma(p_hxge_t hxgep) 1519 { 1520 int i, ndmas; 1521 uint16_t channel; 1522 p_tx_rings_t tx_rings; 1523 p_tx_ring_t *tx_desc_rings; 1524 p_tx_mbox_areas_t tx_mbox_areas_p; 1525 p_tx_mbox_t *tx_mbox_p; 1526 p_hxge_dma_pool_t dma_buf_poolp; 1527 p_hxge_dma_pool_t dma_cntl_poolp; 1528 p_hxge_dma_common_t *dma_buf_p; 1529 p_hxge_dma_common_t *dma_cntl_p; 1530 hxge_status_t status = HXGE_OK; 1531 1532 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma")); 1533 1534 dma_buf_poolp = hxgep->tx_buf_pool_p; 1535 dma_cntl_poolp = hxgep->tx_cntl_pool_p; 1536 1537 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1538 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1539 "==> hxge_map_txdma: buf not allocated")); 1540 return (HXGE_ERROR); 1541 } 1542 ndmas = dma_buf_poolp->ndmas; 1543 if (!ndmas) { 1544 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1545 "<== hxge_map_txdma: no dma allocated")); 1546 return (HXGE_ERROR); 1547 } 1548 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 1549 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 1550 1551 tx_rings = (p_tx_rings_t)KMEM_ZALLOC(sizeof (tx_rings_t), KM_SLEEP); 1552 tx_desc_rings = (p_tx_ring_t *)KMEM_ZALLOC( 1553 sizeof (p_tx_ring_t) * ndmas, KM_SLEEP); 1554 1555 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: " 1556 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 1557 1558 tx_mbox_areas_p = (p_tx_mbox_areas_t) 1559 KMEM_ZALLOC(sizeof (tx_mbox_areas_t), KM_SLEEP); 1560 tx_mbox_p = (p_tx_mbox_t *)KMEM_ZALLOC( 1561 sizeof (p_tx_mbox_t) * ndmas, KM_SLEEP); 1562 1563 /* 1564 * Map descriptors from the buffer pools for each dma channel. 1565 */ 1566 for (i = 0; i < ndmas; i++) { 1567 /* 1568 * Set up and prepare buffer blocks, descriptors and mailbox. 1569 */ 1570 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 1571 status = hxge_map_txdma_channel(hxgep, channel, 1572 (p_hxge_dma_common_t *)&dma_buf_p[i], 1573 (p_tx_ring_t *)&tx_desc_rings[i], 1574 dma_buf_poolp->num_chunks[i], 1575 (p_hxge_dma_common_t *)&dma_cntl_p[i], 1576 (p_tx_mbox_t *)&tx_mbox_p[i]); 1577 if (status != HXGE_OK) { 1578 goto hxge_map_txdma_fail1; 1579 } 1580 tx_desc_rings[i]->index = (uint16_t)i; 1581 tx_desc_rings[i]->tdc_stats = &hxgep->statsp->tdc_stats[i]; 1582 } 1583 1584 tx_rings->ndmas = ndmas; 1585 tx_rings->rings = tx_desc_rings; 1586 hxgep->tx_rings = tx_rings; 1587 tx_mbox_areas_p->txmbox_areas_p = tx_mbox_p; 1588 hxgep->tx_mbox_areas_p = tx_mbox_areas_p; 1589 1590 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: " 1591 "tx_rings $%p rings $%p", hxgep->tx_rings, hxgep->tx_rings->rings)); 1592 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma: " 1593 "tx_rings $%p tx_desc_rings $%p", 1594 hxgep->tx_rings, tx_desc_rings)); 1595 1596 goto hxge_map_txdma_exit; 1597 1598 hxge_map_txdma_fail1: 1599 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1600 "==> hxge_map_txdma: uninit tx desc " 1601 "(status 0x%x channel %d i %d)", hxgep, status, channel, i)); 1602 i--; 1603 for (; i >= 0; i--) { 1604 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 1605 hxge_unmap_txdma_channel(hxgep, channel, tx_desc_rings[i], 1606 tx_mbox_p[i]); 1607 } 1608 1609 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 1610 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 1611 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 1612 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 1613 1614 hxge_map_txdma_exit: 1615 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1616 "==> hxge_map_txdma: (status 0x%x channel %d)", status, channel)); 1617 1618 return 
(status); 1619 } 1620 1621 static void 1622 hxge_unmap_txdma(p_hxge_t hxgep) 1623 { 1624 int i, ndmas; 1625 uint8_t channel; 1626 p_tx_rings_t tx_rings; 1627 p_tx_ring_t *tx_desc_rings; 1628 p_tx_mbox_areas_t tx_mbox_areas_p; 1629 p_tx_mbox_t *tx_mbox_p; 1630 p_hxge_dma_pool_t dma_buf_poolp; 1631 1632 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma")); 1633 1634 dma_buf_poolp = hxgep->tx_buf_pool_p; 1635 if (!dma_buf_poolp->buf_allocated) { 1636 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1637 "==> hxge_unmap_txdma: buf not allocated")); 1638 return; 1639 } 1640 ndmas = dma_buf_poolp->ndmas; 1641 if (!ndmas) { 1642 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1643 "<== hxge_unmap_txdma: no dma allocated")); 1644 return; 1645 } 1646 tx_rings = hxgep->tx_rings; 1647 tx_desc_rings = tx_rings->rings; 1648 if (tx_rings == NULL) { 1649 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1650 "<== hxge_unmap_txdma: NULL ring pointer")); 1651 return; 1652 } 1653 tx_desc_rings = tx_rings->rings; 1654 if (tx_desc_rings == NULL) { 1655 HXGE_DEBUG_MSG((hxgep, TX_CTL, 1656 "<== hxge_unmap_txdma: NULL ring pointers")); 1657 return; 1658 } 1659 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_unmap_txdma: " 1660 "tx_rings $%p tx_desc_rings $%p ndmas %d", 1661 tx_rings, tx_desc_rings, ndmas)); 1662 1663 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 1664 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 1665 1666 for (i = 0; i < ndmas; i++) { 1667 channel = tx_desc_rings[i]->tdc; 1668 (void) hxge_unmap_txdma_channel(hxgep, channel, 1669 (p_tx_ring_t)tx_desc_rings[i], 1670 (p_tx_mbox_t)tx_mbox_p[i]); 1671 } 1672 1673 KMEM_FREE(tx_desc_rings, sizeof (p_tx_ring_t) * ndmas); 1674 KMEM_FREE(tx_rings, sizeof (tx_rings_t)); 1675 KMEM_FREE(tx_mbox_p, sizeof (p_tx_mbox_t) * ndmas); 1676 KMEM_FREE(tx_mbox_areas_p, sizeof (tx_mbox_areas_t)); 1677 1678 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma")); 1679 } 1680 1681 static hxge_status_t 1682 hxge_map_txdma_channel(p_hxge_t hxgep, uint16_t channel, 1683 p_hxge_dma_common_t *dma_buf_p, p_tx_ring_t *tx_desc_p, 1684 uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p, 1685 p_tx_mbox_t *tx_mbox_p) 1686 { 1687 int status = HXGE_OK; 1688 1689 /* 1690 * Set up and prepare buffer blocks, descriptors and mailbox. 1691 */ 1692 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1693 "==> hxge_map_txdma_channel (channel %d)", channel)); 1694 1695 /* 1696 * Transmit buffer blocks 1697 */ 1698 status = hxge_map_txdma_channel_buf_ring(hxgep, channel, 1699 dma_buf_p, tx_desc_p, num_chunks); 1700 if (status != HXGE_OK) { 1701 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1702 "==> hxge_map_txdma_channel (channel %d): " 1703 "map buffer failed 0x%x", channel, status)); 1704 goto hxge_map_txdma_channel_exit; 1705 } 1706 /* 1707 * Transmit block ring, and mailbox. 
1708 */ 1709 hxge_map_txdma_channel_cfg_ring(hxgep, channel, dma_cntl_p, *tx_desc_p, 1710 tx_mbox_p); 1711 1712 goto hxge_map_txdma_channel_exit; 1713 1714 hxge_map_txdma_channel_fail1: 1715 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1716 "==> hxge_map_txdma_channel: unmap buf" 1717 "(status 0x%x channel %d)", status, channel)); 1718 hxge_unmap_txdma_channel_buf_ring(hxgep, *tx_desc_p); 1719 1720 hxge_map_txdma_channel_exit: 1721 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1722 "<== hxge_map_txdma_channel: (status 0x%x channel %d)", 1723 status, channel)); 1724 1725 return (status); 1726 } 1727 1728 /*ARGSUSED*/ 1729 static void 1730 hxge_unmap_txdma_channel(p_hxge_t hxgep, uint16_t channel, 1731 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 1732 { 1733 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1734 "==> hxge_unmap_txdma_channel (channel %d)", channel)); 1735 1736 /* unmap tx block ring, and mailbox. */ 1737 (void) hxge_unmap_txdma_channel_cfg_ring(hxgep, tx_ring_p, tx_mbox_p); 1738 1739 /* unmap buffer blocks */ 1740 (void) hxge_unmap_txdma_channel_buf_ring(hxgep, tx_ring_p); 1741 1742 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_unmap_txdma_channel")); 1743 } 1744 1745 /*ARGSUSED*/ 1746 static void 1747 hxge_map_txdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel, 1748 p_hxge_dma_common_t *dma_cntl_p, p_tx_ring_t tx_ring_p, 1749 p_tx_mbox_t *tx_mbox_p) 1750 { 1751 p_tx_mbox_t mboxp; 1752 p_hxge_dma_common_t cntl_dmap; 1753 p_hxge_dma_common_t dmap; 1754 tdc_tdr_cfg_t *tx_ring_cfig_p; 1755 tdc_tdr_kick_t *tx_ring_kick_p; 1756 tdc_tdr_cfg_t *tx_cs_p; 1757 tdc_int_mask_t *tx_evmask_p; 1758 tdc_mbh_t *mboxh_p; 1759 tdc_mbl_t *mboxl_p; 1760 uint64_t tx_desc_len; 1761 1762 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1763 "==> hxge_map_txdma_channel_cfg_ring")); 1764 1765 cntl_dmap = *dma_cntl_p; 1766 1767 dmap = (p_hxge_dma_common_t)&tx_ring_p->tdc_desc; 1768 hxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 1769 sizeof (tx_desc_t)); 1770 1771 /* 1772 * Zero out transmit ring descriptors. 
1773 */ 1774 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1775 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 1776 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 1777 tx_cs_p = &(tx_ring_p->tx_cs); 1778 tx_evmask_p = &(tx_ring_p->tx_evmask); 1779 tx_ring_cfig_p->value = 0; 1780 tx_ring_kick_p->value = 0; 1781 tx_cs_p->value = 0; 1782 tx_evmask_p->value = 0; 1783 1784 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1785 "==> hxge_map_txdma_channel_cfg_ring: channel %d des $%p", 1786 dma_channel, dmap->dma_cookie.dmac_laddress)); 1787 1788 tx_ring_cfig_p->value = 0; 1789 1790 /* Hydra len is 11 bits and the lower 5 bits are 0s */ 1791 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 5); 1792 tx_ring_cfig_p->value = 1793 (dmap->dma_cookie.dmac_laddress & TDC_TDR_CFG_ADDR_MASK) | 1794 (tx_desc_len << TDC_TDR_CFG_LEN_SHIFT); 1795 1796 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1797 "==> hxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 1798 dma_channel, tx_ring_cfig_p->value)); 1799 1800 tx_cs_p->bits.reset = 1; 1801 1802 /* Map in mailbox */ 1803 mboxp = (p_tx_mbox_t)KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 1804 dmap = (p_hxge_dma_common_t)&mboxp->tx_mbox; 1805 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 1806 mboxh_p = (tdc_mbh_t *)&tx_ring_p->tx_mbox_mbh; 1807 mboxl_p = (tdc_mbl_t *)&tx_ring_p->tx_mbox_mbl; 1808 mboxh_p->value = mboxl_p->value = 0; 1809 1810 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1811 "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 1812 dmap->dma_cookie.dmac_laddress)); 1813 1814 mboxh_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 1815 TDC_MBH_ADDR_SHIFT) & TDC_MBH_MASK); 1816 mboxl_p->bits.mbaddr = ((dmap->dma_cookie.dmac_laddress & 1817 TDC_MBL_MASK) >> TDC_MBL_SHIFT); 1818 1819 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1820 "==> hxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 1821 dmap->dma_cookie.dmac_laddress)); 1822 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1823 "==> hxge_map_txdma_channel_cfg_ring: hmbox $%p mbox $%p", 1824 mboxh_p->bits.mbaddr, mboxl_p->bits.mbaddr)); 1825 1826 /* 1827 * Set page valid and no mask 1828 */ 1829 tx_ring_p->page_hdl.value = 0; 1830 1831 *tx_mbox_p = mboxp; 1832 1833 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1834 "<== hxge_map_txdma_channel_cfg_ring")); 1835 } 1836 1837 /*ARGSUSED*/ 1838 static void 1839 hxge_unmap_txdma_channel_cfg_ring(p_hxge_t hxgep, 1840 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 1841 { 1842 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1843 "==> hxge_unmap_txdma_channel_cfg_ring: channel %d", 1844 tx_ring_p->tdc)); 1845 1846 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 1847 1848 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1849 "<== hxge_unmap_txdma_channel_cfg_ring")); 1850 } 1851 1852 static hxge_status_t 1853 hxge_map_txdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel, 1854 p_hxge_dma_common_t *dma_buf_p, 1855 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 1856 { 1857 p_hxge_dma_common_t dma_bufp, tmp_bufp; 1858 p_hxge_dma_common_t dmap; 1859 hxge_os_dma_handle_t tx_buf_dma_handle; 1860 p_tx_ring_t tx_ring_p; 1861 p_tx_msg_t tx_msg_ring; 1862 hxge_status_t status = HXGE_OK; 1863 int ddi_status = DDI_SUCCESS; 1864 int i, j, index; 1865 uint32_t size, bsize; 1866 uint32_t nblocks, nmsgs; 1867 1868 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1869 "==> hxge_map_txdma_channel_buf_ring")); 1870 1871 dma_bufp = tmp_bufp = *dma_buf_p; 1872 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1873 " hxge_map_txdma_channel_buf_ring: channel %d to map %d " 1874 "chunks bufp $%p", channel, num_chunks, dma_bufp)); 1875 1876 nmsgs = 0; 1877 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 1878 
nmsgs += tmp_bufp->nblocks; 1879 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1880 "==> hxge_map_txdma_channel_buf_ring: channel %d " 1881 "bufp $%p nblocks %d nmsgs %d", 1882 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 1883 } 1884 if (!nmsgs) { 1885 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1886 "<== hxge_map_txdma_channel_buf_ring: channel %d " 1887 "no msg blocks", channel)); 1888 status = HXGE_ERROR; 1889 1890 goto hxge_map_txdma_channel_buf_ring_exit; 1891 } 1892 tx_ring_p = (p_tx_ring_t)KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 1893 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 1894 (void *) hxgep->interrupt_cookie); 1895 /* 1896 * Allocate transmit message rings and handles for packets not to be 1897 * copied to premapped buffers. 1898 */ 1899 size = nmsgs * sizeof (tx_msg_t); 1900 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 1901 for (i = 0; i < nmsgs; i++) { 1902 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr, 1903 DDI_DMA_DONTWAIT, 0, &tx_msg_ring[i].dma_handle); 1904 if (ddi_status != DDI_SUCCESS) { 1905 status |= HXGE_DDI_FAILED; 1906 break; 1907 } 1908 } 1909 1910 if (i < nmsgs) { 1911 HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, 1912 "Allocate handles failed.")); 1913 1914 goto hxge_map_txdma_channel_buf_ring_fail1; 1915 } 1916 tx_ring_p->tdc = channel; 1917 tx_ring_p->tx_msg_ring = tx_msg_ring; 1918 tx_ring_p->tx_ring_size = nmsgs; 1919 tx_ring_p->num_chunks = num_chunks; 1920 if (!hxge_tx_intr_thres) { 1921 hxge_tx_intr_thres = tx_ring_p->tx_ring_size / 4; 1922 } 1923 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 1924 tx_ring_p->rd_index = 0; 1925 tx_ring_p->wr_index = 0; 1926 tx_ring_p->ring_head.value = 0; 1927 tx_ring_p->ring_kick_tail.value = 0; 1928 tx_ring_p->descs_pending = 0; 1929 1930 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1931 "==> hxge_map_txdma_channel_buf_ring: channel %d " 1932 "actual tx desc max %d nmsgs %d (config hxge_tx_ring_size %d)", 1933 channel, tx_ring_p->tx_ring_size, nmsgs, hxge_tx_ring_size)); 1934 1935 /* 1936 * Map in buffers from the buffer pool. 
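 * The nested loops below walk every chunk, and every block within a
 * chunk, pointing each tx_msg_ring[] entry at a bsize-byte slice of
 * the pre-mapped buffer pool via hxge_setup_dma_common().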
1937 */ 1938 index = 0; 1939 bsize = dma_bufp->block_size; 1940 1941 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_map_txdma_channel_buf_ring: " 1942 "dma_bufp $%p tx_rng_p $%p tx_msg_rng_p $%p bsize %d", 1943 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 1944 1945 tx_buf_dma_handle = dma_bufp->dma_handle; 1946 for (i = 0; i < num_chunks; i++, dma_bufp++) { 1947 bsize = dma_bufp->block_size; 1948 nblocks = dma_bufp->nblocks; 1949 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1950 "==> hxge_map_txdma_channel_buf_ring: dma chunk %d " 1951 "size %d dma_bufp $%p", 1952 i, sizeof (hxge_dma_common_t), dma_bufp)); 1953 1954 for (j = 0; j < nblocks; j++) { 1955 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 1956 dmap = &tx_msg_ring[index++].buf_dma; 1957 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1958 "==> hxge_map_txdma_channel_buf_ring: j %d" 1959 "dmap $%p", i, dmap)); 1960 hxge_setup_dma_common(dmap, dma_bufp, 1, bsize); 1961 } 1962 } 1963 1964 if (i < num_chunks) { 1965 status = HXGE_ERROR; 1966 1967 goto hxge_map_txdma_channel_buf_ring_fail1; 1968 } 1969 1970 *tx_desc_p = tx_ring_p; 1971 1972 goto hxge_map_txdma_channel_buf_ring_exit; 1973 1974 hxge_map_txdma_channel_buf_ring_fail1: 1975 index--; 1976 for (; index >= 0; index--) { 1977 if (tx_msg_ring[index].dma_handle != NULL) { 1978 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 1979 } 1980 } 1981 MUTEX_DESTROY(&tx_ring_p->lock); 1982 KMEM_FREE(tx_msg_ring, size); 1983 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 1984 1985 status = HXGE_ERROR; 1986 1987 hxge_map_txdma_channel_buf_ring_exit: 1988 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 1989 "<== hxge_map_txdma_channel_buf_ring status 0x%x", status)); 1990 1991 return (status); 1992 } 1993 1994 /*ARGSUSED*/ 1995 static void 1996 hxge_unmap_txdma_channel_buf_ring(p_hxge_t hxgep, p_tx_ring_t tx_ring_p) 1997 { 1998 p_tx_msg_t tx_msg_ring; 1999 p_tx_msg_t tx_msg_p; 2000 int i; 2001 2002 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2003 "==> hxge_unmap_txdma_channel_buf_ring")); 2004 if (tx_ring_p == NULL) { 2005 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2006 "<== hxge_unmap_txdma_channel_buf_ring: NULL ringp")); 2007 return; 2008 } 2009 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2010 "==> hxge_unmap_txdma_channel_buf_ring: channel %d", 2011 tx_ring_p->tdc)); 2012 2013 tx_msg_ring = tx_ring_p->tx_msg_ring; 2014 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2015 tx_msg_p = &tx_msg_ring[i]; 2016 if (tx_msg_p->flags.dma_type == USE_DVMA) { 2017 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "entry = %d", i)); 2018 (void) dvma_unload(tx_msg_p->dvma_handle, 0, -1); 2019 tx_msg_p->dvma_handle = NULL; 2020 if (tx_ring_p->dvma_wr_index == 2021 tx_ring_p->dvma_wrap_mask) { 2022 tx_ring_p->dvma_wr_index = 0; 2023 } else { 2024 tx_ring_p->dvma_wr_index++; 2025 } 2026 tx_ring_p->dvma_pending--; 2027 } else if (tx_msg_p->flags.dma_type == USE_DMA) { 2028 if (ddi_dma_unbind_handle(tx_msg_p->dma_handle)) { 2029 cmn_err(CE_WARN, "hxge_unmap_tx_bug_ring: " 2030 "ddi_dma_unbind_handle failed."); 2031 } 2032 } 2033 if (tx_msg_p->tx_message != NULL) { 2034 freemsg(tx_msg_p->tx_message); 2035 tx_msg_p->tx_message = NULL; 2036 } 2037 } 2038 2039 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2040 if (tx_msg_ring[i].dma_handle != NULL) { 2041 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2042 } 2043 } 2044 2045 MUTEX_DESTROY(&tx_ring_p->lock); 2046 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2047 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2048 2049 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2050 "<== hxge_unmap_txdma_channel_buf_ring")); 2051 } 2052 2053 static 
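/*
 * hxge_txdma_hw_start():
 *	Bring the TDC block up: scrub the reorder table, clear and re-arm
 *	the FIFO error status/mask registers, start every configured TX
 *	DMA channel, and finally re-enable the TX VMAC.
 */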
hxge_status_t 2054 hxge_txdma_hw_start(p_hxge_t hxgep) 2055 { 2056 int i, ndmas; 2057 uint16_t channel; 2058 p_tx_rings_t tx_rings; 2059 p_tx_ring_t *tx_desc_rings; 2060 p_tx_mbox_areas_t tx_mbox_areas_p; 2061 p_tx_mbox_t *tx_mbox_p; 2062 hxge_status_t status = HXGE_OK; 2063 uint64_t tmp; 2064 2065 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start")); 2066 2067 /* 2068 * Initialize REORD Table 1. Disable VMAC 2. Reset the FIFO Err Stat. 2069 * 3. Scrub memory and check for errors. 2070 */ 2071 (void) hxge_tx_vmac_disable(hxgep); 2072 2073 /* 2074 * Clear the error status 2075 */ 2076 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7); 2077 2078 /* 2079 * Scrub the rtab memory for the TDC and reset the TDC. 2080 */ 2081 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, 0x0ULL); 2082 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, 0x0ULL); 2083 2084 for (i = 0; i < 256; i++) { 2085 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, 2086 (uint64_t)i); 2087 2088 /* 2089 * Write the command register with an indirect read instruction 2090 */ 2091 tmp = (0x1ULL << 30) | i; 2092 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp); 2093 2094 /* 2095 * Wait for status done 2096 */ 2097 tmp = 0; 2098 do { 2099 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, 2100 &tmp); 2101 } while (((tmp >> 31) & 0x1ULL) == 0x0); 2102 } 2103 2104 for (i = 0; i < 256; i++) { 2105 /* 2106 * Write the command register with an indirect read instruction 2107 */ 2108 tmp = (0x1ULL << 30) | i; 2109 HXGE_REG_WR64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, tmp); 2110 2111 /* 2112 * Wait for status done 2113 */ 2114 tmp = 0; 2115 do { 2116 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_CMD, 2117 &tmp); 2118 } while (((tmp >> 31) & 0x1ULL) == 0x0); 2119 2120 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_HI, &tmp); 2121 if (0x1ff00ULL != (0x1ffffULL & tmp)) { 2122 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl " 2123 "unexpected data (hi), entry: %x, value: 0x%0llx\n", 2124 i, (unsigned long long)tmp)); 2125 status = HXGE_ERROR; 2126 } 2127 2128 HXGE_REG_RD64(hxgep->hpi_handle, TDC_REORD_TBL_DATA_LO, &tmp); 2129 if (tmp != 0) { 2130 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl " 2131 "unexpected data (lo), entry: %x\n", i)); 2132 status = HXGE_ERROR; 2133 } 2134 2135 HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp); 2136 if (tmp != 0) { 2137 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl " 2138 "parity error, entry: %x, val 0x%llx\n", 2139 i, (unsigned long long)tmp)); 2140 status = HXGE_ERROR; 2141 } 2142 2143 HXGE_REG_RD64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, &tmp); 2144 if (tmp != 0) { 2145 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "PANIC ReordTbl " 2146 "parity error, entry: %x\n", i)); 2147 status = HXGE_ERROR; 2148 } 2149 } 2150 2151 if (status != HXGE_OK) 2152 goto hxge_txdma_hw_start_exit; 2153 2154 /* 2155 * Reset FIFO Error Status for the TDC and enable FIFO error events. 2156 */ 2157 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_STAT, 0x7); 2158 HXGE_REG_WR64(hxgep->hpi_handle, TDC_FIFO_ERR_MASK, 0x0); 2159 2160 /* 2161 * Initialize the Transmit DMAs. 
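 * Each configured channel is started below through
 * hxge_txdma_start_channel(); if any channel fails, the channels
 * started so far are stopped again in reverse order before returning.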
2162 */ 2163 tx_rings = hxgep->tx_rings; 2164 if (tx_rings == NULL) { 2165 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2166 "<== hxge_txdma_hw_start: NULL ring pointer")); 2167 return (HXGE_ERROR); 2168 } 2169 2170 tx_rings->dma_to_reenable = 0; 2171 2172 tx_desc_rings = tx_rings->rings; 2173 if (tx_desc_rings == NULL) { 2174 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2175 "<== hxge_txdma_hw_start: NULL ring pointers")); 2176 return (HXGE_ERROR); 2177 } 2178 ndmas = tx_rings->ndmas; 2179 if (!ndmas) { 2180 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2181 "<== hxge_txdma_hw_start: no dma channel allocated")); 2182 return (HXGE_ERROR); 2183 } 2184 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_start: " 2185 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2186 tx_rings, tx_desc_rings, ndmas)); 2187 2188 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 2189 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2190 2191 /* 2192 * Init the DMAs. 2193 */ 2194 for (i = 0; i < ndmas; i++) { 2195 channel = tx_desc_rings[i]->tdc; 2196 status = hxge_txdma_start_channel(hxgep, channel, 2197 (p_tx_ring_t)tx_desc_rings[i], 2198 (p_tx_mbox_t)tx_mbox_p[i]); 2199 if (status != HXGE_OK) { 2200 goto hxge_txdma_hw_start_fail1; 2201 } 2202 } 2203 2204 (void) hxge_tx_vmac_enable(hxgep); 2205 2206 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2207 "==> hxge_txdma_hw_start: tx_rings $%p rings $%p", 2208 hxgep->tx_rings, hxgep->tx_rings->rings)); 2209 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2210 "==> hxge_txdma_hw_start: tx_rings $%p tx_desc_rings $%p", 2211 hxgep->tx_rings, tx_desc_rings)); 2212 2213 goto hxge_txdma_hw_start_exit; 2214 2215 hxge_txdma_hw_start_fail1: 2216 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2217 "==> hxge_txdma_hw_start: disable (status 0x%x channel %d i %d)", 2218 status, channel, i)); 2219 2220 for (; i >= 0; i--) { 2221 channel = tx_desc_rings[i]->tdc, 2222 (void) hxge_txdma_stop_channel(hxgep, channel, 2223 (p_tx_ring_t)tx_desc_rings[i], 2224 (p_tx_mbox_t)tx_mbox_p[i]); 2225 } 2226 2227 hxge_txdma_hw_start_exit: 2228 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2229 "==> hxge_txdma_hw_start: (status 0x%x)", status)); 2230 2231 return (status); 2232 } 2233 2234 static void 2235 hxge_txdma_hw_stop(p_hxge_t hxgep) 2236 { 2237 int i, ndmas; 2238 uint16_t channel; 2239 p_tx_rings_t tx_rings; 2240 p_tx_ring_t *tx_desc_rings; 2241 p_tx_mbox_areas_t tx_mbox_areas_p; 2242 p_tx_mbox_t *tx_mbox_p; 2243 2244 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop")); 2245 2246 tx_rings = hxgep->tx_rings; 2247 if (tx_rings == NULL) { 2248 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2249 "<== hxge_txdma_hw_stop: NULL ring pointer")); 2250 return; 2251 } 2252 2253 tx_desc_rings = tx_rings->rings; 2254 if (tx_desc_rings == NULL) { 2255 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2256 "<== hxge_txdma_hw_stop: NULL ring pointers")); 2257 return; 2258 } 2259 2260 ndmas = tx_rings->ndmas; 2261 if (!ndmas) { 2262 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2263 "<== hxge_txdma_hw_stop: no dma channel allocated")); 2264 return; 2265 } 2266 2267 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: " 2268 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2269 2270 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 2271 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2272 2273 for (i = 0; i < ndmas; i++) { 2274 channel = tx_desc_rings[i]->tdc; 2275 (void) hxge_txdma_stop_channel(hxgep, channel, 2276 (p_tx_ring_t)tx_desc_rings[i], 2277 (p_tx_mbox_t)tx_mbox_p[i]); 2278 } 2279 2280 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_hw_stop: " 2281 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2282 HXGE_DEBUG_MSG((hxgep, 
	    MEM3_CTL, "<== hxge_txdma_hw_stop"));
}

static hxge_status_t
hxge_txdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_start_channel (channel %d)", channel));

	/*
	 * TXDMA/TXC must be in stopped state.
	 */
	(void) hxge_txdma_stop_inj_err(hxgep, channel);

	/*
	 * Reset TXDMA channel
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.reset = 1;
	status = hxge_reset_txdma_channel(hxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_txdma_start_channel (channel %d)"
		    " reset channel failed 0x%x", channel, status));

		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel specific FZC control configurations.
	 * These FZC registers pertain to each TX channel (i.e. logical
	 * pages).
	 */
	status = hxge_init_fzc_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = hxge_init_txdma_channel_event_mask(hxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

	/*
	 * Load TXDMA descriptors, buffers, mailbox, initialize the DMA
	 * channel and enable it.
	 */
	status = hxge_enable_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != HXGE_OK) {
		goto hxge_txdma_start_channel_exit;
	}

hxge_txdma_start_channel_exit:
	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_start_channel"));

	return (status);
}

/*ARGSUSED*/
static hxge_status_t
hxge_txdma_stop_channel(p_hxge_t hxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
	    "==> hxge_txdma_stop_channel: channel %d", channel));

	/*
	 * Stop (disable) TXDMA and TXC first. If the stop bit is set and
	 * the STOP_N_GO bit is not set, the TXDMA reset state will not be
	 * reached when the channel is subsequently reset.
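	 * The same stop-then-reset sequence is used by
	 * hxge_txdma_start_channel() above before a channel is
	 * (re)programmed.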
2363 */ 2364 (void) hxge_txdma_stop_inj_err(hxgep, channel); 2365 2366 /* 2367 * Reset TXDMA channel 2368 */ 2369 tx_ring_p->tx_cs.value = 0; 2370 tx_ring_p->tx_cs.bits.reset = 1; 2371 status = hxge_reset_txdma_channel(hxgep, channel, 2372 tx_ring_p->tx_cs.value); 2373 if (status != HXGE_OK) { 2374 goto hxge_txdma_stop_channel_exit; 2375 } 2376 2377 hxge_txdma_stop_channel_exit: 2378 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_txdma_stop_channel")); 2379 2380 return (status); 2381 } 2382 2383 static p_tx_ring_t 2384 hxge_txdma_get_ring(p_hxge_t hxgep, uint16_t channel) 2385 { 2386 int index, ndmas; 2387 uint16_t tdc; 2388 p_tx_rings_t tx_rings; 2389 2390 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_ring")); 2391 2392 tx_rings = hxgep->tx_rings; 2393 if (tx_rings == NULL) { 2394 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2395 "<== hxge_txdma_get_ring: NULL ring pointer")); 2396 return (NULL); 2397 } 2398 ndmas = tx_rings->ndmas; 2399 if (!ndmas) { 2400 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2401 "<== hxge_txdma_get_ring: no channel allocated")); 2402 return (NULL); 2403 } 2404 if (tx_rings->rings == NULL) { 2405 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2406 "<== hxge_txdma_get_ring: NULL rings pointer")); 2407 return (NULL); 2408 } 2409 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_ring: " 2410 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2411 tx_rings, tx_rings, ndmas)); 2412 2413 for (index = 0; index < ndmas; index++) { 2414 tdc = tx_rings->rings[index]->tdc; 2415 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2416 "==> hxge_fixup_txdma_rings: channel %d", tdc)); 2417 if (channel == tdc) { 2418 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2419 "<== hxge_txdma_get_ring: tdc %d ring $%p", 2420 tdc, tx_rings->rings[index])); 2421 return (p_tx_ring_t)(tx_rings->rings[index]); 2422 } 2423 } 2424 2425 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_ring")); 2426 2427 return (NULL); 2428 } 2429 2430 static p_tx_mbox_t 2431 hxge_txdma_get_mbox(p_hxge_t hxgep, uint16_t channel) 2432 { 2433 int index, tdc, ndmas; 2434 p_tx_rings_t tx_rings; 2435 p_tx_mbox_areas_t tx_mbox_areas_p; 2436 p_tx_mbox_t *tx_mbox_p; 2437 2438 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_get_mbox")); 2439 2440 tx_rings = hxgep->tx_rings; 2441 if (tx_rings == NULL) { 2442 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2443 "<== hxge_txdma_get_mbox: NULL ring pointer")); 2444 return (NULL); 2445 } 2446 tx_mbox_areas_p = hxgep->tx_mbox_areas_p; 2447 if (tx_mbox_areas_p == NULL) { 2448 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2449 "<== hxge_txdma_get_mbox: NULL mbox pointer")); 2450 return (NULL); 2451 } 2452 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2453 2454 ndmas = tx_rings->ndmas; 2455 if (!ndmas) { 2456 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2457 "<== hxge_txdma_get_mbox: no channel allocated")); 2458 return (NULL); 2459 } 2460 if (tx_rings->rings == NULL) { 2461 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2462 "<== hxge_txdma_get_mbox: NULL rings pointer")); 2463 return (NULL); 2464 } 2465 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_txdma_get_mbox: " 2466 "tx_rings $%p tx_desc_rings $%p ndmas %d", 2467 tx_rings, tx_rings, ndmas)); 2468 2469 for (index = 0; index < ndmas; index++) { 2470 tdc = tx_rings->rings[index]->tdc; 2471 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, 2472 "==> hxge_txdma_get_mbox: channel %d", tdc)); 2473 if (channel == tdc) { 2474 HXGE_DEBUG_MSG((hxgep, TX_CTL, 2475 "<== hxge_txdma_get_mbox: tdc %d ring $%p", 2476 tdc, tx_rings->rings[index])); 2477 return (p_tx_mbox_t)(tx_mbox_p[index]); 2478 } 2479 } 2480 2481 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_get_mbox")); 2482 2483 return 
(NULL);
}

/*ARGSUSED*/
static hxge_status_t
hxge_tx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
    tdc_stat_t cs)
{
	hpi_handle_t handle;
	uint8_t channel;
	p_tx_ring_t *tx_rings;
	p_tx_ring_t tx_ring_p;
	p_hxge_tx_ring_stats_t tdc_stats;
	boolean_t txchan_fatal = B_FALSE;
	hxge_status_t status = HXGE_OK;
	tdc_drop_cnt_t drop_cnt;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_tx_err_evnts"));
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	channel = ldvp->channel;

	tx_rings = hxgep->tx_rings->rings;
	tx_ring_p = tx_rings[index];
	tdc_stats = tx_ring_p->tdc_stats;

	/* Get the error counts if any */
	TXDMA_REG_READ64(handle, TDC_DROP_CNT, channel, &drop_cnt.value);
	tdc_stats->count_hdr_size_err += drop_cnt.bits.hdr_size_error_count;
	tdc_stats->count_runt += drop_cnt.bits.runt_count;
	tdc_stats->count_abort += drop_cnt.bits.abort_count;

	if (cs.bits.peu_resp_err) {
		tdc_stats->peu_resp_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PEU_RESP_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: peu_resp_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.pkt_size_hdr_err) {
		tdc_stats->pkt_size_hdr_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PKT_SIZE_HDR_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_size_hdr_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.runt_pkt_drop_err) {
		tdc_stats->runt_pkt_drop_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_RUNT_PKT_DROP_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: runt_pkt_drop_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.pkt_size_err) {
		tdc_stats->pkt_size_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_size_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.tx_rng_oflow) {
		tdc_stats->tx_rng_oflow++;
		/* Only log the first ring overflow occurrence */
		if (tdc_stats->tx_rng_oflow == 1)
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_tx_err_evnts(channel %d): "
			    "fatal error: tx_rng_oflow", channel));
	}

	if (cs.bits.pref_par_err) {
		tdc_stats->pref_par_err++;

		/* Get the address of parity error read data */
		TXDMA_REG_READ64(hxgep->hpi_handle, TDC_PREF_PAR_LOG,
		    channel, &tdc_stats->errlog.value);

		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PREF_PAR_ERR);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: pref_par_err", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.tdr_pref_cpl_to) {
		tdc_stats->tdr_pref_cpl_to++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_TDR_PREF_CPL_TO);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_tx_err_evnts(channel %d): "
		    "fatal error: tdr_pref_cpl_to", channel));
		txchan_fatal = B_TRUE;
	}

	if (cs.bits.pkt_cpl_to) {
		tdc_stats->pkt_cpl_to++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_TDMC_PKT_CPL_TO);
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> 
hxge_tx_err_evnts(channel %d): " 2593 "fatal error: pkt_cpl_to", channel)); 2594 txchan_fatal = B_TRUE; 2595 } 2596 2597 if (cs.bits.invalid_sop) { 2598 tdc_stats->invalid_sop++; 2599 HXGE_FM_REPORT_ERROR(hxgep, channel, 2600 HXGE_FM_EREPORT_TDMC_INVALID_SOP); 2601 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2602 "==> hxge_tx_err_evnts(channel %d): " 2603 "fatal error: invalid_sop", channel)); 2604 txchan_fatal = B_TRUE; 2605 } 2606 2607 if (cs.bits.unexpected_sop) { 2608 tdc_stats->unexpected_sop++; 2609 HXGE_FM_REPORT_ERROR(hxgep, channel, 2610 HXGE_FM_EREPORT_TDMC_UNEXPECTED_SOP); 2611 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2612 "==> hxge_tx_err_evnts(channel %d): " 2613 "fatal error: unexpected_sop", channel)); 2614 txchan_fatal = B_TRUE; 2615 } 2616 2617 /* Clear error injection source in case this is an injected error */ 2618 TXDMA_REG_WRITE64(hxgep->hpi_handle, TDC_STAT_INT_DBG, channel, 0); 2619 2620 if (txchan_fatal) { 2621 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2622 " hxge_tx_err_evnts: " 2623 " fatal error on channel %d cs 0x%llx\n", 2624 channel, cs.value)); 2625 status = hxge_txdma_fatal_err_recover(hxgep, channel, 2626 tx_ring_p); 2627 if (status == HXGE_OK) { 2628 FM_SERVICE_RESTORED(hxgep); 2629 } 2630 } 2631 2632 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_tx_err_evnts")); 2633 2634 return (status); 2635 } 2636 2637 static hxge_status_t 2638 hxge_txdma_wait_for_qst(p_hxge_t hxgep, int channel) 2639 { 2640 hpi_status_t rs; 2641 hxge_status_t status = HXGE_OK; 2642 hpi_handle_t handle; 2643 2644 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2645 2646 /* 2647 * Wait for QST state of the DMA. 2648 */ 2649 rs = hpi_txdma_control_stop_wait(handle, channel); 2650 if (rs != HPI_SUCCESS) 2651 status = HXGE_ERROR; 2652 2653 return (status); 2654 } 2655 2656 static hxge_status_t 2657 hxge_txdma_handle_rtab_error(p_hxge_t hxgep) 2658 { 2659 hxge_status_t status = HXGE_OK; 2660 int ndmas, i; 2661 uint16_t chnl; 2662 2663 ndmas = hxgep->tx_rings->ndmas; 2664 2665 /* 2666 * Make sure each DMA is in the QST state. 2667 */ 2668 for (i = 0; i < ndmas; i++) { 2669 status = hxge_txdma_wait_for_qst(hxgep, i); 2670 if (status != HXGE_OK) 2671 goto hxge_txdma_handle_rtab_error_exit; 2672 } 2673 2674 /* 2675 * Enable the DMAs. 2676 */ 2677 for (i = 0; i < ndmas; i++) { 2678 chnl = (hxgep->tx_rings->dma_to_reenable + i) % ndmas; 2679 hxge_txdma_enable_channel(hxgep, chnl); 2680 } 2681 2682 hxgep->tx_rings->dma_to_reenable = 2683 (hxgep->tx_rings->dma_to_reenable + 1) % ndmas; 2684 2685 hxge_txdma_handle_rtab_error_exit: 2686 return (status); 2687 } 2688 2689 hxge_status_t 2690 hxge_txdma_handle_sys_errors(p_hxge_t hxgep) 2691 { 2692 hpi_handle_t handle; 2693 hxge_status_t status = HXGE_OK; 2694 tdc_fifo_err_stat_t fifo_stat; 2695 hxge_tdc_sys_stats_t *tdc_sys_stats; 2696 2697 HXGE_DEBUG_MSG((hxgep, TX_CTL, "==> hxge_txdma_handle_sys_errors")); 2698 2699 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2700 2701 /* 2702 * The FIFO is shared by all channels. 2703 * Get the status of Reorder Buffer and Reorder Table Buffer Errors 2704 */ 2705 HXGE_REG_RD64(handle, TDC_FIFO_ERR_STAT, &fifo_stat.value); 2706 2707 /* 2708 * Clear the error bits. Note that writing a 1 clears the bit. Writing 2709 * a 0 does nothing. 
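 * For example, writing back the value just read (as done below)
 * acknowledges exactly the error bits that were set at the time of
 * the read, without disturbing any bits that were clear.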
2710 */ 2711 HXGE_REG_WR64(handle, TDC_FIFO_ERR_STAT, fifo_stat.value); 2712 2713 tdc_sys_stats = &hxgep->statsp->tdc_sys_stats; 2714 if (fifo_stat.bits.reord_tbl_par_err) { 2715 tdc_sys_stats->reord_tbl_par_err++; 2716 status = hxge_txdma_handle_rtab_error(hxgep); 2717 } 2718 2719 if (fifo_stat.bits.reord_buf_ded_err) { 2720 tdc_sys_stats->reord_buf_ded_err++; 2721 HXGE_FM_REPORT_ERROR(hxgep, NULL, 2722 HXGE_FM_EREPORT_TDMC_REORD_BUF_DED); 2723 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2724 "==> hxge_txdma_handle_sys_errors: " 2725 "fatal error: reord_buf_ded_err")); 2726 } 2727 2728 if (fifo_stat.bits.reord_buf_sec_err) { 2729 tdc_sys_stats->reord_buf_sec_err++; 2730 if (tdc_sys_stats->reord_buf_sec_err == 1) 2731 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2732 "==> hxge_txdma_handle_sys_errors: " 2733 "reord_buf_sec_err")); 2734 } 2735 2736 if (fifo_stat.bits.reord_buf_ded_err) { 2737 status = hxge_tx_port_fatal_err_recover(hxgep); 2738 if (status == HXGE_OK) { 2739 FM_SERVICE_RESTORED(hxgep); 2740 } 2741 } 2742 2743 HXGE_DEBUG_MSG((hxgep, TX_CTL, "<== hxge_txdma_handle_sys_errors")); 2744 2745 return (status); 2746 } 2747 2748 static hxge_status_t 2749 hxge_txdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel, 2750 p_tx_ring_t tx_ring_p) 2751 { 2752 hpi_handle_t handle; 2753 hpi_status_t rs = HPI_SUCCESS; 2754 p_tx_mbox_t tx_mbox_p; 2755 hxge_status_t status = HXGE_OK; 2756 2757 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "==> hxge_txdma_fatal_err_recover")); 2758 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2759 "Recovering from TxDMAChannel#%d error...", channel)); 2760 2761 /* 2762 * Stop the dma channel waits for the stop done. If the stop done bit 2763 * is not set, then create an error. 2764 */ 2765 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2766 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping txdma channel(%d)", 2767 channel)); 2768 MUTEX_ENTER(&tx_ring_p->lock); 2769 rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel); 2770 if (rs != HPI_SUCCESS) { 2771 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2772 "==> hxge_txdma_fatal_err_recover (channel %d): " 2773 "stop failed ", channel)); 2774 2775 goto fail; 2776 } 2777 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming txdma channel(%d)", 2778 channel)); 2779 (void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0); 2780 2781 /* 2782 * Reset TXDMA channel 2783 */ 2784 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "resetting txdma channel(%d)", 2785 channel)); 2786 if ((rs = hpi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 2787 HPI_SUCCESS) { 2788 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2789 "==> hxge_txdma_fatal_err_recover (channel %d)" 2790 " reset channel failed 0x%x", channel, rs)); 2791 2792 goto fail; 2793 } 2794 /* 2795 * Reset the tail (kick) register to 0. (Hardware will not reset it. Tx 2796 * overflow fatal error if tail is not set to 0 after reset! 2797 */ 2798 TXDMA_REG_WRITE64(handle, TDC_TDR_KICK, channel, 0); 2799 2800 /* 2801 * Restart TXDMA channel 2802 * 2803 * Initialize the TXDMA channel specific FZC control configurations. 2804 * These FZC registers are pertaining to each TX channel (i.e. logical 2805 * pages). 2806 */ 2807 tx_mbox_p = hxge_txdma_get_mbox(hxgep, channel); 2808 HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "restarting txdma channel(%d)", 2809 channel)); 2810 status = hxge_init_fzc_txdma_channel(hxgep, channel, 2811 tx_ring_p, tx_mbox_p); 2812 if (status != HXGE_OK) 2813 goto fail; 2814 2815 /* 2816 * Initialize the event masks. 
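 * A cleared tx_evmask value is the same zero-mask programming used by
 * hxge_txdma_start_channel(), which presumably leaves all per-channel
 * TDC events unmasked during recovery.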
 */
	tx_ring_p->tx_evmask.value = 0;
	status = hxge_init_txdma_channel_event_mask(hxgep, channel,
	    &tx_ring_p->tx_evmask);
	if (status != HXGE_OK)
		goto fail;

	tx_ring_p->wr_index_wrap = B_FALSE;
	tx_ring_p->wr_index = 0;
	tx_ring_p->rd_index = 0;

	/*
	 * Load TXDMA descriptors, buffers, mailbox, initialize the DMA
	 * channel and enable it.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "enabling txdma channel(%d)",
	    channel));
	status = hxge_enable_txdma_channel(hxgep, channel,
	    tx_ring_p, tx_mbox_p);
	MUTEX_EXIT(&tx_ring_p->lock);
	if (status != HXGE_OK)
		goto fail;

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxDMAChannel#%d Restored", channel));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "<== hxge_txdma_fatal_err_recover"));

	return (HXGE_OK);

fail:
	MUTEX_EXIT(&tx_ring_p->lock);
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "hxge_txdma_fatal_err_recover (channel %d): "
	    "failed to recover this txdma channel", channel));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));

	return (status);
}

static hxge_status_t
hxge_tx_port_fatal_err_recover(p_hxge_t hxgep)
{
	hpi_handle_t handle;
	hpi_status_t rs = HPI_SUCCESS;
	hxge_status_t status = HXGE_OK;
	p_tx_ring_t *tx_desc_rings;
	p_tx_rings_t tx_rings;
	p_tx_ring_t tx_ring_p;
	int i, ndmas;
	uint16_t channel;
	block_reset_t reset_reg;

	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "==> hxge_tx_port_fatal_err_recover"));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovering from TxPort error..."));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/* Reset the TDC block from the PEU for this fatal error */
	reset_reg.value = 0;
	reset_reg.bits.tdc_rst = 1;
	HXGE_REG_WR32(handle, BLOCK_RESET, reset_reg.value);

	HXGE_DELAY(1000);

	/*
	 * Stop each DMA channel and wait for the stop-done indication.
	 * If the stop done bit does not get set, report an error.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "stopping all DMA channels..."));

	tx_rings = hxgep->tx_rings;
	tx_desc_rings = tx_rings->rings;
	ndmas = tx_rings->ndmas;

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_ENTER(&tx_ring_p->lock);
	}

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		channel = tx_desc_rings[i]->tdc;
		tx_ring_p = tx_rings->rings[i];
		rs = hpi_txdma_channel_control(handle, TXDMA_STOP, channel);
		if (rs != HPI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "==> hxge_tx_port_fatal_err_recover "
			    "(channel %d): stop failed ", channel));

			goto fail;
		}
	}

	/*
	 * Do reclaim on all of the DMAs.
	 */
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL, "reclaiming all DMA channels..."));
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		(void) hxge_txdma_reclaim(hxgep, tx_ring_p, 0);
	}

	/* Restart the TDC */
	if ((status = hxge_txdma_hw_start(hxgep)) != HXGE_OK)
		goto fail;

	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, TxPort Restored"));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "<== hxge_tx_port_fatal_err_recover"));
	return (HXGE_OK);

fail:
	for (i = 0; i < ndmas; i++) {
		if (tx_desc_rings[i] == NULL) {
			continue;
		}
		tx_ring_p = tx_rings->rings[i];
		MUTEX_EXIT(&tx_ring_p->lock);
	}

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed"));
	HXGE_DEBUG_MSG((hxgep, TX_ERR_CTL,
	    "hxge_tx_port_fatal_err_recover: "
	    "failed to recover the transmit port"));

	return (status);
}
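/*
 * Note on locking in the recovery paths above:
 * hxge_tx_port_fatal_err_recover() grabs every TX ring lock before
 * resetting the TDC block and releases them only after
 * hxge_txdma_hw_start() has restarted the channels, so the transmit
 * path stays quiesced for the duration of the port-level recovery.
 */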