/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_rxdma.h>

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t hxge_rbr_size;
extern uint32_t hxge_rcr_size;
extern uint32_t hxge_rbr_spare_size;

extern uint32_t hxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t hxge_max_rx_pkts;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * hxge_rx_threshold_hi: copy all buffers.
 * hxge_rx_bcopy_size_type: receive buffer block size type.
 * hxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;

static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
static void hxge_unmap_rxdma(p_hxge_t hxgep);
static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
    uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p,
    p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
    uint16_t dma_channel, p_hxge_dma_common_t *dma_cntl_p,
    p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
    p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
    uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
    p_rx_rbr_ring_t rbr_p);
static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p);
static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
    p_rx_rcr_ring_t *rcr_p, rdc_stat_t cs);
static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
    p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
    mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry);
static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
    uint16_t channel);
static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
static void hxge_freeb(p_rx_msg_t);
static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex,
    p_hxge_ldv_t ldvp, rdc_stat_t cs);
static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
    p_hxge_ldv_t ldvp, rdc_stat_t cs);
static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
    p_rx_rbr_ring_t rx_dmap);
static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
    uint16_t channel);
static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);

#define	HXGE_RXDMA_RBB_MAX(x)		(((x) >> 4) * 15)
#define	HXGE_RXDMA_RBB_MIN(x)		((x) >> 4)
#define	HXGE_RXDMA_RBB_THRESHOLD(x)	(((x) >> 4) * 14)

hxge_status_t
hxge_init_rxdma_channels(p_hxge_t hxgep)
{
	hxge_status_t	status = HXGE_OK;
	block_reset_t	reset_reg;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));

	/* Reset RDC block from PEU to clear any previous state */
	reset_reg.value = 0;
	reset_reg.bits.rdc_rst = 1;
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
	HXGE_DELAY(1000);

	status = hxge_map_rxdma(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_init_rxdma: status 0x%x", status));
		return (status);
	}

	status = hxge_rxdma_hw_start_common(hxgep);
	if (status != HXGE_OK) {
		hxge_unmap_rxdma(hxgep);
	}

	status = hxge_rxdma_hw_start(hxgep);
	if (status != HXGE_OK) {
		hxge_unmap_rxdma(hxgep);
	}

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "<== hxge_init_rxdma_channels: status 0x%x", status));
	return (status);
}

void
hxge_uninit_rxdma_channels(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));

	hxge_rxdma_hw_stop(hxgep);
	hxge_unmap_rxdma(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uninit_rxdma_channels"));
}

hxge_status_t
hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
    rdc_stat_t *cs_p)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_init_rxdma_channel_cntl_stat"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);

	if (rs != HPI_SUCCESS) {
		status = HXGE_ERROR | rs;
	}
	return (status);
}

hxge_status_t
hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	hpi_handle_t	handle;
	rdc_desc_cfg_t	rdc_desc;
	rdc_rcr_cfg_b_t	*cfgb_p;
	hpi_status_t	rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * Use configuration data composed at init time. Write to hardware the
	 * receive ring configurations.
	 */
	rdc_desc.mbox_enable = 1;
	rdc_desc.mbox_addr = mbox_p->mbox_addr;
	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
	    mbox_p->mbox_addr, rdc_desc.mbox_addr));

	rdc_desc.rbr_len = rbr_p->rbb_max;
	rdc_desc.rbr_addr = rbr_p->rbr_addr;

	switch (hxgep->rx_bksize_code) {
	case RBR_BKSIZE_4K:
		rdc_desc.page_size = SIZE_4KB;
		break;
	case RBR_BKSIZE_8K:
		rdc_desc.page_size = SIZE_8KB;
		break;
	}

	rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
	rdc_desc.valid0 = 1;

	rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
	rdc_desc.valid1 = 1;

	rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
	rdc_desc.valid2 = 1;

	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
	rdc_desc.offset = rcr_p->sw_priv_hdr_len;

	rdc_desc.rcr_len = rcr_p->comp_size;
	rdc_desc.rcr_addr = rcr_p->rcr_addr;

	cfgb_p = &(rcr_p->rcr_cfgb);
	rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
	rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
	rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
	    "rbr_len qlen %d pagesize code %d rcr_len %d",
	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
	    "size 0 %d size 1 %d size 2 %d",
	    rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
	    rbr_p->hpi_pkt_buf_size2));

	rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/*
	 * Enable the timeout and threshold.
	 */
	rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
	    rdc_desc.rcr_threshold);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
	    rdc_desc.rcr_timeout);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/* Enable the DMA */
	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/*
	 * Kick the DMA engine with the initial kick and indicate
	 * that we have remaining blocks to post.
	 */
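	/*
	 * Illustration (values assumed, not mandated by the hardware): the
	 * HXGE_RXDMA_RBB_* macros defined above work in sixteenths of
	 * rbb_max, so HXGE_RXDMA_RBB_MAX() is 15/16, HXGE_RXDMA_RBB_MIN()
	 * is 1/16 and HXGE_RXDMA_RBB_THRESHOLD() is 14/16 of the ring.
	 * With rbb_max == 4096, the kick below posts 3840 blocks and
	 * pages_to_post is seeded with the remaining 256.
	 */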
	rbr_p->pages_to_post = HXGE_RXDMA_RBB_MIN(rbr_p->rbb_max);
	hpi_rxdma_rdc_rbr_kick(handle, channel,
	    HXGE_RXDMA_RBB_MAX(rbr_p->rbb_max));

	/* Clear the rbr empty bit */
	(void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));

	return (HXGE_OK);
}

static hxge_status_t
hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/* disable the DMA */
	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != HPI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
		return (HXGE_ERROR | rs);
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
	return (HXGE_OK);
}

hxge_status_t
hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
{
	hpi_handle_t	handle;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_rxdma_channel_rcrflush"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	hpi_rxdma_rdc_rcr_flush(handle, channel);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_rxdma_channel_rcrflush"));
	return (status);
}

#define	MID_INDEX(l, r) ((r + l + 1) >> 1)

#define	TO_LEFT		-1
#define	TO_RIGHT	1
#define	BOTH_RIGHT	(TO_RIGHT + TO_RIGHT)
#define	BOTH_LEFT	(TO_LEFT + TO_LEFT)
#define	IN_MIDDLE	(TO_RIGHT + TO_LEFT)
#define	NO_HINT		0xffffffff

/*ARGSUSED*/
hxge_status_t
hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
    uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
    uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
	int			bufsize;
	uint64_t		pktbuf_pp;
	uint64_t		dvma_addr;
	rxring_info_t		*ring_info;
	int			base_side, end_side;
	int			r_index, l_index, anchor_index;
	int			found, search_done;
	uint32_t		offset, chunk_size, block_size, page_size_mask;
	uint32_t		chunk_index, block_index, total_index;
	int			max_iterations, iteration;
	rxbuf_index_info_t	*bufinfo;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
	    pkt_buf_addr_pp, pktbufsz_type));

#if defined(__i386)
	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

	switch (pktbufsz_type) {
	case 0:
		bufsize = rbr_p->pkt_buf_size0;
		break;
	case 1:
		bufsize = rbr_p->pkt_buf_size1;
		break;
	case 2:
		bufsize = rbr_p->pkt_buf_size2;
		break;
	case RCR_SINGLE_BLOCK:
		bufsize = 0;
		anchor_index = 0;
		break;
	default:
		return (HXGE_ERROR);
	}

	if (rbr_p->num_blocks == 1) {
		anchor_index = 0;
		ring_info = rbr_p->ring_info;
		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;

		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
		    "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));

		goto found_index;
	}

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
	    pkt_buf_addr_pp, pktbufsz_type, anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;

	/*
	 * First check if this block has been seen recently. This is indicated
	 * by a hint which is initialized when the first buffer of the block is
	 * seen. The hint is reset when the last buffer of the block has been
	 * processed. As three block sizes are supported, three hints are kept.
	 * The idea behind the hints is that once the hardware uses a block
	 * for a buffer of that size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed that there
	 * would be a single block in use for a given buffer size at any
	 * given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block. If
			 * so, then reset the hint for this size.
			 */
			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));

		/*
		 * This is the first buffer of the block of this size. We need
		 * to search the whole information array using a binary
		 * search. It assumes that the information is already sorted
		 * in increasing order:
		 * info[0] < info[1] < ... < info[n-1], where n is the size of
		 * the information array.
		 */
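		/*
		 * Sketch of one lookup (num_blocks == 8 is an assumed
		 * value): the search starts with l_index == 0, r_index == 7
		 * and anchor_index == MID_INDEX(7, 0) == 4.  Each pass
		 * halves the remaining range, so at most max_iterations
		 * passes are needed to land on the chunk whose
		 * [dvma_addr, dvma_addr + buf_size) range contains
		 * pktbuf_pp.
		 */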
		r_index = rbr_p->num_blocks - 1;
		l_index = 0;
		search_done = B_FALSE;
		anchor_index = MID_INDEX(r_index, l_index);
		while (search_done == B_FALSE) {
			if ((r_index == l_index) ||
			    (iteration >= max_iterations))
				search_done = B_TRUE;

			end_side = TO_RIGHT;	/* to the right */
			base_side = TO_LEFT;	/* to the left */
			/* read the DVMA address information and sort it */
			dvma_addr = bufinfo[anchor_index].dvma_addr;
			chunk_size = bufinfo[anchor_index].buf_size;

			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
			    "==> hxge_rxbuf_pp_to_vp: (searching)"
			    "buf_pp $%p btype %d "
			    "anchor_index %d chunk_size %d dvmaaddr $%p",
			    pkt_buf_addr_pp, pktbufsz_type, anchor_index,
			    chunk_size, dvma_addr));

			if (pktbuf_pp >= dvma_addr)
				base_side = TO_RIGHT;	/* to the right */
			if (pktbuf_pp < (dvma_addr + chunk_size))
				end_side = TO_LEFT;	/* to the left */

			switch (base_side + end_side) {
			case IN_MIDDLE:
				/* found */
				found = B_TRUE;
				search_done = B_TRUE;
				if ((pktbuf_pp + bufsize) <
				    (dvma_addr + chunk_size))
					ring_info->hint[pktbufsz_type] =
					    bufinfo[anchor_index].buf_index;
				break;
			case BOTH_RIGHT:
				/* not found: go to the right */
				l_index = anchor_index + 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;

			case BOTH_LEFT:
				/* not found: go to the left */
				r_index = anchor_index - 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;
			default:	/* should not come here */
				return (HXGE_ERROR);
			}
			iteration++;
		}

		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (search done)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
	}

	if (found == B_FALSE) {
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (search failed)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
		return (HXGE_ERROR);
	}

found_index:
	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: (FOUND1)"
	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index));

	/* index of the first block in this chunk */
	chunk_index = bufinfo[anchor_index].start_index;
	dvma_addr = bufinfo[anchor_index].dvma_addr;
	page_size_mask = ring_info->block_size_mask;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p",
	    pkt_buf_addr_pp, pktbufsz_type, bufsize,
	    anchor_index, chunk_index, dvma_addr));

	offset = pktbuf_pp - dvma_addr;	/* offset within the chunk */
	block_size = rbr_p->block_size;	/* System block(page) size */

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p "
	    "offset %d block_size %d",
	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index,
	    chunk_index, dvma_addr, offset, block_size));
	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index"));

	block_index = (offset / block_size);	/* index within chunk */
	total_index = chunk_index + block_index;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d ",
	    total_index, dvma_addr, offset, block_size, block_index));

#if defined(__i386)
	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
	    (uint32_t)offset);
#else
	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
	    offset);
#endif

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d "
	    "*pkt_buf_addr_p $%p",
	    total_index, dvma_addr, offset, block_size,
	    block_index, *pkt_buf_addr_p));

	*msg_index = total_index;
	*bufoffset = (offset & page_size_mask);

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: get msg index: "
	    "msg_index %d bufoffset_index %d",
	    *msg_index, *bufoffset));
	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));

	return (HXGE_OK);
}

/*
 * Comparison function used by hxge_ksort() to order buffer
 * chunks by DVMA address.
 */
static int
hxge_sort_compare(const void *p1, const void *p2)
{
	rxbuf_index_info_t *a, *b;

	a = (rxbuf_index_info_t *)p1;
	b = (rxbuf_index_info_t *)p2;

	if (a->dvma_addr > b->dvma_addr)
		return (1);
	if (a->dvma_addr < b->dvma_addr)
		return (-1);
	return (0);
}

/*
 * Grabbed this sort implementation from common/syscall/avl.c
 *
 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
 * v = Ptr to array/vector of objs
 * n = # objs in the array
 * s = size of each obj (must be a multiple of the word size)
 * f = ptr to function to compare two objs
 *	returns (-1 = less than, 0 = equal, 1 = greater than)
 */
void
hxge_ksort(caddr_t v, int n, int s, int (*f) ())
{
	int		g, i, j, ii;
	unsigned int	*p1, *p2;
	unsigned int	tmp;

	/* No work to do */
	if (v == NULL || n <= 1)
		return;
	/* Sanity check on arguments */
	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
	ASSERT(s > 0);

	for (g = n / 2; g > 0; g /= 2) {
		for (i = g; i < n; i++) {
			for (j = i - g; j >= 0 &&
			    (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
				p1 = (unsigned *)(v + j * s);
				p2 = (unsigned *)(v + (j + g) * s);
				for (ii = 0; ii < s / 4; ii++) {
					tmp = *p1;
					*p1++ = *p2;
					*p2++ = tmp;
				}
			}
		}
	}
}

/*
 * Initialize data structures required for rxdma
 * buffer dvma->vmem address lookup
 */
/*ARGSUSED*/
static hxge_status_t
hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
{
	int		index;
	rxring_info_t	*ring_info;
	int		max_iteration = 0, max_index = 0;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));

	ring_info = rbrp->ring_info;
	ring_info->hint[0] = NO_HINT;
	ring_info->hint[1] = NO_HINT;
	ring_info->hint[2] = NO_HINT;
	max_index = rbrp->num_blocks;

	/* read the DVMA address information and sort it */
	/* do init of the information array */

	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
	    " hxge_rxbuf_index_info_init Sort ptrs"));

	/* sort the array */
	hxge_ksort((void *) ring_info->buffer, max_index,
	    sizeof (rxbuf_index_info_t), hxge_sort_compare);

	for (index = 0; index < max_index; index++) {
		HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
		    " hxge_rxbuf_index_info_init: sorted chunk %d "
		    " ioaddr $%p kaddr $%p size %x",
		    index, ring_info->buffer[index].dvma_addr,
		    ring_info->buffer[index].kaddr,
		    ring_info->buffer[index].buf_size));
	}
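
	/*
	 * The loop below sizes the binary-search iteration limit: it finds
	 * the exponent of the smallest power of two greater than max_index
	 * and adds one.  For example (assumed value), with max_index == 8
	 * the loop stops once (1 << 4) > 8, so max_iterations becomes 5.
	 */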
	max_iteration = 0;
	while (max_index >= (1ULL << max_iteration))
		max_iteration++;
	ring_info->max_iterations = max_iteration + 1;

	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
	    " hxge_rxbuf_index_info_init Find max iter %d",
	    ring_info->max_iterations));
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init"));

	return (HXGE_OK);
}

/*ARGSUSED*/
void
hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p)
{
#ifdef HXGE_DEBUG
	uint32_t bptr;
	uint64_t pp;

	bptr = entry_p->bits.pkt_buf_addr;

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "\trcr entry $%p "
	    "\trcr entry 0x%0llx "
	    "\trcr entry 0x%08x "
	    "\trcr entry 0x%08x "
	    "\tvalue 0x%0llx\n"
	    "\tmulti = %d\n"
	    "\tpkt_type = 0x%x\n"
	    "\terror = 0x%04x\n"
	    "\tl2_len = %d\n"
	    "\tpktbufsize = %d\n"
	    "\tpkt_buf_addr = $%p\n"
	    "\tpkt_buf_addr (<< 6) = $%p\n",
	    entry_p,
	    *(int64_t *)entry_p,
	    *(int32_t *)entry_p,
	    *(int32_t *)((char *)entry_p + 32),
	    entry_p->value,
	    entry_p->bits.multi,
	    entry_p->bits.pkt_type,
	    entry_p->bits.error,
	    entry_p->bits.l2_len,
	    entry_p->bits.pktbufsz,
	    bptr,
	    entry_p->bits.pkt_buf_addr_l));

	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
	    RCR_PKT_BUF_ADDR_SHIFT;

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}

/*ARGSUSED*/
void
hxge_rxdma_stop(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop"));

	(void) hxge_rx_vmac_disable(hxgep);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop"));
}

void
hxge_rxdma_stop_reinit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit"));

	(void) hxge_rxdma_stop(hxgep);
	(void) hxge_uninit_rxdma_channels(hxgep);
	(void) hxge_init_rxdma_channels(hxgep);

	(void) hxge_rx_vmac_enable(hxgep);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit"));
}

hxge_status_t
hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
{
	int			i, ndmas;
	uint16_t		channel;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	hpi_handle_t		handle;
	hpi_status_t		rs = HPI_SUCCESS;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "==> hxge_rxdma_hw_mode: mode %d", enable));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_rxdma_mode: not initialized"));
		return (HXGE_ERROR);
	}

	rx_rbr_rings = hxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_rxdma_mode: NULL ring pointer"));
		return (HXGE_ERROR);
	}

	if (rx_rbr_rings->rbr_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_rxdma_mode: NULL rbr rings pointer"));
		return (HXGE_ERROR);
	}

	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_rxdma_mode: no channel"));
		return (HXGE_ERROR);
	}

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "==> hxge_rxdma_mode (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	for (i = 0; i < ndmas; i++) {
		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
			continue;
		}
		channel = rbr_rings[i]->rdc;
		if (enable) {
			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
			    "==> hxge_rxdma_hw_mode: channel %d (enable)",
			    channel));
			rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
		} else {
			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
			    "==> hxge_rxdma_hw_mode: channel %d (disable)",
			    channel));
			rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
		}
	}

	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "<== hxge_rxdma_hw_mode: status 0x%x", status));

	return (status);
}

int
hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel)
{
	int			i, ndmas;
	uint16_t		rdc;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rxdma_get_ring_index: channel %d", channel));

	rx_rbr_rings = hxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_rxdma_get_ring_index: NULL ring pointer"));
		return (-1);
	}

	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_rxdma_get_ring_index: no channel"));
		return (-1);
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	for (i = 0; i < ndmas; i++) {
		rdc = rbr_rings[i]->rdc;
		if (channel == rdc) {
			HXGE_DEBUG_MSG((hxgep, RX_CTL,
			    "==> hxge_rxdma_get_rbr_ring: "
			    "channel %d (index %d) "
			    "ring %d", channel, i, rbr_rings[i]));
			return (i);
		}
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "<== hxge_rxdma_get_rbr_ring_index: not found"));

	return (-1);
}

/*
 * Static functions start here.
 */
static p_rx_msg_t
hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p)
{
	p_rx_msg_t		hxge_mp = NULL;
	p_hxge_dma_common_t	dmamsg_p;
	uchar_t			*buffer;

	hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
	if (hxge_mp == NULL) {
		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
		    "Allocation of a rx msg failed."));
		goto hxge_allocb_exit;
	}

	hxge_mp->use_buf_pool = B_FALSE;
	if (dmabuf_p) {
		hxge_mp->use_buf_pool = B_TRUE;

		dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma;
		*dmamsg_p = *dmabuf_p;
		dmamsg_p->nblocks = 1;
		dmamsg_p->block_size = size;
		dmamsg_p->alength = size;
		buffer = (uchar_t *)dmabuf_p->kaddrp;

		dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size);
		dmabuf_p->ioaddr_pp = (void *)
		    ((char *)dmabuf_p->ioaddr_pp + size);

		dmabuf_p->alength -= size;
		dmabuf_p->offset += size;
		dmabuf_p->dma_cookie.dmac_laddress += size;
		dmabuf_p->dma_cookie.dmac_size -= size;
	} else {
		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
		if (buffer == NULL) {
			HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
			    "Allocation of a receive page failed."));
			goto hxge_allocb_fail1;
		}
	}

	hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb);
	if (hxge_mp->rx_mblk_p == NULL) {
		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed."));
		goto hxge_allocb_fail2;
	}
	hxge_mp->buffer = buffer;
	hxge_mp->block_size = size;
	hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb;
	hxge_mp->freeb.free_arg = (caddr_t)hxge_mp;
	hxge_mp->ref_cnt = 1;
	hxge_mp->free = B_TRUE;
	hxge_mp->rx_use_bcopy = B_FALSE;

	atomic_inc_32(&hxge_mblks_pending);

	goto hxge_allocb_exit;

hxge_allocb_fail2:
	if (!hxge_mp->use_buf_pool) {
		KMEM_FREE(buffer, size);
	}
hxge_allocb_fail1:
	KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
	hxge_mp = NULL;

hxge_allocb_exit:
	return (hxge_mp);
}

p_mblk_t
hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;

	HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
	HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
	    "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));

	mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
	if (mp == NULL) {
		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto hxge_dupb_exit;
	}

	atomic_inc_32(&hxge_mp->ref_cnt);

hxge_dupb_exit:
	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
	return (mp);
}

p_mblk_t
hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
{
	p_mblk_t	mp;
	uchar_t		*dp;

	mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
	if (mp == NULL) {
		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto hxge_dupb_bcopy_exit;
	}
	dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
	bcopy((void *) &hxge_mp->buffer[offset], dp, size);
	mp->b_wptr = dp + size;

hxge_dupb_bcopy_exit:
	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));

	return (mp);
}

void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
    p_rx_msg_t rx_msg_p);

void
hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	hpi_handle_t	handle;
	uint64_t	rbr_qlen, blocks_to_post = 0ULL;

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));

	/* Reuse this buffer */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	if (rx_rbr_p->rbr_use_bcopy) {
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}

	/*
	 * Get the rbr header pointer and its offset index.
	 */
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;

	/*
	 * Don't post when the index is close to 0 or near the max, to reduce
	 * the number of rbr_empty errors.
	 */
	rx_rbr_p->pages_to_post++;
	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * False RBR Empty Workaround
	 */
	RXDMA_REG_READ64(handle, RDC_RBR_QLEN, rx_rbr_p->rdc, &rbr_qlen);
	rbr_qlen = rbr_qlen & 0xffff;

	if ((rbr_qlen > 0) &&
	    (rbr_qlen < HXGE_RXDMA_RBB_THRESHOLD(rx_rbr_p->rbb_max))) {
		blocks_to_post =
		    HXGE_RXDMA_RBB_MAX(rx_rbr_p->rbb_max) - rbr_qlen;
	}

	/*
	 * Clamp posting to what we have available.
	 */
	if ((blocks_to_post > 0) &&
	    (blocks_to_post > rx_rbr_p->pages_to_post)) {
		blocks_to_post = rx_rbr_p->pages_to_post;
	}

	/*
	 * Post blocks to the hardware, if any is available.
	 */
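	/*
	 * Worked example (rbb_max == 4096 is an assumed value): the
	 * threshold above is 14/16 * 4096 == 3584 and the posting ceiling
	 * is 15/16 * 4096 == 3840.  If the hardware reports a qlen of
	 * 3000, blocks_to_post is 3840 - 3000 == 840, clamped to
	 * pages_to_post so we never kick more buffers than have actually
	 * been returned to the driver.
	 */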
	if (blocks_to_post > 0) {
		hpi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, blocks_to_post);
		rx_rbr_p->pages_to_post -= blocks_to_post;
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "<== hxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
}

void
hxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t		size;
	uchar_t		*buffer = NULL;
	int		ref_cnt;
	boolean_t	free_state = B_FALSE;
	rx_rbr_ring_t	*ring = rx_msg_p->rx_rbr_p;

	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
	HXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "hxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, hxge_mblks_pending));

	MUTEX_ENTER(&ring->post_lock);

	/*
	 * First we need to get the free state, then atomically decrement the
	 * reference count to prevent the race condition with the interrupt
	 * thread that is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;

	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		atomic_dec_32(&hxge_mblks_pending);

		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;

		HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, hxge_mblks_pending));

		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
		/* Decrement the receive buffer ring's reference count, too. */
		atomic_dec_32(&ring->rbr_ref_cnt);

		/*
		 * Free the receive buffer ring, iff
		 * 1. all the receive buffers have been freed
		 * 2. and we are in the proper state (that is,
		 *    we are not UNMAPPING).
		 */
		if (ring->rbr_ref_cnt == 0 && ring->rbr_state == RBR_UNMAPPED) {
			KMEM_FREE(ring, sizeof (*ring));
		}
		goto hxge_freeb_exit;
	}

	/*
	 * Repost buffer.
	 */
	if (free_state && (ref_cnt == 1)) {
		HXGE_DEBUG_MSG((NULL, RX_CTL,
		    "hxge_freeb: post page $%p:", rx_msg_p));
		if (ring->rbr_state == RBR_POSTING)
			hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p);
	}

hxge_freeb_exit:
	MUTEX_EXIT(&ring->post_lock);
	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb"));
}

uint_t
hxge_rx_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t	ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t	hxgep = (p_hxge_t)arg2;
	p_hxge_ldg_t	ldgp;
	uint8_t		channel;
	hpi_handle_t	handle;
	rdc_stat_t	cs;
	uint_t		serviced = DDI_INTR_UNCLAIMED;

	if (ldvp == NULL) {
		HXGE_DEBUG_MSG((NULL, RX_INT_CTL,
		    "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
		hxgep = ldvp->hxgep;
	}

	/*
	 * If the interface is not started, just swallow the interrupt
	 * for the logical device and don't rearm it.
	 */
	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED)
		return (DDI_INTR_CLAIMED);

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
	    "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));

	/*
	 * This interrupt handler is for a specific receive dma channel.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d "
	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
	    channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres));

	hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs);
	serviced = DDI_INTR_CLAIMED;

	/* error events. */
	if (cs.value & RDC_STAT_ERROR) {
		(void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
	}

hxge_intr_exit:
	/*
	 * Enable the mailbox update interrupt if we want to use mailbox. We
	 * probably don't need to use mailbox as it only saves us one pio read.
	 * Also write 1 to rcrthres and rcrto to clear these two edge triggered
	 * bits.
	 */
	cs.value &= RDC_STAT_WR1C;
	cs.bits.mex = 1;
	cs.bits.ptrread = 0;
	cs.bits.pktread = 0;
	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);

	/*
	 * Rearm this logical group if this is a single device group.
	 */
	if (ldgp->nldvs == 1) {
		ld_intr_mgmt_t mgm;

		mgm.value = 0;
		mgm.bits.arm = 1;
		mgm.bits.timer = ldgp->ldg_timer;
		HXGE_REG_WR32(handle,
		    LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value);
	}

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
	    "<== hxge_rx_intr: serviced %d", serviced));

	return (serviced);
}

static void
hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
    rdc_stat_t cs)
{
	p_mblk_t	mp;
	p_rx_rcr_ring_t	rcrp;

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring"));
	if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "<== hxge_rx_pkts_vring: no mp"));
		return;
	}
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp));

#ifdef HXGE_DEBUG
	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) "
	    "LEN %d mp $%p mp->b_next $%p rcrp $%p "
	    "mac_handle $%p",
	    (mp->b_wptr - mp->b_rptr), mp, mp->b_next,
	    rcrp, rcrp->rcr_mac_handle));
	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rx_pkts_vring: dump packets "
	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
	    mp, mp->b_rptr, mp->b_wptr,
	    hxge_dump_packet((char *)mp->b_rptr, 64)));

	if (mp->b_cont) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "==> hxge_rx_pkts_vring: dump b_cont packets "
		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
		    mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr,
		    hxge_dump_packet((char *)mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
	}
	if (mp->b_next) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "==> hxge_rx_pkts_vring: dump next packets "
		    "(b_rptr $%p): %s",
		    mp->b_next->b_rptr,
		    hxge_dump_packet((char *)mp->b_next->b_rptr, 64)));
	}
#endif

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rx_pkts_vring: send packet to stack"));
	mac_rx(hxgep->mach, rcrp->rcr_mac_handle, mp);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring"));
}

/*ARGSUSED*/
mblk_t *
hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
    p_rx_rcr_ring_t *rcrp, rdc_stat_t cs)
{
	hpi_handle_t		handle;
	uint8_t			channel;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		rcr_p;
	uint32_t		comp_rd_index;
	p_rcr_entry_t		rcr_desc_rd_head_p;
	p_rcr_entry_t		rcr_desc_rd_head_pp;
	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
	uint16_t		qlen, nrcr_read, npkt_read;
	uint32_t		qlen_hw, qlen_sw;
	uint32_t		invalid_rcr_entry;
	boolean_t		multi;
	rdc_rcr_cfg_b_t		rcr_cfg_b;
	p_rx_mbox_t		rx_mboxp;
	p_rxdma_mailbox_t	mboxp;
	uint64_t		rcr_head_index, rcr_tail_index;
	uint64_t		rcr_tail;
	uint64_t		value;
	rdc_rcr_tail_t		rcr_tail_reg;

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
	    "channel %d", vindex, ldvp->channel));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	rx_rcr_rings = hxgep->rx_rcr_rings;
	rcr_p = rx_rcr_rings->rcr_rings[vindex];
	channel = rcr_p->rdc;
	if (channel != ldvp->channel) {
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
		    "channel %d, and rcr channel %d not matched.",
		    vindex, ldvp->channel, channel));
		return (NULL);
	}

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
	    "==> hxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));

	rx_mboxp = hxgep->rx_mbox_areas_p->rxmbox_areas[channel];
	mboxp = (p_rxdma_mailbox_t)rx_mboxp->rx_mbox.kaddrp;

	(void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value);
	rcr_tail = rcr_tail_reg.bits.tail;

	if (!qlen) {
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
		    channel, qlen));
		return (NULL);
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued. (A jumbo or multi packet is counted as
	 * only one packet, and it may take up more than one completion
	 * entry.)
	 */
	qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	rcr_head_index = rcr_p->rcr_desc_rd_head_p - rcr_p->rcr_desc_first_p;
	rcr_tail_index = rcr_tail - rcr_p->rcr_tail_begin;

	if (rcr_tail_index >= rcr_head_index) {
		qlen_sw = rcr_tail_index - rcr_head_index;
	} else {
		/* rcr_tail has wrapped around */
		qlen_sw = (rcr_p->comp_size - rcr_head_index) + rcr_tail_index;
	}

	if (qlen_hw > qlen_sw) {
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
		    channel, qlen_hw, qlen_sw));
		qlen_hw = qlen_sw;
	}

	while (qlen_hw) {
#ifdef HXGE_DEBUG
		hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
		 */
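		/*
		 * Roughly, for the chaining cases handled further below:
		 * complete frames are linked onto the returned chain with
		 * b_next, while the pieces of a frame that spans several
		 * completion entries are linked together with b_cont.
		 */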
		invalid_rcr_entry = 0;
		hxge_receive_packet(hxgep,
		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont,
		    &invalid_rcr_entry);
		if (invalid_rcr_entry != 0) {
			HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
			    "Channel %d could only read 0x%x packets, "
			    "but 0x%x pending\n", channel, npkt_read, qlen_hw));
			break;
		}

		/*
		 * message chaining modes (nemo msg chaining)
		 */
		if (nmp) {
			nmp->b_next = NULL;
			if (!multi && !mp_cont) { /* frame fits a partition */
				*tail_mp = nmp;
				tail_mp = &nmp->b_next;
				nmp = NULL;
			} else if (multi && !mp_cont) {	/* first segment */
				*tail_mp = nmp;
				tail_mp = &nmp->b_cont;
			} else if (multi && mp_cont) {	/* mid of multi segs */
				*tail_mp = mp_cont;
				tail_mp = &mp_cont->b_cont;
			} else if (!multi && mp_cont) {	/* last segment */
				*tail_mp = mp_cont;
				tail_mp = &nmp->b_next;
				nmp = NULL;
			}
		}

		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "==> hxge_rx_pkts: loop: rcr channel %d "
		    "before updating: multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel, multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));

		if (!multi) {
			qlen_hw--;
			npkt_read++;
		}

		/*
		 * Update the next read entry.
		 */
		comp_rd_index = NEXT_ENTRY(comp_rd_index,
		    rcr_p->comp_wrap_mask);

		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
		    rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p);

		nrcr_read++;

		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "<== hxge_rx_pkts: (SAM, process one packet) "
		    "nrcr_read %d", nrcr_read));
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "==> hxge_rx_pkts: loop: rcr channel %d "
		    "multi %d nrcr_read %d npk read %d head_pp $%p index %d ",
		    channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));
	}

	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
	rcr_p->comp_rd_index = comp_rd_index;
	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;

	/* Adjust the mailbox queue length for a hardware bug workaround */
	mboxp->rcrstat_a.bits.qlen -= npkt_read;

	if ((hxgep->intr_timeout != rcr_p->intr_timeout) ||
	    (hxgep->intr_threshold != rcr_p->intr_threshold)) {
		rcr_p->intr_timeout = hxgep->intr_timeout;
		rcr_p->intr_threshold = hxgep->intr_threshold;
		rcr_cfg_b.value = 0x0ULL;
		if (rcr_p->intr_timeout)
			rcr_cfg_b.bits.entout = 1;
		rcr_cfg_b.bits.timeout = rcr_p->intr_timeout;
		rcr_cfg_b.bits.pthres = rcr_p->intr_threshold;
		RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B,
		    channel, rcr_cfg_b.value);
	}

	cs.bits.pktread = npkt_read;
	cs.bits.ptrread = nrcr_read;
	value = cs.value;
	cs.value &= 0xffffffffULL;
	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);

	cs.value = value & ~0xffffffffULL;
	cs.bits.pktread = 0;
	cs.bits.ptrread = 0;
	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
	    "==> hxge_rx_pkts: EXIT: rcr channel %d "
	    "head_pp $%p index %016llx ",
	    channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));

	/*
	 * Update RCR buffer pointer read and number of packets read.
	 */
	*rcrp = rcr_p;

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));

	return (head_mp);
}

#define	RCR_ENTRY_PATTERN	0x5a5a6b6b7c7c8d8dULL

/*ARGSUSED*/
void
hxge_receive_packet(p_hxge_t hxgep,
    p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
    boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont,
    uint32_t *invalid_rcr_entry)
{
	p_mblk_t		nmp = NULL;
	uint64_t		multi;
	uint8_t			channel;

	boolean_t		first_entry = B_TRUE;
	boolean_t		is_tcp_udp = B_FALSE;
	boolean_t		buffer_free = B_FALSE;
	boolean_t		error_send_up = B_FALSE;
	uint8_t			error_type;
	uint16_t		l2_len;
	uint16_t		skip_len;
	uint8_t			pktbufsz_type;
	uint64_t		rcr_entry;
	uint64_t		*pkt_buf_addr_pp;
	uint64_t		*pkt_buf_addr_p;
	uint32_t		buf_offset;
	uint32_t		bsize;
	uint32_t		msg_index;
	p_rx_rbr_ring_t		rx_rbr_p;
	p_rx_msg_t		*rx_msg_ring_p;
	p_rx_msg_t		rx_msg_p;

	uint16_t		sw_offset_bytes = 0, hdr_size = 0;
	hxge_status_t		status = HXGE_OK;
	boolean_t		is_valid = B_FALSE;
	p_hxge_rx_ring_stats_t	rdc_stats;
	uint32_t		bytes_read;

	uint64_t		pkt_type;

	channel = rcr_p->rdc;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));

	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);

	/* Verify the content of the rcr_entry for a hardware bug workaround */
	if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) {
		*invalid_rcr_entry = 1;
		HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet "
		    "Channel %d invalid RCR entry 0x%llx found, returning\n",
		    channel, (long long) rcr_entry));
		return;
	}
	*((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN;

	multi = (rcr_entry & RCR_MULTI_MASK);
	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);

	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);

	/*
	 * The hardware does not strip the CRC due to bug ID 11451, where
	 * the hardware mishandles minimum-size packets.
	 */
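	/*
	 * ETHERFCSL is the 4-byte Ethernet FCS length, so, for example, a
	 * 64-byte minimum frame as reported by the hardware yields an
	 * l2_len of 60 after the adjustment below.
	 */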
	l2_len -= ETHERFCSL;

	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
	    RCR_PKTBUFSZ_SHIFT);
#if defined(__i386)
	pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
	    RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
#else
	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
	    RCR_PKT_BUF_ADDR_SHIFT);
#endif

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
	    "error_type 0x%x pkt_type 0x%x "
	    "pktbufsz_type %d ",
	    rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len,
	    multi, error_type, pkt_type, pktbufsz_type));

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
	    "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
	    rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type, pkt_type));

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
	    "full pkt_buf_addr_pp $%p l2_len %d",
	    rcr_entry, pkt_buf_addr_pp, l2_len));

	/* get the stats ptr */
	rdc_stats = rcr_p->rdc_stats;

	if (!l2_len) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_receive_packet: failed: l2 length is 0."));
		return;
	}

	/* shift 6 bits to get the full io address */
#if defined(__i386)
	pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
#else
	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
#endif
	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
	    "full pkt_buf_addr_pp $%p l2_len %d",
	    rcr_entry, pkt_buf_addr_pp, l2_len));

	rx_rbr_p = rcr_p->rx_rbr_p;
	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;

	if (first_entry) {
		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
		    RXDMA_HDR_SIZE_DEFAULT);

		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "==> hxge_receive_packet: first entry 0x%016llx "
		    "pkt_buf_addr_pp $%p l2_len %d hdr %d",
		    rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size));
	}

	MUTEX_ENTER(&rcr_p->lock);
	MUTEX_ENTER(&rx_rbr_p->lock);

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
	    "full pkt_buf_addr_pp $%p l2_len %d",
	    rcr_entry, pkt_buf_addr_pp, l2_len));

	/*
	 * Packet buffer address in the completion entry points to the starting
	 * buffer address (offset 0). Use the starting buffer address to locate
	 * the corresponding kernel address.
	 */
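	/*
	 * hxge_rxbuf_pp_to_vp(), defined earlier in this file, searches the
	 * sorted chunk table for the chunk containing pkt_buf_addr_pp and
	 * returns the matching kernel virtual address, the offset into the
	 * block and the rx_msg_t index for that block.
	 */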
	status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p,
	    pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
	    &buf_offset, &msg_index);

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
	    "full pkt_buf_addr_pp $%p l2_len %d",
	    rcr_entry, pkt_buf_addr_pp, l2_len));

	if (status != HXGE_OK) {
		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_receive_packet: found vaddr failed %d", status));
		return;
	}

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
	    "full pkt_buf_addr_pp $%p l2_len %d",
	    rcr_entry, pkt_buf_addr_pp, l2_len));
	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
	    "full pkt_buf_addr_pp $%p l2_len %d",
	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));

	if (msg_index >= rx_rbr_p->tnblocks) {
		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_receive_packet: FATAL msg_index (%d) "
		    "should be smaller than tnblocks (%d)\n",
		    msg_index, rx_rbr_p->tnblocks));
		return;
	}

	rx_msg_p = rx_msg_ring_p[msg_index];

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
	    "full pkt_buf_addr_pp $%p l2_len %d",
	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));

	switch (pktbufsz_type) {
	case RCR_PKTBUFSZ_0:
		bsize = rx_rbr_p->pkt_buf_size0_bytes;
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_receive_packet: 0 buf %d", bsize));
		break;
	case RCR_PKTBUFSZ_1:
		bsize = rx_rbr_p->pkt_buf_size1_bytes;
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_receive_packet: 1 buf %d", bsize));
		break;
	case RCR_PKTBUFSZ_2:
		bsize = rx_rbr_p->pkt_buf_size2_bytes;
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "==> hxge_receive_packet: 2 buf %d", bsize));
		break;
	case RCR_SINGLE_BLOCK:
		bsize = rx_msg_p->block_size;
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_receive_packet: single %d", bsize));
		break;
	default:
		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
		return;
	}

	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
	    (buf_offset + sw_offset_bytes), (hdr_size + l2_len),
	    DDI_DMA_SYNC_FORCPU);

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_receive_packet: after first dump:usage count"));

	if (rx_msg_p->cur_usage_cnt == 0) {
		if (rx_rbr_p->rbr_use_bcopy) {
			atomic_inc_32(&rx_rbr_p->rbr_consumed);
			if (rx_rbr_p->rbr_consumed <
			    rx_rbr_p->rbr_threshold_hi) {
				if (rx_rbr_p->rbr_threshold_lo == 0 ||
				    ((rx_rbr_p->rbr_consumed >=
				    rx_rbr_p->rbr_threshold_lo) &&
				    (rx_rbr_p->rbr_bufsize_type >=
				    pktbufsz_type))) {
					rx_msg_p->rx_use_bcopy = B_TRUE;
				}
			} else {
				rx_msg_p->rx_use_bcopy = B_TRUE;
			}
		}
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_receive_packet: buf %d (new block) ", bsize));

		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
		rx_msg_p->pkt_buf_size = bsize;
		rx_msg_p->cur_usage_cnt = 1;
		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
			    "==> hxge_receive_packet: buf %d (single block) ",
			    bsize));
			/*
			 * Buffer can be reused once the free function is
			 * called.
			 */
			rx_msg_p->max_usage_cnt = 1;
			buffer_free = B_TRUE;
		} else {
			rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
			if (rx_msg_p->max_usage_cnt == 1) {
				buffer_free = B_TRUE;
			}
		}
	} else {
		rx_msg_p->cur_usage_cnt++;
		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
			buffer_free = B_TRUE;
		}
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
	    msg_index, l2_len,
	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));

	if (error_type) {
		rdc_stats->ierrors++;
		/* Update error stats */
		rdc_stats->errlog.compl_err_type = error_type;
		HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR);

		if (error_type & RCR_CTRL_FIFO_DED) {
			rdc_stats->ctrl_fifo_ecc_err++;
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    " hxge_receive_packet: "
			    " channel %d RCR ctrl_fifo_ded error", channel));
		} else if (error_type & RCR_DATA_FIFO_DED) {
			rdc_stats->data_fifo_ecc_err++;
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    " hxge_receive_packet: channel %d"
			    " RCR data_fifo_ded error", channel));
		}

		/*
		 * Update and repost buffer block if max usage count is
		 * reached.
		 */
		if (error_send_up == B_FALSE) {
			atomic_inc_32(&rx_msg_p->ref_cnt);
			if (buffer_free == B_TRUE) {
				rx_msg_p->free = B_TRUE;
			}

			MUTEX_EXIT(&rx_rbr_p->lock);
			MUTEX_EXIT(&rcr_p->lock);
			hxge_freeb(rx_msg_p);
			return;
		}
	}

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_receive_packet: DMA sync second "));

	bytes_read = rcr_p->rcvd_pkt_bytes;
	skip_len = sw_offset_bytes + hdr_size;
	if (!rx_msg_p->rx_use_bcopy) {
		/*
		 * For loaned up buffers, the driver reference count
		 * will be incremented first and then the free state.
		 */
		if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
			if (first_entry) {
				nmp->b_rptr = &nmp->b_rptr[skip_len];
				if (l2_len < bsize - skip_len) {
					nmp->b_wptr = &nmp->b_rptr[l2_len];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize
					    - skip_len];
				}
			} else {
				if (l2_len - bytes_read < bsize) {
					nmp->b_wptr =
					    &nmp->b_rptr[l2_len - bytes_read];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize];
				}
			}
		}
	} else {
		if (first_entry) {
			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
			    l2_len < bsize - skip_len ?
			    l2_len : bsize - skip_len);
		} else {
			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset,
			    l2_len - bytes_read < bsize ?
			    l2_len - bytes_read : bsize);
		}
	}

	if (nmp != NULL) {
		if (first_entry)
			bytes_read = nmp->b_wptr - nmp->b_rptr;
		else
			bytes_read += nmp->b_wptr - nmp->b_rptr;

		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "==> hxge_receive_packet after dupb: "
		    "rbr consumed %d "
		    "pktbufsz_type %d "
		    "nmp $%p rptr $%p wptr $%p "
		    "buf_offset %d bsize %d l2_len %d skip_len %d",
		    rx_rbr_p->rbr_consumed,
		    pktbufsz_type,
		    nmp, nmp->b_rptr, nmp->b_wptr,
		    buf_offset, bsize, l2_len, skip_len));
	} else {
		cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");

		atomic_inc_32(&rx_msg_p->ref_cnt);
		if (buffer_free == B_TRUE) {
			rx_msg_p->free = B_TRUE;
		}

		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
		hxge_freeb(rx_msg_p);
		return;
	}

	if (buffer_free == B_TRUE) {
		rx_msg_p->free = B_TRUE;
	}

	/*
	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
	 * packet is not fragmented and no error bit is set, then the L4
	 * checksum is OK.
	 */
	is_valid = (nmp != NULL);
	if (first_entry) {
		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
		if (l2_len > (STD_FRAME_SIZE - ETHERFCSL))
			rdc_stats->jumbo_pkts++;
		rdc_stats->ibytes += skip_len + l2_len < bsize ?
		    l2_len : bsize;
	} else {
		/*
		 * Add the current portion of the packet to the kstats.
		 * The current portion of the packet is calculated from the
		 * length of the packet and the previously received portion.
		 */
		rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ?
		    l2_len - rcr_p->rcvd_pkt_bytes : bsize;
	}

	rcr_p->rcvd_pkt_bytes = bytes_read;

	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
		atomic_inc_32(&rx_msg_p->ref_cnt);
		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
		hxge_freeb(rx_msg_p);
	} else {
		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
	}

	if (is_valid) {
		nmp->b_cont = NULL;
		if (first_entry) {
			*mp = nmp;
			*mp_cont = NULL;
		} else {
			*mp_cont = nmp;
		}
	}

	/*
	 * Update stats and hardware checksumming.
	 */
	if (is_valid && !multi) {
		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
		    pkt_type == RCR_PKT_IS_UDP) ?
B_TRUE : B_FALSE); 1927 1928 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_receive_packet: " 1929 "is_valid 0x%x multi %d pkt %d d error %d", 1930 is_valid, multi, is_tcp_udp, error_type)); 1931 1932 if (is_tcp_udp && !error_type) { 1933 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 1934 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 1935 1936 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1937 "==> hxge_receive_packet: Full tcp/udp cksum " 1938 "is_valid 0x%x multi %d pkt %d " 1939 "error %d", 1940 is_valid, multi, is_tcp_udp, error_type)); 1941 } 1942 } 1943 1944 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1945 "==> hxge_receive_packet: *mp 0x%016llx", *mp)); 1946 1947 *multi_p = (multi == RCR_MULTI_MASK); 1948 1949 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: " 1950 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 1951 *multi_p, nmp, *mp, *mp_cont)); 1952 } 1953 1954 /*ARGSUSED*/ 1955 static hxge_status_t 1956 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp, 1957 rdc_stat_t cs) 1958 { 1959 p_hxge_rx_ring_stats_t rdc_stats; 1960 hpi_handle_t handle; 1961 boolean_t rxchan_fatal = B_FALSE; 1962 uint8_t channel; 1963 hxge_status_t status = HXGE_OK; 1964 uint64_t cs_val; 1965 1966 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts")); 1967 1968 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1969 channel = ldvp->channel; 1970 1971 /* Clear the interrupts */ 1972 cs.bits.pktread = 0; 1973 cs.bits.ptrread = 0; 1974 cs_val = cs.value & RDC_STAT_WR1C; 1975 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val); 1976 1977 rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index]; 1978 1979 if (cs.bits.rbr_cpl_to) { 1980 rdc_stats->rbr_tmout++; 1981 HXGE_FM_REPORT_ERROR(hxgep, channel, 1982 HXGE_FM_EREPORT_RDMC_RBR_CPL_TO); 1983 rxchan_fatal = B_TRUE; 1984 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1985 "==> hxge_rx_err_evnts(channel %d): " 1986 "fatal error: rx_rbr_timeout", channel)); 1987 } 1988 1989 if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) { 1990 (void) hpi_rxdma_ring_perr_stat_get(handle, 1991 &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par); 1992 } 1993 1994 if (cs.bits.rcr_shadow_par_err) { 1995 rdc_stats->rcr_sha_par++; 1996 HXGE_FM_REPORT_ERROR(hxgep, channel, 1997 HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 1998 rxchan_fatal = B_TRUE; 1999 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2000 "==> hxge_rx_err_evnts(channel %d): " 2001 "fatal error: rcr_shadow_par_err", channel)); 2002 } 2003 2004 if (cs.bits.rbr_prefetch_par_err) { 2005 rdc_stats->rbr_pre_par++; 2006 HXGE_FM_REPORT_ERROR(hxgep, channel, 2007 HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2008 rxchan_fatal = B_TRUE; 2009 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2010 "==> hxge_rx_err_evnts(channel %d): " 2011 "fatal error: rbr_prefetch_par_err", channel)); 2012 } 2013 2014 if (cs.bits.rbr_pre_empty) { 2015 rdc_stats->rbr_pre_empty++; 2016 HXGE_FM_REPORT_ERROR(hxgep, channel, 2017 HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY); 2018 rxchan_fatal = B_TRUE; 2019 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2020 "==> hxge_rx_err_evnts(channel %d): " 2021 "fatal error: rbr_pre_empty", channel)); 2022 } 2023 2024 if (cs.bits.peu_resp_err) { 2025 rdc_stats->peu_resp_err++; 2026 HXGE_FM_REPORT_ERROR(hxgep, channel, 2027 HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR); 2028 rxchan_fatal = B_TRUE; 2029 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2030 "==> hxge_rx_err_evnts(channel %d): " 2031 "fatal error: peu_resp_err", channel)); 2032 } 2033 2034 if (cs.bits.rcr_thres) { 2035 rdc_stats->rcr_thres++; 2036 } 2037 2038 if (cs.bits.rcr_to) { 2039 rdc_stats->rcr_to++; 2040 } 2041 2042 if 
(cs.bits.rcr_shadow_full) { 2043 rdc_stats->rcr_shadow_full++; 2044 HXGE_FM_REPORT_ERROR(hxgep, channel, 2045 HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL); 2046 rxchan_fatal = B_TRUE; 2047 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2048 "==> hxge_rx_err_evnts(channel %d): " 2049 "fatal error: rcr_shadow_full", channel)); 2050 } 2051 2052 if (cs.bits.rcr_full) { 2053 rdc_stats->rcrfull++; 2054 HXGE_FM_REPORT_ERROR(hxgep, channel, 2055 HXGE_FM_EREPORT_RDMC_RCRFULL); 2056 rxchan_fatal = B_TRUE; 2057 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2058 "==> hxge_rx_err_evnts(channel %d): " 2059 "fatal error: rcrfull error", channel)); 2060 } 2061 2062 if (cs.bits.rbr_empty) { 2063 rdc_stats->rbr_empty++; 2064 if (rdc_stats->rbr_empty == 1) 2065 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2066 "==> hxge_rx_err_evnts(channel %d): " 2067 "rbr empty error", channel)); 2068 /* 2069 * DMA channel is disabled due to rbr_empty bit is set 2070 * although it is not fatal. Enable the DMA channel here 2071 * to work-around the hardware bug. 2072 */ 2073 (void) hpi_rxdma_cfg_rdc_enable(handle, channel); 2074 } 2075 2076 if (cs.bits.rbr_full) { 2077 rdc_stats->rbrfull++; 2078 HXGE_FM_REPORT_ERROR(hxgep, channel, 2079 HXGE_FM_EREPORT_RDMC_RBRFULL); 2080 rxchan_fatal = B_TRUE; 2081 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2082 "==> hxge_rx_err_evnts(channel %d): " 2083 "fatal error: rbr_full error", channel)); 2084 } 2085 2086 if (rxchan_fatal) { 2087 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2088 " hxge_rx_err_evnts: fatal error on Channel #%d\n", 2089 channel)); 2090 status = hxge_rxdma_fatal_err_recover(hxgep, channel); 2091 if (status == HXGE_OK) { 2092 FM_SERVICE_RESTORED(hxgep); 2093 } 2094 } 2095 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts")); 2096 2097 return (status); 2098 } 2099 2100 static hxge_status_t 2101 hxge_map_rxdma(p_hxge_t hxgep) 2102 { 2103 int i, ndmas; 2104 uint16_t channel; 2105 p_rx_rbr_rings_t rx_rbr_rings; 2106 p_rx_rbr_ring_t *rbr_rings; 2107 p_rx_rcr_rings_t rx_rcr_rings; 2108 p_rx_rcr_ring_t *rcr_rings; 2109 p_rx_mbox_areas_t rx_mbox_areas_p; 2110 p_rx_mbox_t *rx_mbox_p; 2111 p_hxge_dma_pool_t dma_buf_poolp; 2112 p_hxge_dma_pool_t dma_cntl_poolp; 2113 p_hxge_dma_common_t *dma_buf_p; 2114 p_hxge_dma_common_t *dma_cntl_p; 2115 uint32_t *num_chunks; 2116 hxge_status_t status = HXGE_OK; 2117 2118 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma")); 2119 2120 dma_buf_poolp = hxgep->rx_buf_pool_p; 2121 dma_cntl_poolp = hxgep->rx_cntl_pool_p; 2122 2123 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2124 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2125 "<== hxge_map_rxdma: buf not allocated")); 2126 return (HXGE_ERROR); 2127 } 2128 2129 ndmas = dma_buf_poolp->ndmas; 2130 if (!ndmas) { 2131 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2132 "<== hxge_map_rxdma: no dma allocated")); 2133 return (HXGE_ERROR); 2134 } 2135 2136 num_chunks = dma_buf_poolp->num_chunks; 2137 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2138 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2139 rx_rbr_rings = (p_rx_rbr_rings_t) 2140 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2141 rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC( 2142 sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2143 2144 rx_rcr_rings = (p_rx_rcr_rings_t) 2145 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2146 rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC( 2147 sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2148 2149 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2150 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2151 rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC( 2152 sizeof (p_rx_mbox_t) * 
ndmas, KM_SLEEP); 2153 2154 /* 2155 * Timeout should be set based on the system clock divider. 2156 * The following timeout value of 1 assumes that the 2157 * granularity (1000) is 3 microseconds running at 300MHz. 2158 */ 2159 2160 hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2161 hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2162 2163 /* 2164 * Map descriptors from the buffer polls for each dam channel. 2165 */ 2166 for (i = 0; i < ndmas; i++) { 2167 /* 2168 * Set up and prepare buffer blocks, descriptors and mailbox. 2169 */ 2170 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2171 status = hxge_map_rxdma_channel(hxgep, channel, 2172 (p_hxge_dma_common_t *)&dma_buf_p[i], 2173 (p_rx_rbr_ring_t *)&rbr_rings[i], 2174 num_chunks[i], (p_hxge_dma_common_t *)&dma_cntl_p[i], 2175 (p_rx_rcr_ring_t *)&rcr_rings[i], 2176 (p_rx_mbox_t *)&rx_mbox_p[i]); 2177 if (status != HXGE_OK) { 2178 goto hxge_map_rxdma_fail1; 2179 } 2180 rbr_rings[i]->index = (uint16_t)i; 2181 rcr_rings[i]->index = (uint16_t)i; 2182 rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i]; 2183 } 2184 2185 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2186 rx_rbr_rings->rbr_rings = rbr_rings; 2187 hxgep->rx_rbr_rings = rx_rbr_rings; 2188 rx_rcr_rings->rcr_rings = rcr_rings; 2189 hxgep->rx_rcr_rings = rx_rcr_rings; 2190 2191 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2192 hxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2193 2194 goto hxge_map_rxdma_exit; 2195 2196 hxge_map_rxdma_fail1: 2197 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2198 "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)", 2199 status, channel, i)); 2200 i--; 2201 for (; i >= 0; i--) { 2202 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2203 hxge_unmap_rxdma_channel(hxgep, channel, 2204 rbr_rings[i], rcr_rings[i], rx_mbox_p[i]); 2205 } 2206 2207 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2208 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2209 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2210 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2211 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2212 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2213 2214 hxge_map_rxdma_exit: 2215 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2216 "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 2217 2218 return (status); 2219 } 2220 2221 static void 2222 hxge_unmap_rxdma(p_hxge_t hxgep) 2223 { 2224 int i, ndmas; 2225 uint16_t channel; 2226 p_rx_rbr_rings_t rx_rbr_rings; 2227 p_rx_rbr_ring_t *rbr_rings; 2228 p_rx_rcr_rings_t rx_rcr_rings; 2229 p_rx_rcr_ring_t *rcr_rings; 2230 p_rx_mbox_areas_t rx_mbox_areas_p; 2231 p_rx_mbox_t *rx_mbox_p; 2232 p_hxge_dma_pool_t dma_buf_poolp; 2233 p_hxge_dma_pool_t dma_cntl_poolp; 2234 p_hxge_dma_common_t *dma_buf_p; 2235 2236 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma")); 2237 2238 dma_buf_poolp = hxgep->rx_buf_pool_p; 2239 dma_cntl_poolp = hxgep->rx_cntl_pool_p; 2240 2241 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2242 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2243 "<== hxge_unmap_rxdma: NULL buf pointers")); 2244 return; 2245 } 2246 2247 rx_rbr_rings = hxgep->rx_rbr_rings; 2248 rx_rcr_rings = hxgep->rx_rcr_rings; 2249 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2250 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2251 "<== hxge_unmap_rxdma: NULL ring pointers")); 2252 return; 2253 } 2254 2255 ndmas = rx_rbr_rings->ndmas; 2256 if (!ndmas) { 2257 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2258 "<== hxge_unmap_rxdma: no channel")); 2259 return; 
2260 } 2261 2262 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2263 "==> hxge_unmap_rxdma (ndmas %d)", ndmas)); 2264 2265 rbr_rings = rx_rbr_rings->rbr_rings; 2266 rcr_rings = rx_rcr_rings->rcr_rings; 2267 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 2268 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 2269 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2270 2271 for (i = 0; i < ndmas; i++) { 2272 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2273 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2274 "==> hxge_unmap_rxdma (ndmas %d) channel %d", 2275 ndmas, channel)); 2276 (void) hxge_unmap_rxdma_channel(hxgep, channel, 2277 (p_rx_rbr_ring_t)rbr_rings[i], 2278 (p_rx_rcr_ring_t)rcr_rings[i], 2279 (p_rx_mbox_t)rx_mbox_p[i]); 2280 } 2281 2282 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2283 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2284 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2285 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2286 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2287 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2288 2289 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma")); 2290 } 2291 2292 hxge_status_t 2293 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2294 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 2295 uint32_t num_chunks, p_hxge_dma_common_t *dma_cntl_p, 2296 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2297 { 2298 int status = HXGE_OK; 2299 2300 /* 2301 * Set up and prepare buffer blocks, descriptors and mailbox. 2302 */ 2303 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2304 "==> hxge_map_rxdma_channel (channel %d)", channel)); 2305 2306 /* 2307 * Receive buffer blocks 2308 */ 2309 status = hxge_map_rxdma_channel_buf_ring(hxgep, channel, 2310 dma_buf_p, rbr_p, num_chunks); 2311 if (status != HXGE_OK) { 2312 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2313 "==> hxge_map_rxdma_channel (channel %d): " 2314 "map buffer failed 0x%x", channel, status)); 2315 goto hxge_map_rxdma_channel_exit; 2316 } 2317 2318 /* 2319 * Receive block ring, completion ring and mailbox. 
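 * In broad terms: the RBR holds the addresses of the receive buffer
 * blocks that software posts to the hardware, the RCR is where the
 * hardware writes one completion entry per received packet (or per
 * segment of a multi-buffer packet), and the mailbox is a small
 * per-channel status area (rxdma_mailbox_t) whose DMA address is
 * programmed into the cfig1/cfig2 registers.  All three are carved
 * out of the control DMA area (dma_cntl_p) by
 * hxge_map_rxdma_channel_cfg_ring() below.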
2320 */ 2321 status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel, 2322 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 2323 if (status != HXGE_OK) { 2324 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2325 "==> hxge_map_rxdma_channel (channel %d): " 2326 "map config failed 0x%x", channel, status)); 2327 goto hxge_map_rxdma_channel_fail2; 2328 } 2329 goto hxge_map_rxdma_channel_exit; 2330 2331 hxge_map_rxdma_channel_fail3: 2332 /* Free rbr, rcr */ 2333 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2334 "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)", 2335 status, channel)); 2336 hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p); 2337 2338 hxge_map_rxdma_channel_fail2: 2339 /* Free buffer blocks */ 2340 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2341 "==> hxge_map_rxdma_channel: free rx buffers" 2342 "(hxgep 0x%x status 0x%x channel %d)", 2343 hxgep, status, channel)); 2344 hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p); 2345 2346 status = HXGE_ERROR; 2347 2348 hxge_map_rxdma_channel_exit: 2349 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2350 "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)", 2351 hxgep, status, channel)); 2352 2353 return (status); 2354 } 2355 2356 /*ARGSUSED*/ 2357 static void 2358 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2359 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2360 { 2361 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2362 "==> hxge_unmap_rxdma_channel (channel %d)", channel)); 2363 2364 /* 2365 * unmap receive block ring, completion ring and mailbox. 2366 */ 2367 (void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p); 2368 2369 /* unmap buffer blocks */ 2370 (void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p); 2371 2372 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel")); 2373 } 2374 2375 /*ARGSUSED*/ 2376 static hxge_status_t 2377 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel, 2378 p_hxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 2379 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2380 { 2381 p_rx_rbr_ring_t rbrp; 2382 p_rx_rcr_ring_t rcrp; 2383 p_rx_mbox_t mboxp; 2384 p_hxge_dma_common_t cntl_dmap; 2385 p_hxge_dma_common_t dmap; 2386 p_rx_msg_t *rx_msg_ring; 2387 p_rx_msg_t rx_msg_p; 2388 rdc_rbr_cfg_a_t *rcfga_p; 2389 rdc_rbr_cfg_b_t *rcfgb_p; 2390 rdc_rcr_cfg_a_t *cfga_p; 2391 rdc_rcr_cfg_b_t *cfgb_p; 2392 rdc_rx_cfg1_t *cfig1_p; 2393 rdc_rx_cfg2_t *cfig2_p; 2394 rdc_rbr_kick_t *kick_p; 2395 uint32_t dmaaddrp; 2396 uint32_t *rbr_vaddrp; 2397 uint32_t bkaddr; 2398 hxge_status_t status = HXGE_OK; 2399 int i; 2400 uint32_t hxge_port_rcr_size; 2401 2402 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2403 "==> hxge_map_rxdma_channel_cfg_ring")); 2404 2405 cntl_dmap = *dma_cntl_p; 2406 2407 /* Map in the receive block ring */ 2408 rbrp = *rbr_p; 2409 dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc; 2410 hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 2411 2412 /* 2413 * Zero out buffer block ring descriptors. 
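 *
 * Each descriptor is just a 32-bit block address (the buffer's DMA
 * address shifted right by RBR_BKADDR_SHIFT); the loop below fills
 * them in from rx_msg_ring[], so zeroing first only ensures that no
 * stale addresses are left in the ring.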
2414 */ 2415 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2416 2417 rcfga_p = &(rbrp->rbr_cfga); 2418 rcfgb_p = &(rbrp->rbr_cfgb); 2419 kick_p = &(rbrp->rbr_kick); 2420 rcfga_p->value = 0; 2421 rcfgb_p->value = 0; 2422 kick_p->value = 0; 2423 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 2424 rcfga_p->value = (rbrp->rbr_addr & 2425 (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK)); 2426 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 2427 2428 /* XXXX: how to choose packet buffer sizes */ 2429 rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0; 2430 rcfgb_p->bits.vld0 = 1; 2431 rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1; 2432 rcfgb_p->bits.vld1 = 1; 2433 rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2; 2434 rcfgb_p->bits.vld2 = 1; 2435 rcfgb_p->bits.bksize = hxgep->rx_bksize_code; 2436 2437 /* 2438 * For each buffer block, enter receive block address to the ring. 2439 */ 2440 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 2441 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 2442 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2443 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2444 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 2445 2446 rx_msg_ring = rbrp->rx_msg_ring; 2447 for (i = 0; i < rbrp->tnblocks; i++) { 2448 rx_msg_p = rx_msg_ring[i]; 2449 rx_msg_p->hxgep = hxgep; 2450 rx_msg_p->rx_rbr_p = rbrp; 2451 bkaddr = (uint32_t) 2452 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2453 RBR_BKADDR_SHIFT)); 2454 rx_msg_p->free = B_FALSE; 2455 rx_msg_p->max_usage_cnt = 0xbaddcafe; 2456 2457 *rbr_vaddrp++ = bkaddr; 2458 } 2459 2460 kick_p->bits.bkadd = rbrp->rbb_max; 2461 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 2462 2463 rbrp->rbr_rd_index = 0; 2464 2465 rbrp->rbr_consumed = 0; 2466 rbrp->rbr_use_bcopy = B_TRUE; 2467 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 2468 2469 /* 2470 * Do bcopy on packets greater than bcopy size once the lo threshold is 2471 * reached. This lo threshold should be less than the hi threshold. 2472 * 2473 * Do bcopy on every packet once the hi threshold is reached. 
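 *
 * Both thresholds are expressed as a fraction of rbb_max:
 *
 *	threshold = rbb_max * hxge_rx_threshold_X / HXGE_RX_BCOPY_SCALE
 *
 * Illustrative numbers only (this assumes HXGE_RX_BCOPY_SCALE is 8):
 * with rbb_max = 2048 and hxge_rx_threshold_hi = HXGE_RX_COPY_6,
 * rbr_threshold_hi = 2048 * 6 / 8 = 1536, i.e. copy-everything mode
 * kicks in once roughly 3/4 of the buffer blocks are outstanding
 * (rbr_consumed past the threshold on the receive path).
 * HXGE_RX_COPY_NONE disables bcopy entirely (threshold = rbb_max) and
 * HXGE_RX_COPY_ALL forces it for every packet (threshold = 0).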
2474 */ 2475 if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) { 2476 /* default it to use hi */ 2477 hxge_rx_threshold_lo = hxge_rx_threshold_hi; 2478 } 2479 if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) { 2480 hxge_rx_buf_size_type = HXGE_RBR_TYPE2; 2481 } 2482 rbrp->rbr_bufsize_type = hxge_rx_buf_size_type; 2483 2484 switch (hxge_rx_threshold_hi) { 2485 default: 2486 case HXGE_RX_COPY_NONE: 2487 /* Do not do bcopy at all */ 2488 rbrp->rbr_use_bcopy = B_FALSE; 2489 rbrp->rbr_threshold_hi = rbrp->rbb_max; 2490 break; 2491 2492 case HXGE_RX_COPY_1: 2493 case HXGE_RX_COPY_2: 2494 case HXGE_RX_COPY_3: 2495 case HXGE_RX_COPY_4: 2496 case HXGE_RX_COPY_5: 2497 case HXGE_RX_COPY_6: 2498 case HXGE_RX_COPY_7: 2499 rbrp->rbr_threshold_hi = 2500 rbrp->rbb_max * (hxge_rx_threshold_hi) / 2501 HXGE_RX_BCOPY_SCALE; 2502 break; 2503 2504 case HXGE_RX_COPY_ALL: 2505 rbrp->rbr_threshold_hi = 0; 2506 break; 2507 } 2508 2509 switch (hxge_rx_threshold_lo) { 2510 default: 2511 case HXGE_RX_COPY_NONE: 2512 /* Do not do bcopy at all */ 2513 if (rbrp->rbr_use_bcopy) { 2514 rbrp->rbr_use_bcopy = B_FALSE; 2515 } 2516 rbrp->rbr_threshold_lo = rbrp->rbb_max; 2517 break; 2518 2519 case HXGE_RX_COPY_1: 2520 case HXGE_RX_COPY_2: 2521 case HXGE_RX_COPY_3: 2522 case HXGE_RX_COPY_4: 2523 case HXGE_RX_COPY_5: 2524 case HXGE_RX_COPY_6: 2525 case HXGE_RX_COPY_7: 2526 rbrp->rbr_threshold_lo = 2527 rbrp->rbb_max * (hxge_rx_threshold_lo) / 2528 HXGE_RX_BCOPY_SCALE; 2529 break; 2530 2531 case HXGE_RX_COPY_ALL: 2532 rbrp->rbr_threshold_lo = 0; 2533 break; 2534 } 2535 2536 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2537 "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d " 2538 "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d " 2539 "rbb_threshold_lo %d", 2540 dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type, 2541 rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo)); 2542 2543 /* Map in the receive completion ring */ 2544 rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 2545 rcrp->rdc = dma_channel; 2546 2547 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 2548 rcrp->comp_size = hxge_port_rcr_size; 2549 rcrp->comp_wrap_mask = hxge_port_rcr_size - 1; 2550 2551 rcrp->max_receive_pkts = hxge_max_rx_pkts; 2552 2553 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 2554 hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 2555 sizeof (rcr_entry_t)); 2556 rcrp->comp_rd_index = 0; 2557 rcrp->comp_wt_index = 0; 2558 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 2559 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 2560 #if defined(__i386) 2561 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2562 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2563 #else 2564 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2565 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2566 #endif 2567 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 2568 (hxge_port_rcr_size - 1); 2569 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 2570 (hxge_port_rcr_size - 1); 2571 2572 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 2573 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 2574 2575 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2576 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2577 "rbr_vaddrp $%p rcr_desc_rd_head_p $%p " 2578 "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p " 2579 "rcr_desc_rd_last_pp $%p ", 2580 dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p, 2581 rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p, 2582 rcrp->rcr_desc_last_pp)); 2583 2584 /* 2585 * Zero out buffer block ring descriptors. 
2586 */ 2587 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2588 rcrp->intr_timeout = hxgep->intr_timeout; 2589 rcrp->intr_threshold = hxgep->intr_threshold; 2590 rcrp->full_hdr_flag = B_FALSE; 2591 rcrp->sw_priv_hdr_len = 0; 2592 2593 cfga_p = &(rcrp->rcr_cfga); 2594 cfgb_p = &(rcrp->rcr_cfgb); 2595 cfga_p->value = 0; 2596 cfgb_p->value = 0; 2597 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 2598 2599 cfga_p->value = (rcrp->rcr_addr & 2600 (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK)); 2601 2602 cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF); 2603 2604 /* 2605 * Timeout should be set based on the system clock divider. The 2606 * following timeout value of 1 assumes that the granularity (1000) is 2607 * 3 microseconds running at 300MHz. 2608 */ 2609 cfgb_p->bits.pthres = rcrp->intr_threshold; 2610 cfgb_p->bits.timeout = rcrp->intr_timeout; 2611 cfgb_p->bits.entout = 1; 2612 2613 /* Map in the mailbox */ 2614 mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 2615 dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox; 2616 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 2617 cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1; 2618 cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2; 2619 cfig1_p->value = cfig2_p->value = 0; 2620 2621 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 2622 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2623 "==> hxge_map_rxdma_channel_cfg_ring: " 2624 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 2625 dma_channel, cfig1_p->value, cfig2_p->value, 2626 mboxp->mbox_addr)); 2627 2628 dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff); 2629 cfig1_p->bits.mbaddr_h = dmaaddrp; 2630 2631 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 2632 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 2633 RXDMA_CFIG2_MBADDR_L_MASK); 2634 2635 cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 2636 2637 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2638 "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p " 2639 "cfg1 0x%016llx cfig2 0x%016llx", 2640 dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value)); 2641 2642 cfig2_p->bits.full_hdr = rcrp->full_hdr_flag; 2643 cfig2_p->bits.offset = rcrp->sw_priv_hdr_len; 2644 2645 rbrp->rx_rcr_p = rcrp; 2646 rcrp->rx_rbr_p = rbrp; 2647 *rcr_p = rcrp; 2648 *rx_mbox_p = mboxp; 2649 2650 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2651 "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 2652 return (status); 2653 } 2654 2655 /*ARGSUSED*/ 2656 static void 2657 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 2658 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2659 { 2660 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2661 "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc)); 2662 2663 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 2664 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 2665 2666 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2667 "<== hxge_unmap_rxdma_channel_cfg_ring")); 2668 } 2669 2670 static hxge_status_t 2671 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel, 2672 p_hxge_dma_common_t *dma_buf_p, 2673 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 2674 { 2675 p_rx_rbr_ring_t rbrp; 2676 p_hxge_dma_common_t dma_bufp, tmp_bufp; 2677 p_rx_msg_t *rx_msg_ring; 2678 p_rx_msg_t rx_msg_p; 2679 p_mblk_t mblk_p; 2680 2681 rxring_info_t *ring_info; 2682 hxge_status_t status = HXGE_OK; 2683 int i, j, index; 2684 uint32_t size, bsize, nblocks, nmsgs; 2685 2686 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2687 "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel)); 
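/*
 * Overview (descriptive only): this routine walks the num_chunks
 * buffer chunks handed in through dma_buf_p, allocates one rx_msg_t
 * per block with hxge_allocb(), and records the chunk layout in
 * ring_info so a completion's buffer address can later be mapped back
 * to its rx_msg_t (see hxge_rxbuf_index_info_init() at the end).  As
 * an illustrative example, 4 chunks of 512 blocks each would give
 * nmsgs = 2048, which becomes both tnblocks and rbb_max for the ring.
 */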
2688 2689 dma_bufp = tmp_bufp = *dma_buf_p; 2690 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2691 " hxge_map_rxdma_channel_buf_ring: channel %d to map %d " 2692 "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp)); 2693 2694 nmsgs = 0; 2695 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2696 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2697 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2698 "bufp 0x%016llx nblocks %d nmsgs %d", 2699 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2700 nmsgs += tmp_bufp->nblocks; 2701 } 2702 if (!nmsgs) { 2703 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2704 "<== hxge_map_rxdma_channel_buf_ring: channel %d " 2705 "no msg blocks", channel)); 2706 status = HXGE_ERROR; 2707 goto hxge_map_rxdma_channel_buf_ring_exit; 2708 } 2709 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 2710 2711 size = nmsgs * sizeof (p_rx_msg_t); 2712 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2713 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 2714 KM_SLEEP); 2715 2716 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 2717 (void *) hxgep->interrupt_cookie); 2718 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 2719 (void *) hxgep->interrupt_cookie); 2720 rbrp->rdc = channel; 2721 rbrp->num_blocks = num_chunks; 2722 rbrp->tnblocks = nmsgs; 2723 rbrp->rbb_max = nmsgs; 2724 rbrp->rbr_max_size = nmsgs; 2725 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 2726 2727 rbrp->pages_to_post = 0; 2728 rbrp->pages_to_skip = 20; 2729 rbrp->pages_to_post_threshold = rbrp->rbb_max - rbrp->pages_to_skip / 2; 2730 2731 /* 2732 * Buffer sizes suggested by NIU architect. 256, 512 and 2K. 2733 */ 2734 2735 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 2736 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 2737 rbrp->hpi_pkt_buf_size0 = SIZE_256B; 2738 2739 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 2740 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 2741 rbrp->hpi_pkt_buf_size1 = SIZE_1KB; 2742 2743 rbrp->block_size = hxgep->rx_default_block_size; 2744 2745 if (!hxgep->param_arr[param_accept_jumbo].value) { 2746 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 2747 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 2748 rbrp->hpi_pkt_buf_size2 = SIZE_2KB; 2749 } else { 2750 if (rbrp->block_size >= 0x2000) { 2751 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2752 "<== hxge_map_rxdma_channel_buf_ring: channel %d " 2753 "no msg blocks", channel)); 2754 status = HXGE_ERROR; 2755 goto hxge_map_rxdma_channel_buf_ring_fail1; 2756 } else { 2757 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 2758 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 2759 rbrp->hpi_pkt_buf_size2 = SIZE_4KB; 2760 } 2761 } 2762 2763 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2764 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2765 "actual rbr max %d rbb_max %d nmsgs %d " 2766 "rbrp->block_size %d default_block_size %d " 2767 "(config hxge_rbr_size %d hxge_rbr_spare_size %d)", 2768 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 2769 rbrp->block_size, hxgep->rx_default_block_size, 2770 hxge_rbr_size, hxge_rbr_spare_size)); 2771 2772 /* 2773 * Map in buffers from the buffer pool. 2774 * Note that num_blocks is the num_chunks. For Sparc, there is likely 2775 * only one chunk. For x86, there will be many chunks. 2776 * Loop over chunks. 
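 *
 * The running <index> below is the global block number across all
 * chunks: each chunk contributes its nblocks entries to rx_msg_ring[],
 * and ring_info->buffer[i].start_index records where chunk i begins
 * within that array.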
2777 */ 2778 index = 0; 2779 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 2780 bsize = dma_bufp->block_size; 2781 nblocks = dma_bufp->nblocks; 2782 #if defined(__i386) 2783 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 2784 #else 2785 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 2786 #endif 2787 ring_info->buffer[i].buf_index = i; 2788 ring_info->buffer[i].buf_size = dma_bufp->alength; 2789 ring_info->buffer[i].start_index = index; 2790 #if defined(__i386) 2791 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 2792 #else 2793 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 2794 #endif 2795 2796 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2797 " hxge_map_rxdma_channel_buf_ring: map channel %d " 2798 "chunk %d nblocks %d chunk_size %x block_size 0x%x " 2799 "dma_bufp $%p dvma_addr $%p", channel, i, 2800 dma_bufp->nblocks, 2801 ring_info->buffer[i].buf_size, bsize, dma_bufp, 2802 ring_info->buffer[i].dvma_addr)); 2803 2804 /* loop over blocks within a chunk */ 2805 for (j = 0; j < nblocks; j++) { 2806 if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO, 2807 dma_bufp)) == NULL) { 2808 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2809 "allocb failed (index %d i %d j %d)", 2810 index, i, j)); 2811 goto hxge_map_rxdma_channel_buf_ring_fail1; 2812 } 2813 rx_msg_ring[index] = rx_msg_p; 2814 rx_msg_p->block_index = index; 2815 rx_msg_p->shifted_addr = (uint32_t) 2816 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2817 RBR_BKADDR_SHIFT)); 2818 /* 2819 * Too much output 2820 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2821 * "index %d j %d rx_msg_p $%p mblk %p", 2822 * index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 2823 */ 2824 mblk_p = rx_msg_p->rx_mblk_p; 2825 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 2826 2827 rbrp->rbr_ref_cnt++; 2828 index++; 2829 rx_msg_p->buf_dma.dma_channel = channel; 2830 } 2831 } 2832 if (i < rbrp->num_blocks) { 2833 goto hxge_map_rxdma_channel_buf_ring_fail1; 2834 } 2835 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2836 "hxge_map_rxdma_channel_buf_ring: done buf init " 2837 "channel %d msg block entries %d", channel, index)); 2838 ring_info->block_size_mask = bsize - 1; 2839 rbrp->rx_msg_ring = rx_msg_ring; 2840 rbrp->dma_bufp = dma_buf_p; 2841 rbrp->ring_info = ring_info; 2842 2843 status = hxge_rxbuf_index_info_init(hxgep, rbrp); 2844 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: " 2845 "channel %d done buf info init", channel)); 2846 2847 /* 2848 * Finally, permit hxge_freeb() to call hxge_post_page(). 
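 *
 * RBR_POSTING is the steady state: while the ring is in this state,
 * hxge_freeb() is allowed to repost a returned buffer block to the
 * hardware.  hxge_unmap_rxdma_channel_buf_ring() moves the state to
 * RBR_UNMAPPING/RBR_UNMAPPED before tearing the ring down, so that a
 * late free never touches a destroyed mutex (see the comments in that
 * routine).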
2849 */ 2850 rbrp->rbr_state = RBR_POSTING; 2851 2852 *rbr_p = rbrp; 2853 2854 goto hxge_map_rxdma_channel_buf_ring_exit; 2855 2856 hxge_map_rxdma_channel_buf_ring_fail1: 2857 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2858 " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 2859 channel, status)); 2860 2861 index--; 2862 for (; index >= 0; index--) { 2863 rx_msg_p = rx_msg_ring[index]; 2864 if (rx_msg_p != NULL) { 2865 hxge_freeb(rx_msg_p); 2866 rx_msg_ring[index] = NULL; 2867 } 2868 } 2869 2870 hxge_map_rxdma_channel_buf_ring_fail: 2871 MUTEX_DESTROY(&rbrp->post_lock); 2872 MUTEX_DESTROY(&rbrp->lock); 2873 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2874 KMEM_FREE(rx_msg_ring, size); 2875 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 2876 2877 status = HXGE_ERROR; 2878 2879 hxge_map_rxdma_channel_buf_ring_exit: 2880 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2881 "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 2882 2883 return (status); 2884 } 2885 2886 /*ARGSUSED*/ 2887 static void 2888 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 2889 p_rx_rbr_ring_t rbr_p) 2890 { 2891 p_rx_msg_t *rx_msg_ring; 2892 p_rx_msg_t rx_msg_p; 2893 rxring_info_t *ring_info; 2894 int i; 2895 uint32_t size; 2896 2897 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2898 "==> hxge_unmap_rxdma_channel_buf_ring")); 2899 if (rbr_p == NULL) { 2900 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2901 "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 2902 return; 2903 } 2904 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2905 "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc)); 2906 2907 rx_msg_ring = rbr_p->rx_msg_ring; 2908 ring_info = rbr_p->ring_info; 2909 2910 if (rx_msg_ring == NULL || ring_info == NULL) { 2911 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2912 "<== hxge_unmap_rxdma_channel_buf_ring: " 2913 "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info)); 2914 return; 2915 } 2916 2917 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 2918 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2919 " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 2920 "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks, 2921 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 2922 2923 for (i = 0; i < rbr_p->tnblocks; i++) { 2924 rx_msg_p = rx_msg_ring[i]; 2925 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2926 " hxge_unmap_rxdma_channel_buf_ring: " 2927 "rx_msg_p $%p", rx_msg_p)); 2928 if (rx_msg_p != NULL) { 2929 hxge_freeb(rx_msg_p); 2930 rx_msg_ring[i] = NULL; 2931 } 2932 } 2933 2934 /* 2935 * We no longer may use the mutex <post_lock>. By setting 2936 * <rbr_state> to anything but POSTING, we prevent 2937 * hxge_post_page() from accessing a dead mutex. 2938 */ 2939 rbr_p->rbr_state = RBR_UNMAPPING; 2940 MUTEX_DESTROY(&rbr_p->post_lock); 2941 2942 MUTEX_DESTROY(&rbr_p->lock); 2943 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2944 KMEM_FREE(rx_msg_ring, size); 2945 2946 if (rbr_p->rbr_ref_cnt == 0) { 2947 /* This is the normal state of affairs. */ 2948 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 2949 } else { 2950 /* 2951 * Some of our buffers are still being used. 2952 * Therefore, tell hxge_freeb() this ring is 2953 * unmapped, so it may free <rbr_p> for us. 2954 */ 2955 rbr_p->rbr_state = RBR_UNMAPPED; 2956 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2957 "unmap_rxdma_buf_ring: %d %s outstanding.", 2958 rbr_p->rbr_ref_cnt, 2959 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 2960 } 2961 2962 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2963 "<== hxge_unmap_rxdma_channel_buf_ring")); 2964 } 2965 2966 static hxge_status_t 2967 hxge_rxdma_hw_start_common(p_hxge_t hxgep) 2968 { 2969 hxge_status_t status = HXGE_OK; 2970 2971 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 2972 2973 /* 2974 * Load the sharable parameters by writing to the function zero control 2975 * registers. These FZC registers should be initialized only once for 2976 * the entire chip. 2977 */ 2978 (void) hxge_init_fzc_rx_common(hxgep); 2979 2980 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 2981 2982 return (status); 2983 } 2984 2985 static hxge_status_t 2986 hxge_rxdma_hw_start(p_hxge_t hxgep) 2987 { 2988 int i, ndmas; 2989 uint16_t channel; 2990 p_rx_rbr_rings_t rx_rbr_rings; 2991 p_rx_rbr_ring_t *rbr_rings; 2992 p_rx_rcr_rings_t rx_rcr_rings; 2993 p_rx_rcr_ring_t *rcr_rings; 2994 p_rx_mbox_areas_t rx_mbox_areas_p; 2995 p_rx_mbox_t *rx_mbox_p; 2996 hxge_status_t status = HXGE_OK; 2997 2998 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start")); 2999 3000 rx_rbr_rings = hxgep->rx_rbr_rings; 3001 rx_rcr_rings = hxgep->rx_rcr_rings; 3002 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3003 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3004 "<== hxge_rxdma_hw_start: NULL ring pointers")); 3005 return (HXGE_ERROR); 3006 } 3007 3008 ndmas = rx_rbr_rings->ndmas; 3009 if (ndmas == 0) { 3010 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3011 "<== hxge_rxdma_hw_start: no dma channel allocated")); 3012 return (HXGE_ERROR); 3013 } 3014 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3015 "==> hxge_rxdma_hw_start (ndmas %d)", ndmas)); 3016 3017 /* 3018 * Scrub the RDC Rx DMA Prefetch Buffer Command. 3019 */ 3020 for (i = 0; i < 128; i++) { 3021 HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i); 3022 } 3023 3024 /* 3025 * Scrub Rx DMA Shadow Tail Command. 3026 */ 3027 for (i = 0; i < 64; i++) { 3028 HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i); 3029 } 3030 3031 /* 3032 * Scrub Rx DMA Control Fifo Command. 3033 */ 3034 for (i = 0; i < 512; i++) { 3035 HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i); 3036 } 3037 3038 /* 3039 * Scrub Rx DMA Data Fifo Command. 3040 */ 3041 for (i = 0; i < 1536; i++) { 3042 HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i); 3043 } 3044 3045 /* 3046 * Reset the FIFO Error Stat. 
3047 */ 3048 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF); 3049 3050 /* Set the error mask to receive interrupts */ 3051 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3052 3053 rbr_rings = rx_rbr_rings->rbr_rings; 3054 rcr_rings = rx_rcr_rings->rcr_rings; 3055 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 3056 if (rx_mbox_areas_p) { 3057 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3058 } 3059 3060 for (i = 0; i < ndmas; i++) { 3061 channel = rbr_rings[i]->rdc; 3062 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3063 "==> hxge_rxdma_hw_start (ndmas %d) channel %d", 3064 ndmas, channel)); 3065 status = hxge_rxdma_start_channel(hxgep, channel, 3066 (p_rx_rbr_ring_t)rbr_rings[i], 3067 (p_rx_rcr_ring_t)rcr_rings[i], 3068 (p_rx_mbox_t)rx_mbox_p[i]); 3069 if (status != HXGE_OK) { 3070 goto hxge_rxdma_hw_start_fail1; 3071 } 3072 } 3073 3074 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: " 3075 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3076 rx_rbr_rings, rx_rcr_rings)); 3077 goto hxge_rxdma_hw_start_exit; 3078 3079 hxge_rxdma_hw_start_fail1: 3080 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3081 "==> hxge_rxdma_hw_start: disable " 3082 "(status 0x%x channel %d i %d)", status, channel, i)); 3083 for (; i >= 0; i--) { 3084 channel = rbr_rings[i]->rdc; 3085 (void) hxge_rxdma_stop_channel(hxgep, channel); 3086 } 3087 3088 hxge_rxdma_hw_start_exit: 3089 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3090 "==> hxge_rxdma_hw_start: (status 0x%x)", status)); 3091 return (status); 3092 } 3093 3094 static void 3095 hxge_rxdma_hw_stop(p_hxge_t hxgep) 3096 { 3097 int i, ndmas; 3098 uint16_t channel; 3099 p_rx_rbr_rings_t rx_rbr_rings; 3100 p_rx_rbr_ring_t *rbr_rings; 3101 p_rx_rcr_rings_t rx_rcr_rings; 3102 3103 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop")); 3104 3105 rx_rbr_rings = hxgep->rx_rbr_rings; 3106 rx_rcr_rings = hxgep->rx_rcr_rings; 3107 3108 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3109 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3110 "<== hxge_rxdma_hw_stop: NULL ring pointers")); 3111 return; 3112 } 3113 3114 ndmas = rx_rbr_rings->ndmas; 3115 if (!ndmas) { 3116 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3117 "<== hxge_rxdma_hw_stop: no dma channel allocated")); 3118 return; 3119 } 3120 3121 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3122 "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3123 3124 rbr_rings = rx_rbr_rings->rbr_rings; 3125 for (i = 0; i < ndmas; i++) { 3126 channel = rbr_rings[i]->rdc; 3127 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3128 "==> hxge_rxdma_hw_stop (ndmas %d) channel %d", 3129 ndmas, channel)); 3130 (void) hxge_rxdma_stop_channel(hxgep, channel); 3131 } 3132 3133 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: " 3134 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3135 rx_rbr_rings, rx_rcr_rings)); 3136 3137 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop")); 3138 } 3139 3140 static hxge_status_t 3141 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 3142 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3143 { 3144 hpi_handle_t handle; 3145 hpi_status_t rs = HPI_SUCCESS; 3146 rdc_stat_t cs; 3147 rdc_int_mask_t ent_mask; 3148 hxge_status_t status = HXGE_OK; 3149 3150 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel")); 3151 3152 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3153 3154 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: " 3155 "hpi handle addr $%p acc $%p", 3156 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3157 3158 /* Reset RXDMA channel */ 3159 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3160 if (rs != HPI_SUCCESS) { 
3161 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3162 "==> hxge_rxdma_start_channel: " 3163 "reset rxdma failed (0x%08x channel %d)", 3164 status, channel)); 3165 return (HXGE_ERROR | rs); 3166 } 3167 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3168 "==> hxge_rxdma_start_channel: reset done: channel %d", channel)); 3169 3170 /* 3171 * Initialize the RXDMA channel specific FZC control configurations. 3172 * These FZC registers are pertaining to each RX channel (logical 3173 * pages). 3174 */ 3175 status = hxge_init_fzc_rxdma_channel(hxgep, 3176 channel, rbr_p, rcr_p, mbox_p); 3177 if (status != HXGE_OK) { 3178 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3179 "==> hxge_rxdma_start_channel: " 3180 "init fzc rxdma failed (0x%08x channel %d)", 3181 status, channel)); 3182 return (status); 3183 } 3184 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3185 "==> hxge_rxdma_start_channel: fzc done")); 3186 3187 /* 3188 * Zero out the shadow and prefetch ram. 3189 */ 3190 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3191 "==> hxge_rxdma_start_channel: ram done")); 3192 3193 /* Set up the interrupt event masks. */ 3194 ent_mask.value = 0; 3195 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3196 if (rs != HPI_SUCCESS) { 3197 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3198 "==> hxge_rxdma_start_channel: " 3199 "init rxdma event masks failed (0x%08x channel %d)", 3200 status, channel)); 3201 return (HXGE_ERROR | rs); 3202 } 3203 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3204 "event done: channel %d (mask 0x%016llx)", 3205 channel, ent_mask.value)); 3206 3207 /* 3208 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA 3209 * channels and enable each DMA channel. 3210 */ 3211 status = hxge_enable_rxdma_channel(hxgep, 3212 channel, rbr_p, rcr_p, mbox_p); 3213 if (status != HXGE_OK) { 3214 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3215 " hxge_rxdma_start_channel: " 3216 " init enable rxdma failed (0x%08x channel %d)", 3217 status, channel)); 3218 return (status); 3219 } 3220 3221 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3222 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3223 3224 /* 3225 * Initialize the receive DMA control and status register 3226 * Note that rdc_stat HAS to be set after RBR and RCR rings are set 3227 */ 3228 cs.value = 0; 3229 cs.bits.mex = 1; 3230 cs.bits.rcr_thres = 1; 3231 cs.bits.rcr_to = 1; 3232 cs.bits.rbr_empty = 1; 3233 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3234 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3235 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 3236 if (status != HXGE_OK) { 3237 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3238 "==> hxge_rxdma_start_channel: " 3239 "init rxdma control register failed (0x%08x channel %d", 3240 status, channel)); 3241 return (status); 3242 } 3243 3244 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3245 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3246 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3247 "==> hxge_rxdma_start_channel: enable done")); 3248 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel")); 3249 3250 return (HXGE_OK); 3251 } 3252 3253 static hxge_status_t 3254 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel) 3255 { 3256 hpi_handle_t handle; 3257 hpi_status_t rs = HPI_SUCCESS; 3258 rdc_stat_t cs; 3259 rdc_int_mask_t ent_mask; 3260 hxge_status_t status = HXGE_OK; 3261 3262 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel")); 3263 3264 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3265 
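/*
 * The stop sequence below: reset the channel, mask every RDC interrupt
 * source (RDC_INT_MASK_ALL), clear the control/status register back to
 * its defaults, then disable the DMA channel itself.
 */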
3266 HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: " 3267 "hpi handle addr $%p acc $%p", 3268 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3269 3270 /* Reset RXDMA channel */ 3271 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3272 if (rs != HPI_SUCCESS) { 3273 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3274 " hxge_rxdma_stop_channel: " 3275 " reset rxdma failed (0x%08x channel %d)", 3276 rs, channel)); 3277 return (HXGE_ERROR | rs); 3278 } 3279 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3280 "==> hxge_rxdma_stop_channel: reset done")); 3281 3282 /* Set up the interrupt event masks. */ 3283 ent_mask.value = RDC_INT_MASK_ALL; 3284 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3285 if (rs != HPI_SUCCESS) { 3286 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3287 "==> hxge_rxdma_stop_channel: " 3288 "set rxdma event masks failed (0x%08x channel %d)", 3289 rs, channel)); 3290 return (HXGE_ERROR | rs); 3291 } 3292 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3293 "==> hxge_rxdma_stop_channel: event done")); 3294 3295 /* Initialize the receive DMA control and status register */ 3296 cs.value = 0; 3297 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3298 3299 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control " 3300 " to default (all 0s) 0x%08x", cs.value)); 3301 3302 if (status != HXGE_OK) { 3303 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3304 " hxge_rxdma_stop_channel: init rxdma" 3305 " control register failed (0x%08x channel %d", 3306 status, channel)); 3307 return (status); 3308 } 3309 3310 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3311 "==> hxge_rxdma_stop_channel: control done")); 3312 3313 /* disable dma channel */ 3314 status = hxge_disable_rxdma_channel(hxgep, channel); 3315 3316 if (status != HXGE_OK) { 3317 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3318 " hxge_rxdma_stop_channel: " 3319 " init enable rxdma failed (0x%08x channel %d)", 3320 status, channel)); 3321 return (status); 3322 } 3323 3324 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3325 "==> hxge_rxdma_stop_channel: disable done")); 3326 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel")); 3327 3328 return (HXGE_OK); 3329 } 3330 3331 hxge_status_t 3332 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep) 3333 { 3334 hpi_handle_t handle; 3335 p_hxge_rdc_sys_stats_t statsp; 3336 rdc_fifo_err_stat_t stat; 3337 hxge_status_t status = HXGE_OK; 3338 3339 handle = hxgep->hpi_handle; 3340 statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats; 3341 3342 /* Clear the int_dbg register in case it is an injected err */ 3343 HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0); 3344 3345 /* Get the error status and clear the register */ 3346 HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value); 3347 HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value); 3348 3349 if (stat.bits.rx_ctrl_fifo_sec) { 3350 statsp->ctrl_fifo_sec++; 3351 if (statsp->ctrl_fifo_sec == 1) 3352 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3353 "==> hxge_rxdma_handle_sys_errors: " 3354 "rx_ctrl_fifo_sec")); 3355 } 3356 3357 if (stat.bits.rx_ctrl_fifo_ded) { 3358 /* Global fatal error encountered */ 3359 statsp->ctrl_fifo_ded++; 3360 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3361 HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED); 3362 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3363 "==> hxge_rxdma_handle_sys_errors: " 3364 "fatal error: rx_ctrl_fifo_ded error")); 3365 } 3366 3367 if (stat.bits.rx_data_fifo_sec) { 3368 statsp->data_fifo_sec++; 3369 if (statsp->data_fifo_sec == 1) 3370 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3371 "==> hxge_rxdma_handle_sys_errors: " 3372 "rx_data_fifo_sec")); 3373 } 3374 3375 
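/*
 * As with the control FIFO above: single-bit (SEC) ECC errors are only
 * counted, and logged just once, while the double-bit (DED) errors
 * checked next are treated as fatal and feed the port-level recovery
 * at the bottom of this routine.
 */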
if (stat.bits.rx_data_fifo_ded) { 3376 /* Global fatal error encountered */ 3377 statsp->data_fifo_ded++; 3378 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3379 HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED); 3380 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3381 "==> hxge_rxdma_handle_sys_errors: " 3382 "fatal error: rx_data_fifo_ded error")); 3383 } 3384 3385 if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) { 3386 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3387 " hxge_rxdma_handle_sys_errors: fatal error\n")); 3388 status = hxge_rx_port_fatal_err_recover(hxgep); 3389 if (status == HXGE_OK) { 3390 FM_SERVICE_RESTORED(hxgep); 3391 } 3392 } 3393 3394 return (HXGE_OK); 3395 } 3396 3397 static hxge_status_t 3398 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel) 3399 { 3400 hpi_handle_t handle; 3401 hpi_status_t rs = HPI_SUCCESS; 3402 hxge_status_t status = HXGE_OK; 3403 p_rx_rbr_ring_t rbrp; 3404 p_rx_rcr_ring_t rcrp; 3405 p_rx_mbox_t mboxp; 3406 rdc_int_mask_t ent_mask; 3407 p_hxge_dma_common_t dmap; 3408 int ring_idx; 3409 p_rx_msg_t rx_msg_p; 3410 int i; 3411 uint32_t hxge_port_rcr_size; 3412 uint64_t tmp; 3413 3414 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover")); 3415 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3416 "Recovering from RxDMAChannel#%d error...", channel)); 3417 3418 /* 3419 * Stop the dma channel waits for the stop done. If the stop done bit 3420 * is not set, then create an error. 3421 */ 3422 3423 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3424 3425 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop...")); 3426 3427 ring_idx = hxge_rxdma_get_ring_index(hxgep, channel); 3428 rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx]; 3429 rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx]; 3430 3431 MUTEX_ENTER(&rcrp->lock); 3432 MUTEX_ENTER(&rbrp->lock); 3433 MUTEX_ENTER(&rbrp->post_lock); 3434 3435 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel...")); 3436 3437 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 3438 if (rs != HPI_SUCCESS) { 3439 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3440 "hxge_disable_rxdma_channel:failed")); 3441 goto fail; 3442 } 3443 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt...")); 3444 3445 /* Disable interrupt */ 3446 ent_mask.value = RDC_INT_MASK_ALL; 3447 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3448 if (rs != HPI_SUCCESS) { 3449 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3450 "Set rxdma event masks failed (channel %d)", channel)); 3451 } 3452 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset...")); 3453 3454 /* Reset RXDMA channel */ 3455 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3456 if (rs != HPI_SUCCESS) { 3457 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3458 "Reset rxdma failed (channel %d)", channel)); 3459 goto fail; 3460 } 3461 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 3462 mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 3463 3464 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3465 rbrp->rbr_rd_index = 0; 3466 rbrp->pages_to_post = 0; 3467 3468 rcrp->comp_rd_index = 0; 3469 rcrp->comp_wt_index = 0; 3470 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3471 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3472 #if defined(__i386) 3473 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3474 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3475 #else 3476 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3477 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3478 #endif 3479 3480 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3481 (hxge_port_rcr_size - 
1); 3482 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3483 (hxge_port_rcr_size - 1); 3484 3485 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 3486 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 3487 3488 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 3489 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3490 3491 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n", 3492 rbrp->rbr_max_size)); 3493 3494 for (i = 0; i < rbrp->rbr_max_size; i++) { 3495 /* Reset all the buffers */ 3496 rx_msg_p = rbrp->rx_msg_ring[i]; 3497 rx_msg_p->ref_cnt = 1; 3498 rx_msg_p->free = B_TRUE; 3499 rx_msg_p->cur_usage_cnt = 0; 3500 rx_msg_p->max_usage_cnt = 0; 3501 rx_msg_p->pkt_buf_size = 0; 3502 } 3503 3504 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start...")); 3505 3506 status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp); 3507 if (status != HXGE_OK) { 3508 goto fail; 3509 } 3510 3511 /* 3512 * The DMA channel may disable itself automatically. 3513 * The following is a work-around. 3514 */ 3515 HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp); 3516 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 3517 if (rs != HPI_SUCCESS) { 3518 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3519 "hpi_rxdma_cfg_rdc_enable (channel %d)", channel)); 3520 } 3521 3522 MUTEX_EXIT(&rbrp->post_lock); 3523 MUTEX_EXIT(&rbrp->lock); 3524 MUTEX_EXIT(&rcrp->lock); 3525 3526 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3527 "Recovery Successful, RxDMAChannel#%d Restored", channel)); 3528 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover")); 3529 3530 return (HXGE_OK); 3531 3532 fail: 3533 MUTEX_EXIT(&rbrp->post_lock); 3534 MUTEX_EXIT(&rbrp->lock); 3535 MUTEX_EXIT(&rcrp->lock); 3536 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3537 3538 return (HXGE_ERROR | rs); 3539 } 3540 3541 static hxge_status_t 3542 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep) 3543 { 3544 hxge_status_t status = HXGE_OK; 3545 p_hxge_dma_common_t *dma_buf_p; 3546 uint16_t channel; 3547 int ndmas; 3548 int i; 3549 block_reset_t reset_reg; 3550 3551 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover")); 3552 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ...")); 3553 3554 /* Reset RDC block from PEU for this fatal error */ 3555 reset_reg.value = 0; 3556 reset_reg.bits.rdc_rst = 1; 3557 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 3558 3559 /* Disable RxMAC */ 3560 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n")); 3561 if (hxge_rx_vmac_disable(hxgep) != HXGE_OK) 3562 goto fail; 3563 3564 HXGE_DELAY(1000); 3565 3566 /* Restore any common settings after PEU reset */ 3567 if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK) 3568 goto fail; 3569 3570 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels...")); 3571 3572 ndmas = hxgep->rx_buf_pool_p->ndmas; 3573 dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p; 3574 3575 for (i = 0; i < ndmas; i++) { 3576 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 3577 if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) { 3578 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3579 "Could not recover channel %d", channel)); 3580 } 3581 } 3582 3583 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC...")); 3584 3585 /* Reset RxMAC */ 3586 if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) { 3587 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3588 "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC")); 3589 goto fail; 3590 } 3591 3592 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC...")); 3593 3594 /* Re-Initialize RxMAC */ 3595 if ((status = 
hxge_rx_vmac_init(hxgep)) != HXGE_OK) { 3596 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3597 "hxge_rx_port_fatal_err_recover: Failed to re-initialize RxMAC")); 3598 goto fail; 3599 } 3600 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC...")); 3601 3602 /* Re-enable RxMAC */ 3603 if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) { 3604 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3605 "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC")); 3606 goto fail; 3607 } 3608 3609 /* Reset the error mask since PEU reset cleared it */ 3610 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3611 3612 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3613 "Recovery Successful, RxPort Restored")); 3614 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover")); 3615 3616 return (HXGE_OK); 3617 fail: 3618 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3619 return (status); 3620 } 3621
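/*
 * Error-recovery summary (descriptive only): per-channel fatal bits in
 * RDC_STAT are handled by hxge_rx_err_evnts(), which calls
 * hxge_rxdma_fatal_err_recover() to reset and restart just that DMA
 * channel.  Uncorrectable (DED) FIFO ECC errors reported through
 * RDC_FIFO_ERR_STAT are handled by hxge_rxdma_handle_sys_errors(),
 * which calls hxge_rx_port_fatal_err_recover() to reset the whole RDC
 * block via the PEU, recover every channel, and then reset and
 * re-enable the RxMAC.  In both paths FM_SERVICE_RESTORED() is
 * reported only when the recovery routine returns HXGE_OK.
 */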