1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <hxge_impl.h> 27 #include <hxge_rxdma.h> 28 29 /* 30 * Globals: tunable parameters (/etc/system or adb) 31 * 32 */ 33 extern uint32_t hxge_rbr_size; 34 extern uint32_t hxge_rcr_size; 35 extern uint32_t hxge_rbr_spare_size; 36 extern uint32_t hxge_mblks_pending; 37 38 /* 39 * Tunable to reduce the amount of time spent in the 40 * ISR doing Rx Processing. 41 */ 42 extern uint32_t hxge_max_rx_pkts; 43 44 /* 45 * Tunables to manage the receive buffer blocks. 46 * 47 * hxge_rx_threshold_hi: copy all buffers. 48 * hxge_rx_bcopy_size_type: receive buffer block size type. 49 * hxge_rx_threshold_lo: copy only up to tunable block size type. 50 */ 51 extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi; 52 extern hxge_rxbuf_type_t hxge_rx_buf_size_type; 53 extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo; 54 55 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep); 56 static void hxge_unmap_rxdma(p_hxge_t hxgep); 57 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep); 58 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep); 59 static void hxge_rxdma_hw_stop(p_hxge_t hxgep); 60 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 61 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 62 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 63 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 64 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p); 65 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 66 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p); 67 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, 68 uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p, 69 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 70 p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p); 71 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 72 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p); 73 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, 74 uint16_t channel, p_hxge_dma_common_t *dma_buf_p, 75 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks); 76 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 77 p_rx_rbr_ring_t rbr_p); 78 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 79 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 80 int n_init_kick); 81 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel); 82 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t 
ldvp, 83 p_rx_rcr_ring_t *rcr_p, rdc_stat_t cs); 84 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p, 85 p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p, 86 mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry); 87 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep, 88 uint16_t channel); 89 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t); 90 static void hxge_freeb(p_rx_msg_t); 91 static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, 92 p_hxge_ldv_t ldvp, rdc_stat_t cs); 93 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, 94 p_hxge_ldv_t ldvp, rdc_stat_t cs); 95 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep, 96 p_rx_rbr_ring_t rx_dmap); 97 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, 98 uint16_t channel); 99 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep); 100 101 hxge_status_t 102 hxge_init_rxdma_channels(p_hxge_t hxgep) 103 { 104 hxge_status_t status = HXGE_OK; 105 block_reset_t reset_reg; 106 107 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels")); 108 109 /* Reset RDC block from PEU to clear any previous state */ 110 reset_reg.value = 0; 111 reset_reg.bits.rdc_rst = 1; 112 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 113 HXGE_DELAY(1000); 114 115 status = hxge_map_rxdma(hxgep); 116 if (status != HXGE_OK) { 117 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 118 "<== hxge_init_rxdma: status 0x%x", status)); 119 return (status); 120 } 121 122 status = hxge_rxdma_hw_start_common(hxgep); 123 if (status != HXGE_OK) { 124 hxge_unmap_rxdma(hxgep); 125 } 126 127 status = hxge_rxdma_hw_start(hxgep); 128 if (status != HXGE_OK) { 129 hxge_unmap_rxdma(hxgep); 130 } 131 132 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 133 "<== hxge_init_rxdma_channels: status 0x%x", status)); 134 return (status); 135 } 136 137 void 138 hxge_uninit_rxdma_channels(p_hxge_t hxgep) 139 { 140 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels")); 141 142 hxge_rxdma_hw_stop(hxgep); 143 hxge_unmap_rxdma(hxgep); 144 145 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uinit_rxdma_channels")); 146 } 147 148 hxge_status_t 149 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel, 150 rdc_stat_t *cs_p) 151 { 152 hpi_handle_t handle; 153 hpi_status_t rs = HPI_SUCCESS; 154 hxge_status_t status = HXGE_OK; 155 156 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 157 "<== hxge_init_rxdma_channel_cntl_stat")); 158 159 handle = HXGE_DEV_HPI_HANDLE(hxgep); 160 rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p); 161 162 if (rs != HPI_SUCCESS) { 163 status = HXGE_ERROR | rs; 164 } 165 return (status); 166 } 167 168 169 hxge_status_t 170 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 171 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 172 int n_init_kick) 173 { 174 hpi_handle_t handle; 175 rdc_desc_cfg_t rdc_desc; 176 rdc_rcr_cfg_b_t *cfgb_p; 177 hpi_status_t rs = HPI_SUCCESS; 178 179 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel")); 180 handle = HXGE_DEV_HPI_HANDLE(hxgep); 181 182 /* 183 * Use configuration data composed at init time. Write to hardware the 184 * receive ring configurations. 
185 */ 186 rdc_desc.mbox_enable = 1; 187 rdc_desc.mbox_addr = mbox_p->mbox_addr; 188 HXGE_DEBUG_MSG((hxgep, RX_CTL, 189 "==> hxge_enable_rxdma_channel: mboxp $%p($%p)", 190 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 191 192 rdc_desc.rbr_len = rbr_p->rbb_max; 193 rdc_desc.rbr_addr = rbr_p->rbr_addr; 194 195 switch (hxgep->rx_bksize_code) { 196 case RBR_BKSIZE_4K: 197 rdc_desc.page_size = SIZE_4KB; 198 break; 199 case RBR_BKSIZE_8K: 200 rdc_desc.page_size = SIZE_8KB; 201 break; 202 } 203 204 rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0; 205 rdc_desc.valid0 = 1; 206 207 rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1; 208 rdc_desc.valid1 = 1; 209 210 rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2; 211 rdc_desc.valid2 = 1; 212 213 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 214 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 215 216 rdc_desc.rcr_len = rcr_p->comp_size; 217 rdc_desc.rcr_addr = rcr_p->rcr_addr; 218 219 cfgb_p = &(rcr_p->rcr_cfgb); 220 rdc_desc.rcr_threshold = cfgb_p->bits.pthres; 221 rdc_desc.rcr_timeout = cfgb_p->bits.timeout; 222 rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout; 223 224 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: " 225 "rbr_len qlen %d pagesize code %d rcr_len %d", 226 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 227 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: " 228 "size 0 %d size 1 %d size 2 %d", 229 rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1, 230 rbr_p->hpi_pkt_buf_size2)); 231 232 rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 233 if (rs != HPI_SUCCESS) { 234 return (HXGE_ERROR | rs); 235 } 236 237 /* 238 * Enable the timeout and threshold. 239 */ 240 rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 241 rdc_desc.rcr_threshold); 242 if (rs != HPI_SUCCESS) { 243 return (HXGE_ERROR | rs); 244 } 245 246 rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 247 rdc_desc.rcr_timeout); 248 if (rs != HPI_SUCCESS) { 249 return (HXGE_ERROR | rs); 250 } 251 252 /* Enable the DMA */ 253 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 254 if (rs != HPI_SUCCESS) { 255 return (HXGE_ERROR | rs); 256 } 257 258 /* Kick the DMA engine */ 259 hpi_rxdma_rdc_rbr_kick(handle, channel, n_init_kick); 260 261 /* Clear the rbr empty bit */ 262 (void) hpi_rxdma_channel_rbr_empty_clear(handle, channel); 263 264 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel")); 265 266 return (HXGE_OK); 267 } 268 269 static hxge_status_t 270 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel) 271 { 272 hpi_handle_t handle; 273 hpi_status_t rs = HPI_SUCCESS; 274 275 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel")); 276 277 handle = HXGE_DEV_HPI_HANDLE(hxgep); 278 279 /* disable the DMA */ 280 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 281 if (rs != HPI_SUCCESS) { 282 HXGE_DEBUG_MSG((hxgep, RX_CTL, 283 "<== hxge_disable_rxdma_channel:failed (0x%x)", rs)); 284 return (HXGE_ERROR | rs); 285 } 286 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel")); 287 return (HXGE_OK); 288 } 289 290 hxge_status_t 291 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel) 292 { 293 hpi_handle_t handle; 294 hxge_status_t status = HXGE_OK; 295 296 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 297 "==> hxge_rxdma_channel_rcrflush")); 298 299 handle = HXGE_DEV_HPI_HANDLE(hxgep); 300 hpi_rxdma_rdc_rcr_flush(handle, channel); 301 302 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 303 "<== hxge_rxdma_channel_rcrflush")); 304 return (status); 305 306 } 307 308 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 309 310 #define TO_LEFT -1 311 
#define TO_RIGHT 1 312 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 313 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 314 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 315 #define NO_HINT 0xffffffff 316 317 /*ARGSUSED*/ 318 hxge_status_t 319 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p, 320 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 321 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 322 { 323 int bufsize; 324 uint64_t pktbuf_pp; 325 uint64_t dvma_addr; 326 rxring_info_t *ring_info; 327 int base_side, end_side; 328 int r_index, l_index, anchor_index; 329 int found, search_done; 330 uint32_t offset, chunk_size, block_size, page_size_mask; 331 uint32_t chunk_index, block_index, total_index; 332 int max_iterations, iteration; 333 rxbuf_index_info_t *bufinfo; 334 335 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp")); 336 337 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 338 "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 339 pkt_buf_addr_pp, pktbufsz_type)); 340 341 #if defined(__i386) 342 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 343 #else 344 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 345 #endif 346 347 switch (pktbufsz_type) { 348 case 0: 349 bufsize = rbr_p->pkt_buf_size0; 350 break; 351 case 1: 352 bufsize = rbr_p->pkt_buf_size1; 353 break; 354 case 2: 355 bufsize = rbr_p->pkt_buf_size2; 356 break; 357 case RCR_SINGLE_BLOCK: 358 bufsize = 0; 359 anchor_index = 0; 360 break; 361 default: 362 return (HXGE_ERROR); 363 } 364 365 if (rbr_p->num_blocks == 1) { 366 anchor_index = 0; 367 ring_info = rbr_p->ring_info; 368 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 369 370 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 371 "==> hxge_rxbuf_pp_to_vp: (found, 1 block) " 372 "buf_pp $%p btype %d anchor_index %d bufinfo $%p", 373 pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo)); 374 375 goto found_index; 376 } 377 378 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 379 "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d", 380 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 381 382 ring_info = rbr_p->ring_info; 383 found = B_FALSE; 384 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 385 iteration = 0; 386 max_iterations = ring_info->max_iterations; 387 388 /* 389 * First check if this block have been seen recently. This is indicated 390 * by a hint which is initialized when the first buffer of the block is 391 * seen. The hint is reset when the last buffer of the block has been 392 * processed. As three block sizes are supported, three hints are kept. 393 * The idea behind the hints is that once the hardware uses a block 394 * for a buffer of that size, it will use it exclusively for that size 395 * and will use it until it is exhausted. It is assumed that there 396 * would a single block being used for the same buffer sizes at any 397 * given time. 
398 */ 399 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 400 anchor_index = ring_info->hint[pktbufsz_type]; 401 dvma_addr = bufinfo[anchor_index].dvma_addr; 402 chunk_size = bufinfo[anchor_index].buf_size; 403 if ((pktbuf_pp >= dvma_addr) && 404 (pktbuf_pp < (dvma_addr + chunk_size))) { 405 found = B_TRUE; 406 /* 407 * check if this is the last buffer in the block If so, 408 * then reset the hint for the size; 409 */ 410 411 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 412 ring_info->hint[pktbufsz_type] = NO_HINT; 413 } 414 } 415 416 if (found == B_FALSE) { 417 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 418 "==> hxge_rxbuf_pp_to_vp: (!found)" 419 "buf_pp $%p btype %d anchor_index %d", 420 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 421 422 /* 423 * This is the first buffer of the block of this size. Need to 424 * search the whole information array. the search algorithm 425 * uses a binary tree search algorithm. It assumes that the 426 * information is already sorted with increasing order info[0] 427 * < info[1] < info[2] .... < info[n-1] where n is the size of 428 * the information array 429 */ 430 r_index = rbr_p->num_blocks - 1; 431 l_index = 0; 432 search_done = B_FALSE; 433 anchor_index = MID_INDEX(r_index, l_index); 434 while (search_done == B_FALSE) { 435 if ((r_index == l_index) || 436 (iteration >= max_iterations)) 437 search_done = B_TRUE; 438 439 end_side = TO_RIGHT; /* to the right */ 440 base_side = TO_LEFT; /* to the left */ 441 /* read the DVMA address information and sort it */ 442 dvma_addr = bufinfo[anchor_index].dvma_addr; 443 chunk_size = bufinfo[anchor_index].buf_size; 444 445 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 446 "==> hxge_rxbuf_pp_to_vp: (searching)" 447 "buf_pp $%p btype %d " 448 "anchor_index %d chunk_size %d dvmaaddr $%p", 449 pkt_buf_addr_pp, pktbufsz_type, anchor_index, 450 chunk_size, dvma_addr)); 451 452 if (pktbuf_pp >= dvma_addr) 453 base_side = TO_RIGHT; /* to the right */ 454 if (pktbuf_pp < (dvma_addr + chunk_size)) 455 end_side = TO_LEFT; /* to the left */ 456 457 switch (base_side + end_side) { 458 case IN_MIDDLE: 459 /* found */ 460 found = B_TRUE; 461 search_done = B_TRUE; 462 if ((pktbuf_pp + bufsize) < 463 (dvma_addr + chunk_size)) 464 ring_info->hint[pktbufsz_type] = 465 bufinfo[anchor_index].buf_index; 466 break; 467 case BOTH_RIGHT: 468 /* not found: go to the right */ 469 l_index = anchor_index + 1; 470 anchor_index = MID_INDEX(r_index, l_index); 471 break; 472 473 case BOTH_LEFT: 474 /* not found: go to the left */ 475 r_index = anchor_index - 1; 476 anchor_index = MID_INDEX(r_index, l_index); 477 break; 478 default: /* should not come here */ 479 return (HXGE_ERROR); 480 } 481 iteration++; 482 } 483 484 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 485 "==> hxge_rxbuf_pp_to_vp: (search done)" 486 "buf_pp $%p btype %d anchor_index %d", 487 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 488 } 489 490 if (found == B_FALSE) { 491 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 492 "==> hxge_rxbuf_pp_to_vp: (search failed)" 493 "buf_pp $%p btype %d anchor_index %d", 494 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 495 return (HXGE_ERROR); 496 } 497 498 found_index: 499 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 500 "==> hxge_rxbuf_pp_to_vp: (FOUND1)" 501 "buf_pp $%p btype %d bufsize %d anchor_index %d", 502 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index)); 503 504 /* index of the first block in this chunk */ 505 chunk_index = bufinfo[anchor_index].start_index; 506 dvma_addr = bufinfo[anchor_index].dvma_addr; 507 page_size_mask = ring_info->block_size_mask; 508 509 
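	/*
	 * Illustrative sketch, not part of the driver: the hint check and
	 * search above reduce to a bounded binary search over an array of
	 * chunk records sorted by DVMA address (hxge_rxbuf_index_info_init()
	 * sorts that array and caps the iteration count).  The helper below
	 * shows the core search in isolation; the structure and function
	 * names are hypothetical and the block is not compiled.
	 */
#if 0
typedef struct chunk_rec {
	uint64_t dvma_addr;	/* first DVMA address covered by the chunk */
	uint32_t buf_size;	/* number of bytes covered by the chunk */
	uint32_t start_index;	/* ring index of the chunk's first block */
} chunk_rec_t;

/* Return the index of the chunk containing pp, or -1 if none does. */
static int
find_chunk(const chunk_rec_t *tbl, int nchunks, uint64_t pp)
{
	int l = 0, r = nchunks - 1;

	while (l <= r) {
		int m = (l + r + 1) >> 1;	/* same midpoint as MID_INDEX */

		if (pp < tbl[m].dvma_addr)
			r = m - 1;		/* BOTH_LEFT: go left */
		else if (pp >= tbl[m].dvma_addr + tbl[m].buf_size)
			l = m + 1;		/* BOTH_RIGHT: go right */
		else
			return (m);		/* IN_MIDDLE: found */
	}
	return (-1);
}
#endif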
HXGE_DEBUG_MSG((hxgep, RX2_CTL, 510 "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 511 "buf_pp $%p btype %d bufsize %d " 512 "anchor_index %d chunk_index %d dvma $%p", 513 pkt_buf_addr_pp, pktbufsz_type, bufsize, 514 anchor_index, chunk_index, dvma_addr)); 515 516 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 517 block_size = rbr_p->block_size; /* System block(page) size */ 518 519 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 520 "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 521 "buf_pp $%p btype %d bufsize %d " 522 "anchor_index %d chunk_index %d dvma $%p " 523 "offset %d block_size %d", 524 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index, 525 chunk_index, dvma_addr, offset, block_size)); 526 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index")); 527 528 block_index = (offset / block_size); /* index within chunk */ 529 total_index = chunk_index + block_index; 530 531 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 532 "==> hxge_rxbuf_pp_to_vp: " 533 "total_index %d dvma_addr $%p " 534 "offset %d block_size %d " 535 "block_index %d ", 536 total_index, dvma_addr, offset, block_size, block_index)); 537 538 #if defined(__i386) 539 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 540 (uint32_t)offset); 541 #else 542 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 543 offset); 544 #endif 545 546 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 547 "==> hxge_rxbuf_pp_to_vp: " 548 "total_index %d dvma_addr $%p " 549 "offset %d block_size %d " 550 "block_index %d " 551 "*pkt_buf_addr_p $%p", 552 total_index, dvma_addr, offset, block_size, 553 block_index, *pkt_buf_addr_p)); 554 555 *msg_index = total_index; 556 *bufoffset = (offset & page_size_mask); 557 558 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 559 "==> hxge_rxbuf_pp_to_vp: get msg index: " 560 "msg_index %d bufoffset_index %d", 561 *msg_index, *bufoffset)); 562 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp")); 563 564 return (HXGE_OK); 565 } 566 567 568 /* 569 * used by quick sort (qsort) function 570 * to perform comparison 571 */ 572 static int 573 hxge_sort_compare(const void *p1, const void *p2) 574 { 575 576 rxbuf_index_info_t *a, *b; 577 578 a = (rxbuf_index_info_t *)p1; 579 b = (rxbuf_index_info_t *)p2; 580 581 if (a->dvma_addr > b->dvma_addr) 582 return (1); 583 if (a->dvma_addr < b->dvma_addr) 584 return (-1); 585 return (0); 586 } 587 588 /* 589 * Grabbed this sort implementation from common/syscall/avl.c 590 * 591 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 
592 * v = Ptr to array/vector of objs 593 * n = # objs in the array 594 * s = size of each obj (must be multiples of a word size) 595 * f = ptr to function to compare two objs 596 * returns (-1 = less than, 0 = equal, 1 = greater than 597 */ 598 void 599 hxge_ksort(caddr_t v, int n, int s, int (*f) ()) 600 { 601 int g, i, j, ii; 602 unsigned int *p1, *p2; 603 unsigned int tmp; 604 605 /* No work to do */ 606 if (v == NULL || n <= 1) 607 return; 608 /* Sanity check on arguments */ 609 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 610 ASSERT(s > 0); 611 612 for (g = n / 2; g > 0; g /= 2) { 613 for (i = g; i < n; i++) { 614 for (j = i - g; j >= 0 && 615 (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) { 616 p1 = (unsigned *)(v + j * s); 617 p2 = (unsigned *)(v + (j + g) * s); 618 for (ii = 0; ii < s / 4; ii++) { 619 tmp = *p1; 620 *p1++ = *p2; 621 *p2++ = tmp; 622 } 623 } 624 } 625 } 626 } 627 628 /* 629 * Initialize data structures required for rxdma 630 * buffer dvma->vmem address lookup 631 */ 632 /*ARGSUSED*/ 633 static hxge_status_t 634 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp) 635 { 636 int index; 637 rxring_info_t *ring_info; 638 int max_iteration = 0, max_index = 0; 639 640 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init")); 641 642 ring_info = rbrp->ring_info; 643 ring_info->hint[0] = NO_HINT; 644 ring_info->hint[1] = NO_HINT; 645 ring_info->hint[2] = NO_HINT; 646 max_index = rbrp->num_blocks; 647 648 /* read the DVMA address information and sort it */ 649 /* do init of the information array */ 650 651 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 652 " hxge_rxbuf_index_info_init Sort ptrs")); 653 654 /* sort the array */ 655 hxge_ksort((void *) ring_info->buffer, max_index, 656 sizeof (rxbuf_index_info_t), hxge_sort_compare); 657 658 for (index = 0; index < max_index; index++) { 659 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 660 " hxge_rxbuf_index_info_init: sorted chunk %d " 661 " ioaddr $%p kaddr $%p size %x", 662 index, ring_info->buffer[index].dvma_addr, 663 ring_info->buffer[index].kaddr, 664 ring_info->buffer[index].buf_size)); 665 } 666 667 max_iteration = 0; 668 while (max_index >= (1ULL << max_iteration)) 669 max_iteration++; 670 ring_info->max_iterations = max_iteration + 1; 671 672 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 673 " hxge_rxbuf_index_info_init Find max iter %d", 674 ring_info->max_iterations)); 675 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init")); 676 677 return (HXGE_OK); 678 } 679 680 /*ARGSUSED*/ 681 void 682 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p) 683 { 684 #ifdef HXGE_DEBUG 685 686 uint32_t bptr; 687 uint64_t pp; 688 689 bptr = entry_p->bits.pkt_buf_addr; 690 691 HXGE_DEBUG_MSG((hxgep, RX_CTL, 692 "\trcr entry $%p " 693 "\trcr entry 0x%0llx " 694 "\trcr entry 0x%08x " 695 "\trcr entry 0x%08x " 696 "\tvalue 0x%0llx\n" 697 "\tmulti = %d\n" 698 "\tpkt_type = 0x%x\n" 699 "\terror = 0x%04x\n" 700 "\tl2_len = %d\n" 701 "\tpktbufsize = %d\n" 702 "\tpkt_buf_addr = $%p\n" 703 "\tpkt_buf_addr (<< 6) = $%p\n", 704 entry_p, 705 *(int64_t *)entry_p, 706 *(int32_t *)entry_p, 707 *(int32_t *)((char *)entry_p + 32), 708 entry_p->value, 709 entry_p->bits.multi, 710 entry_p->bits.pkt_type, 711 entry_p->bits.error, 712 entry_p->bits.l2_len, 713 entry_p->bits.pktbufsz, 714 bptr, 715 entry_p->bits.pkt_buf_addr_l)); 716 717 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 718 RCR_PKT_BUF_ADDR_SHIFT; 719 720 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 721 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 722 #endif 723 } 
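
/*
 * Illustrative sketch, not part of the driver: hxge_dump_rcr_entry() above
 * and hxge_receive_packet() below both pick apart the same raw 64-bit RCR
 * completion entry.  The hypothetical helper below gathers that field
 * extraction in one place using the driver's existing RCR_* masks and
 * shifts; only the struct and function names are new, and the block is
 * not compiled.
 */
#if 0
typedef struct rcr_fields {
	uint64_t multi;		/* nonzero: more entries follow for this packet */
	uint64_t pkt_type;	/* compared against RCR_PKT_IS_TCP/RCR_PKT_IS_UDP */
	uint8_t	 error_type;	/* completion error bits */
	uint16_t l2_len;	/* L2 length as reported (FCS not yet stripped) */
	uint8_t	 pktbufsz_type;	/* which of the three buffer sizes was used */
	uint64_t pkt_buf_dvma;	/* full I/O address of the packet buffer */
} rcr_fields_t;

static void
rcr_entry_decode(uint64_t rcr_entry, rcr_fields_t *f)
{
	f->multi = rcr_entry & RCR_MULTI_MASK;
	f->pkt_type = rcr_entry & RCR_PKT_TYPE_MASK;
	f->error_type = (rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT;
	f->l2_len = (rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT;
	f->pktbufsz_type = (rcr_entry & RCR_PKTBUFSZ_MASK) >>
	    RCR_PKTBUFSZ_SHIFT;

	/*
	 * Two-stage shift, as in hxge_receive_packet(): the entry stores
	 * the buffer address with its low-order bits dropped.
	 */
	f->pkt_buf_dvma = ((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
	    RCR_PKT_BUF_ADDR_SHIFT) << RCR_PKT_BUF_ADDR_SHIFT_FULL;
}
#endif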
724 725 /*ARGSUSED*/ 726 void 727 hxge_rxdma_stop(p_hxge_t hxgep) 728 { 729 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop")); 730 731 (void) hxge_rx_vmac_disable(hxgep); 732 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP); 733 734 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop")); 735 } 736 737 void 738 hxge_rxdma_stop_reinit(p_hxge_t hxgep) 739 { 740 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit")); 741 742 (void) hxge_rxdma_stop(hxgep); 743 (void) hxge_uninit_rxdma_channels(hxgep); 744 (void) hxge_init_rxdma_channels(hxgep); 745 746 (void) hxge_rx_vmac_enable(hxgep); 747 748 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit")); 749 } 750 751 hxge_status_t 752 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable) 753 { 754 int i, ndmas; 755 uint16_t channel; 756 p_rx_rbr_rings_t rx_rbr_rings; 757 p_rx_rbr_ring_t *rbr_rings; 758 hpi_handle_t handle; 759 hpi_status_t rs = HPI_SUCCESS; 760 hxge_status_t status = HXGE_OK; 761 762 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 763 "==> hxge_rxdma_hw_mode: mode %d", enable)); 764 765 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 766 HXGE_DEBUG_MSG((hxgep, RX_CTL, 767 "<== hxge_rxdma_mode: not initialized")); 768 return (HXGE_ERROR); 769 } 770 771 rx_rbr_rings = hxgep->rx_rbr_rings; 772 if (rx_rbr_rings == NULL) { 773 HXGE_DEBUG_MSG((hxgep, RX_CTL, 774 "<== hxge_rxdma_mode: NULL ring pointer")); 775 return (HXGE_ERROR); 776 } 777 778 if (rx_rbr_rings->rbr_rings == NULL) { 779 HXGE_DEBUG_MSG((hxgep, RX_CTL, 780 "<== hxge_rxdma_mode: NULL rbr rings pointer")); 781 return (HXGE_ERROR); 782 } 783 784 ndmas = rx_rbr_rings->ndmas; 785 if (!ndmas) { 786 HXGE_DEBUG_MSG((hxgep, RX_CTL, 787 "<== hxge_rxdma_mode: no channel")); 788 return (HXGE_ERROR); 789 } 790 791 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 792 "==> hxge_rxdma_mode (ndmas %d)", ndmas)); 793 794 rbr_rings = rx_rbr_rings->rbr_rings; 795 796 handle = HXGE_DEV_HPI_HANDLE(hxgep); 797 798 for (i = 0; i < ndmas; i++) { 799 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 800 continue; 801 } 802 channel = rbr_rings[i]->rdc; 803 if (enable) { 804 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 805 "==> hxge_rxdma_hw_mode: channel %d (enable)", 806 channel)); 807 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 808 } else { 809 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 810 "==> hxge_rxdma_hw_mode: channel %d (disable)", 811 channel)); 812 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 813 } 814 } 815 816 status = ((rs == HPI_SUCCESS) ? 
HXGE_OK : HXGE_ERROR | rs); 817 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 818 "<== hxge_rxdma_hw_mode: status 0x%x", status)); 819 820 return (status); 821 } 822 823 int 824 hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel) 825 { 826 int i, ndmas; 827 uint16_t rdc; 828 p_rx_rbr_rings_t rx_rbr_rings; 829 p_rx_rbr_ring_t *rbr_rings; 830 831 HXGE_DEBUG_MSG((hxgep, RX_CTL, 832 "==> hxge_rxdma_get_ring_index: channel %d", channel)); 833 834 rx_rbr_rings = hxgep->rx_rbr_rings; 835 if (rx_rbr_rings == NULL) { 836 HXGE_DEBUG_MSG((hxgep, RX_CTL, 837 "<== hxge_rxdma_get_ring_index: NULL ring pointer")); 838 return (-1); 839 } 840 841 ndmas = rx_rbr_rings->ndmas; 842 if (!ndmas) { 843 HXGE_DEBUG_MSG((hxgep, RX_CTL, 844 "<== hxge_rxdma_get_ring_index: no channel")); 845 return (-1); 846 } 847 848 HXGE_DEBUG_MSG((hxgep, RX_CTL, 849 "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 850 851 rbr_rings = rx_rbr_rings->rbr_rings; 852 for (i = 0; i < ndmas; i++) { 853 rdc = rbr_rings[i]->rdc; 854 if (channel == rdc) { 855 HXGE_DEBUG_MSG((hxgep, RX_CTL, 856 "==> hxge_rxdma_get_rbr_ring: " 857 "channel %d (index %d) " 858 "ring %d", channel, i, rbr_rings[i])); 859 860 return (i); 861 } 862 } 863 864 HXGE_DEBUG_MSG((hxgep, RX_CTL, 865 "<== hxge_rxdma_get_rbr_ring_index: not found")); 866 867 return (-1); 868 } 869 870 /* 871 * Static functions start here. 872 */ 873 static p_rx_msg_t 874 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p) 875 { 876 p_rx_msg_t hxge_mp = NULL; 877 p_hxge_dma_common_t dmamsg_p; 878 uchar_t *buffer; 879 880 hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 881 if (hxge_mp == NULL) { 882 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 883 "Allocation of a rx msg failed.")); 884 goto hxge_allocb_exit; 885 } 886 887 hxge_mp->use_buf_pool = B_FALSE; 888 if (dmabuf_p) { 889 hxge_mp->use_buf_pool = B_TRUE; 890 891 dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma; 892 *dmamsg_p = *dmabuf_p; 893 dmamsg_p->nblocks = 1; 894 dmamsg_p->block_size = size; 895 dmamsg_p->alength = size; 896 buffer = (uchar_t *)dmabuf_p->kaddrp; 897 898 dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size); 899 dmabuf_p->ioaddr_pp = (void *) 900 ((char *)dmabuf_p->ioaddr_pp + size); 901 902 dmabuf_p->alength -= size; 903 dmabuf_p->offset += size; 904 dmabuf_p->dma_cookie.dmac_laddress += size; 905 dmabuf_p->dma_cookie.dmac_size -= size; 906 } else { 907 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 908 if (buffer == NULL) { 909 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 910 "Allocation of a receive page failed.")); 911 goto hxge_allocb_fail1; 912 } 913 } 914 915 hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb); 916 if (hxge_mp->rx_mblk_p == NULL) { 917 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed.")); 918 goto hxge_allocb_fail2; 919 } 920 hxge_mp->buffer = buffer; 921 hxge_mp->block_size = size; 922 hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb; 923 hxge_mp->freeb.free_arg = (caddr_t)hxge_mp; 924 hxge_mp->ref_cnt = 1; 925 hxge_mp->free = B_TRUE; 926 hxge_mp->rx_use_bcopy = B_FALSE; 927 928 atomic_inc_32(&hxge_mblks_pending); 929 930 goto hxge_allocb_exit; 931 932 hxge_allocb_fail2: 933 if (!hxge_mp->use_buf_pool) { 934 KMEM_FREE(buffer, size); 935 } 936 hxge_allocb_fail1: 937 KMEM_FREE(hxge_mp, sizeof (rx_msg_t)); 938 hxge_mp = NULL; 939 940 hxge_allocb_exit: 941 return (hxge_mp); 942 } 943 944 p_mblk_t 945 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size) 946 { 947 p_mblk_t mp; 948 949 HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb")); 950 HXGE_DEBUG_MSG((NULL, 
MEM_CTL, "hxge_mp = $%p " 951 "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size)); 952 953 mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb); 954 if (mp == NULL) { 955 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 956 goto hxge_dupb_exit; 957 } 958 959 atomic_inc_32(&hxge_mp->ref_cnt); 960 961 hxge_dupb_exit: 962 HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp)); 963 return (mp); 964 } 965 966 p_mblk_t 967 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size) 968 { 969 p_mblk_t mp; 970 uchar_t *dp; 971 972 mp = allocb(size + HXGE_RXBUF_EXTRA, 0); 973 if (mp == NULL) { 974 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 975 goto hxge_dupb_bcopy_exit; 976 } 977 dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA; 978 bcopy((void *) &hxge_mp->buffer[offset], dp, size); 979 mp->b_wptr = dp + size; 980 981 hxge_dupb_bcopy_exit: 982 983 HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp)); 984 985 return (mp); 986 } 987 988 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, 989 p_rx_msg_t rx_msg_p); 990 991 void 992 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 993 { 994 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page")); 995 996 /* Reuse this buffer */ 997 rx_msg_p->free = B_FALSE; 998 rx_msg_p->cur_usage_cnt = 0; 999 rx_msg_p->max_usage_cnt = 0; 1000 rx_msg_p->pkt_buf_size = 0; 1001 1002 if (rx_rbr_p->rbr_use_bcopy) { 1003 rx_msg_p->rx_use_bcopy = B_FALSE; 1004 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1005 } 1006 1007 /* 1008 * Get the rbr header pointer and its offset index. 1009 */ 1010 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1011 rx_rbr_p->rbr_wrap_mask); 1012 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1013 1014 hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1); 1015 1016 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1017 "<== hxge_post_page (channel %d post_next_index %d)", 1018 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1019 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page")); 1020 } 1021 1022 void 1023 hxge_freeb(p_rx_msg_t rx_msg_p) 1024 { 1025 size_t size; 1026 uchar_t *buffer = NULL; 1027 int ref_cnt; 1028 boolean_t free_state = B_FALSE; 1029 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1030 1031 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb")); 1032 HXGE_DEBUG_MSG((NULL, MEM2_CTL, 1033 "hxge_freeb:rx_msg_p = $%p (block pending %d)", 1034 rx_msg_p, hxge_mblks_pending)); 1035 1036 if (ring == NULL) 1037 return; 1038 1039 /* 1040 * This is to prevent posting activities while we are recovering 1041 * from fatal errors. This should not be a performance drag since 1042 * ref_cnt != 0 most times. 1043 */ 1044 MUTEX_ENTER(&ring->post_lock); 1045 1046 /* 1047 * First we need to get the free state, then 1048 * atomic decrement the reference count to prevent 1049 * the race condition with the interrupt thread that 1050 * is processing a loaned up buffer block. 
1051 */ 1052 free_state = rx_msg_p->free; 1053 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1054 if (!ref_cnt) { 1055 atomic_dec_32(&hxge_mblks_pending); 1056 1057 buffer = rx_msg_p->buffer; 1058 size = rx_msg_p->block_size; 1059 1060 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: " 1061 "will free: rx_msg_p = $%p (block pending %d)", 1062 rx_msg_p, hxge_mblks_pending)); 1063 1064 if (!rx_msg_p->use_buf_pool) { 1065 KMEM_FREE(buffer, size); 1066 } 1067 1068 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1069 /* 1070 * Decrement the receive buffer ring's reference 1071 * count, too. 1072 */ 1073 atomic_dec_32(&ring->rbr_ref_cnt); 1074 1075 /* 1076 * Free the receive buffer ring, iff 1077 * 1. all the receive buffers have been freed 1078 * 2. and we are in the proper state (that is, 1079 * we are not UNMAPPING). 1080 */ 1081 if (ring->rbr_ref_cnt == 0 && 1082 ring->rbr_state == RBR_UNMAPPED) { 1083 KMEM_FREE(ring, sizeof (*ring)); 1084 } 1085 } 1086 1087 /* 1088 * Repost buffer. 1089 */ 1090 if (free_state && (ref_cnt == 1)) { 1091 HXGE_DEBUG_MSG((NULL, RX_CTL, 1092 "hxge_freeb: post page $%p:", rx_msg_p)); 1093 if (ring->rbr_state == RBR_POSTING) 1094 hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p); 1095 } 1096 1097 MUTEX_EXIT(&ring->post_lock); 1098 1099 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb")); 1100 } 1101 1102 uint_t 1103 hxge_rx_intr(caddr_t arg1, caddr_t arg2) 1104 { 1105 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 1106 p_hxge_t hxgep = (p_hxge_t)arg2; 1107 p_hxge_ldg_t ldgp; 1108 uint8_t channel; 1109 hpi_handle_t handle; 1110 rdc_stat_t cs; 1111 uint_t serviced = DDI_INTR_UNCLAIMED; 1112 1113 if (ldvp == NULL) { 1114 HXGE_DEBUG_MSG((NULL, RX_INT_CTL, 1115 "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1116 return (DDI_INTR_UNCLAIMED); 1117 } 1118 1119 if (arg2 == NULL || (void *) ldvp->hxgep != arg2) { 1120 hxgep = ldvp->hxgep; 1121 } 1122 1123 /* 1124 * If the interface is not started, just swallow the interrupt 1125 * for the logical device and don't rearm it. 1126 */ 1127 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) 1128 return (DDI_INTR_CLAIMED); 1129 1130 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1131 "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1132 1133 /* 1134 * This interrupt handler is for a specific receive dma channel. 1135 */ 1136 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1137 1138 /* 1139 * Get the control and status for this channel. 1140 */ 1141 channel = ldvp->channel; 1142 ldgp = ldvp->ldgp; 1143 RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value); 1144 1145 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d " 1146 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1147 channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres)); 1148 1149 hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs); 1150 serviced = DDI_INTR_CLAIMED; 1151 1152 /* error events. */ 1153 if (cs.value & RDC_STAT_ERROR) { 1154 (void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs); 1155 } 1156 1157 hxge_intr_exit: 1158 /* 1159 * Enable the mailbox update interrupt if we want to use mailbox. We 1160 * probably don't need to use mailbox as it only saves us one pio read. 1161 * Also write 1 to rcrthres and rcrto to clear these two edge triggered 1162 * bits. 1163 */ 1164 cs.value &= RDC_STAT_WR1C; 1165 cs.bits.mex = 1; 1166 cs.bits.ptrread = 0; 1167 cs.bits.pktread = 0; 1168 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1169 1170 /* 1171 * Rearm this logical group if this is a single device group. 
1172 */ 1173 if (ldgp->nldvs == 1) { 1174 ld_intr_mgmt_t mgm; 1175 1176 mgm.value = 0; 1177 mgm.bits.arm = 1; 1178 mgm.bits.timer = ldgp->ldg_timer; 1179 HXGE_REG_WR32(handle, 1180 LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value); 1181 } 1182 1183 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1184 "<== hxge_rx_intr: serviced %d", serviced)); 1185 1186 return (serviced); 1187 } 1188 1189 static void 1190 hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1191 rdc_stat_t cs) 1192 { 1193 p_mblk_t mp; 1194 p_rx_rcr_ring_t rcrp; 1195 1196 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring")); 1197 if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs)) == NULL) { 1198 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1199 "<== hxge_rx_pkts_vring: no mp")); 1200 return; 1201 } 1202 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp)); 1203 1204 #ifdef HXGE_DEBUG 1205 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1206 "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) " 1207 "LEN %d mp $%p mp->b_next $%p rcrp $%p " 1208 "mac_handle $%p", 1209 (mp->b_wptr - mp->b_rptr), mp, mp->b_next, 1210 rcrp, rcrp->rcr_mac_handle)); 1211 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1212 "==> hxge_rx_pkts_vring: dump packets " 1213 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1214 mp, mp->b_rptr, mp->b_wptr, 1215 hxge_dump_packet((char *)mp->b_rptr, 64))); 1216 1217 if (mp->b_cont) { 1218 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1219 "==> hxge_rx_pkts_vring: dump b_cont packets " 1220 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1221 mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr, 1222 hxge_dump_packet((char *)mp->b_cont->b_rptr, 1223 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1224 } 1225 if (mp->b_next) { 1226 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1227 "==> hxge_rx_pkts_vring: dump next packets " 1228 "(b_rptr $%p): %s", 1229 mp->b_next->b_rptr, 1230 hxge_dump_packet((char *)mp->b_next->b_rptr, 64))); 1231 } 1232 #endif 1233 1234 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1235 "==> hxge_rx_pkts_vring: send packet to stack")); 1236 mac_rx(hxgep->mach, rcrp->rcr_mac_handle, mp); 1237 1238 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring")); 1239 } 1240 1241 /*ARGSUSED*/ 1242 mblk_t * 1243 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1244 p_rx_rcr_ring_t *rcrp, rdc_stat_t cs) 1245 { 1246 hpi_handle_t handle; 1247 uint8_t channel; 1248 p_rx_rcr_rings_t rx_rcr_rings; 1249 p_rx_rcr_ring_t rcr_p; 1250 uint32_t comp_rd_index; 1251 p_rcr_entry_t rcr_desc_rd_head_p; 1252 p_rcr_entry_t rcr_desc_rd_head_pp; 1253 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1254 uint16_t qlen, nrcr_read, npkt_read; 1255 uint32_t qlen_hw, qlen_sw; 1256 uint32_t invalid_rcr_entry; 1257 boolean_t multi; 1258 rdc_rcr_cfg_b_t rcr_cfg_b; 1259 p_rx_mbox_t rx_mboxp; 1260 p_rxdma_mailbox_t mboxp; 1261 uint64_t rcr_head_index, rcr_tail_index; 1262 uint64_t rcr_tail; 1263 uint64_t value; 1264 rdc_rcr_tail_t rcr_tail_reg; 1265 p_hxge_rx_ring_stats_t rdc_stats; 1266 1267 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d " 1268 "channel %d", vindex, ldvp->channel)); 1269 1270 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 1271 return (NULL); 1272 } 1273 1274 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1275 rx_rcr_rings = hxgep->rx_rcr_rings; 1276 rcr_p = rx_rcr_rings->rcr_rings[vindex]; 1277 channel = rcr_p->rdc; 1278 if (channel != ldvp->channel) { 1279 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d " 1280 "channel %d, and rcr channel %d not matched.", 1281 vindex, ldvp->channel, channel)); 1282 return (NULL); 1283 } 1284 1285 HXGE_DEBUG_MSG((hxgep, 
RX_INT_CTL, 1286 "==> hxge_rx_pkts: START: rcr channel %d " 1287 "head_p $%p head_pp $%p index %d ", 1288 channel, rcr_p->rcr_desc_rd_head_p, 1289 rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index)); 1290 1291 rx_mboxp = hxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 1292 mboxp = (p_rxdma_mailbox_t)rx_mboxp->rx_mbox.kaddrp; 1293 1294 (void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1295 RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value); 1296 rcr_tail = rcr_tail_reg.bits.tail; 1297 1298 if (!qlen) { 1299 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1300 "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)", 1301 channel, qlen)); 1302 return (NULL); 1303 } 1304 1305 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d " 1306 "qlen %d", channel, qlen)); 1307 1308 comp_rd_index = rcr_p->comp_rd_index; 1309 1310 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 1311 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 1312 nrcr_read = npkt_read = 0; 1313 1314 /* 1315 * Number of packets queued (The jumbo or multi packet will be counted 1316 * as only one paccket and it may take up more than one completion 1317 * entry). 1318 */ 1319 qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts; 1320 head_mp = NULL; 1321 tail_mp = &head_mp; 1322 nmp = mp_cont = NULL; 1323 multi = B_FALSE; 1324 1325 rcr_head_index = rcr_p->rcr_desc_rd_head_p - rcr_p->rcr_desc_first_p; 1326 rcr_tail_index = rcr_tail - rcr_p->rcr_tail_begin; 1327 1328 if (rcr_tail_index >= rcr_head_index) { 1329 qlen_sw = rcr_tail_index - rcr_head_index; 1330 } else { 1331 /* rcr_tail has wrapped around */ 1332 qlen_sw = (rcr_p->comp_size - rcr_head_index) + rcr_tail_index; 1333 } 1334 1335 if (qlen_hw > qlen_sw) { 1336 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1337 "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n", 1338 channel, qlen_hw, qlen_sw)); 1339 qlen_hw = qlen_sw; 1340 } 1341 1342 while (qlen_hw) { 1343 #ifdef HXGE_DEBUG 1344 hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p); 1345 #endif 1346 /* 1347 * Process one completion ring entry. 1348 */ 1349 invalid_rcr_entry = 0; 1350 hxge_receive_packet(hxgep, 1351 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont, 1352 &invalid_rcr_entry); 1353 if (invalid_rcr_entry != 0) { 1354 rdc_stats = rcr_p->rdc_stats; 1355 rdc_stats->rcr_invalids++; 1356 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1357 "Channel %d could only read 0x%x packets, " 1358 "but 0x%x pending\n", channel, npkt_read, qlen_hw)); 1359 break; 1360 } 1361 1362 /* 1363 * message chaining modes (nemo msg chaining) 1364 */ 1365 if (nmp) { 1366 nmp->b_next = NULL; 1367 if (!multi && !mp_cont) { /* frame fits a partition */ 1368 *tail_mp = nmp; 1369 tail_mp = &nmp->b_next; 1370 nmp = NULL; 1371 } else if (multi && !mp_cont) { /* first segment */ 1372 *tail_mp = nmp; 1373 tail_mp = &nmp->b_cont; 1374 } else if (multi && mp_cont) { /* mid of multi segs */ 1375 *tail_mp = mp_cont; 1376 tail_mp = &mp_cont->b_cont; 1377 } else if (!multi && mp_cont) { /* last segment */ 1378 *tail_mp = mp_cont; 1379 tail_mp = &nmp->b_next; 1380 nmp = NULL; 1381 } 1382 } 1383 1384 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1385 "==> hxge_rx_pkts: loop: rcr channel %d " 1386 "before updating: multi %d " 1387 "nrcr_read %d " 1388 "npk read %d " 1389 "head_pp $%p index %d ", 1390 channel, multi, 1391 nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index)); 1392 1393 if (!multi) { 1394 qlen_hw--; 1395 npkt_read++; 1396 } 1397 1398 /* 1399 * Update the next read entry. 
1400 */ 1401 comp_rd_index = NEXT_ENTRY(comp_rd_index, 1402 rcr_p->comp_wrap_mask); 1403 1404 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 1405 rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p); 1406 1407 nrcr_read++; 1408 1409 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1410 "<== hxge_rx_pkts: (SAM, process one packet) " 1411 "nrcr_read %d", nrcr_read)); 1412 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1413 "==> hxge_rx_pkts: loop: rcr channel %d " 1414 "multi %d nrcr_read %d npk read %d head_pp $%p index %d ", 1415 channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp, 1416 comp_rd_index)); 1417 } 1418 1419 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 1420 rcr_p->comp_rd_index = comp_rd_index; 1421 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 1422 1423 /* Adjust the mailbox queue length for a hardware bug workaround */ 1424 mboxp->rcrstat_a.bits.qlen -= npkt_read; 1425 1426 if ((hxgep->intr_timeout != rcr_p->intr_timeout) || 1427 (hxgep->intr_threshold != rcr_p->intr_threshold)) { 1428 rcr_p->intr_timeout = hxgep->intr_timeout; 1429 rcr_p->intr_threshold = hxgep->intr_threshold; 1430 rcr_cfg_b.value = 0x0ULL; 1431 if (rcr_p->intr_timeout) 1432 rcr_cfg_b.bits.entout = 1; 1433 rcr_cfg_b.bits.timeout = rcr_p->intr_timeout; 1434 rcr_cfg_b.bits.pthres = rcr_p->intr_threshold; 1435 RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, 1436 channel, rcr_cfg_b.value); 1437 } 1438 1439 cs.bits.pktread = npkt_read; 1440 cs.bits.ptrread = nrcr_read; 1441 value = cs.value; 1442 cs.value &= 0xffffffffULL; 1443 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1444 1445 cs.value = value & ~0xffffffffULL; 1446 cs.bits.pktread = 0; 1447 cs.bits.ptrread = 0; 1448 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1449 1450 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1451 "==> hxge_rx_pkts: EXIT: rcr channel %d " 1452 "head_pp $%p index %016llx ", 1453 channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index)); 1454 1455 /* 1456 * Update RCR buffer pointer read and number of packets read. 1457 */ 1458 1459 *rcrp = rcr_p; 1460 1461 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts")); 1462 1463 return (head_mp); 1464 } 1465 1466 #define RCR_ENTRY_PATTERN 0x5a5a6b6b7c7c8d8dULL 1467 1468 /*ARGSUSED*/ 1469 void 1470 hxge_receive_packet(p_hxge_t hxgep, 1471 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 1472 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont, 1473 uint32_t *invalid_rcr_entry) 1474 { 1475 p_mblk_t nmp = NULL; 1476 uint64_t multi; 1477 uint8_t channel; 1478 1479 boolean_t first_entry = B_TRUE; 1480 boolean_t is_tcp_udp = B_FALSE; 1481 boolean_t buffer_free = B_FALSE; 1482 boolean_t error_send_up = B_FALSE; 1483 uint8_t error_type; 1484 uint16_t l2_len; 1485 uint16_t skip_len; 1486 uint8_t pktbufsz_type; 1487 uint64_t rcr_entry; 1488 uint64_t *pkt_buf_addr_pp; 1489 uint64_t *pkt_buf_addr_p; 1490 uint32_t buf_offset; 1491 uint32_t bsize; 1492 uint32_t msg_index; 1493 p_rx_rbr_ring_t rx_rbr_p; 1494 p_rx_msg_t *rx_msg_ring_p; 1495 p_rx_msg_t rx_msg_p; 1496 1497 uint16_t sw_offset_bytes = 0, hdr_size = 0; 1498 hxge_status_t status = HXGE_OK; 1499 boolean_t is_valid = B_FALSE; 1500 p_hxge_rx_ring_stats_t rdc_stats; 1501 uint32_t bytes_read; 1502 1503 uint64_t pkt_type; 1504 1505 channel = rcr_p->rdc; 1506 1507 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet")); 1508 1509 first_entry = (*mp == NULL) ? 
B_TRUE : B_FALSE; 1510 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 1511 1512 /* Verify the content of the rcr_entry for a hardware bug workaround */ 1513 if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) { 1514 *invalid_rcr_entry = 1; 1515 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet " 1516 "Channel %d invalid RCR entry 0x%llx found, returning\n", 1517 channel, (long long) rcr_entry)); 1518 return; 1519 } 1520 *((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN; 1521 1522 multi = (rcr_entry & RCR_MULTI_MASK); 1523 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 1524 1525 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 1526 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 1527 1528 /* 1529 * Hardware does not strip the CRC due bug ID 11451 where 1530 * the hardware mis handles minimum size packets. 1531 */ 1532 l2_len -= ETHERFCSL; 1533 1534 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 1535 RCR_PKTBUFSZ_SHIFT); 1536 #if defined(__i386) 1537 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 1538 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 1539 #else 1540 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 1541 RCR_PKT_BUF_ADDR_SHIFT); 1542 #endif 1543 1544 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1545 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1546 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1547 "error_type 0x%x pkt_type 0x%x " 1548 "pktbufsz_type %d ", 1549 rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len, 1550 multi, error_type, pkt_type, pktbufsz_type)); 1551 1552 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1553 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1554 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1555 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 1556 rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type, pkt_type)); 1557 1558 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1559 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1560 "full pkt_buf_addr_pp $%p l2_len %d", 1561 rcr_entry, pkt_buf_addr_pp, l2_len)); 1562 1563 /* get the stats ptr */ 1564 rdc_stats = rcr_p->rdc_stats; 1565 1566 if (!l2_len) { 1567 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1568 "<== hxge_receive_packet: failed: l2 length is 0.")); 1569 return; 1570 } 1571 1572 /* shift 6 bits to get the full io address */ 1573 #if defined(__i386) 1574 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 1575 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1576 #else 1577 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 1578 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1579 #endif 1580 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1581 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1582 "full pkt_buf_addr_pp $%p l2_len %d", 1583 rcr_entry, pkt_buf_addr_pp, l2_len)); 1584 1585 rx_rbr_p = rcr_p->rx_rbr_p; 1586 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 1587 1588 if (first_entry) { 1589 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 1590 RXDMA_HDR_SIZE_DEFAULT); 1591 1592 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1593 "==> hxge_receive_packet: first entry 0x%016llx " 1594 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 1595 rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size)); 1596 } 1597 1598 MUTEX_ENTER(&rcr_p->lock); 1599 MUTEX_ENTER(&rx_rbr_p->lock); 1600 1601 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1602 "==> (rbr 1) hxge_receive_packet: entry 0x%0llx " 1603 "full pkt_buf_addr_pp $%p l2_len %d", 1604 rcr_entry, pkt_buf_addr_pp, l2_len)); 1605 1606 /* 1607 * Packet buffer address in the completion entry points to the starting 1608 * buffer address (offset 0). 
Use the starting buffer address to locate 1609 * the corresponding kernel address. 1610 */ 1611 status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p, 1612 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 1613 &buf_offset, &msg_index); 1614 1615 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1616 "==> (rbr 2) hxge_receive_packet: entry 0x%0llx " 1617 "full pkt_buf_addr_pp $%p l2_len %d", 1618 rcr_entry, pkt_buf_addr_pp, l2_len)); 1619 1620 if (status != HXGE_OK) { 1621 MUTEX_EXIT(&rx_rbr_p->lock); 1622 MUTEX_EXIT(&rcr_p->lock); 1623 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1624 "<== hxge_receive_packet: found vaddr failed %d", status)); 1625 return; 1626 } 1627 1628 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1629 "==> (rbr 3) hxge_receive_packet: entry 0x%0llx " 1630 "full pkt_buf_addr_pp $%p l2_len %d", 1631 rcr_entry, pkt_buf_addr_pp, l2_len)); 1632 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1633 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1634 "full pkt_buf_addr_pp $%p l2_len %d", 1635 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1636 1637 if (msg_index >= rx_rbr_p->tnblocks) { 1638 MUTEX_EXIT(&rx_rbr_p->lock); 1639 MUTEX_EXIT(&rcr_p->lock); 1640 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1641 "==> hxge_receive_packet: FATAL msg_index (%d) " 1642 "should be smaller than tnblocks (%d)\n", 1643 msg_index, rx_rbr_p->tnblocks)); 1644 return; 1645 } 1646 1647 rx_msg_p = rx_msg_ring_p[msg_index]; 1648 1649 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1650 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1651 "full pkt_buf_addr_pp $%p l2_len %d", 1652 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1653 1654 switch (pktbufsz_type) { 1655 case RCR_PKTBUFSZ_0: 1656 bsize = rx_rbr_p->pkt_buf_size0_bytes; 1657 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1658 "==> hxge_receive_packet: 0 buf %d", bsize)); 1659 break; 1660 case RCR_PKTBUFSZ_1: 1661 bsize = rx_rbr_p->pkt_buf_size1_bytes; 1662 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1663 "==> hxge_receive_packet: 1 buf %d", bsize)); 1664 break; 1665 case RCR_PKTBUFSZ_2: 1666 bsize = rx_rbr_p->pkt_buf_size2_bytes; 1667 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1668 "==> hxge_receive_packet: 2 buf %d", bsize)); 1669 break; 1670 case RCR_SINGLE_BLOCK: 1671 bsize = rx_msg_p->block_size; 1672 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1673 "==> hxge_receive_packet: single %d", bsize)); 1674 1675 break; 1676 default: 1677 MUTEX_EXIT(&rx_rbr_p->lock); 1678 MUTEX_EXIT(&rcr_p->lock); 1679 return; 1680 } 1681 1682 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 1683 (buf_offset + sw_offset_bytes), (hdr_size + l2_len), 1684 DDI_DMA_SYNC_FORCPU); 1685 1686 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1687 "==> hxge_receive_packet: after first dump:usage count")); 1688 1689 if (rx_msg_p->cur_usage_cnt == 0) { 1690 if (rx_rbr_p->rbr_use_bcopy) { 1691 atomic_inc_32(&rx_rbr_p->rbr_consumed); 1692 if (rx_rbr_p->rbr_consumed > 1693 rx_rbr_p->rbr_threshold_hi) { 1694 rx_msg_p->rx_use_bcopy = B_TRUE; 1695 } 1696 } 1697 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1698 "==> hxge_receive_packet: buf %d (new block) ", bsize)); 1699 1700 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 1701 rx_msg_p->pkt_buf_size = bsize; 1702 rx_msg_p->cur_usage_cnt = 1; 1703 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 1704 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1705 "==> hxge_receive_packet: buf %d (single block) ", 1706 bsize)); 1707 /* 1708 * Buffer can be reused once the free function is 1709 * called. 
1710 */ 1711 rx_msg_p->max_usage_cnt = 1; 1712 buffer_free = B_TRUE; 1713 } else { 1714 rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize; 1715 if (rx_msg_p->max_usage_cnt == 1) { 1716 buffer_free = B_TRUE; 1717 } 1718 } 1719 } else { 1720 rx_msg_p->cur_usage_cnt++; 1721 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 1722 buffer_free = B_TRUE; 1723 } 1724 } 1725 1726 if (rx_msg_p->rx_use_bcopy) { 1727 rdc_stats->pkt_drop++; 1728 atomic_inc_32(&rx_msg_p->ref_cnt); 1729 if (buffer_free == B_TRUE) { 1730 rx_msg_p->free = B_TRUE; 1731 } 1732 1733 MUTEX_EXIT(&rx_rbr_p->lock); 1734 MUTEX_EXIT(&rcr_p->lock); 1735 hxge_freeb(rx_msg_p); 1736 return; 1737 } 1738 1739 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1740 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 1741 msg_index, l2_len, 1742 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 1743 1744 if (error_type) { 1745 rdc_stats->ierrors++; 1746 /* Update error stats */ 1747 rdc_stats->errlog.compl_err_type = error_type; 1748 HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR); 1749 1750 if (error_type & RCR_CTRL_FIFO_DED) { 1751 rdc_stats->ctrl_fifo_ecc_err++; 1752 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1753 " hxge_receive_packet: " 1754 " channel %d RCR ctrl_fifo_ded error", channel)); 1755 } else if (error_type & RCR_DATA_FIFO_DED) { 1756 rdc_stats->data_fifo_ecc_err++; 1757 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1758 " hxge_receive_packet: channel %d" 1759 " RCR data_fifo_ded error", channel)); 1760 } 1761 1762 /* 1763 * Update and repost buffer block if max usage count is 1764 * reached. 1765 */ 1766 if (error_send_up == B_FALSE) { 1767 atomic_inc_32(&rx_msg_p->ref_cnt); 1768 if (buffer_free == B_TRUE) { 1769 rx_msg_p->free = B_TRUE; 1770 } 1771 1772 MUTEX_EXIT(&rx_rbr_p->lock); 1773 MUTEX_EXIT(&rcr_p->lock); 1774 hxge_freeb(rx_msg_p); 1775 return; 1776 } 1777 } 1778 1779 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1780 "==> hxge_receive_packet: DMA sync second ")); 1781 1782 bytes_read = rcr_p->rcvd_pkt_bytes; 1783 skip_len = sw_offset_bytes + hdr_size; 1784 if (!rx_msg_p->rx_use_bcopy) { 1785 /* 1786 * For loaned up buffers, the driver reference count 1787 * will be incremented first and then the free state. 1788 */ 1789 if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 1790 if (first_entry) { 1791 nmp->b_rptr = &nmp->b_rptr[skip_len]; 1792 if (l2_len < bsize - skip_len) { 1793 nmp->b_wptr = &nmp->b_rptr[l2_len]; 1794 } else { 1795 nmp->b_wptr = &nmp->b_rptr[bsize 1796 - skip_len]; 1797 } 1798 } else { 1799 if (l2_len - bytes_read < bsize) { 1800 nmp->b_wptr = 1801 &nmp->b_rptr[l2_len - bytes_read]; 1802 } else { 1803 nmp->b_wptr = &nmp->b_rptr[bsize]; 1804 } 1805 } 1806 } 1807 } else { 1808 if (first_entry) { 1809 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 1810 l2_len < bsize - skip_len ? 1811 l2_len : bsize - skip_len); 1812 } else { 1813 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset, 1814 l2_len - bytes_read < bsize ? 
1815 l2_len - bytes_read : bsize); 1816 } 1817 } 1818 1819 if (nmp != NULL) { 1820 if (first_entry) 1821 bytes_read = nmp->b_wptr - nmp->b_rptr; 1822 else 1823 bytes_read += nmp->b_wptr - nmp->b_rptr; 1824 1825 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1826 "==> hxge_receive_packet after dupb: " 1827 "rbr consumed %d " 1828 "pktbufsz_type %d " 1829 "nmp $%p rptr $%p wptr $%p " 1830 "buf_offset %d bzise %d l2_len %d skip_len %d", 1831 rx_rbr_p->rbr_consumed, 1832 pktbufsz_type, 1833 nmp, nmp->b_rptr, nmp->b_wptr, 1834 buf_offset, bsize, l2_len, skip_len)); 1835 } else { 1836 cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)"); 1837 1838 atomic_inc_32(&rx_msg_p->ref_cnt); 1839 if (buffer_free == B_TRUE) { 1840 rx_msg_p->free = B_TRUE; 1841 } 1842 1843 MUTEX_EXIT(&rx_rbr_p->lock); 1844 MUTEX_EXIT(&rcr_p->lock); 1845 hxge_freeb(rx_msg_p); 1846 return; 1847 } 1848 1849 if (buffer_free == B_TRUE) { 1850 rx_msg_p->free = B_TRUE; 1851 } 1852 1853 /* 1854 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a 1855 * packet is not fragmented and no error bit is set, then L4 checksum 1856 * is OK. 1857 */ 1858 is_valid = (nmp != NULL); 1859 if (first_entry) { 1860 rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 1861 if (l2_len > (STD_FRAME_SIZE - ETHERFCSL)) 1862 rdc_stats->jumbo_pkts++; 1863 rdc_stats->ibytes += skip_len + l2_len < bsize ? 1864 l2_len : bsize; 1865 } else { 1866 /* 1867 * Add the current portion of the packet to the kstats. 1868 * The current portion of the packet is calculated by using 1869 * length of the packet and the previously received portion. 1870 */ 1871 rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ? 1872 l2_len - rcr_p->rcvd_pkt_bytes : bsize; 1873 } 1874 1875 rcr_p->rcvd_pkt_bytes = bytes_read; 1876 1877 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 1878 atomic_inc_32(&rx_msg_p->ref_cnt); 1879 MUTEX_EXIT(&rx_rbr_p->lock); 1880 MUTEX_EXIT(&rcr_p->lock); 1881 hxge_freeb(rx_msg_p); 1882 } else { 1883 MUTEX_EXIT(&rx_rbr_p->lock); 1884 MUTEX_EXIT(&rcr_p->lock); 1885 } 1886 1887 if (is_valid) { 1888 nmp->b_cont = NULL; 1889 if (first_entry) { 1890 *mp = nmp; 1891 *mp_cont = NULL; 1892 } else { 1893 *mp_cont = nmp; 1894 } 1895 } 1896 1897 /* 1898 * Update stats and hardware checksuming. 1899 */ 1900 if (is_valid && !multi) { 1901 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 1902 pkt_type == RCR_PKT_IS_UDP) ? 
B_TRUE : B_FALSE); 1903 1904 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_receive_packet: " 1905 "is_valid 0x%x multi %d pkt %d d error %d", 1906 is_valid, multi, is_tcp_udp, error_type)); 1907 1908 if (is_tcp_udp && !error_type) { 1909 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 1910 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 1911 1912 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1913 "==> hxge_receive_packet: Full tcp/udp cksum " 1914 "is_valid 0x%x multi %d pkt %d " 1915 "error %d", 1916 is_valid, multi, is_tcp_udp, error_type)); 1917 } 1918 } 1919 1920 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1921 "==> hxge_receive_packet: *mp 0x%016llx", *mp)); 1922 1923 *multi_p = (multi == RCR_MULTI_MASK); 1924 1925 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: " 1926 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 1927 *multi_p, nmp, *mp, *mp_cont)); 1928 } 1929 1930 /*ARGSUSED*/ 1931 static hxge_status_t 1932 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp, 1933 rdc_stat_t cs) 1934 { 1935 p_hxge_rx_ring_stats_t rdc_stats; 1936 hpi_handle_t handle; 1937 boolean_t rxchan_fatal = B_FALSE; 1938 uint8_t channel; 1939 hxge_status_t status = HXGE_OK; 1940 uint64_t cs_val; 1941 1942 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts")); 1943 1944 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1945 channel = ldvp->channel; 1946 1947 /* Clear the interrupts */ 1948 cs.bits.pktread = 0; 1949 cs.bits.ptrread = 0; 1950 cs_val = cs.value & RDC_STAT_WR1C; 1951 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val); 1952 1953 rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index]; 1954 1955 if (cs.bits.rbr_cpl_to) { 1956 rdc_stats->rbr_tmout++; 1957 HXGE_FM_REPORT_ERROR(hxgep, channel, 1958 HXGE_FM_EREPORT_RDMC_RBR_CPL_TO); 1959 rxchan_fatal = B_TRUE; 1960 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1961 "==> hxge_rx_err_evnts(channel %d): " 1962 "fatal error: rx_rbr_timeout", channel)); 1963 } 1964 1965 if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) { 1966 (void) hpi_rxdma_ring_perr_stat_get(handle, 1967 &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par); 1968 } 1969 1970 if (cs.bits.rcr_shadow_par_err) { 1971 rdc_stats->rcr_sha_par++; 1972 HXGE_FM_REPORT_ERROR(hxgep, channel, 1973 HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 1974 rxchan_fatal = B_TRUE; 1975 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1976 "==> hxge_rx_err_evnts(channel %d): " 1977 "fatal error: rcr_shadow_par_err", channel)); 1978 } 1979 1980 if (cs.bits.rbr_prefetch_par_err) { 1981 rdc_stats->rbr_pre_par++; 1982 HXGE_FM_REPORT_ERROR(hxgep, channel, 1983 HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 1984 rxchan_fatal = B_TRUE; 1985 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1986 "==> hxge_rx_err_evnts(channel %d): " 1987 "fatal error: rbr_prefetch_par_err", channel)); 1988 } 1989 1990 if (cs.bits.rbr_pre_empty) { 1991 rdc_stats->rbr_pre_empty++; 1992 HXGE_FM_REPORT_ERROR(hxgep, channel, 1993 HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY); 1994 rxchan_fatal = B_TRUE; 1995 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1996 "==> hxge_rx_err_evnts(channel %d): " 1997 "fatal error: rbr_pre_empty", channel)); 1998 } 1999 2000 if (cs.bits.peu_resp_err) { 2001 rdc_stats->peu_resp_err++; 2002 HXGE_FM_REPORT_ERROR(hxgep, channel, 2003 HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR); 2004 rxchan_fatal = B_TRUE; 2005 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2006 "==> hxge_rx_err_evnts(channel %d): " 2007 "fatal error: peu_resp_err", channel)); 2008 } 2009 2010 if (cs.bits.rcr_thres) { 2011 rdc_stats->rcr_thres++; 2012 } 2013 2014 if (cs.bits.rcr_to) { 2015 rdc_stats->rcr_to++; 2016 } 2017 2018 if 
(cs.bits.rcr_shadow_full) { 2019 rdc_stats->rcr_shadow_full++; 2020 HXGE_FM_REPORT_ERROR(hxgep, channel, 2021 HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL); 2022 rxchan_fatal = B_TRUE; 2023 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2024 "==> hxge_rx_err_evnts(channel %d): " 2025 "fatal error: rcr_shadow_full", channel)); 2026 } 2027 2028 if (cs.bits.rcr_full) { 2029 rdc_stats->rcrfull++; 2030 HXGE_FM_REPORT_ERROR(hxgep, channel, 2031 HXGE_FM_EREPORT_RDMC_RCRFULL); 2032 rxchan_fatal = B_TRUE; 2033 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2034 "==> hxge_rx_err_evnts(channel %d): " 2035 "fatal error: rcrfull error", channel)); 2036 } 2037 2038 if (cs.bits.rbr_empty) { 2039 rdc_stats->rbr_empty++; 2040 2041 /* 2042 * Wait for channel to be quiet. 2043 */ 2044 (void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel); 2045 2046 /* 2047 * Re-enable the DMA. 2048 */ 2049 (void) hpi_rxdma_cfg_rdc_enable(handle, channel); 2050 } 2051 2052 if (cs.bits.rbr_full) { 2053 rdc_stats->rbrfull++; 2054 HXGE_FM_REPORT_ERROR(hxgep, channel, 2055 HXGE_FM_EREPORT_RDMC_RBRFULL); 2056 rxchan_fatal = B_TRUE; 2057 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2058 "==> hxge_rx_err_evnts(channel %d): " 2059 "fatal error: rbr_full error", channel)); 2060 } 2061 2062 if (rxchan_fatal) { 2063 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2064 " hxge_rx_err_evnts: fatal error on Channel #%d\n", 2065 channel)); 2066 status = hxge_rxdma_fatal_err_recover(hxgep, channel); 2067 if (status == HXGE_OK) { 2068 FM_SERVICE_RESTORED(hxgep); 2069 } 2070 } 2071 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts")); 2072 2073 return (status); 2074 } 2075 2076 static hxge_status_t 2077 hxge_map_rxdma(p_hxge_t hxgep) 2078 { 2079 int i, ndmas; 2080 uint16_t channel; 2081 p_rx_rbr_rings_t rx_rbr_rings; 2082 p_rx_rbr_ring_t *rbr_rings; 2083 p_rx_rcr_rings_t rx_rcr_rings; 2084 p_rx_rcr_ring_t *rcr_rings; 2085 p_rx_mbox_areas_t rx_mbox_areas_p; 2086 p_rx_mbox_t *rx_mbox_p; 2087 p_hxge_dma_pool_t dma_buf_poolp; 2088 p_hxge_dma_common_t *dma_buf_p; 2089 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2090 p_hxge_dma_common_t *dma_rbr_cntl_p; 2091 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2092 p_hxge_dma_common_t *dma_rcr_cntl_p; 2093 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2094 p_hxge_dma_common_t *dma_mbox_cntl_p; 2095 uint32_t *num_chunks; 2096 hxge_status_t status = HXGE_OK; 2097 2098 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma")); 2099 2100 dma_buf_poolp = hxgep->rx_buf_pool_p; 2101 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2102 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2103 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2104 2105 if (!dma_buf_poolp->buf_allocated || 2106 !dma_rbr_cntl_poolp->buf_allocated || 2107 !dma_rcr_cntl_poolp->buf_allocated || 2108 !dma_mbox_cntl_poolp->buf_allocated) { 2109 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2110 "<== hxge_map_rxdma: buf not allocated")); 2111 return (HXGE_ERROR); 2112 } 2113 2114 ndmas = dma_buf_poolp->ndmas; 2115 if (!ndmas) { 2116 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2117 "<== hxge_map_rxdma: no dma allocated")); 2118 return (HXGE_ERROR); 2119 } 2120 2121 num_chunks = dma_buf_poolp->num_chunks; 2122 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2123 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p; 2124 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p; 2125 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p; 2126 2127 rx_rbr_rings = (p_rx_rbr_rings_t) 2128 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2129 rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC( 2130 sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2131 2132 
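	/*
	 * Illustrative note (not in the original source): hxge_map_rxdma()
	 * builds one container struct plus an ndmas-sized array of
	 * per-channel pointers for each of the RBR rings, the RCR rings and
	 * the mailbox areas.  For example, with ndmas == 4 the layout is
	 * roughly:
	 *
	 *	rx_rbr_rings->rbr_rings[0..3]		one p_rx_rbr_ring_t per RDC
	 *	rx_rcr_rings->rcr_rings[0..3]		one p_rx_rcr_ring_t per RDC
	 *	rx_mbox_areas_p->rxmbox_areas[0..3]	one p_rx_mbox_t per RDC
	 *
	 * The per-channel objects themselves are filled in by
	 * hxge_map_rxdma_channel() in the mapping loop below.
	 */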
rx_rcr_rings = (p_rx_rcr_rings_t) 2133 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2134 rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC( 2135 sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2136 2137 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2138 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2139 rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC( 2140 sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2141 2142 /* 2143 * Timeout should be set based on the system clock divider. 2144 * The following timeout value of 1 assumes that the 2145 * granularity (1000) is 3 microseconds running at 300MHz. 2146 */ 2147 2148 hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2149 hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2150 2151 /* 2152 * Map descriptors from the buffer pools for each dma channel. 2153 */ 2154 for (i = 0; i < ndmas; i++) { 2155 /* 2156 * Set up and prepare buffer blocks, descriptors and mailbox. 2157 */ 2158 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2159 status = hxge_map_rxdma_channel(hxgep, channel, 2160 (p_hxge_dma_common_t *)&dma_buf_p[i], 2161 (p_rx_rbr_ring_t *)&rbr_rings[i], 2162 num_chunks[i], 2163 (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i], 2164 (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i], 2165 (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i], 2166 (p_rx_rcr_ring_t *)&rcr_rings[i], 2167 (p_rx_mbox_t *)&rx_mbox_p[i]); 2168 if (status != HXGE_OK) { 2169 goto hxge_map_rxdma_fail1; 2170 } 2171 rbr_rings[i]->index = (uint16_t)i; 2172 rcr_rings[i]->index = (uint16_t)i; 2173 rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i]; 2174 } 2175 2176 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2177 rx_rbr_rings->rbr_rings = rbr_rings; 2178 hxgep->rx_rbr_rings = rx_rbr_rings; 2179 rx_rcr_rings->rcr_rings = rcr_rings; 2180 hxgep->rx_rcr_rings = rx_rcr_rings; 2181 2182 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2183 hxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2184 2185 goto hxge_map_rxdma_exit; 2186 2187 hxge_map_rxdma_fail1: 2188 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2189 "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)", 2190 status, channel, i)); 2191 i--; 2192 for (; i >= 0; i--) { 2193 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2194 hxge_unmap_rxdma_channel(hxgep, channel, 2195 rbr_rings[i], rcr_rings[i], rx_mbox_p[i]); 2196 } 2197 2198 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2199 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2200 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2201 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2202 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2203 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2204 2205 hxge_map_rxdma_exit: 2206 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2207 "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 2208 2209 return (status); 2210 } 2211 2212 static void 2213 hxge_unmap_rxdma(p_hxge_t hxgep) 2214 { 2215 int i, ndmas; 2216 uint16_t channel; 2217 p_rx_rbr_rings_t rx_rbr_rings; 2218 p_rx_rbr_ring_t *rbr_rings; 2219 p_rx_rcr_rings_t rx_rcr_rings; 2220 p_rx_rcr_ring_t *rcr_rings; 2221 p_rx_mbox_areas_t rx_mbox_areas_p; 2222 p_rx_mbox_t *rx_mbox_p; 2223 p_hxge_dma_pool_t dma_buf_poolp; 2224 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2225 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2226 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2227 p_hxge_dma_common_t *dma_buf_p; 2228 2229 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma")); 2230 2231 dma_buf_poolp = hxgep->rx_buf_pool_p; 2232 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2233 dma_rcr_cntl_poolp =
hxgep->rx_rcr_cntl_pool_p; 2234 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2235 2236 if (!dma_buf_poolp->buf_allocated || 2237 !dma_rbr_cntl_poolp->buf_allocated || 2238 !dma_rcr_cntl_poolp->buf_allocated || 2239 !dma_mbox_cntl_poolp->buf_allocated) { 2240 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2241 "<== hxge_unmap_rxdma: NULL buf pointers")); 2242 return; 2243 } 2244 2245 rx_rbr_rings = hxgep->rx_rbr_rings; 2246 rx_rcr_rings = hxgep->rx_rcr_rings; 2247 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2248 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2249 "<== hxge_unmap_rxdma: NULL pointers")); 2250 return; 2251 } 2252 2253 ndmas = rx_rbr_rings->ndmas; 2254 if (!ndmas) { 2255 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2256 "<== hxge_unmap_rxdma: no channel")); 2257 return; 2258 } 2259 2260 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2261 "==> hxge_unmap_rxdma (ndmas %d)", ndmas)); 2262 2263 rbr_rings = rx_rbr_rings->rbr_rings; 2264 rcr_rings = rx_rcr_rings->rcr_rings; 2265 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 2266 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 2267 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2268 2269 for (i = 0; i < ndmas; i++) { 2270 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2271 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2272 "==> hxge_unmap_rxdma (ndmas %d) channel %d", 2273 ndmas, channel)); 2274 (void) hxge_unmap_rxdma_channel(hxgep, channel, 2275 (p_rx_rbr_ring_t)rbr_rings[i], 2276 (p_rx_rcr_ring_t)rcr_rings[i], 2277 (p_rx_mbox_t)rx_mbox_p[i]); 2278 } 2279 2280 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2281 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2282 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2283 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2284 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2285 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2286 2287 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma")); 2288 } 2289 2290 hxge_status_t 2291 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2292 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 2293 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 2294 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 2295 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2296 { 2297 int status = HXGE_OK; 2298 2299 /* 2300 * Set up and prepare buffer blocks, descriptors and mailbox. 2301 */ 2302 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2303 "==> hxge_map_rxdma_channel (channel %d)", channel)); 2304 2305 /* 2306 * Receive buffer blocks 2307 */ 2308 status = hxge_map_rxdma_channel_buf_ring(hxgep, channel, 2309 dma_buf_p, rbr_p, num_chunks); 2310 if (status != HXGE_OK) { 2311 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2312 "==> hxge_map_rxdma_channel (channel %d): " 2313 "map buffer failed 0x%x", channel, status)); 2314 goto hxge_map_rxdma_channel_exit; 2315 } 2316 2317 /* 2318 * Receive block ring, completion ring and mailbox. 
2319 */ 2320 status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel, 2321 dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p, 2322 rbr_p, rcr_p, rx_mbox_p); 2323 if (status != HXGE_OK) { 2324 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2325 "==> hxge_map_rxdma_channel (channel %d): " 2326 "map config failed 0x%x", channel, status)); 2327 goto hxge_map_rxdma_channel_fail2; 2328 } 2329 goto hxge_map_rxdma_channel_exit; 2330 2331 hxge_map_rxdma_channel_fail3: 2332 /* Free rbr, rcr */ 2333 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2334 "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)", 2335 status, channel)); 2336 hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p); 2337 2338 hxge_map_rxdma_channel_fail2: 2339 /* Free buffer blocks */ 2340 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2341 "==> hxge_map_rxdma_channel: free rx buffers" 2342 "(hxgep 0x%x status 0x%x channel %d)", 2343 hxgep, status, channel)); 2344 hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p); 2345 2346 status = HXGE_ERROR; 2347 2348 hxge_map_rxdma_channel_exit: 2349 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2350 "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)", 2351 hxgep, status, channel)); 2352 2353 return (status); 2354 } 2355 2356 /*ARGSUSED*/ 2357 static void 2358 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2359 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2360 { 2361 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2362 "==> hxge_unmap_rxdma_channel (channel %d)", channel)); 2363 2364 /* 2365 * unmap receive block ring, completion ring and mailbox. 2366 */ 2367 (void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p); 2368 2369 /* unmap buffer blocks */ 2370 (void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p); 2371 2372 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel")); 2373 } 2374 2375 /*ARGSUSED*/ 2376 static hxge_status_t 2377 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel, 2378 p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p, 2379 p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p, 2380 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2381 { 2382 p_rx_rbr_ring_t rbrp; 2383 p_rx_rcr_ring_t rcrp; 2384 p_rx_mbox_t mboxp; 2385 p_hxge_dma_common_t cntl_dmap; 2386 p_hxge_dma_common_t dmap; 2387 p_rx_msg_t *rx_msg_ring; 2388 p_rx_msg_t rx_msg_p; 2389 rdc_rbr_cfg_a_t *rcfga_p; 2390 rdc_rbr_cfg_b_t *rcfgb_p; 2391 rdc_rcr_cfg_a_t *cfga_p; 2392 rdc_rcr_cfg_b_t *cfgb_p; 2393 rdc_rx_cfg1_t *cfig1_p; 2394 rdc_rx_cfg2_t *cfig2_p; 2395 rdc_rbr_kick_t *kick_p; 2396 uint32_t dmaaddrp; 2397 uint32_t *rbr_vaddrp; 2398 uint32_t bkaddr; 2399 hxge_status_t status = HXGE_OK; 2400 int i; 2401 uint32_t hxge_port_rcr_size; 2402 2403 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2404 "==> hxge_map_rxdma_channel_cfg_ring")); 2405 2406 cntl_dmap = *dma_rbr_cntl_p; 2407 2408 /* 2409 * Map in the receive block ring 2410 */ 2411 rbrp = *rbr_p; 2412 dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc; 2413 hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 2414 2415 /* 2416 * Zero out buffer block ring descriptors. 
2417 */ 2418 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2419 2420 rcfga_p = &(rbrp->rbr_cfga); 2421 rcfgb_p = &(rbrp->rbr_cfgb); 2422 kick_p = &(rbrp->rbr_kick); 2423 rcfga_p->value = 0; 2424 rcfgb_p->value = 0; 2425 kick_p->value = 0; 2426 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 2427 rcfga_p->value = (rbrp->rbr_addr & 2428 (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK)); 2429 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 2430 2431 /* XXXX: how to choose packet buffer sizes */ 2432 rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0; 2433 rcfgb_p->bits.vld0 = 1; 2434 rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1; 2435 rcfgb_p->bits.vld1 = 1; 2436 rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2; 2437 rcfgb_p->bits.vld2 = 1; 2438 rcfgb_p->bits.bksize = hxgep->rx_bksize_code; 2439 2440 /* 2441 * For each buffer block, enter receive block address to the ring. 2442 */ 2443 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 2444 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 2445 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2446 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2447 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 2448 2449 rx_msg_ring = rbrp->rx_msg_ring; 2450 for (i = 0; i < rbrp->tnblocks; i++) { 2451 rx_msg_p = rx_msg_ring[i]; 2452 rx_msg_p->hxgep = hxgep; 2453 rx_msg_p->rx_rbr_p = rbrp; 2454 bkaddr = (uint32_t) 2455 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2456 RBR_BKADDR_SHIFT)); 2457 rx_msg_p->free = B_FALSE; 2458 rx_msg_p->max_usage_cnt = 0xbaddcafe; 2459 2460 *rbr_vaddrp++ = bkaddr; 2461 } 2462 2463 kick_p->bits.bkadd = rbrp->rbb_max; 2464 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 2465 2466 rbrp->rbr_rd_index = 0; 2467 2468 rbrp->rbr_consumed = 0; 2469 rbrp->rbr_use_bcopy = B_TRUE; 2470 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 2471 2472 /* 2473 * Do bcopy on packets greater than bcopy size once the lo threshold is 2474 * reached. This lo threshold should be less than the hi threshold. 2475 * 2476 * Do bcopy on every packet once the hi threshold is reached. 
2477 */ 2478 if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) { 2479 /* default it to use hi */ 2480 hxge_rx_threshold_lo = hxge_rx_threshold_hi; 2481 } 2482 if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) { 2483 hxge_rx_buf_size_type = HXGE_RBR_TYPE2; 2484 } 2485 rbrp->rbr_bufsize_type = hxge_rx_buf_size_type; 2486 2487 switch (hxge_rx_threshold_hi) { 2488 default: 2489 case HXGE_RX_COPY_NONE: 2490 /* Do not do bcopy at all */ 2491 rbrp->rbr_use_bcopy = B_FALSE; 2492 rbrp->rbr_threshold_hi = rbrp->rbb_max; 2493 break; 2494 2495 case HXGE_RX_COPY_1: 2496 case HXGE_RX_COPY_2: 2497 case HXGE_RX_COPY_3: 2498 case HXGE_RX_COPY_4: 2499 case HXGE_RX_COPY_5: 2500 case HXGE_RX_COPY_6: 2501 case HXGE_RX_COPY_7: 2502 rbrp->rbr_threshold_hi = 2503 rbrp->rbb_max * (hxge_rx_threshold_hi) / 2504 HXGE_RX_BCOPY_SCALE; 2505 break; 2506 2507 case HXGE_RX_COPY_ALL: 2508 rbrp->rbr_threshold_hi = 0; 2509 break; 2510 } 2511 2512 switch (hxge_rx_threshold_lo) { 2513 default: 2514 case HXGE_RX_COPY_NONE: 2515 /* Do not do bcopy at all */ 2516 if (rbrp->rbr_use_bcopy) { 2517 rbrp->rbr_use_bcopy = B_FALSE; 2518 } 2519 rbrp->rbr_threshold_lo = rbrp->rbb_max; 2520 break; 2521 2522 case HXGE_RX_COPY_1: 2523 case HXGE_RX_COPY_2: 2524 case HXGE_RX_COPY_3: 2525 case HXGE_RX_COPY_4: 2526 case HXGE_RX_COPY_5: 2527 case HXGE_RX_COPY_6: 2528 case HXGE_RX_COPY_7: 2529 rbrp->rbr_threshold_lo = 2530 rbrp->rbb_max * (hxge_rx_threshold_lo) / 2531 HXGE_RX_BCOPY_SCALE; 2532 break; 2533 2534 case HXGE_RX_COPY_ALL: 2535 rbrp->rbr_threshold_lo = 0; 2536 break; 2537 } 2538 2539 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2540 "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d " 2541 "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d " 2542 "rbb_threshold_lo %d", 2543 dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type, 2544 rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo)); 2545 2546 /* Map in the receive completion ring */ 2547 rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 2548 rcrp->rdc = dma_channel; 2549 rcrp->hxgep = hxgep; 2550 2551 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 2552 rcrp->comp_size = hxge_port_rcr_size; 2553 rcrp->comp_wrap_mask = hxge_port_rcr_size - 1; 2554 2555 rcrp->max_receive_pkts = hxge_max_rx_pkts; 2556 2557 cntl_dmap = *dma_rcr_cntl_p; 2558 2559 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 2560 hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 2561 sizeof (rcr_entry_t)); 2562 rcrp->comp_rd_index = 0; 2563 rcrp->comp_wt_index = 0; 2564 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 2565 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 2566 #if defined(__i386) 2567 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2568 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2569 #else 2570 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2571 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2572 #endif 2573 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 2574 (hxge_port_rcr_size - 1); 2575 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 2576 (hxge_port_rcr_size - 1); 2577 2578 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 2579 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 2580 2581 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2582 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2583 "rbr_vaddrp $%p rcr_desc_rd_head_p $%p " 2584 "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p " 2585 "rcr_desc_rd_last_pp $%p ", 2586 dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p, 2587 rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p, 2588 
rcrp->rcr_desc_last_pp)); 2589 2590 /* 2591 * Zero out buffer block ring descriptors. 2592 */ 2593 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2594 rcrp->intr_timeout = hxgep->intr_timeout; 2595 rcrp->intr_threshold = hxgep->intr_threshold; 2596 rcrp->full_hdr_flag = B_FALSE; 2597 rcrp->sw_priv_hdr_len = 0; 2598 2599 cfga_p = &(rcrp->rcr_cfga); 2600 cfgb_p = &(rcrp->rcr_cfgb); 2601 cfga_p->value = 0; 2602 cfgb_p->value = 0; 2603 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 2604 2605 cfga_p->value = (rcrp->rcr_addr & 2606 (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK)); 2607 2608 cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF); 2609 2610 /* 2611 * Timeout should be set based on the system clock divider. The 2612 * following timeout value of 1 assumes that the granularity (1000) is 2613 * 3 microseconds running at 300MHz. 2614 */ 2615 cfgb_p->bits.pthres = rcrp->intr_threshold; 2616 cfgb_p->bits.timeout = rcrp->intr_timeout; 2617 cfgb_p->bits.entout = 1; 2618 2619 /* Map in the mailbox */ 2620 cntl_dmap = *dma_mbox_cntl_p; 2621 mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 2622 dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox; 2623 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 2624 cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1; 2625 cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2; 2626 cfig1_p->value = cfig2_p->value = 0; 2627 2628 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 2629 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2630 "==> hxge_map_rxdma_channel_cfg_ring: " 2631 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 2632 dma_channel, cfig1_p->value, cfig2_p->value, 2633 mboxp->mbox_addr)); 2634 2635 dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff); 2636 cfig1_p->bits.mbaddr_h = dmaaddrp; 2637 2638 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 2639 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 2640 RXDMA_CFIG2_MBADDR_L_MASK); 2641 2642 cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 2643 2644 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2645 "==> hxge_map_rxdma_channel_cfg_ring: channel %d dmaaddrp $%p " 2646 "cfg1 0x%016llx cfig2 0x%016llx", 2647 dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value)); 2648 2649 cfig2_p->bits.full_hdr = rcrp->full_hdr_flag; 2650 cfig2_p->bits.offset = rcrp->sw_priv_hdr_len; 2651 2652 rbrp->rx_rcr_p = rcrp; 2653 rcrp->rx_rbr_p = rbrp; 2654 *rcr_p = rcrp; 2655 *rx_mbox_p = mboxp; 2656 2657 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2658 "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 2659 return (status); 2660 } 2661 2662 /*ARGSUSED*/ 2663 static void 2664 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 2665 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2666 { 2667 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2668 "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc)); 2669 2670 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 2671 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 2672 2673 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2674 "<== hxge_unmap_rxdma_channel_cfg_ring")); 2675 } 2676 2677 static hxge_status_t 2678 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel, 2679 p_hxge_dma_common_t *dma_buf_p, 2680 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 2681 { 2682 p_rx_rbr_ring_t rbrp; 2683 p_hxge_dma_common_t dma_bufp, tmp_bufp; 2684 p_rx_msg_t *rx_msg_ring; 2685 p_rx_msg_t rx_msg_p; 2686 p_mblk_t mblk_p; 2687 2688 rxring_info_t *ring_info; 2689 hxge_status_t status = HXGE_OK; 2690 int i, j, index; 2691 uint32_t size, bsize,
nblocks, nmsgs; 2692 2693 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2694 "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel)); 2695 2696 dma_bufp = tmp_bufp = *dma_buf_p; 2697 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2698 " hxge_map_rxdma_channel_buf_ring: channel %d to map %d " 2699 "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp)); 2700 2701 nmsgs = 0; 2702 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2703 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2704 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2705 "bufp 0x%016llx nblocks %d nmsgs %d", 2706 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2707 nmsgs += tmp_bufp->nblocks; 2708 } 2709 if (!nmsgs) { 2710 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2711 "<== hxge_map_rxdma_channel_buf_ring: channel %d " 2712 "no msg blocks", channel)); 2713 status = HXGE_ERROR; 2714 goto hxge_map_rxdma_channel_buf_ring_exit; 2715 } 2716 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 2717 2718 size = nmsgs * sizeof (p_rx_msg_t); 2719 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2720 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 2721 KM_SLEEP); 2722 2723 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 2724 (void *) hxgep->interrupt_cookie); 2725 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 2726 (void *) hxgep->interrupt_cookie); 2727 2728 rbrp->rdc = channel; 2729 rbrp->num_blocks = num_chunks; 2730 rbrp->tnblocks = nmsgs; 2731 rbrp->rbb_max = nmsgs; 2732 rbrp->rbr_max_size = nmsgs; 2733 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 2734 2735 /* 2736 * Buffer sizes suggested by NIU architect. 256, 512 and 2K. 2737 */ 2738 2739 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 2740 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 2741 rbrp->hpi_pkt_buf_size0 = SIZE_256B; 2742 2743 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 2744 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 2745 rbrp->hpi_pkt_buf_size1 = SIZE_1KB; 2746 2747 rbrp->block_size = hxgep->rx_default_block_size; 2748 2749 if (!hxgep->param_arr[param_accept_jumbo].value) { 2750 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 2751 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 2752 rbrp->hpi_pkt_buf_size2 = SIZE_2KB; 2753 } else { 2754 rbrp->hpi_pkt_buf_size2 = SIZE_4KB; 2755 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 2756 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 2757 } 2758 2759 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2760 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2761 "actual rbr max %d rbb_max %d nmsgs %d " 2762 "rbrp->block_size %d default_block_size %d " 2763 "(config hxge_rbr_size %d hxge_rbr_spare_size %d)", 2764 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 2765 rbrp->block_size, hxgep->rx_default_block_size, 2766 hxge_rbr_size, hxge_rbr_spare_size)); 2767 2768 /* 2769 * Map in buffers from the buffer pool. 2770 * Note that num_blocks is the num_chunks. For Sparc, there is likely 2771 * only one chunk. For x86, there will be many chunks. 2772 * Loop over chunks. 
2773 */ 2774 index = 0; 2775 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 2776 bsize = dma_bufp->block_size; 2777 nblocks = dma_bufp->nblocks; 2778 #if defined(__i386) 2779 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 2780 #else 2781 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 2782 #endif 2783 ring_info->buffer[i].buf_index = i; 2784 ring_info->buffer[i].buf_size = dma_bufp->alength; 2785 ring_info->buffer[i].start_index = index; 2786 #if defined(__i386) 2787 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 2788 #else 2789 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 2790 #endif 2791 2792 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2793 " hxge_map_rxdma_channel_buf_ring: map channel %d " 2794 "chunk %d nblocks %d chunk_size %x block_size 0x%x " 2795 "dma_bufp $%p dvma_addr $%p", channel, i, 2796 dma_bufp->nblocks, 2797 ring_info->buffer[i].buf_size, bsize, dma_bufp, 2798 ring_info->buffer[i].dvma_addr)); 2799 2800 /* loop over blocks within a chunk */ 2801 for (j = 0; j < nblocks; j++) { 2802 if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO, 2803 dma_bufp)) == NULL) { 2804 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2805 "allocb failed (index %d i %d j %d)", 2806 index, i, j)); 2807 goto hxge_map_rxdma_channel_buf_ring_fail1; 2808 } 2809 rx_msg_ring[index] = rx_msg_p; 2810 rx_msg_p->block_index = index; 2811 rx_msg_p->shifted_addr = (uint32_t) 2812 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2813 RBR_BKADDR_SHIFT)); 2814 /* 2815 * Too much output 2816 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2817 * "index %d j %d rx_msg_p $%p mblk %p", 2818 * index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 2819 */ 2820 mblk_p = rx_msg_p->rx_mblk_p; 2821 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 2822 2823 rbrp->rbr_ref_cnt++; 2824 index++; 2825 rx_msg_p->buf_dma.dma_channel = channel; 2826 } 2827 } 2828 if (i < rbrp->num_blocks) { 2829 goto hxge_map_rxdma_channel_buf_ring_fail1; 2830 } 2831 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2832 "hxge_map_rxdma_channel_buf_ring: done buf init " 2833 "channel %d msg block entries %d", channel, index)); 2834 ring_info->block_size_mask = bsize - 1; 2835 rbrp->rx_msg_ring = rx_msg_ring; 2836 rbrp->dma_bufp = dma_buf_p; 2837 rbrp->ring_info = ring_info; 2838 2839 status = hxge_rxbuf_index_info_init(hxgep, rbrp); 2840 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: " 2841 "channel %d done buf info init", channel)); 2842 2843 /* 2844 * Finally, permit hxge_freeb() to call hxge_post_page(). 
2845 */ 2846 rbrp->rbr_state = RBR_POSTING; 2847 2848 *rbr_p = rbrp; 2849 2850 goto hxge_map_rxdma_channel_buf_ring_exit; 2851 2852 hxge_map_rxdma_channel_buf_ring_fail1: 2853 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2854 " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 2855 channel, status)); 2856 2857 index--; 2858 for (; index >= 0; index--) { 2859 rx_msg_p = rx_msg_ring[index]; 2860 if (rx_msg_p != NULL) { 2861 hxge_freeb(rx_msg_p); 2862 rx_msg_ring[index] = NULL; 2863 } 2864 } 2865 2866 hxge_map_rxdma_channel_buf_ring_fail: 2867 MUTEX_DESTROY(&rbrp->post_lock); 2868 MUTEX_DESTROY(&rbrp->lock); 2869 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2870 KMEM_FREE(rx_msg_ring, size); 2871 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 2872 2873 status = HXGE_ERROR; 2874 2875 hxge_map_rxdma_channel_buf_ring_exit: 2876 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2877 "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 2878 2879 return (status); 2880 } 2881 2882 /*ARGSUSED*/ 2883 static void 2884 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 2885 p_rx_rbr_ring_t rbr_p) 2886 { 2887 p_rx_msg_t *rx_msg_ring; 2888 p_rx_msg_t rx_msg_p; 2889 rxring_info_t *ring_info; 2890 int i; 2891 uint32_t size; 2892 2893 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2894 "==> hxge_unmap_rxdma_channel_buf_ring")); 2895 if (rbr_p == NULL) { 2896 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2897 "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 2898 return; 2899 } 2900 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2901 "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc)); 2902 2903 rx_msg_ring = rbr_p->rx_msg_ring; 2904 ring_info = rbr_p->ring_info; 2905 2906 if (rx_msg_ring == NULL || ring_info == NULL) { 2907 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2908 "<== hxge_unmap_rxdma_channel_buf_ring: " 2909 "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info)); 2910 return; 2911 } 2912 2913 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 2914 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2915 " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 2916 "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks, 2917 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 2918 2919 for (i = 0; i < rbr_p->tnblocks; i++) { 2920 rx_msg_p = rx_msg_ring[i]; 2921 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2922 " hxge_unmap_rxdma_channel_buf_ring: " 2923 "rx_msg_p $%p", rx_msg_p)); 2924 if (rx_msg_p != NULL) { 2925 hxge_freeb(rx_msg_p); 2926 rx_msg_ring[i] = NULL; 2927 } 2928 } 2929 2930 /* 2931 * We no longer may use the mutex <post_lock>. By setting 2932 * <rbr_state> to anything but POSTING, we prevent 2933 * hxge_post_page() from accessing a dead mutex. 2934 */ 2935 rbr_p->rbr_state = RBR_UNMAPPING; 2936 MUTEX_DESTROY(&rbr_p->post_lock); 2937 2938 MUTEX_DESTROY(&rbr_p->lock); 2939 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2940 KMEM_FREE(rx_msg_ring, size); 2941 2942 if (rbr_p->rbr_ref_cnt == 0) { 2943 /* This is the normal state of affairs. */ 2944 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 2945 } else { 2946 /* 2947 * Some of our buffers are still being used. 2948 * Therefore, tell hxge_freeb() this ring is 2949 * unmapped, so it may free <rbr_p> for us. 2950 */ 2951 rbr_p->rbr_state = RBR_UNMAPPED; 2952 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2953 "unmap_rxdma_buf_ring: %d %s outstanding.", 2954 rbr_p->rbr_ref_cnt, 2955 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 2956 } 2957 2958 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2959 "<== hxge_unmap_rxdma_channel_buf_ring")); 2960 } 2961 2962 static hxge_status_t 2963 hxge_rxdma_hw_start_common(p_hxge_t hxgep) 2964 { 2965 hxge_status_t status = HXGE_OK; 2966 2967 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 2968 2969 /* 2970 * Load the sharable parameters by writing to the function zero control 2971 * registers. These FZC registers should be initialized only once for 2972 * the entire chip. 2973 */ 2974 (void) hxge_init_fzc_rx_common(hxgep); 2975 2976 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 2977 2978 return (status); 2979 } 2980 2981 static hxge_status_t 2982 hxge_rxdma_hw_start(p_hxge_t hxgep) 2983 { 2984 int i, ndmas; 2985 uint16_t channel; 2986 p_rx_rbr_rings_t rx_rbr_rings; 2987 p_rx_rbr_ring_t *rbr_rings; 2988 p_rx_rcr_rings_t rx_rcr_rings; 2989 p_rx_rcr_ring_t *rcr_rings; 2990 p_rx_mbox_areas_t rx_mbox_areas_p; 2991 p_rx_mbox_t *rx_mbox_p; 2992 hxge_status_t status = HXGE_OK; 2993 2994 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start")); 2995 2996 rx_rbr_rings = hxgep->rx_rbr_rings; 2997 rx_rcr_rings = hxgep->rx_rcr_rings; 2998 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2999 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3000 "<== hxge_rxdma_hw_start: NULL ring pointers")); 3001 return (HXGE_ERROR); 3002 } 3003 3004 ndmas = rx_rbr_rings->ndmas; 3005 if (ndmas == 0) { 3006 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3007 "<== hxge_rxdma_hw_start: no dma channel allocated")); 3008 return (HXGE_ERROR); 3009 } 3010 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3011 "==> hxge_rxdma_hw_start (ndmas %d)", ndmas)); 3012 3013 /* 3014 * Scrub the RDC Rx DMA Prefetch Buffer Command. 3015 */ 3016 for (i = 0; i < 128; i++) { 3017 HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i); 3018 } 3019 3020 /* 3021 * Scrub Rx DMA Shadow Tail Command. 3022 */ 3023 for (i = 0; i < 64; i++) { 3024 HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i); 3025 } 3026 3027 /* 3028 * Scrub Rx DMA Control Fifo Command. 3029 */ 3030 for (i = 0; i < 512; i++) { 3031 HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i); 3032 } 3033 3034 /* 3035 * Scrub Rx DMA Data Fifo Command. 3036 */ 3037 for (i = 0; i < 1536; i++) { 3038 HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i); 3039 } 3040 3041 /* 3042 * Reset the FIFO Error Stat. 
3043 */ 3044 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF); 3045 3046 /* Set the error mask to receive interrupts */ 3047 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3048 3049 rbr_rings = rx_rbr_rings->rbr_rings; 3050 rcr_rings = rx_rcr_rings->rcr_rings; 3051 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 3052 if (rx_mbox_areas_p) { 3053 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3054 } 3055 3056 for (i = 0; i < ndmas; i++) { 3057 channel = rbr_rings[i]->rdc; 3058 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3059 "==> hxge_rxdma_hw_start (ndmas %d) channel %d", 3060 ndmas, channel)); 3061 status = hxge_rxdma_start_channel(hxgep, channel, 3062 (p_rx_rbr_ring_t)rbr_rings[i], 3063 (p_rx_rcr_ring_t)rcr_rings[i], 3064 (p_rx_mbox_t)rx_mbox_p[i], rbr_rings[i]->rbb_max); 3065 if (status != HXGE_OK) { 3066 goto hxge_rxdma_hw_start_fail1; 3067 } 3068 } 3069 3070 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: " 3071 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3072 rx_rbr_rings, rx_rcr_rings)); 3073 goto hxge_rxdma_hw_start_exit; 3074 3075 hxge_rxdma_hw_start_fail1: 3076 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3077 "==> hxge_rxdma_hw_start: disable " 3078 "(status 0x%x channel %d i %d)", status, channel, i)); 3079 for (; i >= 0; i--) { 3080 channel = rbr_rings[i]->rdc; 3081 (void) hxge_rxdma_stop_channel(hxgep, channel); 3082 } 3083 3084 hxge_rxdma_hw_start_exit: 3085 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3086 "==> hxge_rxdma_hw_start: (status 0x%x)", status)); 3087 return (status); 3088 } 3089 3090 static void 3091 hxge_rxdma_hw_stop(p_hxge_t hxgep) 3092 { 3093 int i, ndmas; 3094 uint16_t channel; 3095 p_rx_rbr_rings_t rx_rbr_rings; 3096 p_rx_rbr_ring_t *rbr_rings; 3097 p_rx_rcr_rings_t rx_rcr_rings; 3098 3099 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop")); 3100 3101 rx_rbr_rings = hxgep->rx_rbr_rings; 3102 rx_rcr_rings = hxgep->rx_rcr_rings; 3103 3104 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3105 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3106 "<== hxge_rxdma_hw_stop: NULL ring pointers")); 3107 return; 3108 } 3109 3110 ndmas = rx_rbr_rings->ndmas; 3111 if (!ndmas) { 3112 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3113 "<== hxge_rxdma_hw_stop: no dma channel allocated")); 3114 return; 3115 } 3116 3117 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3118 "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3119 3120 rbr_rings = rx_rbr_rings->rbr_rings; 3121 for (i = 0; i < ndmas; i++) { 3122 channel = rbr_rings[i]->rdc; 3123 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3124 "==> hxge_rxdma_hw_stop (ndmas %d) channel %d", 3125 ndmas, channel)); 3126 (void) hxge_rxdma_stop_channel(hxgep, channel); 3127 } 3128 3129 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: " 3130 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3131 rx_rbr_rings, rx_rcr_rings)); 3132 3133 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop")); 3134 } 3135 3136 static hxge_status_t 3137 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 3138 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 3139 int n_init_kick) 3140 { 3141 hpi_handle_t handle; 3142 hpi_status_t rs = HPI_SUCCESS; 3143 rdc_stat_t cs; 3144 rdc_int_mask_t ent_mask; 3145 hxge_status_t status = HXGE_OK; 3146 3147 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel")); 3148 3149 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3150 3151 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: " 3152 "hpi handle addr $%p acc $%p", 3153 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3154 3155 /* Reset RXDMA channel */ 3156 rs = 
hpi_rxdma_cfg_rdc_reset(handle, channel); 3157 if (rs != HPI_SUCCESS) { 3158 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3159 "==> hxge_rxdma_start_channel: " 3160 "reset rxdma failed (0x%08x channel %d)", 3161 status, channel)); 3162 return (HXGE_ERROR | rs); 3163 } 3164 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3165 "==> hxge_rxdma_start_channel: reset done: channel %d", channel)); 3166 3167 /* 3168 * Initialize the RXDMA channel specific FZC control configurations. 3169 * These FZC registers are pertaining to each RX channel (logical 3170 * pages). 3171 */ 3172 status = hxge_init_fzc_rxdma_channel(hxgep, 3173 channel, rbr_p, rcr_p, mbox_p); 3174 if (status != HXGE_OK) { 3175 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3176 "==> hxge_rxdma_start_channel: " 3177 "init fzc rxdma failed (0x%08x channel %d)", 3178 status, channel)); 3179 return (status); 3180 } 3181 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3182 "==> hxge_rxdma_start_channel: fzc done")); 3183 3184 /* 3185 * Zero out the shadow and prefetch ram. 3186 */ 3187 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3188 "==> hxge_rxdma_start_channel: ram done")); 3189 3190 /* Set up the interrupt event masks. */ 3191 ent_mask.value = 0; 3192 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3193 if (rs != HPI_SUCCESS) { 3194 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3195 "==> hxge_rxdma_start_channel: " 3196 "init rxdma event masks failed (0x%08x channel %d)", 3197 status, channel)); 3198 return (HXGE_ERROR | rs); 3199 } 3200 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3201 "event done: channel %d (mask 0x%016llx)", 3202 channel, ent_mask.value)); 3203 3204 /* 3205 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA 3206 * channels and enable each DMA channel. 3207 */ 3208 status = hxge_enable_rxdma_channel(hxgep, 3209 channel, rbr_p, rcr_p, mbox_p, n_init_kick); 3210 if (status != HXGE_OK) { 3211 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3212 " hxge_rxdma_start_channel: " 3213 " init enable rxdma failed (0x%08x channel %d)", 3214 status, channel)); 3215 return (status); 3216 } 3217 3218 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3219 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3220 3221 /* 3222 * Initialize the receive DMA control and status register 3223 * Note that rdc_stat HAS to be set after RBR and RCR rings are set 3224 */ 3225 cs.value = 0; 3226 cs.bits.mex = 1; 3227 cs.bits.rcr_thres = 1; 3228 cs.bits.rcr_to = 1; 3229 cs.bits.rbr_empty = 1; 3230 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3231 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3232 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 3233 if (status != HXGE_OK) { 3234 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3235 "==> hxge_rxdma_start_channel: " 3236 "init rxdma control register failed (0x%08x channel %d", 3237 status, channel)); 3238 return (status); 3239 } 3240 3241 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3242 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3243 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3244 "==> hxge_rxdma_start_channel: enable done")); 3245 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel")); 3246 3247 return (HXGE_OK); 3248 } 3249 3250 static hxge_status_t 3251 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel) 3252 { 3253 hpi_handle_t handle; 3254 hpi_status_t rs = HPI_SUCCESS; 3255 rdc_stat_t cs; 3256 rdc_int_mask_t ent_mask; 3257 hxge_status_t status = HXGE_OK; 3258 3259 HXGE_DEBUG_MSG((hxgep, RX_CTL, 
"==> hxge_rxdma_stop_channel")); 3260 3261 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3262 3263 HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: " 3264 "hpi handle addr $%p acc $%p", 3265 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3266 3267 /* Reset RXDMA channel */ 3268 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3269 if (rs != HPI_SUCCESS) { 3270 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3271 " hxge_rxdma_stop_channel: " 3272 " reset rxdma failed (0x%08x channel %d)", 3273 rs, channel)); 3274 return (HXGE_ERROR | rs); 3275 } 3276 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3277 "==> hxge_rxdma_stop_channel: reset done")); 3278 3279 /* Set up the interrupt event masks. */ 3280 ent_mask.value = RDC_INT_MASK_ALL; 3281 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3282 if (rs != HPI_SUCCESS) { 3283 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3284 "==> hxge_rxdma_stop_channel: " 3285 "set rxdma event masks failed (0x%08x channel %d)", 3286 rs, channel)); 3287 return (HXGE_ERROR | rs); 3288 } 3289 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3290 "==> hxge_rxdma_stop_channel: event done")); 3291 3292 /* Initialize the receive DMA control and status register */ 3293 cs.value = 0; 3294 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3295 3296 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control " 3297 " to default (all 0s) 0x%08x", cs.value)); 3298 3299 if (status != HXGE_OK) { 3300 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3301 " hxge_rxdma_stop_channel: init rxdma" 3302 " control register failed (0x%08x channel %d", 3303 status, channel)); 3304 return (status); 3305 } 3306 3307 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3308 "==> hxge_rxdma_stop_channel: control done")); 3309 3310 /* disable dma channel */ 3311 status = hxge_disable_rxdma_channel(hxgep, channel); 3312 3313 if (status != HXGE_OK) { 3314 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3315 " hxge_rxdma_stop_channel: " 3316 " init enable rxdma failed (0x%08x channel %d)", 3317 status, channel)); 3318 return (status); 3319 } 3320 3321 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3322 "==> hxge_rxdma_stop_channel: disable done")); 3323 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel")); 3324 3325 return (HXGE_OK); 3326 } 3327 3328 hxge_status_t 3329 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep) 3330 { 3331 hpi_handle_t handle; 3332 p_hxge_rdc_sys_stats_t statsp; 3333 rdc_fifo_err_stat_t stat; 3334 hxge_status_t status = HXGE_OK; 3335 3336 handle = hxgep->hpi_handle; 3337 statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats; 3338 3339 /* Clear the int_dbg register in case it is an injected err */ 3340 HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0); 3341 3342 /* Get the error status and clear the register */ 3343 HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value); 3344 HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value); 3345 3346 if (stat.bits.rx_ctrl_fifo_sec) { 3347 statsp->ctrl_fifo_sec++; 3348 if (statsp->ctrl_fifo_sec == 1) 3349 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3350 "==> hxge_rxdma_handle_sys_errors: " 3351 "rx_ctrl_fifo_sec")); 3352 } 3353 3354 if (stat.bits.rx_ctrl_fifo_ded) { 3355 /* Global fatal error encountered */ 3356 statsp->ctrl_fifo_ded++; 3357 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3358 HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED); 3359 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3360 "==> hxge_rxdma_handle_sys_errors: " 3361 "fatal error: rx_ctrl_fifo_ded error")); 3362 } 3363 3364 if (stat.bits.rx_data_fifo_sec) { 3365 statsp->data_fifo_sec++; 3366 if (statsp->data_fifo_sec == 1) 3367 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 
3368 "==> hxge_rxdma_handle_sys_errors: " 3369 "rx_data_fifo_sec")); 3370 } 3371 3372 if (stat.bits.rx_data_fifo_ded) { 3373 /* Global fatal error encountered */ 3374 statsp->data_fifo_ded++; 3375 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3376 HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED); 3377 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3378 "==> hxge_rxdma_handle_sys_errors: " 3379 "fatal error: rx_data_fifo_ded error")); 3380 } 3381 3382 if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) { 3383 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3384 " hxge_rxdma_handle_sys_errors: fatal error\n")); 3385 status = hxge_rx_port_fatal_err_recover(hxgep); 3386 if (status == HXGE_OK) { 3387 FM_SERVICE_RESTORED(hxgep); 3388 } 3389 } 3390 3391 return (HXGE_OK); 3392 } 3393 3394 static hxge_status_t 3395 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel) 3396 { 3397 hpi_handle_t handle; 3398 hpi_status_t rs = HPI_SUCCESS; 3399 hxge_status_t status = HXGE_OK; 3400 p_rx_rbr_ring_t rbrp; 3401 p_rx_rcr_ring_t rcrp; 3402 p_rx_mbox_t mboxp; 3403 rdc_int_mask_t ent_mask; 3404 p_hxge_dma_common_t dmap; 3405 int ring_idx; 3406 p_rx_msg_t rx_msg_p; 3407 int i; 3408 uint32_t hxge_port_rcr_size; 3409 uint64_t tmp; 3410 int n_init_kick = 0; 3411 3412 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover")); 3413 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3414 "Recovering from RxDMAChannel#%d error...", channel)); 3415 3416 /* 3417 * Stop the dma channel waits for the stop done. If the stop done bit 3418 * is not set, then create an error. 3419 */ 3420 3421 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3422 3423 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop...")); 3424 3425 ring_idx = hxge_rxdma_get_ring_index(hxgep, channel); 3426 rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx]; 3427 rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx]; 3428 3429 MUTEX_ENTER(&rcrp->lock); 3430 MUTEX_ENTER(&rbrp->lock); 3431 MUTEX_ENTER(&rbrp->post_lock); 3432 3433 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel...")); 3434 3435 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 3436 if (rs != HPI_SUCCESS) { 3437 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3438 "hxge_disable_rxdma_channel:failed")); 3439 goto fail; 3440 } 3441 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt...")); 3442 3443 /* Disable interrupt */ 3444 ent_mask.value = RDC_INT_MASK_ALL; 3445 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3446 if (rs != HPI_SUCCESS) { 3447 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3448 "Set rxdma event masks failed (channel %d)", channel)); 3449 } 3450 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset...")); 3451 3452 /* Reset RXDMA channel */ 3453 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3454 if (rs != HPI_SUCCESS) { 3455 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3456 "Reset rxdma failed (channel %d)", channel)); 3457 goto fail; 3458 } 3459 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 3460 mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 3461 3462 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3463 rbrp->rbr_rd_index = 0; 3464 3465 rcrp->comp_rd_index = 0; 3466 rcrp->comp_wt_index = 0; 3467 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3468 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3469 #if defined(__i386) 3470 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3471 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3472 #else 3473 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3474 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3475 #endif 3476 
3477 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3478 (hxge_port_rcr_size - 1); 3479 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3480 (hxge_port_rcr_size - 1); 3481 3482 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 3483 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 3484 3485 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 3486 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3487 3488 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n", 3489 rbrp->rbr_max_size)); 3490 3491 /* Count the number of buffers owned by the hardware at this moment */ 3492 for (i = 0; i < rbrp->rbr_max_size; i++) { 3493 rx_msg_p = rbrp->rx_msg_ring[i]; 3494 if (rx_msg_p->ref_cnt == 1) { 3495 n_init_kick++; 3496 } 3497 } 3498 3499 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start...")); 3500 3501 /* 3502 * This is error recover! Some buffers are owned by the hardware and 3503 * the rest are owned by the apps. We should only kick in those 3504 * owned by the hardware initially. The apps will post theirs 3505 * eventually. 3506 */ 3507 status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp, 3508 n_init_kick); 3509 if (status != HXGE_OK) { 3510 goto fail; 3511 } 3512 3513 /* 3514 * The DMA channel may disable itself automatically. 3515 * The following is a work-around. 3516 */ 3517 HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp); 3518 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 3519 if (rs != HPI_SUCCESS) { 3520 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3521 "hpi_rxdma_cfg_rdc_enable (channel %d)", channel)); 3522 } 3523 3524 MUTEX_EXIT(&rbrp->post_lock); 3525 MUTEX_EXIT(&rbrp->lock); 3526 MUTEX_EXIT(&rcrp->lock); 3527 3528 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3529 "Recovery Successful, RxDMAChannel#%d Restored", channel)); 3530 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover")); 3531 3532 return (HXGE_OK); 3533 3534 fail: 3535 MUTEX_EXIT(&rbrp->post_lock); 3536 MUTEX_EXIT(&rbrp->lock); 3537 MUTEX_EXIT(&rcrp->lock); 3538 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3539 3540 return (HXGE_ERROR | rs); 3541 } 3542 3543 static hxge_status_t 3544 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep) 3545 { 3546 hxge_status_t status = HXGE_OK; 3547 p_hxge_dma_common_t *dma_buf_p; 3548 uint16_t channel; 3549 int ndmas; 3550 int i; 3551 block_reset_t reset_reg; 3552 3553 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover")); 3554 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ...")); 3555 3556 /* Reset RDC block from PEU for this fatal error */ 3557 reset_reg.value = 0; 3558 reset_reg.bits.rdc_rst = 1; 3559 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 3560 3561 /* Disable RxMAC */ 3562 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n")); 3563 if (hxge_rx_vmac_disable(hxgep) != HXGE_OK) 3564 goto fail; 3565 3566 HXGE_DELAY(1000); 3567 3568 /* Restore any common settings after PEU reset */ 3569 if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK) 3570 goto fail; 3571 3572 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels...")); 3573 3574 ndmas = hxgep->rx_buf_pool_p->ndmas; 3575 dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p; 3576 3577 for (i = 0; i < ndmas; i++) { 3578 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 3579 if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) { 3580 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3581 "Could not recover channel %d", channel)); 3582 } 3583 } 3584 3585 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC...")); 3586 3587 /* Reset RxMAC */ 3588 if 
(hxge_rx_vmac_reset(hxgep) != HXGE_OK) { 3589 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3590 "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC")); 3591 goto fail; 3592 } 3593 3594 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC...")); 3595 3596 /* Re-Initialize RxMAC */ 3597 if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) { 3598 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3599 "hxge_rx_port_fatal_err_recover: Failed to re-initialize RxMAC")); 3600 goto fail; 3601 } 3602 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC...")); 3603 3604 /* Re-enable RxMAC */ 3605 if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) { 3606 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3607 "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC")); 3608 goto fail; 3609 } 3610 3611 /* Reset the error mask since PEU reset cleared it */ 3612 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3613 3614 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3615 "Recovery Successful, RxPort Restored")); 3616 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover")); 3617 3618 return (HXGE_OK); 3619 fail: 3620 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3621 return (status); 3622 } 3623
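/*
 * Illustrative example (not part of the original source): the bcopy
 * thresholds set up in hxge_map_rxdma_channel_cfg_ring() scale the RBR
 * size by the tunable's copy level.  Assuming HXGE_RX_BCOPY_SCALE is 8
 * and rbb_max is 2048, a setting of HXGE_RX_COPY_6 gives
 *
 *	rbr_threshold_hi = 2048 * 6 / 8 = 1536
 *
 * i.e. three quarters of the ring, while HXGE_RX_COPY_ALL forces the
 * threshold to 0 and HXGE_RX_COPY_NONE disables bcopy entirely.
 */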