/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_rxdma.h>

/*
 * Number of blocks to accumulate before re-enabling DMA
 * when we get RBR empty.
 */
#define	HXGE_RBR_EMPTY_THRESHOLD	64

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t hxge_rbr_size;
extern uint32_t hxge_rcr_size;
extern uint32_t hxge_rbr_spare_size;
extern uint32_t hxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t hxge_max_rx_pkts;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * hxge_rx_threshold_hi: copy all buffers.
 * hxge_rx_buf_size_type: receive buffer block size type.
 * hxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;

/*
 * Static local functions.
63 */ 64 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep); 65 static void hxge_unmap_rxdma(p_hxge_t hxgep); 66 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep); 67 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep); 68 static void hxge_rxdma_hw_stop(p_hxge_t hxgep); 69 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 70 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 71 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 72 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 73 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p); 74 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 75 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p); 76 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, 77 uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p, 78 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 79 p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p); 80 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 81 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p); 82 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, 83 uint16_t channel, p_hxge_dma_common_t *dma_buf_p, 84 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks); 85 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 86 p_rx_rbr_ring_t rbr_p); 87 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 88 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 89 int n_init_kick); 90 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel); 91 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 92 p_rx_rcr_ring_t *rcr_p, rdc_stat_t cs); 93 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p, 94 p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p, 95 mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry); 96 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep, 97 uint16_t channel); 98 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t); 99 static void hxge_freeb(p_rx_msg_t); 100 static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, 101 p_hxge_ldv_t ldvp, rdc_stat_t cs); 102 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, 103 p_hxge_ldv_t ldvp, rdc_stat_t cs); 104 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep, 105 p_rx_rbr_ring_t rx_dmap); 106 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, 107 uint16_t channel); 108 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep); 109 static void hxge_rbr_empty_restore(p_hxge_t hxgep, 110 p_rx_rbr_ring_t rx_rbr_p); 111 112 hxge_status_t 113 hxge_init_rxdma_channels(p_hxge_t hxgep) 114 { 115 hxge_status_t status = HXGE_OK; 116 block_reset_t reset_reg; 117 118 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels")); 119 120 /* Reset RDC block from PEU to clear any previous state */ 121 reset_reg.value = 0; 122 reset_reg.bits.rdc_rst = 1; 123 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 124 HXGE_DELAY(1000); 125 126 status = hxge_map_rxdma(hxgep); 127 if (status != HXGE_OK) { 128 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 129 "<== hxge_init_rxdma: status 0x%x", status)); 130 return (status); 131 } 132 133 status = hxge_rxdma_hw_start_common(hxgep); 134 if (status != HXGE_OK) { 135 hxge_unmap_rxdma(hxgep); 136 } 137 138 status = 
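	/*
	 * Receive initialization is a three step sequence: hxge_map_rxdma()
	 * builds the software rings, hxge_rxdma_hw_start_common() programs
	 * the common RDC state, and hxge_rxdma_hw_start() (below) starts the
	 * individual channels.  hxge_uninit_rxdma_channels() undoes this in
	 * reverse order, for example:
	 *
	 *	hxge_rxdma_hw_stop(hxgep);	stop the channels first
	 *	hxge_unmap_rxdma(hxgep);	then release the mapped rings
	 */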
hxge_rxdma_hw_start(hxgep); 139 if (status != HXGE_OK) { 140 hxge_unmap_rxdma(hxgep); 141 } 142 143 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 144 "<== hxge_init_rxdma_channels: status 0x%x", status)); 145 return (status); 146 } 147 148 void 149 hxge_uninit_rxdma_channels(p_hxge_t hxgep) 150 { 151 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels")); 152 153 hxge_rxdma_hw_stop(hxgep); 154 hxge_unmap_rxdma(hxgep); 155 156 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uinit_rxdma_channels")); 157 } 158 159 hxge_status_t 160 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel, 161 rdc_stat_t *cs_p) 162 { 163 hpi_handle_t handle; 164 hpi_status_t rs = HPI_SUCCESS; 165 hxge_status_t status = HXGE_OK; 166 167 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 168 "<== hxge_init_rxdma_channel_cntl_stat")); 169 170 handle = HXGE_DEV_HPI_HANDLE(hxgep); 171 rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p); 172 173 if (rs != HPI_SUCCESS) { 174 status = HXGE_ERROR | rs; 175 } 176 return (status); 177 } 178 179 180 hxge_status_t 181 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 182 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 183 int n_init_kick) 184 { 185 hpi_handle_t handle; 186 rdc_desc_cfg_t rdc_desc; 187 rdc_rcr_cfg_b_t *cfgb_p; 188 hpi_status_t rs = HPI_SUCCESS; 189 190 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel")); 191 handle = HXGE_DEV_HPI_HANDLE(hxgep); 192 193 /* 194 * Use configuration data composed at init time. Write to hardware the 195 * receive ring configurations. 196 */ 197 rdc_desc.mbox_enable = 1; 198 rdc_desc.mbox_addr = mbox_p->mbox_addr; 199 HXGE_DEBUG_MSG((hxgep, RX_CTL, 200 "==> hxge_enable_rxdma_channel: mboxp $%p($%p)", 201 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 202 203 rdc_desc.rbr_len = rbr_p->rbb_max; 204 rdc_desc.rbr_addr = rbr_p->rbr_addr; 205 206 switch (hxgep->rx_bksize_code) { 207 case RBR_BKSIZE_4K: 208 rdc_desc.page_size = SIZE_4KB; 209 break; 210 case RBR_BKSIZE_8K: 211 rdc_desc.page_size = SIZE_8KB; 212 break; 213 } 214 215 rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0; 216 rdc_desc.valid0 = 1; 217 218 rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1; 219 rdc_desc.valid1 = 1; 220 221 rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2; 222 rdc_desc.valid2 = 1; 223 224 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 225 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 226 227 rdc_desc.rcr_len = rcr_p->comp_size; 228 rdc_desc.rcr_addr = rcr_p->rcr_addr; 229 230 cfgb_p = &(rcr_p->rcr_cfgb); 231 rdc_desc.rcr_threshold = cfgb_p->bits.pthres; 232 rdc_desc.rcr_timeout = cfgb_p->bits.timeout; 233 rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout; 234 235 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: " 236 "rbr_len qlen %d pagesize code %d rcr_len %d", 237 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 238 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: " 239 "size 0 %d size 1 %d size 2 %d", 240 rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1, 241 rbr_p->hpi_pkt_buf_size2)); 242 243 rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 244 if (rs != HPI_SUCCESS) { 245 return (HXGE_ERROR | rs); 246 } 247 248 /* 249 * Enable the timeout and threshold. 
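 * The threshold interrupts after rcr_threshold packets have been queued to
 * the completion ring, and the timeout (when entout is set) fires a short
 * while after the first unserviced packet.  Both values come from the
 * rcr_cfgb image composed at init time; hxge_rx_pkts() rebuilds the same
 * RDC_RCR_CFG_B value whenever the hxgep->intr_timeout or intr_threshold
 * tunables change, roughly:
 *
 *	rcr_cfg_b.value = 0x0ULL;
 *	if (rcr_p->intr_timeout)
 *		rcr_cfg_b.bits.entout = 1;
 *	rcr_cfg_b.bits.timeout = rcr_p->intr_timeout;
 *	rcr_cfg_b.bits.pthres = rcr_p->intr_threshold;
 *	RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, channel, rcr_cfg_b.value);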
250 */ 251 rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 252 rdc_desc.rcr_threshold); 253 if (rs != HPI_SUCCESS) { 254 return (HXGE_ERROR | rs); 255 } 256 257 rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 258 rdc_desc.rcr_timeout); 259 if (rs != HPI_SUCCESS) { 260 return (HXGE_ERROR | rs); 261 } 262 263 /* Enable the DMA */ 264 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 265 if (rs != HPI_SUCCESS) { 266 return (HXGE_ERROR | rs); 267 } 268 269 /* Kick the DMA engine */ 270 hpi_rxdma_rdc_rbr_kick(handle, channel, n_init_kick); 271 272 /* Clear the rbr empty bit */ 273 (void) hpi_rxdma_channel_rbr_empty_clear(handle, channel); 274 275 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel")); 276 277 return (HXGE_OK); 278 } 279 280 static hxge_status_t 281 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel) 282 { 283 hpi_handle_t handle; 284 hpi_status_t rs = HPI_SUCCESS; 285 286 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel")); 287 288 handle = HXGE_DEV_HPI_HANDLE(hxgep); 289 290 /* disable the DMA */ 291 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 292 if (rs != HPI_SUCCESS) { 293 HXGE_DEBUG_MSG((hxgep, RX_CTL, 294 "<== hxge_disable_rxdma_channel:failed (0x%x)", rs)); 295 return (HXGE_ERROR | rs); 296 } 297 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel")); 298 return (HXGE_OK); 299 } 300 301 hxge_status_t 302 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel) 303 { 304 hpi_handle_t handle; 305 hxge_status_t status = HXGE_OK; 306 307 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 308 "==> hxge_rxdma_channel_rcrflush")); 309 310 handle = HXGE_DEV_HPI_HANDLE(hxgep); 311 hpi_rxdma_rdc_rcr_flush(handle, channel); 312 313 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 314 "<== hxge_rxdma_channel_rcrflush")); 315 return (status); 316 317 } 318 319 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 320 321 #define TO_LEFT -1 322 #define TO_RIGHT 1 323 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 324 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 325 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 326 #define NO_HINT 0xffffffff 327 328 /*ARGSUSED*/ 329 hxge_status_t 330 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p, 331 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 332 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 333 { 334 int bufsize; 335 uint64_t pktbuf_pp; 336 uint64_t dvma_addr; 337 rxring_info_t *ring_info; 338 int base_side, end_side; 339 int r_index, l_index, anchor_index; 340 int found, search_done; 341 uint32_t offset, chunk_size, block_size, page_size_mask; 342 uint32_t chunk_index, block_index, total_index; 343 int max_iterations, iteration; 344 rxbuf_index_info_t *bufinfo; 345 346 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp")); 347 348 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 349 "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 350 pkt_buf_addr_pp, pktbufsz_type)); 351 352 #if defined(__i386) 353 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 354 #else 355 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 356 #endif 357 358 switch (pktbufsz_type) { 359 case 0: 360 bufsize = rbr_p->pkt_buf_size0; 361 break; 362 case 1: 363 bufsize = rbr_p->pkt_buf_size1; 364 break; 365 case 2: 366 bufsize = rbr_p->pkt_buf_size2; 367 break; 368 case RCR_SINGLE_BLOCK: 369 bufsize = 0; 370 anchor_index = 0; 371 break; 372 default: 373 return (HXGE_ERROR); 374 } 375 376 if (rbr_p->num_blocks == 1) { 377 anchor_index = 0; 378 ring_info = rbr_p->ring_info; 379 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 380 381 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 382 
"==> hxge_rxbuf_pp_to_vp: (found, 1 block) " 383 "buf_pp $%p btype %d anchor_index %d bufinfo $%p", 384 pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo)); 385 386 goto found_index; 387 } 388 389 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 390 "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d", 391 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 392 393 ring_info = rbr_p->ring_info; 394 found = B_FALSE; 395 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 396 iteration = 0; 397 max_iterations = ring_info->max_iterations; 398 399 /* 400 * First check if this block have been seen recently. This is indicated 401 * by a hint which is initialized when the first buffer of the block is 402 * seen. The hint is reset when the last buffer of the block has been 403 * processed. As three block sizes are supported, three hints are kept. 404 * The idea behind the hints is that once the hardware uses a block 405 * for a buffer of that size, it will use it exclusively for that size 406 * and will use it until it is exhausted. It is assumed that there 407 * would a single block being used for the same buffer sizes at any 408 * given time. 409 */ 410 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 411 anchor_index = ring_info->hint[pktbufsz_type]; 412 dvma_addr = bufinfo[anchor_index].dvma_addr; 413 chunk_size = bufinfo[anchor_index].buf_size; 414 if ((pktbuf_pp >= dvma_addr) && 415 (pktbuf_pp < (dvma_addr + chunk_size))) { 416 found = B_TRUE; 417 /* 418 * check if this is the last buffer in the block If so, 419 * then reset the hint for the size; 420 */ 421 422 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 423 ring_info->hint[pktbufsz_type] = NO_HINT; 424 } 425 } 426 427 if (found == B_FALSE) { 428 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 429 "==> hxge_rxbuf_pp_to_vp: (!found)" 430 "buf_pp $%p btype %d anchor_index %d", 431 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 432 433 /* 434 * This is the first buffer of the block of this size. Need to 435 * search the whole information array. the search algorithm 436 * uses a binary tree search algorithm. It assumes that the 437 * information is already sorted with increasing order info[0] 438 * < info[1] < info[2] .... 
< info[n-1] where n is the size of 439 * the information array 440 */ 441 r_index = rbr_p->num_blocks - 1; 442 l_index = 0; 443 search_done = B_FALSE; 444 anchor_index = MID_INDEX(r_index, l_index); 445 while (search_done == B_FALSE) { 446 if ((r_index == l_index) || 447 (iteration >= max_iterations)) 448 search_done = B_TRUE; 449 450 end_side = TO_RIGHT; /* to the right */ 451 base_side = TO_LEFT; /* to the left */ 452 /* read the DVMA address information and sort it */ 453 dvma_addr = bufinfo[anchor_index].dvma_addr; 454 chunk_size = bufinfo[anchor_index].buf_size; 455 456 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 457 "==> hxge_rxbuf_pp_to_vp: (searching)" 458 "buf_pp $%p btype %d " 459 "anchor_index %d chunk_size %d dvmaaddr $%p", 460 pkt_buf_addr_pp, pktbufsz_type, anchor_index, 461 chunk_size, dvma_addr)); 462 463 if (pktbuf_pp >= dvma_addr) 464 base_side = TO_RIGHT; /* to the right */ 465 if (pktbuf_pp < (dvma_addr + chunk_size)) 466 end_side = TO_LEFT; /* to the left */ 467 468 switch (base_side + end_side) { 469 case IN_MIDDLE: 470 /* found */ 471 found = B_TRUE; 472 search_done = B_TRUE; 473 if ((pktbuf_pp + bufsize) < 474 (dvma_addr + chunk_size)) 475 ring_info->hint[pktbufsz_type] = 476 bufinfo[anchor_index].buf_index; 477 break; 478 case BOTH_RIGHT: 479 /* not found: go to the right */ 480 l_index = anchor_index + 1; 481 anchor_index = MID_INDEX(r_index, l_index); 482 break; 483 484 case BOTH_LEFT: 485 /* not found: go to the left */ 486 r_index = anchor_index - 1; 487 anchor_index = MID_INDEX(r_index, l_index); 488 break; 489 default: /* should not come here */ 490 return (HXGE_ERROR); 491 } 492 iteration++; 493 } 494 495 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 496 "==> hxge_rxbuf_pp_to_vp: (search done)" 497 "buf_pp $%p btype %d anchor_index %d", 498 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 499 } 500 501 if (found == B_FALSE) { 502 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 503 "==> hxge_rxbuf_pp_to_vp: (search failed)" 504 "buf_pp $%p btype %d anchor_index %d", 505 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 506 return (HXGE_ERROR); 507 } 508 509 found_index: 510 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 511 "==> hxge_rxbuf_pp_to_vp: (FOUND1)" 512 "buf_pp $%p btype %d bufsize %d anchor_index %d", 513 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index)); 514 515 /* index of the first block in this chunk */ 516 chunk_index = bufinfo[anchor_index].start_index; 517 dvma_addr = bufinfo[anchor_index].dvma_addr; 518 page_size_mask = ring_info->block_size_mask; 519 520 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 521 "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 522 "buf_pp $%p btype %d bufsize %d " 523 "anchor_index %d chunk_index %d dvma $%p", 524 pkt_buf_addr_pp, pktbufsz_type, bufsize, 525 anchor_index, chunk_index, dvma_addr)); 526 527 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 528 block_size = rbr_p->block_size; /* System block(page) size */ 529 530 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 531 "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 532 "buf_pp $%p btype %d bufsize %d " 533 "anchor_index %d chunk_index %d dvma $%p " 534 "offset %d block_size %d", 535 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index, 536 chunk_index, dvma_addr, offset, block_size)); 537 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index")); 538 539 block_index = (offset / block_size); /* index within chunk */ 540 total_index = chunk_index + block_index; 541 542 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 543 "==> hxge_rxbuf_pp_to_vp: " 544 "total_index %d dvma_addr $%p " 545 "offset %d block_size %d " 546 "block_index %d ", 547 
total_index, dvma_addr, offset, block_size, block_index)); 548 549 #if defined(__i386) 550 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 551 (uint32_t)offset); 552 #else 553 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 554 offset); 555 #endif 556 557 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 558 "==> hxge_rxbuf_pp_to_vp: " 559 "total_index %d dvma_addr $%p " 560 "offset %d block_size %d " 561 "block_index %d " 562 "*pkt_buf_addr_p $%p", 563 total_index, dvma_addr, offset, block_size, 564 block_index, *pkt_buf_addr_p)); 565 566 *msg_index = total_index; 567 *bufoffset = (offset & page_size_mask); 568 569 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 570 "==> hxge_rxbuf_pp_to_vp: get msg index: " 571 "msg_index %d bufoffset_index %d", 572 *msg_index, *bufoffset)); 573 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp")); 574 575 return (HXGE_OK); 576 } 577 578 579 /* 580 * used by quick sort (qsort) function 581 * to perform comparison 582 */ 583 static int 584 hxge_sort_compare(const void *p1, const void *p2) 585 { 586 587 rxbuf_index_info_t *a, *b; 588 589 a = (rxbuf_index_info_t *)p1; 590 b = (rxbuf_index_info_t *)p2; 591 592 if (a->dvma_addr > b->dvma_addr) 593 return (1); 594 if (a->dvma_addr < b->dvma_addr) 595 return (-1); 596 return (0); 597 } 598 599 /* 600 * Grabbed this sort implementation from common/syscall/avl.c 601 * 602 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 603 * v = Ptr to array/vector of objs 604 * n = # objs in the array 605 * s = size of each obj (must be multiples of a word size) 606 * f = ptr to function to compare two objs 607 * returns (-1 = less than, 0 = equal, 1 = greater than 608 */ 609 void 610 hxge_ksort(caddr_t v, int n, int s, int (*f) ()) 611 { 612 int g, i, j, ii; 613 unsigned int *p1, *p2; 614 unsigned int tmp; 615 616 /* No work to do */ 617 if (v == NULL || n <= 1) 618 return; 619 /* Sanity check on arguments */ 620 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 621 ASSERT(s > 0); 622 623 for (g = n / 2; g > 0; g /= 2) { 624 for (i = g; i < n; i++) { 625 for (j = i - g; j >= 0 && 626 (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) { 627 p1 = (unsigned *)(v + j * s); 628 p2 = (unsigned *)(v + (j + g) * s); 629 for (ii = 0; ii < s / 4; ii++) { 630 tmp = *p1; 631 *p1++ = *p2; 632 *p2++ = tmp; 633 } 634 } 635 } 636 } 637 } 638 639 /* 640 * Initialize data structures required for rxdma 641 * buffer dvma->vmem address lookup 642 */ 643 /*ARGSUSED*/ 644 static hxge_status_t 645 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp) 646 { 647 int index; 648 rxring_info_t *ring_info; 649 int max_iteration = 0, max_index = 0; 650 651 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init")); 652 653 ring_info = rbrp->ring_info; 654 ring_info->hint[0] = NO_HINT; 655 ring_info->hint[1] = NO_HINT; 656 ring_info->hint[2] = NO_HINT; 657 max_index = rbrp->num_blocks; 658 659 /* read the DVMA address information and sort it */ 660 /* do init of the information array */ 661 662 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 663 " hxge_rxbuf_index_info_init Sort ptrs")); 664 665 /* sort the array */ 666 hxge_ksort((void *) ring_info->buffer, max_index, 667 sizeof (rxbuf_index_info_t), hxge_sort_compare); 668 669 for (index = 0; index < max_index; index++) { 670 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 671 " hxge_rxbuf_index_info_init: sorted chunk %d " 672 " ioaddr $%p kaddr $%p size %x", 673 index, ring_info->buffer[index].dvma_addr, 674 ring_info->buffer[index].kaddr, 675 ring_info->buffer[index].buf_size)); 
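		/*
		 * Each sorted entry describes one contiguous DMA chunk: its
		 * DVMA base address, kernel virtual base address, size, and
		 * the RBR index of its first block.  Because hxge_ksort()
		 * orders the entries by dvma_addr, hxge_rxbuf_pp_to_vp() can
		 * binary-search them to translate a packet buffer address
		 * from an RCR entry back to a kernel virtual address,
		 * essentially:
		 *
		 *	offset      = pktbuf_pp - bufinfo[i].dvma_addr;
		 *	block_index = offset / rbr_p->block_size;
		 *	msg_index   = bufinfo[i].start_index + block_index;
		 *	kaddr       = bufinfo[i].kaddr + offset;
		 *
		 * max_iterations, computed below, bounds that search to
		 * about log2(num_blocks) + 1 probes.
		 */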
676 } 677 678 max_iteration = 0; 679 while (max_index >= (1ULL << max_iteration)) 680 max_iteration++; 681 ring_info->max_iterations = max_iteration + 1; 682 683 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 684 " hxge_rxbuf_index_info_init Find max iter %d", 685 ring_info->max_iterations)); 686 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init")); 687 688 return (HXGE_OK); 689 } 690 691 /*ARGSUSED*/ 692 void 693 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p) 694 { 695 #ifdef HXGE_DEBUG 696 697 uint32_t bptr; 698 uint64_t pp; 699 700 bptr = entry_p->bits.pkt_buf_addr; 701 702 HXGE_DEBUG_MSG((hxgep, RX_CTL, 703 "\trcr entry $%p " 704 "\trcr entry 0x%0llx " 705 "\trcr entry 0x%08x " 706 "\trcr entry 0x%08x " 707 "\tvalue 0x%0llx\n" 708 "\tmulti = %d\n" 709 "\tpkt_type = 0x%x\n" 710 "\terror = 0x%04x\n" 711 "\tl2_len = %d\n" 712 "\tpktbufsize = %d\n" 713 "\tpkt_buf_addr = $%p\n" 714 "\tpkt_buf_addr (<< 6) = $%p\n", 715 entry_p, 716 *(int64_t *)entry_p, 717 *(int32_t *)entry_p, 718 *(int32_t *)((char *)entry_p + 32), 719 entry_p->value, 720 entry_p->bits.multi, 721 entry_p->bits.pkt_type, 722 entry_p->bits.error, 723 entry_p->bits.l2_len, 724 entry_p->bits.pktbufsz, 725 bptr, 726 entry_p->bits.pkt_buf_addr_l)); 727 728 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 729 RCR_PKT_BUF_ADDR_SHIFT; 730 731 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 732 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 733 #endif 734 } 735 736 /*ARGSUSED*/ 737 void 738 hxge_rxdma_stop(p_hxge_t hxgep) 739 { 740 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop")); 741 742 (void) hxge_rx_vmac_disable(hxgep); 743 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP); 744 745 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop")); 746 } 747 748 void 749 hxge_rxdma_stop_reinit(p_hxge_t hxgep) 750 { 751 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit")); 752 753 (void) hxge_rxdma_stop(hxgep); 754 (void) hxge_uninit_rxdma_channels(hxgep); 755 (void) hxge_init_rxdma_channels(hxgep); 756 757 (void) hxge_rx_vmac_enable(hxgep); 758 759 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit")); 760 } 761 762 hxge_status_t 763 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable) 764 { 765 int i, ndmas; 766 uint16_t channel; 767 p_rx_rbr_rings_t rx_rbr_rings; 768 p_rx_rbr_ring_t *rbr_rings; 769 hpi_handle_t handle; 770 hpi_status_t rs = HPI_SUCCESS; 771 hxge_status_t status = HXGE_OK; 772 773 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 774 "==> hxge_rxdma_hw_mode: mode %d", enable)); 775 776 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 777 HXGE_DEBUG_MSG((hxgep, RX_CTL, 778 "<== hxge_rxdma_mode: not initialized")); 779 return (HXGE_ERROR); 780 } 781 782 rx_rbr_rings = hxgep->rx_rbr_rings; 783 if (rx_rbr_rings == NULL) { 784 HXGE_DEBUG_MSG((hxgep, RX_CTL, 785 "<== hxge_rxdma_mode: NULL ring pointer")); 786 return (HXGE_ERROR); 787 } 788 789 if (rx_rbr_rings->rbr_rings == NULL) { 790 HXGE_DEBUG_MSG((hxgep, RX_CTL, 791 "<== hxge_rxdma_mode: NULL rbr rings pointer")); 792 return (HXGE_ERROR); 793 } 794 795 ndmas = rx_rbr_rings->ndmas; 796 if (!ndmas) { 797 HXGE_DEBUG_MSG((hxgep, RX_CTL, 798 "<== hxge_rxdma_mode: no channel")); 799 return (HXGE_ERROR); 800 } 801 802 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 803 "==> hxge_rxdma_mode (ndmas %d)", ndmas)); 804 805 rbr_rings = rx_rbr_rings->rbr_rings; 806 807 handle = HXGE_DEV_HPI_HANDLE(hxgep); 808 809 for (i = 0; i < ndmas; i++) { 810 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 811 continue; 812 } 813 channel = rbr_rings[i]->rdc; 814 if (enable) { 815 
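			/*
			 * hxge_rxdma_hw_mode() simply walks every RBR ring
			 * and turns the matching RDC on or off through the
			 * HPI layer; hxge_rxdma_stop() uses it with
			 * HXGE_DMA_STOP once the Rx VMAC has been disabled.
			 * The status returned below reflects the last HPI
			 * call made in this loop.
			 */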
HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 816 "==> hxge_rxdma_hw_mode: channel %d (enable)", 817 channel)); 818 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 819 } else { 820 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 821 "==> hxge_rxdma_hw_mode: channel %d (disable)", 822 channel)); 823 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 824 } 825 } 826 827 status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs); 828 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 829 "<== hxge_rxdma_hw_mode: status 0x%x", status)); 830 831 return (status); 832 } 833 834 int 835 hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel) 836 { 837 int i, ndmas; 838 uint16_t rdc; 839 p_rx_rbr_rings_t rx_rbr_rings; 840 p_rx_rbr_ring_t *rbr_rings; 841 842 HXGE_DEBUG_MSG((hxgep, RX_CTL, 843 "==> hxge_rxdma_get_ring_index: channel %d", channel)); 844 845 rx_rbr_rings = hxgep->rx_rbr_rings; 846 if (rx_rbr_rings == NULL) { 847 HXGE_DEBUG_MSG((hxgep, RX_CTL, 848 "<== hxge_rxdma_get_ring_index: NULL ring pointer")); 849 return (-1); 850 } 851 852 ndmas = rx_rbr_rings->ndmas; 853 if (!ndmas) { 854 HXGE_DEBUG_MSG((hxgep, RX_CTL, 855 "<== hxge_rxdma_get_ring_index: no channel")); 856 return (-1); 857 } 858 859 HXGE_DEBUG_MSG((hxgep, RX_CTL, 860 "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 861 862 rbr_rings = rx_rbr_rings->rbr_rings; 863 for (i = 0; i < ndmas; i++) { 864 rdc = rbr_rings[i]->rdc; 865 if (channel == rdc) { 866 HXGE_DEBUG_MSG((hxgep, RX_CTL, 867 "==> hxge_rxdma_get_rbr_ring: " 868 "channel %d (index %d) " 869 "ring %d", channel, i, rbr_rings[i])); 870 871 return (i); 872 } 873 } 874 875 HXGE_DEBUG_MSG((hxgep, RX_CTL, 876 "<== hxge_rxdma_get_rbr_ring_index: not found")); 877 878 return (-1); 879 } 880 881 /* 882 * Static functions start here. 883 */ 884 static p_rx_msg_t 885 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p) 886 { 887 p_rx_msg_t hxge_mp = NULL; 888 p_hxge_dma_common_t dmamsg_p; 889 uchar_t *buffer; 890 891 hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 892 if (hxge_mp == NULL) { 893 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 894 "Allocation of a rx msg failed.")); 895 goto hxge_allocb_exit; 896 } 897 898 hxge_mp->use_buf_pool = B_FALSE; 899 if (dmabuf_p) { 900 hxge_mp->use_buf_pool = B_TRUE; 901 902 dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma; 903 *dmamsg_p = *dmabuf_p; 904 dmamsg_p->nblocks = 1; 905 dmamsg_p->block_size = size; 906 dmamsg_p->alength = size; 907 buffer = (uchar_t *)dmabuf_p->kaddrp; 908 909 dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size); 910 dmabuf_p->ioaddr_pp = (void *) 911 ((char *)dmabuf_p->ioaddr_pp + size); 912 913 dmabuf_p->alength -= size; 914 dmabuf_p->offset += size; 915 dmabuf_p->dma_cookie.dmac_laddress += size; 916 dmabuf_p->dma_cookie.dmac_size -= size; 917 } else { 918 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 919 if (buffer == NULL) { 920 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 921 "Allocation of a receive page failed.")); 922 goto hxge_allocb_fail1; 923 } 924 } 925 926 hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb); 927 if (hxge_mp->rx_mblk_p == NULL) { 928 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed.")); 929 goto hxge_allocb_fail2; 930 } 931 hxge_mp->buffer = buffer; 932 hxge_mp->block_size = size; 933 hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb; 934 hxge_mp->freeb.free_arg = (caddr_t)hxge_mp; 935 hxge_mp->ref_cnt = 1; 936 hxge_mp->free = B_TRUE; 937 hxge_mp->rx_use_bcopy = B_FALSE; 938 939 atomic_inc_32(&hxge_mblks_pending); 940 941 goto hxge_allocb_exit; 942 943 hxge_allocb_fail2: 944 if 
(!hxge_mp->use_buf_pool) { 945 KMEM_FREE(buffer, size); 946 } 947 hxge_allocb_fail1: 948 KMEM_FREE(hxge_mp, sizeof (rx_msg_t)); 949 hxge_mp = NULL; 950 951 hxge_allocb_exit: 952 return (hxge_mp); 953 } 954 955 p_mblk_t 956 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size) 957 { 958 p_mblk_t mp; 959 960 HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb")); 961 HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p " 962 "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size)); 963 964 mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb); 965 if (mp == NULL) { 966 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 967 goto hxge_dupb_exit; 968 } 969 970 atomic_inc_32(&hxge_mp->ref_cnt); 971 972 hxge_dupb_exit: 973 HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp)); 974 return (mp); 975 } 976 977 p_mblk_t 978 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size) 979 { 980 p_mblk_t mp; 981 uchar_t *dp; 982 983 mp = allocb(size + HXGE_RXBUF_EXTRA, 0); 984 if (mp == NULL) { 985 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 986 goto hxge_dupb_bcopy_exit; 987 } 988 dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA; 989 bcopy((void *) &hxge_mp->buffer[offset], dp, size); 990 mp->b_wptr = dp + size; 991 992 hxge_dupb_bcopy_exit: 993 994 HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp)); 995 996 return (mp); 997 } 998 999 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, 1000 p_rx_msg_t rx_msg_p); 1001 1002 void 1003 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1004 { 1005 1006 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page")); 1007 1008 /* Reuse this buffer */ 1009 rx_msg_p->free = B_FALSE; 1010 rx_msg_p->cur_usage_cnt = 0; 1011 rx_msg_p->max_usage_cnt = 0; 1012 rx_msg_p->pkt_buf_size = 0; 1013 1014 if (rx_rbr_p->rbr_use_bcopy) { 1015 rx_msg_p->rx_use_bcopy = B_FALSE; 1016 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1017 } 1018 1019 /* 1020 * Get the rbr header pointer and its offset index. 1021 */ 1022 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1023 rx_rbr_p->rbr_wrap_mask); 1024 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1025 1026 /* 1027 * Accumulate some buffers in the ring before re-enabling the 1028 * DMA channel, if rbr empty was signaled. 1029 */ 1030 hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1); 1031 if (rx_rbr_p->rbr_is_empty && 1032 rx_rbr_p->rbr_consumed < rx_rbr_p->rbb_max / 16) { 1033 hxge_rbr_empty_restore(hxgep, rx_rbr_p); 1034 } 1035 1036 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1037 "<== hxge_post_page (channel %d post_next_index %d)", 1038 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1039 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page")); 1040 } 1041 1042 void 1043 hxge_freeb(p_rx_msg_t rx_msg_p) 1044 { 1045 size_t size; 1046 uchar_t *buffer = NULL; 1047 int ref_cnt; 1048 boolean_t free_state = B_FALSE; 1049 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1050 1051 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb")); 1052 HXGE_DEBUG_MSG((NULL, MEM2_CTL, 1053 "hxge_freeb:rx_msg_p = $%p (block pending %d)", 1054 rx_msg_p, hxge_mblks_pending)); 1055 1056 if (ring == NULL) 1057 return; 1058 1059 /* 1060 * This is to prevent posting activities while we are recovering 1061 * from fatal errors. This should not be a performance drag since 1062 * ref_cnt != 0 most times. 
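 *
 * Each receive block is loaned to the stack through desballoc() with
 * hxge_freeb() registered as the free routine, and ref_cnt tracks the
 * driver's own hold plus every outstanding loan.  The key steps below are
 * roughly:
 *
 *	free_state = rx_msg_p->free;
 *	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
 *	if (free_state && ref_cnt == 1)
 *		hxge_post_page(...);	return the block to the RBR
 *
 * and only when ref_cnt drops to zero is the backing memory (and, once the
 * ring is RBR_UNMAPPED, the ring structure itself) released.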
1063 */ 1064 if (ring->rbr_state == RBR_POSTING) 1065 MUTEX_ENTER(&ring->post_lock); 1066 1067 /* 1068 * First we need to get the free state, then 1069 * atomic decrement the reference count to prevent 1070 * the race condition with the interrupt thread that 1071 * is processing a loaned up buffer block. 1072 */ 1073 free_state = rx_msg_p->free; 1074 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1075 if (!ref_cnt) { 1076 atomic_dec_32(&hxge_mblks_pending); 1077 1078 buffer = rx_msg_p->buffer; 1079 size = rx_msg_p->block_size; 1080 1081 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: " 1082 "will free: rx_msg_p = $%p (block pending %d)", 1083 rx_msg_p, hxge_mblks_pending)); 1084 1085 if (!rx_msg_p->use_buf_pool) { 1086 KMEM_FREE(buffer, size); 1087 } 1088 1089 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1090 /* 1091 * Decrement the receive buffer ring's reference 1092 * count, too. 1093 */ 1094 atomic_dec_32(&ring->rbr_ref_cnt); 1095 1096 /* 1097 * Free the receive buffer ring, iff 1098 * 1. all the receive buffers have been freed 1099 * 2. and we are in the proper state (that is, 1100 * we are not UNMAPPING). 1101 */ 1102 if (ring->rbr_ref_cnt == 0 && 1103 ring->rbr_state == RBR_UNMAPPED) { 1104 KMEM_FREE(ring, sizeof (*ring)); 1105 /* post_lock has been destroyed already */ 1106 return; 1107 } 1108 } 1109 1110 /* 1111 * Repost buffer. 1112 */ 1113 if (free_state && (ref_cnt == 1)) { 1114 HXGE_DEBUG_MSG((NULL, RX_CTL, 1115 "hxge_freeb: post page $%p:", rx_msg_p)); 1116 if (ring->rbr_state == RBR_POSTING) 1117 hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p); 1118 } 1119 1120 if (ring->rbr_state == RBR_POSTING) 1121 MUTEX_EXIT(&ring->post_lock); 1122 1123 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb")); 1124 } 1125 1126 uint_t 1127 hxge_rx_intr(caddr_t arg1, caddr_t arg2) 1128 { 1129 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 1130 p_hxge_t hxgep = (p_hxge_t)arg2; 1131 p_hxge_ldg_t ldgp; 1132 uint8_t channel; 1133 hpi_handle_t handle; 1134 rdc_stat_t cs; 1135 uint_t serviced = DDI_INTR_UNCLAIMED; 1136 1137 if (ldvp == NULL) { 1138 HXGE_DEBUG_MSG((NULL, RX_INT_CTL, 1139 "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1140 return (DDI_INTR_UNCLAIMED); 1141 } 1142 1143 if (arg2 == NULL || (void *) ldvp->hxgep != arg2) { 1144 hxgep = ldvp->hxgep; 1145 } 1146 1147 /* 1148 * If the interface is not started, just swallow the interrupt 1149 * for the logical device and don't rearm it. 1150 */ 1151 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) 1152 return (DDI_INTR_CLAIMED); 1153 1154 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1155 "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1156 1157 /* 1158 * This interrupt handler is for a specific receive dma channel. 1159 */ 1160 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1161 1162 /* 1163 * Get the control and status for this channel. 1164 */ 1165 channel = ldvp->channel; 1166 ldgp = ldvp->ldgp; 1167 RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value); 1168 1169 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d " 1170 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1171 channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres)); 1172 1173 hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs); 1174 serviced = DDI_INTR_CLAIMED; 1175 1176 /* error events. */ 1177 if (cs.value & RDC_STAT_ERROR) { 1178 (void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs); 1179 } 1180 1181 hxge_intr_exit: 1182 /* 1183 * Enable the mailbox update interrupt if we want to use mailbox. We 1184 * probably don't need to use mailbox as it only saves us one pio read. 
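 * The event bits in RDC_STAT are write-one-to-clear, so the value read is
 * masked with RDC_STAT_WR1C and written back with mex set to re-arm the
 * mailbox interrupt, while pktread and ptrread are zeroed here because
 * hxge_rx_pkts() reports those counts itself.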
1185 * Also write 1 to rcrthres and rcrto to clear these two edge triggered 1186 * bits. 1187 */ 1188 cs.value &= RDC_STAT_WR1C; 1189 cs.bits.mex = 1; 1190 cs.bits.ptrread = 0; 1191 cs.bits.pktread = 0; 1192 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1193 1194 /* 1195 * Rearm this logical group if this is a single device group. 1196 */ 1197 if (ldgp->nldvs == 1) { 1198 ld_intr_mgmt_t mgm; 1199 1200 mgm.value = 0; 1201 mgm.bits.arm = 1; 1202 mgm.bits.timer = ldgp->ldg_timer; 1203 HXGE_REG_WR32(handle, 1204 LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value); 1205 } 1206 1207 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1208 "<== hxge_rx_intr: serviced %d", serviced)); 1209 1210 return (serviced); 1211 } 1212 1213 static void 1214 hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1215 rdc_stat_t cs) 1216 { 1217 p_mblk_t mp; 1218 p_rx_rcr_ring_t rcrp; 1219 1220 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring")); 1221 if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs)) == NULL) { 1222 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1223 "<== hxge_rx_pkts_vring: no mp")); 1224 return; 1225 } 1226 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp)); 1227 1228 #ifdef HXGE_DEBUG 1229 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1230 "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) " 1231 "LEN %d mp $%p mp->b_next $%p rcrp $%p " 1232 "mac_handle $%p", 1233 (mp->b_wptr - mp->b_rptr), mp, mp->b_next, 1234 rcrp, rcrp->rcr_mac_handle)); 1235 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1236 "==> hxge_rx_pkts_vring: dump packets " 1237 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1238 mp, mp->b_rptr, mp->b_wptr, 1239 hxge_dump_packet((char *)mp->b_rptr, 64))); 1240 1241 if (mp->b_cont) { 1242 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1243 "==> hxge_rx_pkts_vring: dump b_cont packets " 1244 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1245 mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr, 1246 hxge_dump_packet((char *)mp->b_cont->b_rptr, 1247 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1248 } 1249 if (mp->b_next) { 1250 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1251 "==> hxge_rx_pkts_vring: dump next packets " 1252 "(b_rptr $%p): %s", 1253 mp->b_next->b_rptr, 1254 hxge_dump_packet((char *)mp->b_next->b_rptr, 64))); 1255 } 1256 #endif 1257 1258 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1259 "==> hxge_rx_pkts_vring: send packet to stack")); 1260 mac_rx(hxgep->mach, rcrp->rcr_mac_handle, mp); 1261 1262 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring")); 1263 } 1264 1265 /*ARGSUSED*/ 1266 mblk_t * 1267 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1268 p_rx_rcr_ring_t *rcrp, rdc_stat_t cs) 1269 { 1270 hpi_handle_t handle; 1271 uint8_t channel; 1272 p_rx_rcr_rings_t rx_rcr_rings; 1273 p_rx_rcr_ring_t rcr_p; 1274 uint32_t comp_rd_index; 1275 p_rcr_entry_t rcr_desc_rd_head_p; 1276 p_rcr_entry_t rcr_desc_rd_head_pp; 1277 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1278 uint16_t qlen, nrcr_read, npkt_read; 1279 uint32_t qlen_hw, qlen_sw; 1280 uint32_t invalid_rcr_entry; 1281 boolean_t multi; 1282 rdc_rcr_cfg_b_t rcr_cfg_b; 1283 p_rx_mbox_t rx_mboxp; 1284 p_rxdma_mailbox_t mboxp; 1285 uint64_t rcr_head_index, rcr_tail_index; 1286 uint64_t rcr_tail; 1287 uint64_t value; 1288 rdc_rcr_tail_t rcr_tail_reg; 1289 p_hxge_rx_ring_stats_t rdc_stats; 1290 1291 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d " 1292 "channel %d", vindex, ldvp->channel)); 1293 1294 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 1295 return (NULL); 1296 } 1297 1298 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1299 rx_rcr_rings = 
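	/*
	 * Each call drains at most hxge_max_rx_pkts packets.  The queue
	 * length reported by the hardware is also cross-checked against a
	 * software count derived from the RCR tail register and the driver's
	 * head index, which handles the tail wrapping past the end of the
	 * completion ring:
	 *
	 *	if (rcr_tail_index >= rcr_head_index)
	 *		qlen_sw = rcr_tail_index - rcr_head_index;
	 *	else
	 *		qlen_sw = (comp_size - rcr_head_index) + rcr_tail_index;
	 *
	 * and the smaller of the two counts is used (see qlen_hw below).
	 */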
hxgep->rx_rcr_rings; 1300 rcr_p = rx_rcr_rings->rcr_rings[vindex]; 1301 channel = rcr_p->rdc; 1302 if (channel != ldvp->channel) { 1303 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d " 1304 "channel %d, and rcr channel %d not matched.", 1305 vindex, ldvp->channel, channel)); 1306 return (NULL); 1307 } 1308 1309 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1310 "==> hxge_rx_pkts: START: rcr channel %d " 1311 "head_p $%p head_pp $%p index %d ", 1312 channel, rcr_p->rcr_desc_rd_head_p, 1313 rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index)); 1314 1315 rx_mboxp = hxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 1316 mboxp = (p_rxdma_mailbox_t)rx_mboxp->rx_mbox.kaddrp; 1317 1318 (void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1319 RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value); 1320 rcr_tail = rcr_tail_reg.bits.tail; 1321 1322 if (!qlen) { 1323 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1324 "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)", 1325 channel, qlen)); 1326 return (NULL); 1327 } 1328 1329 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d " 1330 "qlen %d", channel, qlen)); 1331 1332 comp_rd_index = rcr_p->comp_rd_index; 1333 1334 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 1335 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 1336 nrcr_read = npkt_read = 0; 1337 1338 /* 1339 * Number of packets queued (The jumbo or multi packet will be counted 1340 * as only one paccket and it may take up more than one completion 1341 * entry). 1342 */ 1343 qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts; 1344 head_mp = NULL; 1345 tail_mp = &head_mp; 1346 nmp = mp_cont = NULL; 1347 multi = B_FALSE; 1348 1349 rcr_head_index = rcr_p->rcr_desc_rd_head_p - rcr_p->rcr_desc_first_p; 1350 rcr_tail_index = rcr_tail - rcr_p->rcr_tail_begin; 1351 1352 if (rcr_tail_index >= rcr_head_index) { 1353 qlen_sw = rcr_tail_index - rcr_head_index; 1354 } else { 1355 /* rcr_tail has wrapped around */ 1356 qlen_sw = (rcr_p->comp_size - rcr_head_index) + rcr_tail_index; 1357 } 1358 1359 if (qlen_hw > qlen_sw) { 1360 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1361 "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n", 1362 channel, qlen_hw, qlen_sw)); 1363 qlen_hw = qlen_sw; 1364 } 1365 1366 while (qlen_hw) { 1367 #ifdef HXGE_DEBUG 1368 hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p); 1369 #endif 1370 /* 1371 * Process one completion ring entry. 
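 * hxge_receive_packet() returns the first buffer of a frame in nmp and any
 * continuation buffer in mp_cont.  The chaining code that follows links
 * completed frames with b_next and the pieces of a multi-buffer frame with
 * b_cont:
 *
 *	!multi && !mp_cont	whole frame in one buffer, append via b_next
 *	 multi && !mp_cont	first piece of a multi-buffer frame
 *	 multi &&  mp_cont	middle piece, appended via b_cont
 *	!multi &&  mp_cont	last piece, the frame is complete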
1372 */ 1373 invalid_rcr_entry = 0; 1374 hxge_receive_packet(hxgep, 1375 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont, 1376 &invalid_rcr_entry); 1377 if (invalid_rcr_entry != 0) { 1378 rdc_stats = rcr_p->rdc_stats; 1379 rdc_stats->rcr_invalids++; 1380 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1381 "Channel %d could only read 0x%x packets, " 1382 "but 0x%x pending\n", channel, npkt_read, qlen_hw)); 1383 break; 1384 } 1385 1386 /* 1387 * message chaining modes (nemo msg chaining) 1388 */ 1389 if (nmp) { 1390 nmp->b_next = NULL; 1391 if (!multi && !mp_cont) { /* frame fits a partition */ 1392 *tail_mp = nmp; 1393 tail_mp = &nmp->b_next; 1394 nmp = NULL; 1395 } else if (multi && !mp_cont) { /* first segment */ 1396 *tail_mp = nmp; 1397 tail_mp = &nmp->b_cont; 1398 } else if (multi && mp_cont) { /* mid of multi segs */ 1399 *tail_mp = mp_cont; 1400 tail_mp = &mp_cont->b_cont; 1401 } else if (!multi && mp_cont) { /* last segment */ 1402 *tail_mp = mp_cont; 1403 tail_mp = &nmp->b_next; 1404 nmp = NULL; 1405 } 1406 } 1407 1408 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1409 "==> hxge_rx_pkts: loop: rcr channel %d " 1410 "before updating: multi %d " 1411 "nrcr_read %d " 1412 "npk read %d " 1413 "head_pp $%p index %d ", 1414 channel, multi, 1415 nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index)); 1416 1417 if (!multi) { 1418 qlen_hw--; 1419 npkt_read++; 1420 } 1421 1422 /* 1423 * Update the next read entry. 1424 */ 1425 comp_rd_index = NEXT_ENTRY(comp_rd_index, 1426 rcr_p->comp_wrap_mask); 1427 1428 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 1429 rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p); 1430 1431 nrcr_read++; 1432 1433 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1434 "<== hxge_rx_pkts: (SAM, process one packet) " 1435 "nrcr_read %d", nrcr_read)); 1436 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1437 "==> hxge_rx_pkts: loop: rcr channel %d " 1438 "multi %d nrcr_read %d npk read %d head_pp $%p index %d ", 1439 channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp, 1440 comp_rd_index)); 1441 } 1442 1443 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 1444 rcr_p->comp_rd_index = comp_rd_index; 1445 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 1446 1447 /* Adjust the mailbox queue length for a hardware bug workaround */ 1448 mboxp->rcrstat_a.bits.qlen -= npkt_read; 1449 1450 if ((hxgep->intr_timeout != rcr_p->intr_timeout) || 1451 (hxgep->intr_threshold != rcr_p->intr_threshold)) { 1452 rcr_p->intr_timeout = hxgep->intr_timeout; 1453 rcr_p->intr_threshold = hxgep->intr_threshold; 1454 rcr_cfg_b.value = 0x0ULL; 1455 if (rcr_p->intr_timeout) 1456 rcr_cfg_b.bits.entout = 1; 1457 rcr_cfg_b.bits.timeout = rcr_p->intr_timeout; 1458 rcr_cfg_b.bits.pthres = rcr_p->intr_threshold; 1459 RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, 1460 channel, rcr_cfg_b.value); 1461 } 1462 1463 cs.bits.pktread = npkt_read; 1464 cs.bits.ptrread = nrcr_read; 1465 value = cs.value; 1466 cs.value &= 0xffffffffULL; 1467 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1468 1469 cs.value = value & ~0xffffffffULL; 1470 cs.bits.pktread = 0; 1471 cs.bits.ptrread = 0; 1472 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1473 1474 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1475 "==> hxge_rx_pkts: EXIT: rcr channel %d " 1476 "head_pp $%p index %016llx ", 1477 channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index)); 1478 1479 /* 1480 * Update RCR buffer pointer read and number of packets read. 
1481 */ 1482 1483 *rcrp = rcr_p; 1484 1485 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts")); 1486 1487 return (head_mp); 1488 } 1489 1490 #define RCR_ENTRY_PATTERN 0x5a5a6b6b7c7c8d8dULL 1491 #define NO_PORT_BIT 0x20 1492 1493 /*ARGSUSED*/ 1494 void 1495 hxge_receive_packet(p_hxge_t hxgep, 1496 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 1497 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont, 1498 uint32_t *invalid_rcr_entry) 1499 { 1500 p_mblk_t nmp = NULL; 1501 uint64_t multi; 1502 uint8_t channel; 1503 1504 boolean_t first_entry = B_TRUE; 1505 boolean_t buffer_free = B_FALSE; 1506 boolean_t error_send_up = B_FALSE; 1507 uint8_t error_type; 1508 uint16_t l2_len; 1509 uint16_t skip_len; 1510 uint8_t pktbufsz_type; 1511 uint64_t rcr_entry; 1512 uint64_t *pkt_buf_addr_pp; 1513 uint64_t *pkt_buf_addr_p; 1514 uint32_t buf_offset; 1515 uint32_t bsize; 1516 uint32_t msg_index; 1517 p_rx_rbr_ring_t rx_rbr_p; 1518 p_rx_msg_t *rx_msg_ring_p; 1519 p_rx_msg_t rx_msg_p; 1520 1521 uint16_t sw_offset_bytes = 0, hdr_size = 0; 1522 hxge_status_t status = HXGE_OK; 1523 boolean_t is_valid = B_FALSE; 1524 p_hxge_rx_ring_stats_t rdc_stats; 1525 uint32_t bytes_read; 1526 uint8_t header = 0; 1527 1528 channel = rcr_p->rdc; 1529 1530 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet")); 1531 1532 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 1533 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 1534 1535 /* Verify the content of the rcr_entry for a hardware bug workaround */ 1536 if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) { 1537 *invalid_rcr_entry = 1; 1538 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet " 1539 "Channel %d invalid RCR entry 0x%llx found, returning\n", 1540 channel, (long long) rcr_entry)); 1541 return; 1542 } 1543 *((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN; 1544 1545 multi = (rcr_entry & RCR_MULTI_MASK); 1546 1547 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 1548 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 1549 1550 /* 1551 * Hardware does not strip the CRC due bug ID 11451 where 1552 * the hardware mis handles minimum size packets. 
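 * The 4-byte FCS is therefore still included in the L2 length reported by
 * the completion entry, so ETHERFCSL is subtracted from l2_len below before
 * the length is used.  The buffer address in the entry is likewise stored
 * without its low-order bits and is shifted left by
 * RCR_PKT_BUF_ADDR_SHIFT_FULL further down to recover the full I/O address
 * that hxge_rxbuf_pp_to_vp() translates to a kernel virtual address.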
1553 */ 1554 l2_len -= ETHERFCSL; 1555 1556 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 1557 RCR_PKTBUFSZ_SHIFT); 1558 #if defined(__i386) 1559 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 1560 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 1561 #else 1562 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 1563 RCR_PKT_BUF_ADDR_SHIFT); 1564 #endif 1565 1566 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1567 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1568 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1569 "error_type 0x%x pktbufsz_type %d ", 1570 rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len, 1571 multi, error_type, pktbufsz_type)); 1572 1573 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1574 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1575 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1576 "error_type 0x%x ", rcr_desc_rd_head_p, 1577 rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type)); 1578 1579 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1580 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1581 "full pkt_buf_addr_pp $%p l2_len %d", 1582 rcr_entry, pkt_buf_addr_pp, l2_len)); 1583 1584 /* get the stats ptr */ 1585 rdc_stats = rcr_p->rdc_stats; 1586 1587 if (!l2_len) { 1588 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1589 "<== hxge_receive_packet: failed: l2 length is 0.")); 1590 return; 1591 } 1592 1593 /* shift 6 bits to get the full io address */ 1594 #if defined(__i386) 1595 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 1596 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1597 #else 1598 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 1599 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1600 #endif 1601 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1602 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1603 "full pkt_buf_addr_pp $%p l2_len %d", 1604 rcr_entry, pkt_buf_addr_pp, l2_len)); 1605 1606 rx_rbr_p = rcr_p->rx_rbr_p; 1607 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 1608 1609 if (first_entry) { 1610 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 1611 RXDMA_HDR_SIZE_DEFAULT); 1612 1613 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1614 "==> hxge_receive_packet: first entry 0x%016llx " 1615 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 1616 rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size)); 1617 } 1618 1619 MUTEX_ENTER(&rcr_p->lock); 1620 MUTEX_ENTER(&rx_rbr_p->lock); 1621 1622 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1623 "==> (rbr 1) hxge_receive_packet: entry 0x%0llx " 1624 "full pkt_buf_addr_pp $%p l2_len %d", 1625 rcr_entry, pkt_buf_addr_pp, l2_len)); 1626 1627 /* 1628 * Packet buffer address in the completion entry points to the starting 1629 * buffer address (offset 0). Use the starting buffer address to locate 1630 * the corresponding kernel address. 
1631 */ 1632 status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p, 1633 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 1634 &buf_offset, &msg_index); 1635 1636 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1637 "==> (rbr 2) hxge_receive_packet: entry 0x%0llx " 1638 "full pkt_buf_addr_pp $%p l2_len %d", 1639 rcr_entry, pkt_buf_addr_pp, l2_len)); 1640 1641 if (status != HXGE_OK) { 1642 MUTEX_EXIT(&rx_rbr_p->lock); 1643 MUTEX_EXIT(&rcr_p->lock); 1644 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1645 "<== hxge_receive_packet: found vaddr failed %d", status)); 1646 return; 1647 } 1648 1649 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1650 "==> (rbr 3) hxge_receive_packet: entry 0x%0llx " 1651 "full pkt_buf_addr_pp $%p l2_len %d", 1652 rcr_entry, pkt_buf_addr_pp, l2_len)); 1653 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1654 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1655 "full pkt_buf_addr_pp $%p l2_len %d", 1656 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1657 1658 if (msg_index >= rx_rbr_p->tnblocks) { 1659 MUTEX_EXIT(&rx_rbr_p->lock); 1660 MUTEX_EXIT(&rcr_p->lock); 1661 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1662 "==> hxge_receive_packet: FATAL msg_index (%d) " 1663 "should be smaller than tnblocks (%d)\n", 1664 msg_index, rx_rbr_p->tnblocks)); 1665 return; 1666 } 1667 1668 rx_msg_p = rx_msg_ring_p[msg_index]; 1669 1670 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1671 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1672 "full pkt_buf_addr_pp $%p l2_len %d", 1673 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1674 1675 switch (pktbufsz_type) { 1676 case RCR_PKTBUFSZ_0: 1677 bsize = rx_rbr_p->pkt_buf_size0_bytes; 1678 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1679 "==> hxge_receive_packet: 0 buf %d", bsize)); 1680 break; 1681 case RCR_PKTBUFSZ_1: 1682 bsize = rx_rbr_p->pkt_buf_size1_bytes; 1683 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1684 "==> hxge_receive_packet: 1 buf %d", bsize)); 1685 break; 1686 case RCR_PKTBUFSZ_2: 1687 bsize = rx_rbr_p->pkt_buf_size2_bytes; 1688 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1689 "==> hxge_receive_packet: 2 buf %d", bsize)); 1690 break; 1691 case RCR_SINGLE_BLOCK: 1692 bsize = rx_msg_p->block_size; 1693 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1694 "==> hxge_receive_packet: single %d", bsize)); 1695 1696 break; 1697 default: 1698 MUTEX_EXIT(&rx_rbr_p->lock); 1699 MUTEX_EXIT(&rcr_p->lock); 1700 return; 1701 } 1702 1703 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 1704 (buf_offset + sw_offset_bytes), (hdr_size + l2_len), 1705 DDI_DMA_SYNC_FORCPU); 1706 1707 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1708 "==> hxge_receive_packet: after first dump:usage count")); 1709 1710 if (rx_msg_p->cur_usage_cnt == 0) { 1711 if (rx_rbr_p->rbr_use_bcopy) { 1712 atomic_inc_32(&rx_rbr_p->rbr_consumed); 1713 if (rx_rbr_p->rbr_consumed > 1714 rx_rbr_p->rbr_threshold_hi) { 1715 rx_msg_p->rx_use_bcopy = B_TRUE; 1716 } 1717 } 1718 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1719 "==> hxge_receive_packet: buf %d (new block) ", bsize)); 1720 1721 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 1722 rx_msg_p->pkt_buf_size = bsize; 1723 rx_msg_p->cur_usage_cnt = 1; 1724 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 1725 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1726 "==> hxge_receive_packet: buf %d (single block) ", 1727 bsize)); 1728 /* 1729 * Buffer can be reused once the free function is 1730 * called. 
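 * For the other buffer size codes the block is shared by several packet
 * buffers and can only be re-posted to the RBR after every slice has been
 * consumed, which is what the usage counters below track:
 *
 *	rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
 *	...
 *	if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt)
 *		buffer_free = B_TRUE;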
1731 */ 1732 rx_msg_p->max_usage_cnt = 1; 1733 buffer_free = B_TRUE; 1734 } else { 1735 rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize; 1736 if (rx_msg_p->max_usage_cnt == 1) { 1737 buffer_free = B_TRUE; 1738 } 1739 } 1740 } else { 1741 rx_msg_p->cur_usage_cnt++; 1742 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 1743 buffer_free = B_TRUE; 1744 } 1745 } 1746 1747 if (rx_msg_p->rx_use_bcopy) { 1748 rdc_stats->pkt_drop++; 1749 atomic_inc_32(&rx_msg_p->ref_cnt); 1750 if (buffer_free == B_TRUE) { 1751 rx_msg_p->free = B_TRUE; 1752 } 1753 1754 MUTEX_EXIT(&rx_rbr_p->lock); 1755 MUTEX_EXIT(&rcr_p->lock); 1756 hxge_freeb(rx_msg_p); 1757 return; 1758 } 1759 1760 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1761 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 1762 msg_index, l2_len, 1763 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 1764 1765 if (error_type) { 1766 rdc_stats->ierrors++; 1767 /* Update error stats */ 1768 rdc_stats->errlog.compl_err_type = error_type; 1769 HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR); 1770 1771 if (error_type & RCR_CTRL_FIFO_DED) { 1772 rdc_stats->ctrl_fifo_ecc_err++; 1773 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1774 " hxge_receive_packet: " 1775 " channel %d RCR ctrl_fifo_ded error", channel)); 1776 } else if (error_type & RCR_DATA_FIFO_DED) { 1777 rdc_stats->data_fifo_ecc_err++; 1778 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1779 " hxge_receive_packet: channel %d" 1780 " RCR data_fifo_ded error", channel)); 1781 } 1782 1783 /* 1784 * Update and repost buffer block if max usage count is 1785 * reached. 1786 */ 1787 if (error_send_up == B_FALSE) { 1788 atomic_inc_32(&rx_msg_p->ref_cnt); 1789 if (buffer_free == B_TRUE) { 1790 rx_msg_p->free = B_TRUE; 1791 } 1792 1793 MUTEX_EXIT(&rx_rbr_p->lock); 1794 MUTEX_EXIT(&rcr_p->lock); 1795 hxge_freeb(rx_msg_p); 1796 return; 1797 } 1798 } 1799 1800 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1801 "==> hxge_receive_packet: DMA sync second ")); 1802 1803 bytes_read = rcr_p->rcvd_pkt_bytes; 1804 skip_len = sw_offset_bytes + hdr_size; 1805 1806 if (first_entry) { 1807 header = rx_msg_p->buffer[buf_offset]; 1808 } 1809 1810 if (!rx_msg_p->rx_use_bcopy) { 1811 /* 1812 * For loaned up buffers, the driver reference count 1813 * will be incremented first and then the free state. 1814 */ 1815 if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 1816 if (first_entry) { 1817 nmp->b_rptr = &nmp->b_rptr[skip_len]; 1818 if (l2_len < bsize - skip_len) { 1819 nmp->b_wptr = &nmp->b_rptr[l2_len]; 1820 } else { 1821 nmp->b_wptr = &nmp->b_rptr[bsize 1822 - skip_len]; 1823 } 1824 } else { 1825 if (l2_len - bytes_read < bsize) { 1826 nmp->b_wptr = 1827 &nmp->b_rptr[l2_len - bytes_read]; 1828 } else { 1829 nmp->b_wptr = &nmp->b_rptr[bsize]; 1830 } 1831 } 1832 } 1833 } else { 1834 if (first_entry) { 1835 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 1836 l2_len < bsize - skip_len ? 1837 l2_len : bsize - skip_len); 1838 } else { 1839 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset, 1840 l2_len - bytes_read < bsize ? 
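		    /*
		     * hxge_dupb() loans the receive block up the stack:
		     * desballoc() wraps the existing buffer and the block is
		     * returned to the RBR by hxge_freeb() when the upper
		     * layer frees the mblk.  hxge_dupb_bcopy(), used on the
		     * rx_use_bcopy path, instead allocb()s a fresh mblk and
		     * copies the payload so the receive block itself is not
		     * held by the stack.
		     */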
1841 l2_len - bytes_read : bsize); 1842 } 1843 } 1844 1845 if (nmp != NULL) { 1846 if (first_entry) 1847 bytes_read = nmp->b_wptr - nmp->b_rptr; 1848 else 1849 bytes_read += nmp->b_wptr - nmp->b_rptr; 1850 1851 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1852 "==> hxge_receive_packet after dupb: " 1853 "rbr consumed %d " 1854 "pktbufsz_type %d " 1855 "nmp $%p rptr $%p wptr $%p " 1856 "buf_offset %d bzise %d l2_len %d skip_len %d", 1857 rx_rbr_p->rbr_consumed, 1858 pktbufsz_type, 1859 nmp, nmp->b_rptr, nmp->b_wptr, 1860 buf_offset, bsize, l2_len, skip_len)); 1861 } else { 1862 cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)"); 1863 1864 atomic_inc_32(&rx_msg_p->ref_cnt); 1865 if (buffer_free == B_TRUE) { 1866 rx_msg_p->free = B_TRUE; 1867 } 1868 1869 MUTEX_EXIT(&rx_rbr_p->lock); 1870 MUTEX_EXIT(&rcr_p->lock); 1871 hxge_freeb(rx_msg_p); 1872 return; 1873 } 1874 1875 if (buffer_free == B_TRUE) { 1876 rx_msg_p->free = B_TRUE; 1877 } 1878 1879 /* 1880 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a 1881 * packet is not fragmented and no error bit is set, then L4 checksum 1882 * is OK. 1883 */ 1884 is_valid = (nmp != NULL); 1885 if (first_entry) { 1886 rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 1887 if (l2_len > (STD_FRAME_SIZE - ETHERFCSL)) 1888 rdc_stats->jumbo_pkts++; 1889 rdc_stats->ibytes += skip_len + l2_len < bsize ? 1890 l2_len : bsize; 1891 } else { 1892 /* 1893 * Add the current portion of the packet to the kstats. 1894 * The current portion of the packet is calculated by using 1895 * length of the packet and the previously received portion. 1896 */ 1897 rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ? 1898 l2_len - rcr_p->rcvd_pkt_bytes : bsize; 1899 } 1900 1901 rcr_p->rcvd_pkt_bytes = bytes_read; 1902 1903 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 1904 atomic_inc_32(&rx_msg_p->ref_cnt); 1905 MUTEX_EXIT(&rx_rbr_p->lock); 1906 MUTEX_EXIT(&rcr_p->lock); 1907 hxge_freeb(rx_msg_p); 1908 } else { 1909 MUTEX_EXIT(&rx_rbr_p->lock); 1910 MUTEX_EXIT(&rcr_p->lock); 1911 } 1912 1913 if (is_valid) { 1914 nmp->b_cont = NULL; 1915 if (first_entry) { 1916 *mp = nmp; 1917 *mp_cont = NULL; 1918 } else { 1919 *mp_cont = nmp; 1920 } 1921 } 1922 1923 /* 1924 * Update stats and hardware checksuming. 1925 */ 1926 if (is_valid && !multi) { 1927 if (!(header & NO_PORT_BIT) && !error_type) { 1928 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 1929 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 1930 1931 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1932 "==> hxge_receive_packet: Full tcp/udp cksum " 1933 "is_valid 0x%x multi %d error %d", 1934 is_valid, multi, error_type)); 1935 } 1936 } 1937 1938 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1939 "==> hxge_receive_packet: *mp 0x%016llx", *mp)); 1940 1941 *multi_p = (multi == RCR_MULTI_MASK); 1942 1943 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: " 1944 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 1945 *multi_p, nmp, *mp, *mp_cont)); 1946 } 1947 1948 static void 1949 hxge_rx_rbr_empty_recover(p_hxge_t hxgep, uint8_t channel) 1950 { 1951 hpi_handle_t handle; 1952 p_rx_rcr_ring_t rcrp; 1953 p_rx_rbr_ring_t rbrp; 1954 1955 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel]; 1956 rbrp = rcrp->rx_rbr_p; 1957 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1958 1959 /* 1960 * Wait for the channel to be quiet 1961 */ 1962 (void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel); 1963 1964 /* 1965 * Post page will accumulate some buffers before re-enabling 1966 * the DMA channel. 
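 * hxge_post_page() performs the deferred restart: each buffer it returns is
 * kicked back to the hardware and, while rbr_is_empty is set, it calls
 * hxge_rbr_empty_restore() once rbr_consumed has dropped below
 * rbb_max / 16.  The check below re-enables the channel immediately only
 * when fewer than rbb_max / 32 buffers are still outstanding.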
1967 */ 1968 MUTEX_ENTER(&rbrp->post_lock); 1969 if (rbrp->rbr_consumed < rbrp->rbb_max / 32) { 1970 hxge_rbr_empty_restore(hxgep, rbrp); 1971 } else { 1972 rbrp->rbr_is_empty = B_TRUE; 1973 } 1974 MUTEX_EXIT(&rbrp->post_lock); 1975 } 1976 1977 /*ARGSUSED*/ 1978 static hxge_status_t 1979 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp, 1980 rdc_stat_t cs) 1981 { 1982 p_hxge_rx_ring_stats_t rdc_stats; 1983 hpi_handle_t handle; 1984 boolean_t rxchan_fatal = B_FALSE; 1985 uint8_t channel; 1986 hxge_status_t status = HXGE_OK; 1987 uint64_t cs_val; 1988 1989 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts")); 1990 1991 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1992 channel = ldvp->channel; 1993 1994 /* Clear the interrupts */ 1995 cs.bits.pktread = 0; 1996 cs.bits.ptrread = 0; 1997 cs_val = cs.value & RDC_STAT_WR1C; 1998 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val); 1999 2000 rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index]; 2001 2002 if (cs.bits.rbr_cpl_to) { 2003 rdc_stats->rbr_tmout++; 2004 HXGE_FM_REPORT_ERROR(hxgep, channel, 2005 HXGE_FM_EREPORT_RDMC_RBR_CPL_TO); 2006 rxchan_fatal = B_TRUE; 2007 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2008 "==> hxge_rx_err_evnts(channel %d): " 2009 "fatal error: rx_rbr_timeout", channel)); 2010 } 2011 2012 if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) { 2013 (void) hpi_rxdma_ring_perr_stat_get(handle, 2014 &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par); 2015 } 2016 2017 if (cs.bits.rcr_shadow_par_err) { 2018 rdc_stats->rcr_sha_par++; 2019 HXGE_FM_REPORT_ERROR(hxgep, channel, 2020 HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2021 rxchan_fatal = B_TRUE; 2022 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2023 "==> hxge_rx_err_evnts(channel %d): " 2024 "fatal error: rcr_shadow_par_err", channel)); 2025 } 2026 2027 if (cs.bits.rbr_prefetch_par_err) { 2028 rdc_stats->rbr_pre_par++; 2029 HXGE_FM_REPORT_ERROR(hxgep, channel, 2030 HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2031 rxchan_fatal = B_TRUE; 2032 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2033 "==> hxge_rx_err_evnts(channel %d): " 2034 "fatal error: rbr_prefetch_par_err", channel)); 2035 } 2036 2037 if (cs.bits.rbr_pre_empty) { 2038 rdc_stats->rbr_pre_empty++; 2039 HXGE_FM_REPORT_ERROR(hxgep, channel, 2040 HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY); 2041 rxchan_fatal = B_TRUE; 2042 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2043 "==> hxge_rx_err_evnts(channel %d): " 2044 "fatal error: rbr_pre_empty", channel)); 2045 } 2046 2047 if (cs.bits.peu_resp_err) { 2048 rdc_stats->peu_resp_err++; 2049 HXGE_FM_REPORT_ERROR(hxgep, channel, 2050 HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR); 2051 rxchan_fatal = B_TRUE; 2052 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2053 "==> hxge_rx_err_evnts(channel %d): " 2054 "fatal error: peu_resp_err", channel)); 2055 } 2056 2057 if (cs.bits.rcr_thres) { 2058 rdc_stats->rcr_thres++; 2059 } 2060 2061 if (cs.bits.rcr_to) { 2062 rdc_stats->rcr_to++; 2063 } 2064 2065 if (cs.bits.rcr_shadow_full) { 2066 rdc_stats->rcr_shadow_full++; 2067 HXGE_FM_REPORT_ERROR(hxgep, channel, 2068 HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL); 2069 rxchan_fatal = B_TRUE; 2070 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2071 "==> hxge_rx_err_evnts(channel %d): " 2072 "fatal error: rcr_shadow_full", channel)); 2073 } 2074 2075 if (cs.bits.rcr_full) { 2076 rdc_stats->rcrfull++; 2077 HXGE_FM_REPORT_ERROR(hxgep, channel, 2078 HXGE_FM_EREPORT_RDMC_RCRFULL); 2079 rxchan_fatal = B_TRUE; 2080 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2081 "==> hxge_rx_err_evnts(channel %d): " 2082 "fatal error: rcrfull error", channel)); 2083 } 2084 
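/*
 * rbr_empty is handled as a recoverable condition rather than a fatal
 * one: the channel is quiesced and re-armed by hxge_rx_rbr_empty_recover()
 * below instead of taking the rxchan_fatal reset path.
 */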
2085 if (cs.bits.rbr_empty) { 2086 rdc_stats->rbr_empty++; 2087 hxge_rx_rbr_empty_recover(hxgep, channel); 2088 } 2089 2090 if (cs.bits.rbr_full) { 2091 rdc_stats->rbrfull++; 2092 HXGE_FM_REPORT_ERROR(hxgep, channel, 2093 HXGE_FM_EREPORT_RDMC_RBRFULL); 2094 rxchan_fatal = B_TRUE; 2095 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2096 "==> hxge_rx_err_evnts(channel %d): " 2097 "fatal error: rbr_full error", channel)); 2098 } 2099 2100 if (rxchan_fatal) { 2101 p_rx_rcr_ring_t rcrp; 2102 p_rx_rbr_ring_t rbrp; 2103 2104 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel]; 2105 rbrp = rcrp->rx_rbr_p; 2106 2107 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2108 " hxge_rx_err_evnts: fatal error on Channel #%d\n", 2109 channel)); 2110 MUTEX_ENTER(&rbrp->post_lock); 2111 /* This function needs to be inside the post_lock */ 2112 status = hxge_rxdma_fatal_err_recover(hxgep, channel); 2113 MUTEX_EXIT(&rbrp->post_lock); 2114 if (status == HXGE_OK) { 2115 FM_SERVICE_RESTORED(hxgep); 2116 } 2117 } 2118 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts")); 2119 2120 return (status); 2121 } 2122 2123 static hxge_status_t 2124 hxge_map_rxdma(p_hxge_t hxgep) 2125 { 2126 int i, ndmas; 2127 uint16_t channel; 2128 p_rx_rbr_rings_t rx_rbr_rings; 2129 p_rx_rbr_ring_t *rbr_rings; 2130 p_rx_rcr_rings_t rx_rcr_rings; 2131 p_rx_rcr_ring_t *rcr_rings; 2132 p_rx_mbox_areas_t rx_mbox_areas_p; 2133 p_rx_mbox_t *rx_mbox_p; 2134 p_hxge_dma_pool_t dma_buf_poolp; 2135 p_hxge_dma_common_t *dma_buf_p; 2136 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2137 p_hxge_dma_common_t *dma_rbr_cntl_p; 2138 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2139 p_hxge_dma_common_t *dma_rcr_cntl_p; 2140 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2141 p_hxge_dma_common_t *dma_mbox_cntl_p; 2142 uint32_t *num_chunks; 2143 hxge_status_t status = HXGE_OK; 2144 2145 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma")); 2146 2147 dma_buf_poolp = hxgep->rx_buf_pool_p; 2148 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2149 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2150 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2151 2152 if (!dma_buf_poolp->buf_allocated || 2153 !dma_rbr_cntl_poolp->buf_allocated || 2154 !dma_rcr_cntl_poolp->buf_allocated || 2155 !dma_mbox_cntl_poolp->buf_allocated) { 2156 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2157 "<== hxge_map_rxdma: buf not allocated")); 2158 return (HXGE_ERROR); 2159 } 2160 2161 ndmas = dma_buf_poolp->ndmas; 2162 if (!ndmas) { 2163 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2164 "<== hxge_map_rxdma: no dma allocated")); 2165 return (HXGE_ERROR); 2166 } 2167 2168 num_chunks = dma_buf_poolp->num_chunks; 2169 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2170 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p; 2171 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p; 2172 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p; 2173 2174 rx_rbr_rings = (p_rx_rbr_rings_t) 2175 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2176 rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC( 2177 sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2178 2179 rx_rcr_rings = (p_rx_rcr_rings_t) 2180 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2181 rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC( 2182 sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2183 2184 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2185 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2186 rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC( 2187 sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2188 2189 /* 2190 * Timeout should be set based on the system clock divider. 
2191 * The following timeout value of 1 assumes that the 2192 * granularity (1000) is 3 microseconds running at 300MHz. 2193 */ 2194 2195 hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2196 hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2197 2198 /* 2199 * Map descriptors from the buffer pools for each dma channel. 2200 */ 2201 for (i = 0; i < ndmas; i++) { 2202 /* 2203 * Set up and prepare buffer blocks, descriptors and mailbox. 2204 */ 2205 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2206 status = hxge_map_rxdma_channel(hxgep, channel, 2207 (p_hxge_dma_common_t *)&dma_buf_p[i], 2208 (p_rx_rbr_ring_t *)&rbr_rings[i], 2209 num_chunks[i], 2210 (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i], 2211 (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i], 2212 (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i], 2213 (p_rx_rcr_ring_t *)&rcr_rings[i], 2214 (p_rx_mbox_t *)&rx_mbox_p[i]); 2215 if (status != HXGE_OK) { 2216 goto hxge_map_rxdma_fail1; 2217 } 2218 rbr_rings[i]->index = (uint16_t)i; 2219 rcr_rings[i]->index = (uint16_t)i; 2220 rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i]; 2221 } 2222 2223 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2224 rx_rbr_rings->rbr_rings = rbr_rings; 2225 hxgep->rx_rbr_rings = rx_rbr_rings; 2226 rx_rcr_rings->rcr_rings = rcr_rings; 2227 hxgep->rx_rcr_rings = rx_rcr_rings; 2228 2229 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2230 hxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2231 2232 goto hxge_map_rxdma_exit; 2233 2234 hxge_map_rxdma_fail1: 2235 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2236 "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)", 2237 status, channel, i)); 2238 i--; 2239 for (; i >= 0; i--) { 2240 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2241 hxge_unmap_rxdma_channel(hxgep, channel, 2242 rbr_rings[i], rcr_rings[i], rx_mbox_p[i]); 2243 } 2244 2245 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2246 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2247 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2248 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2249 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2250 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2251 2252 hxge_map_rxdma_exit: 2253 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2254 "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 2255 2256 return (status); 2257 } 2258 2259 static void 2260 hxge_unmap_rxdma(p_hxge_t hxgep) 2261 { 2262 int i, ndmas; 2263 uint16_t channel; 2264 p_rx_rbr_rings_t rx_rbr_rings; 2265 p_rx_rbr_ring_t *rbr_rings; 2266 p_rx_rcr_rings_t rx_rcr_rings; 2267 p_rx_rcr_ring_t *rcr_rings; 2268 p_rx_mbox_areas_t rx_mbox_areas_p; 2269 p_rx_mbox_t *rx_mbox_p; 2270 p_hxge_dma_pool_t dma_buf_poolp; 2271 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2272 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2273 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2274 p_hxge_dma_common_t *dma_buf_p; 2275 2276 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma")); 2277 2278 dma_buf_poolp = hxgep->rx_buf_pool_p; 2279 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2280 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2281 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2282 2283 if (!dma_buf_poolp->buf_allocated || 2284 !dma_rbr_cntl_poolp->buf_allocated || 2285 !dma_rcr_cntl_poolp->buf_allocated || 2286 !dma_mbox_cntl_poolp->buf_allocated) { 2287 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2288 "<== hxge_unmap_rxdma: NULL buf pointers")); 2289 return; 2290 } 2291 2292 rx_rbr_rings = hxgep->rx_rbr_rings; 2293 rx_rcr_rings = hxgep->rx_rcr_rings; 2294 if
(rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2295 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2296 "<== hxge_unmap_rxdma: NULL pointers")); 2297 return; 2298 } 2299 2300 ndmas = rx_rbr_rings->ndmas; 2301 if (!ndmas) { 2302 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2303 "<== hxge_unmap_rxdma: no channel")); 2304 return; 2305 } 2306 2307 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2308 "==> hxge_unmap_rxdma (ndmas %d)", ndmas)); 2309 2310 rbr_rings = rx_rbr_rings->rbr_rings; 2311 rcr_rings = rx_rcr_rings->rcr_rings; 2312 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 2313 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 2314 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2315 2316 for (i = 0; i < ndmas; i++) { 2317 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2318 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2319 "==> hxge_unmap_rxdma (ndmas %d) channel %d", 2320 ndmas, channel)); 2321 (void) hxge_unmap_rxdma_channel(hxgep, channel, 2322 (p_rx_rbr_ring_t)rbr_rings[i], 2323 (p_rx_rcr_ring_t)rcr_rings[i], 2324 (p_rx_mbox_t)rx_mbox_p[i]); 2325 } 2326 2327 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2328 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2329 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2330 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2331 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2332 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2333 2334 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma")); 2335 } 2336 2337 hxge_status_t 2338 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2339 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 2340 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 2341 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 2342 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2343 { 2344 int status = HXGE_OK; 2345 2346 /* 2347 * Set up and prepare buffer blocks, descriptors and mailbox. 2348 */ 2349 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2350 "==> hxge_map_rxdma_channel (channel %d)", channel)); 2351 2352 /* 2353 * Receive buffer blocks 2354 */ 2355 status = hxge_map_rxdma_channel_buf_ring(hxgep, channel, 2356 dma_buf_p, rbr_p, num_chunks); 2357 if (status != HXGE_OK) { 2358 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2359 "==> hxge_map_rxdma_channel (channel %d): " 2360 "map buffer failed 0x%x", channel, status)); 2361 goto hxge_map_rxdma_channel_exit; 2362 } 2363 2364 /* 2365 * Receive block ring, completion ring and mailbox. 
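 * If this step fails, the buffer ring mapped above is released again
 * through the hxge_map_rxdma_channel_fail2 path before an error is
 * returned to the caller.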
2366 */ 2367 status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel, 2368 dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p, 2369 rbr_p, rcr_p, rx_mbox_p); 2370 if (status != HXGE_OK) { 2371 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2372 "==> hxge_map_rxdma_channel (channel %d): " 2373 "map config failed 0x%x", channel, status)); 2374 goto hxge_map_rxdma_channel_fail2; 2375 } 2376 goto hxge_map_rxdma_channel_exit; 2377 2378 hxge_map_rxdma_channel_fail3: 2379 /* Free rbr, rcr */ 2380 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2381 "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)", 2382 status, channel)); 2383 hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p); 2384 2385 hxge_map_rxdma_channel_fail2: 2386 /* Free buffer blocks */ 2387 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2388 "==> hxge_map_rxdma_channel: free rx buffers" 2389 "(hxgep 0x%x status 0x%x channel %d)", 2390 hxgep, status, channel)); 2391 hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p); 2392 2393 status = HXGE_ERROR; 2394 2395 hxge_map_rxdma_channel_exit: 2396 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2397 "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)", 2398 hxgep, status, channel)); 2399 2400 return (status); 2401 } 2402 2403 /*ARGSUSED*/ 2404 static void 2405 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2406 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2407 { 2408 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2409 "==> hxge_unmap_rxdma_channel (channel %d)", channel)); 2410 2411 /* 2412 * unmap receive block ring, completion ring and mailbox. 2413 */ 2414 (void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p); 2415 2416 /* unmap buffer blocks */ 2417 (void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p); 2418 2419 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel")); 2420 } 2421 2422 /*ARGSUSED*/ 2423 static hxge_status_t 2424 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel, 2425 p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p, 2426 p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p, 2427 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2428 { 2429 p_rx_rbr_ring_t rbrp; 2430 p_rx_rcr_ring_t rcrp; 2431 p_rx_mbox_t mboxp; 2432 p_hxge_dma_common_t cntl_dmap; 2433 p_hxge_dma_common_t dmap; 2434 p_rx_msg_t *rx_msg_ring; 2435 p_rx_msg_t rx_msg_p; 2436 rdc_rbr_cfg_a_t *rcfga_p; 2437 rdc_rbr_cfg_b_t *rcfgb_p; 2438 rdc_rcr_cfg_a_t *cfga_p; 2439 rdc_rcr_cfg_b_t *cfgb_p; 2440 rdc_rx_cfg1_t *cfig1_p; 2441 rdc_rx_cfg2_t *cfig2_p; 2442 rdc_rbr_kick_t *kick_p; 2443 uint32_t dmaaddrp; 2444 uint32_t *rbr_vaddrp; 2445 uint32_t bkaddr; 2446 hxge_status_t status = HXGE_OK; 2447 int i; 2448 uint32_t hxge_port_rcr_size; 2449 2450 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2451 "==> hxge_map_rxdma_channel_cfg_ring")); 2452 2453 cntl_dmap = *dma_rbr_cntl_p; 2454 2455 /* 2456 * Map in the receive block ring 2457 */ 2458 rbrp = *rbr_p; 2459 dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc; 2460 hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 2461 2462 /* 2463 * Zero out buffer block ring descriptors. 
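 * The ring was sized above as rbb_max entries of 4 bytes each (one
 * packed block address per entry), so clearing alength bytes here
 * appears to wipe the whole descriptor ring before the block
 * addresses are written into it below.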
2464 */ 2465 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2466 2467 rcfga_p = &(rbrp->rbr_cfga); 2468 rcfgb_p = &(rbrp->rbr_cfgb); 2469 kick_p = &(rbrp->rbr_kick); 2470 rcfga_p->value = 0; 2471 rcfgb_p->value = 0; 2472 kick_p->value = 0; 2473 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 2474 rcfga_p->value = (rbrp->rbr_addr & 2475 (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK)); 2476 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 2477 2478 /* XXXX: how to choose packet buffer sizes */ 2479 rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0; 2480 rcfgb_p->bits.vld0 = 1; 2481 rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1; 2482 rcfgb_p->bits.vld1 = 1; 2483 rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2; 2484 rcfgb_p->bits.vld2 = 1; 2485 rcfgb_p->bits.bksize = hxgep->rx_bksize_code; 2486 2487 /* 2488 * For each buffer block, enter receive block address to the ring. 2489 */ 2490 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 2491 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 2492 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2493 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2494 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 2495 2496 rx_msg_ring = rbrp->rx_msg_ring; 2497 for (i = 0; i < rbrp->tnblocks; i++) { 2498 rx_msg_p = rx_msg_ring[i]; 2499 rx_msg_p->hxgep = hxgep; 2500 rx_msg_p->rx_rbr_p = rbrp; 2501 bkaddr = (uint32_t) 2502 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2503 RBR_BKADDR_SHIFT)); 2504 rx_msg_p->free = B_FALSE; 2505 rx_msg_p->max_usage_cnt = 0xbaddcafe; 2506 2507 *rbr_vaddrp++ = bkaddr; 2508 } 2509 2510 kick_p->bits.bkadd = rbrp->rbb_max; 2511 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 2512 2513 rbrp->rbr_rd_index = 0; 2514 2515 rbrp->rbr_consumed = 0; 2516 rbrp->rbr_use_bcopy = B_TRUE; 2517 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 2518 2519 /* 2520 * Do bcopy on packets greater than bcopy size once the lo threshold is 2521 * reached. This lo threshold should be less than the hi threshold. 2522 * 2523 * Do bcopy on every packet once the hi threshold is reached. 
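 * As a rough illustration (assuming, for example, a ring of
 * rbb_max = 2048 blocks and an HXGE_RX_BCOPY_SCALE of 8), a setting of
 * HXGE_RX_COPY_6 would place the corresponding threshold at
 * 2048 * 6 / 8 = 1536 consumed blocks; the actual values depend on the
 * tunables and ring size configured on this system.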
2524 */ 2525 if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) { 2526 /* default it to use hi */ 2527 hxge_rx_threshold_lo = hxge_rx_threshold_hi; 2528 } 2529 if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) { 2530 hxge_rx_buf_size_type = HXGE_RBR_TYPE2; 2531 } 2532 rbrp->rbr_bufsize_type = hxge_rx_buf_size_type; 2533 2534 switch (hxge_rx_threshold_hi) { 2535 default: 2536 case HXGE_RX_COPY_NONE: 2537 /* Do not do bcopy at all */ 2538 rbrp->rbr_use_bcopy = B_FALSE; 2539 rbrp->rbr_threshold_hi = rbrp->rbb_max; 2540 break; 2541 2542 case HXGE_RX_COPY_1: 2543 case HXGE_RX_COPY_2: 2544 case HXGE_RX_COPY_3: 2545 case HXGE_RX_COPY_4: 2546 case HXGE_RX_COPY_5: 2547 case HXGE_RX_COPY_6: 2548 case HXGE_RX_COPY_7: 2549 rbrp->rbr_threshold_hi = 2550 rbrp->rbb_max * (hxge_rx_threshold_hi) / 2551 HXGE_RX_BCOPY_SCALE; 2552 break; 2553 2554 case HXGE_RX_COPY_ALL: 2555 rbrp->rbr_threshold_hi = 0; 2556 break; 2557 } 2558 2559 switch (hxge_rx_threshold_lo) { 2560 default: 2561 case HXGE_RX_COPY_NONE: 2562 /* Do not do bcopy at all */ 2563 if (rbrp->rbr_use_bcopy) { 2564 rbrp->rbr_use_bcopy = B_FALSE; 2565 } 2566 rbrp->rbr_threshold_lo = rbrp->rbb_max; 2567 break; 2568 2569 case HXGE_RX_COPY_1: 2570 case HXGE_RX_COPY_2: 2571 case HXGE_RX_COPY_3: 2572 case HXGE_RX_COPY_4: 2573 case HXGE_RX_COPY_5: 2574 case HXGE_RX_COPY_6: 2575 case HXGE_RX_COPY_7: 2576 rbrp->rbr_threshold_lo = 2577 rbrp->rbb_max * (hxge_rx_threshold_lo) / 2578 HXGE_RX_BCOPY_SCALE; 2579 break; 2580 2581 case HXGE_RX_COPY_ALL: 2582 rbrp->rbr_threshold_lo = 0; 2583 break; 2584 } 2585 2586 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2587 "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d " 2588 "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d " 2589 "rbb_threshold_lo %d", 2590 dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type, 2591 rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo)); 2592 2593 /* Map in the receive completion ring */ 2594 rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 2595 rcrp->rdc = dma_channel; 2596 rcrp->hxgep = hxgep; 2597 2598 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 2599 rcrp->comp_size = hxge_port_rcr_size; 2600 rcrp->comp_wrap_mask = hxge_port_rcr_size - 1; 2601 2602 rcrp->max_receive_pkts = hxge_max_rx_pkts; 2603 2604 cntl_dmap = *dma_rcr_cntl_p; 2605 2606 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 2607 hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 2608 sizeof (rcr_entry_t)); 2609 rcrp->comp_rd_index = 0; 2610 rcrp->comp_wt_index = 0; 2611 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 2612 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 2613 #if defined(__i386) 2614 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2615 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2616 #else 2617 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2618 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2619 #endif 2620 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 2621 (hxge_port_rcr_size - 1); 2622 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 2623 (hxge_port_rcr_size - 1); 2624 2625 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 2626 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 2627 2628 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2629 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2630 "rbr_vaddrp $%p rcr_desc_rd_head_p $%p " 2631 "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p " 2632 "rcr_desc_rd_last_pp $%p ", 2633 dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p, 2634 rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p, 2635 
rcrp->rcr_desc_last_pp)); 2636 2637 /* 2638 * Zero out buffer block ring descriptors. 2639 */ 2640 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2641 rcrp->intr_timeout = hxgep->intr_timeout; 2642 rcrp->intr_threshold = hxgep->intr_threshold; 2643 rcrp->full_hdr_flag = B_FALSE; 2644 rcrp->sw_priv_hdr_len = 0; 2645 2646 cfga_p = &(rcrp->rcr_cfga); 2647 cfgb_p = &(rcrp->rcr_cfgb); 2648 cfga_p->value = 0; 2649 cfgb_p->value = 0; 2650 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 2651 2652 cfga_p->value = (rcrp->rcr_addr & 2653 (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK)); 2654 2655 cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF); 2656 2657 /* 2658 * Timeout should be set based on the system clock divider. The 2659 * following timeout value of 1 assumes that the granularity (1000) is 2660 * 3 microseconds running at 300MHz. 2661 */ 2662 cfgb_p->bits.pthres = rcrp->intr_threshold; 2663 cfgb_p->bits.timeout = rcrp->intr_timeout; 2664 cfgb_p->bits.entout = 1; 2665 2666 /* Map in the mailbox */ 2667 cntl_dmap = *dma_mbox_cntl_p; 2668 mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 2669 dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox; 2670 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 2671 cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1; 2672 cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2; 2673 cfig1_p->value = cfig2_p->value = 0; 2674 2675 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 2676 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2677 "==> hxge_map_rxdma_channel_cfg_ring: " 2678 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 2679 dma_channel, cfig1_p->value, cfig2_p->value, 2680 mboxp->mbox_addr)); 2681 2682 dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff); 2683 cfig1_p->bits.mbaddr_h = dmaaddrp; 2684 2685 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 2686 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 2687 RXDMA_CFIG2_MBADDR_L_MASK); 2688 2689 cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 2690 2691 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2692 "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p " 2693 "cfg1 0x%016llx cfig2 0x%016llx", 2694 dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value)); 2695 2696 cfig2_p->bits.full_hdr = rcrp->full_hdr_flag; 2697 cfig2_p->bits.offset = rcrp->sw_priv_hdr_len; 2698 2699 rbrp->rx_rcr_p = rcrp; 2700 rcrp->rx_rbr_p = rbrp; 2701 *rcr_p = rcrp; 2702 *rx_mbox_p = mboxp; 2703 2704 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2705 "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 2706 return (status); 2707 } 2708 2709 /*ARGSUSED*/ 2710 static void 2711 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 2712 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2713 { 2714 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2715 "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc)); 2716 2717 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 2718 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 2719 2720 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2721 "<== hxge_unmap_rxdma_channel_cfg_ring")); 2722 } 2723 2724 static hxge_status_t 2725 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel, 2726 p_hxge_dma_common_t *dma_buf_p, 2727 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 2728 { 2729 p_rx_rbr_ring_t rbrp; 2730 p_hxge_dma_common_t dma_bufp, tmp_bufp; 2731 p_rx_msg_t *rx_msg_ring; 2732 p_rx_msg_t rx_msg_p; 2733 p_mblk_t mblk_p; 2734 2735 rxring_info_t *ring_info; 2736 hxge_status_t status = HXGE_OK; 2737 int i, j, index; 2738 uint32_t size, bsize, 
nblocks, nmsgs; 2739 2740 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2741 "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel)); 2742 2743 dma_bufp = tmp_bufp = *dma_buf_p; 2744 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2745 " hxge_map_rxdma_channel_buf_ring: channel %d to map %d " 2746 "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp)); 2747 2748 nmsgs = 0; 2749 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2750 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2751 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2752 "bufp 0x%016llx nblocks %d nmsgs %d", 2753 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2754 nmsgs += tmp_bufp->nblocks; 2755 } 2756 if (!nmsgs) { 2757 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2758 "<== hxge_map_rxdma_channel_buf_ring: channel %d " 2759 "no msg blocks", channel)); 2760 status = HXGE_ERROR; 2761 goto hxge_map_rxdma_channel_buf_ring_exit; 2762 } 2763 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 2764 2765 size = nmsgs * sizeof (p_rx_msg_t); 2766 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2767 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 2768 KM_SLEEP); 2769 2770 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 2771 (void *) hxgep->interrupt_cookie); 2772 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 2773 (void *) hxgep->interrupt_cookie); 2774 2775 rbrp->rdc = channel; 2776 rbrp->num_blocks = num_chunks; 2777 rbrp->tnblocks = nmsgs; 2778 rbrp->rbb_max = nmsgs; 2779 rbrp->rbr_max_size = nmsgs; 2780 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 2781 2782 /* 2783 * Buffer sizes suggested by NIU architect. 256, 512 and 2K. 2784 */ 2785 2786 switch (hxgep->rx_bksize_code) { 2787 case RBR_BKSIZE_4K: 2788 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 2789 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 2790 rbrp->hpi_pkt_buf_size0 = SIZE_256B; 2791 break; 2792 case RBR_BKSIZE_8K: 2793 /* Use 512 to avoid possible rcr_full condition */ 2794 rbrp->pkt_buf_size0 = RBR_BUFSZ0_512B; 2795 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_512_BYTES; 2796 rbrp->hpi_pkt_buf_size0 = SIZE_512B; 2797 break; 2798 } 2799 2800 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 2801 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 2802 rbrp->hpi_pkt_buf_size1 = SIZE_1KB; 2803 2804 rbrp->block_size = hxgep->rx_default_block_size; 2805 2806 if (!hxgep->param_arr[param_accept_jumbo].value) { 2807 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 2808 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 2809 rbrp->hpi_pkt_buf_size2 = SIZE_2KB; 2810 } else { 2811 rbrp->hpi_pkt_buf_size2 = SIZE_4KB; 2812 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 2813 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 2814 } 2815 2816 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2817 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2818 "actual rbr max %d rbb_max %d nmsgs %d " 2819 "rbrp->block_size %d default_block_size %d " 2820 "(config hxge_rbr_size %d hxge_rbr_spare_size %d)", 2821 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 2822 rbrp->block_size, hxgep->rx_default_block_size, 2823 hxge_rbr_size, hxge_rbr_spare_size)); 2824 2825 /* 2826 * Map in buffers from the buffer pool. 2827 * Note that num_blocks is the num_chunks. For Sparc, there is likely 2828 * only one chunk. For x86, there will be many chunks. 2829 * Loop over chunks. 
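 * The running <index> below assigns each block a unique slot in
 * rx_msg_ring[], and ring_info->buffer[i].start_index records where
 * chunk i's blocks begin within that flat array.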
2830 */ 2831 index = 0; 2832 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 2833 bsize = dma_bufp->block_size; 2834 nblocks = dma_bufp->nblocks; 2835 #if defined(__i386) 2836 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 2837 #else 2838 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 2839 #endif 2840 ring_info->buffer[i].buf_index = i; 2841 ring_info->buffer[i].buf_size = dma_bufp->alength; 2842 ring_info->buffer[i].start_index = index; 2843 #if defined(__i386) 2844 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 2845 #else 2846 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 2847 #endif 2848 2849 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2850 " hxge_map_rxdma_channel_buf_ring: map channel %d " 2851 "chunk %d nblocks %d chunk_size %x block_size 0x%x " 2852 "dma_bufp $%p dvma_addr $%p", channel, i, 2853 dma_bufp->nblocks, 2854 ring_info->buffer[i].buf_size, bsize, dma_bufp, 2855 ring_info->buffer[i].dvma_addr)); 2856 2857 /* loop over blocks within a chunk */ 2858 for (j = 0; j < nblocks; j++) { 2859 if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO, 2860 dma_bufp)) == NULL) { 2861 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2862 "allocb failed (index %d i %d j %d)", 2863 index, i, j)); 2864 goto hxge_map_rxdma_channel_buf_ring_fail1; 2865 } 2866 rx_msg_ring[index] = rx_msg_p; 2867 rx_msg_p->block_index = index; 2868 rx_msg_p->shifted_addr = (uint32_t) 2869 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2870 RBR_BKADDR_SHIFT)); 2871 /* 2872 * Too much output 2873 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2874 * "index %d j %d rx_msg_p $%p mblk %p", 2875 * index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 2876 */ 2877 mblk_p = rx_msg_p->rx_mblk_p; 2878 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 2879 2880 rbrp->rbr_ref_cnt++; 2881 index++; 2882 rx_msg_p->buf_dma.dma_channel = channel; 2883 } 2884 } 2885 if (i < rbrp->num_blocks) { 2886 goto hxge_map_rxdma_channel_buf_ring_fail1; 2887 } 2888 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2889 "hxge_map_rxdma_channel_buf_ring: done buf init " 2890 "channel %d msg block entries %d", channel, index)); 2891 ring_info->block_size_mask = bsize - 1; 2892 rbrp->rx_msg_ring = rx_msg_ring; 2893 rbrp->dma_bufp = dma_buf_p; 2894 rbrp->ring_info = ring_info; 2895 2896 status = hxge_rxbuf_index_info_init(hxgep, rbrp); 2897 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: " 2898 "channel %d done buf info init", channel)); 2899 2900 /* 2901 * Finally, permit hxge_freeb() to call hxge_post_page(). 
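 * Moving rbr_state to RBR_POSTING marks the ring (and its post_lock)
 * as safe for the posting path to use; the unmap path moves the state
 * away from POSTING again before the mutex is destroyed.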
2902 */ 2903 rbrp->rbr_state = RBR_POSTING; 2904 2905 *rbr_p = rbrp; 2906 2907 goto hxge_map_rxdma_channel_buf_ring_exit; 2908 2909 hxge_map_rxdma_channel_buf_ring_fail1: 2910 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2911 " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 2912 channel, status)); 2913 2914 index--; 2915 for (; index >= 0; index--) { 2916 rx_msg_p = rx_msg_ring[index]; 2917 if (rx_msg_p != NULL) { 2918 freeb(rx_msg_p->rx_mblk_p); 2919 rx_msg_ring[index] = NULL; 2920 } 2921 } 2922 2923 hxge_map_rxdma_channel_buf_ring_fail: 2924 MUTEX_DESTROY(&rbrp->post_lock); 2925 MUTEX_DESTROY(&rbrp->lock); 2926 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2927 KMEM_FREE(rx_msg_ring, size); 2928 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 2929 2930 status = HXGE_ERROR; 2931 2932 hxge_map_rxdma_channel_buf_ring_exit: 2933 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2934 "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 2935 2936 return (status); 2937 } 2938 2939 /*ARGSUSED*/ 2940 static void 2941 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 2942 p_rx_rbr_ring_t rbr_p) 2943 { 2944 p_rx_msg_t *rx_msg_ring; 2945 p_rx_msg_t rx_msg_p; 2946 rxring_info_t *ring_info; 2947 int i; 2948 uint32_t size; 2949 2950 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2951 "==> hxge_unmap_rxdma_channel_buf_ring")); 2952 if (rbr_p == NULL) { 2953 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2954 "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 2955 return; 2956 } 2957 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2958 "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc)); 2959 2960 rx_msg_ring = rbr_p->rx_msg_ring; 2961 ring_info = rbr_p->ring_info; 2962 2963 if (rx_msg_ring == NULL || ring_info == NULL) { 2964 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2965 "<== hxge_unmap_rxdma_channel_buf_ring: " 2966 "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info)); 2967 return; 2968 } 2969 2970 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 2971 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2972 " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 2973 "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks, 2974 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 2975 2976 for (i = 0; i < rbr_p->tnblocks; i++) { 2977 rx_msg_p = rx_msg_ring[i]; 2978 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2979 " hxge_unmap_rxdma_channel_buf_ring: " 2980 "rx_msg_p $%p", rx_msg_p)); 2981 if (rx_msg_p != NULL) { 2982 freeb(rx_msg_p->rx_mblk_p); 2983 rx_msg_ring[i] = NULL; 2984 } 2985 } 2986 2987 /* 2988 * We no longer may use the mutex <post_lock>. By setting 2989 * <rbr_state> to anything but POSTING, we prevent 2990 * hxge_post_page() from accessing a dead mutex. 2991 */ 2992 rbr_p->rbr_state = RBR_UNMAPPING; 2993 MUTEX_DESTROY(&rbr_p->post_lock); 2994 2995 MUTEX_DESTROY(&rbr_p->lock); 2996 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2997 KMEM_FREE(rx_msg_ring, size); 2998 2999 if (rbr_p->rbr_ref_cnt == 0) { 3000 /* This is the normal state of affairs. */ 3001 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3002 } else { 3003 /* 3004 * Some of our buffers are still being used. 3005 * Therefore, tell hxge_freeb() this ring is 3006 * unmapped, so it may free <rbr_p> for us. 3007 */ 3008 rbr_p->rbr_state = RBR_UNMAPPED; 3009 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3010 "unmap_rxdma_buf_ring: %d %s outstanding.", 3011 rbr_p->rbr_ref_cnt, 3012 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 3013 } 3014 3015 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3016 "<== hxge_unmap_rxdma_channel_buf_ring")); 3017 } 3018 3019 static hxge_status_t 3020 hxge_rxdma_hw_start_common(p_hxge_t hxgep) 3021 { 3022 hxge_status_t status = HXGE_OK; 3023 3024 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 3025 3026 /* 3027 * Load the sharable parameters by writing to the function zero control 3028 * registers. These FZC registers should be initialized only once for 3029 * the entire chip. 3030 */ 3031 (void) hxge_init_fzc_rx_common(hxgep); 3032 3033 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 3034 3035 return (status); 3036 } 3037 3038 static hxge_status_t 3039 hxge_rxdma_hw_start(p_hxge_t hxgep) 3040 { 3041 int i, ndmas; 3042 uint16_t channel; 3043 p_rx_rbr_rings_t rx_rbr_rings; 3044 p_rx_rbr_ring_t *rbr_rings; 3045 p_rx_rcr_rings_t rx_rcr_rings; 3046 p_rx_rcr_ring_t *rcr_rings; 3047 p_rx_mbox_areas_t rx_mbox_areas_p; 3048 p_rx_mbox_t *rx_mbox_p; 3049 hxge_status_t status = HXGE_OK; 3050 3051 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start")); 3052 3053 rx_rbr_rings = hxgep->rx_rbr_rings; 3054 rx_rcr_rings = hxgep->rx_rcr_rings; 3055 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3056 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3057 "<== hxge_rxdma_hw_start: NULL ring pointers")); 3058 return (HXGE_ERROR); 3059 } 3060 3061 ndmas = rx_rbr_rings->ndmas; 3062 if (ndmas == 0) { 3063 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3064 "<== hxge_rxdma_hw_start: no dma channel allocated")); 3065 return (HXGE_ERROR); 3066 } 3067 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3068 "==> hxge_rxdma_hw_start (ndmas %d)", ndmas)); 3069 3070 /* 3071 * Scrub the RDC Rx DMA Prefetch Buffer Command. 3072 */ 3073 for (i = 0; i < 128; i++) { 3074 HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i); 3075 } 3076 3077 /* 3078 * Scrub Rx DMA Shadow Tail Command. 3079 */ 3080 for (i = 0; i < 64; i++) { 3081 HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i); 3082 } 3083 3084 /* 3085 * Scrub Rx DMA Control Fifo Command. 3086 */ 3087 for (i = 0; i < 512; i++) { 3088 HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i); 3089 } 3090 3091 /* 3092 * Scrub Rx DMA Data Fifo Command. 3093 */ 3094 for (i = 0; i < 1536; i++) { 3095 HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i); 3096 } 3097 3098 /* 3099 * Reset the FIFO Error Stat. 
3100 */ 3101 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF); 3102 3103 /* Set the error mask to receive interrupts */ 3104 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3105 3106 rbr_rings = rx_rbr_rings->rbr_rings; 3107 rcr_rings = rx_rcr_rings->rcr_rings; 3108 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 3109 if (rx_mbox_areas_p) { 3110 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3111 } 3112 3113 for (i = 0; i < ndmas; i++) { 3114 channel = rbr_rings[i]->rdc; 3115 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3116 "==> hxge_rxdma_hw_start (ndmas %d) channel %d", 3117 ndmas, channel)); 3118 status = hxge_rxdma_start_channel(hxgep, channel, 3119 (p_rx_rbr_ring_t)rbr_rings[i], 3120 (p_rx_rcr_ring_t)rcr_rings[i], 3121 (p_rx_mbox_t)rx_mbox_p[i], rbr_rings[i]->rbb_max); 3122 if (status != HXGE_OK) { 3123 goto hxge_rxdma_hw_start_fail1; 3124 } 3125 } 3126 3127 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: " 3128 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3129 rx_rbr_rings, rx_rcr_rings)); 3130 goto hxge_rxdma_hw_start_exit; 3131 3132 hxge_rxdma_hw_start_fail1: 3133 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3134 "==> hxge_rxdma_hw_start: disable " 3135 "(status 0x%x channel %d i %d)", status, channel, i)); 3136 for (; i >= 0; i--) { 3137 channel = rbr_rings[i]->rdc; 3138 (void) hxge_rxdma_stop_channel(hxgep, channel); 3139 } 3140 3141 hxge_rxdma_hw_start_exit: 3142 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3143 "==> hxge_rxdma_hw_start: (status 0x%x)", status)); 3144 return (status); 3145 } 3146 3147 static void 3148 hxge_rxdma_hw_stop(p_hxge_t hxgep) 3149 { 3150 int i, ndmas; 3151 uint16_t channel; 3152 p_rx_rbr_rings_t rx_rbr_rings; 3153 p_rx_rbr_ring_t *rbr_rings; 3154 p_rx_rcr_rings_t rx_rcr_rings; 3155 3156 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop")); 3157 3158 rx_rbr_rings = hxgep->rx_rbr_rings; 3159 rx_rcr_rings = hxgep->rx_rcr_rings; 3160 3161 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3162 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3163 "<== hxge_rxdma_hw_stop: NULL ring pointers")); 3164 return; 3165 } 3166 3167 ndmas = rx_rbr_rings->ndmas; 3168 if (!ndmas) { 3169 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3170 "<== hxge_rxdma_hw_stop: no dma channel allocated")); 3171 return; 3172 } 3173 3174 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3175 "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3176 3177 rbr_rings = rx_rbr_rings->rbr_rings; 3178 for (i = 0; i < ndmas; i++) { 3179 channel = rbr_rings[i]->rdc; 3180 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3181 "==> hxge_rxdma_hw_stop (ndmas %d) channel %d", 3182 ndmas, channel)); 3183 (void) hxge_rxdma_stop_channel(hxgep, channel); 3184 } 3185 3186 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: " 3187 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3188 rx_rbr_rings, rx_rcr_rings)); 3189 3190 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop")); 3191 } 3192 3193 static hxge_status_t 3194 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 3195 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 3196 int n_init_kick) 3197 { 3198 hpi_handle_t handle; 3199 hpi_status_t rs = HPI_SUCCESS; 3200 rdc_stat_t cs; 3201 rdc_int_mask_t ent_mask; 3202 hxge_status_t status = HXGE_OK; 3203 3204 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel")); 3205 3206 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3207 3208 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: " 3209 "hpi handle addr $%p acc $%p", 3210 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3211 3212 /* Reset RXDMA channel */ 3213 rs = 
hpi_rxdma_cfg_rdc_reset(handle, channel); 3214 if (rs != HPI_SUCCESS) { 3215 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3216 "==> hxge_rxdma_start_channel: " 3217 "reset rxdma failed (0x%08x channel %d)", 3218 status, channel)); 3219 return (HXGE_ERROR | rs); 3220 } 3221 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3222 "==> hxge_rxdma_start_channel: reset done: channel %d", channel)); 3223 3224 /* 3225 * Initialize the RXDMA channel specific FZC control configurations. 3226 * These FZC registers are pertaining to each RX channel (logical 3227 * pages). 3228 */ 3229 status = hxge_init_fzc_rxdma_channel(hxgep, 3230 channel, rbr_p, rcr_p, mbox_p); 3231 if (status != HXGE_OK) { 3232 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3233 "==> hxge_rxdma_start_channel: " 3234 "init fzc rxdma failed (0x%08x channel %d)", 3235 status, channel)); 3236 return (status); 3237 } 3238 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3239 "==> hxge_rxdma_start_channel: fzc done")); 3240 3241 /* 3242 * Zero out the shadow and prefetch ram. 3243 */ 3244 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3245 "==> hxge_rxdma_start_channel: ram done")); 3246 3247 /* Set up the interrupt event masks. */ 3248 ent_mask.value = 0; 3249 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3250 if (rs != HPI_SUCCESS) { 3251 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3252 "==> hxge_rxdma_start_channel: " 3253 "init rxdma event masks failed (0x%08x channel %d)", 3254 status, channel)); 3255 return (HXGE_ERROR | rs); 3256 } 3257 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3258 "event done: channel %d (mask 0x%016llx)", 3259 channel, ent_mask.value)); 3260 3261 /* 3262 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA 3263 * channels and enable each DMA channel. 3264 */ 3265 status = hxge_enable_rxdma_channel(hxgep, 3266 channel, rbr_p, rcr_p, mbox_p, n_init_kick); 3267 if (status != HXGE_OK) { 3268 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3269 " hxge_rxdma_start_channel: " 3270 " init enable rxdma failed (0x%08x channel %d)", 3271 status, channel)); 3272 return (status); 3273 } 3274 3275 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3276 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3277 3278 /* 3279 * Initialize the receive DMA control and status register 3280 * Note that rdc_stat HAS to be set after RBR and RCR rings are set 3281 */ 3282 cs.value = 0; 3283 cs.bits.mex = 1; 3284 cs.bits.rcr_thres = 1; 3285 cs.bits.rcr_to = 1; 3286 cs.bits.rbr_empty = 1; 3287 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3288 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3289 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 3290 if (status != HXGE_OK) { 3291 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3292 "==> hxge_rxdma_start_channel: " 3293 "init rxdma control register failed (0x%08x channel %d", 3294 status, channel)); 3295 return (status); 3296 } 3297 3298 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3299 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3300 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3301 "==> hxge_rxdma_start_channel: enable done")); 3302 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel")); 3303 3304 return (HXGE_OK); 3305 } 3306 3307 static hxge_status_t 3308 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel) 3309 { 3310 hpi_handle_t handle; 3311 hpi_status_t rs = HPI_SUCCESS; 3312 rdc_stat_t cs; 3313 rdc_int_mask_t ent_mask; 3314 hxge_status_t status = HXGE_OK; 3315 3316 HXGE_DEBUG_MSG((hxgep, RX_CTL, 
"==> hxge_rxdma_stop_channel")); 3317 3318 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3319 3320 HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: " 3321 "hpi handle addr $%p acc $%p", 3322 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3323 3324 /* Reset RXDMA channel */ 3325 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3326 if (rs != HPI_SUCCESS) { 3327 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3328 " hxge_rxdma_stop_channel: " 3329 " reset rxdma failed (0x%08x channel %d)", 3330 rs, channel)); 3331 return (HXGE_ERROR | rs); 3332 } 3333 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3334 "==> hxge_rxdma_stop_channel: reset done")); 3335 3336 /* Set up the interrupt event masks. */ 3337 ent_mask.value = RDC_INT_MASK_ALL; 3338 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3339 if (rs != HPI_SUCCESS) { 3340 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3341 "==> hxge_rxdma_stop_channel: " 3342 "set rxdma event masks failed (0x%08x channel %d)", 3343 rs, channel)); 3344 return (HXGE_ERROR | rs); 3345 } 3346 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3347 "==> hxge_rxdma_stop_channel: event done")); 3348 3349 /* Initialize the receive DMA control and status register */ 3350 cs.value = 0; 3351 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3352 3353 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control " 3354 " to default (all 0s) 0x%08x", cs.value)); 3355 3356 if (status != HXGE_OK) { 3357 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3358 " hxge_rxdma_stop_channel: init rxdma" 3359 " control register failed (0x%08x channel %d", 3360 status, channel)); 3361 return (status); 3362 } 3363 3364 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3365 "==> hxge_rxdma_stop_channel: control done")); 3366 3367 /* disable dma channel */ 3368 status = hxge_disable_rxdma_channel(hxgep, channel); 3369 3370 if (status != HXGE_OK) { 3371 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3372 " hxge_rxdma_stop_channel: " 3373 " init enable rxdma failed (0x%08x channel %d)", 3374 status, channel)); 3375 return (status); 3376 } 3377 3378 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3379 "==> hxge_rxdma_stop_channel: disable done")); 3380 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel")); 3381 3382 return (HXGE_OK); 3383 } 3384 3385 hxge_status_t 3386 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep) 3387 { 3388 hpi_handle_t handle; 3389 p_hxge_rdc_sys_stats_t statsp; 3390 rdc_fifo_err_stat_t stat; 3391 hxge_status_t status = HXGE_OK; 3392 3393 handle = hxgep->hpi_handle; 3394 statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats; 3395 3396 /* Clear the int_dbg register in case it is an injected err */ 3397 HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0); 3398 3399 /* Get the error status and clear the register */ 3400 HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value); 3401 HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value); 3402 3403 if (stat.bits.rx_ctrl_fifo_sec) { 3404 statsp->ctrl_fifo_sec++; 3405 if (statsp->ctrl_fifo_sec == 1) 3406 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3407 "==> hxge_rxdma_handle_sys_errors: " 3408 "rx_ctrl_fifo_sec")); 3409 } 3410 3411 if (stat.bits.rx_ctrl_fifo_ded) { 3412 /* Global fatal error encountered */ 3413 statsp->ctrl_fifo_ded++; 3414 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3415 HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED); 3416 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3417 "==> hxge_rxdma_handle_sys_errors: " 3418 "fatal error: rx_ctrl_fifo_ded error")); 3419 } 3420 3421 if (stat.bits.rx_data_fifo_sec) { 3422 statsp->data_fifo_sec++; 3423 if (statsp->data_fifo_sec == 1) 3424 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 
3425 "==> hxge_rxdma_handle_sys_errors: " 3426 "rx_data_fifo_sec")); 3427 } 3428 3429 if (stat.bits.rx_data_fifo_ded) { 3430 /* Global fatal error encountered */ 3431 statsp->data_fifo_ded++; 3432 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3433 HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED); 3434 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3435 "==> hxge_rxdma_handle_sys_errors: " 3436 "fatal error: rx_data_fifo_ded error")); 3437 } 3438 3439 if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) { 3440 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3441 " hxge_rxdma_handle_sys_errors: fatal error\n")); 3442 status = hxge_rx_port_fatal_err_recover(hxgep); 3443 if (status == HXGE_OK) { 3444 FM_SERVICE_RESTORED(hxgep); 3445 } 3446 } 3447 3448 return (HXGE_OK); 3449 } 3450 3451 static hxge_status_t 3452 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel) 3453 { 3454 hpi_handle_t handle; 3455 hpi_status_t rs = HPI_SUCCESS; 3456 hxge_status_t status = HXGE_OK; 3457 p_rx_rbr_ring_t rbrp; 3458 p_rx_rcr_ring_t rcrp; 3459 p_rx_mbox_t mboxp; 3460 rdc_int_mask_t ent_mask; 3461 p_hxge_dma_common_t dmap; 3462 int ring_idx; 3463 p_rx_msg_t rx_msg_p; 3464 int i; 3465 uint32_t hxge_port_rcr_size; 3466 uint64_t tmp; 3467 int n_init_kick = 0; 3468 3469 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover")); 3470 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3471 "Recovering from RxDMAChannel#%d error...", channel)); 3472 3473 /* 3474 * Stop the dma channel waits for the stop done. If the stop done bit 3475 * is not set, then create an error. 3476 */ 3477 3478 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3479 3480 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop...")); 3481 3482 ring_idx = hxge_rxdma_get_ring_index(hxgep, channel); 3483 rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx]; 3484 rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx]; 3485 3486 MUTEX_ENTER(&rcrp->lock); 3487 MUTEX_ENTER(&rbrp->lock); 3488 3489 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel...")); 3490 3491 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 3492 if (rs != HPI_SUCCESS) { 3493 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3494 "hxge_disable_rxdma_channel:failed")); 3495 goto fail; 3496 } 3497 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt...")); 3498 3499 /* Disable interrupt */ 3500 ent_mask.value = RDC_INT_MASK_ALL; 3501 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3502 if (rs != HPI_SUCCESS) { 3503 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3504 "Set rxdma event masks failed (channel %d)", channel)); 3505 } 3506 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset...")); 3507 3508 /* Reset RXDMA channel */ 3509 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3510 if (rs != HPI_SUCCESS) { 3511 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3512 "Reset rxdma failed (channel %d)", channel)); 3513 goto fail; 3514 } 3515 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 3516 mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 3517 3518 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3519 rbrp->rbr_rd_index = 0; 3520 3521 rcrp->comp_rd_index = 0; 3522 rcrp->comp_wt_index = 0; 3523 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3524 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3525 #if defined(__i386) 3526 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3527 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3528 #else 3529 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3530 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3531 #endif 3532 3533 rcrp->rcr_desc_last_p = 
rcrp->rcr_desc_rd_head_p + 3534 (hxge_port_rcr_size - 1); 3535 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3536 (hxge_port_rcr_size - 1); 3537 3538 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 3539 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 3540 3541 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 3542 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3543 3544 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n", 3545 rbrp->rbr_max_size)); 3546 3547 /* Count the number of buffers owned by the hardware at this moment */ 3548 for (i = 0; i < rbrp->rbr_max_size; i++) { 3549 rx_msg_p = rbrp->rx_msg_ring[i]; 3550 if (rx_msg_p->ref_cnt == 1) { 3551 n_init_kick++; 3552 } 3553 } 3554 3555 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start...")); 3556 3557 /* 3558 * This is error recover! Some buffers are owned by the hardware and 3559 * the rest are owned by the apps. We should only kick in those 3560 * owned by the hardware initially. The apps will post theirs 3561 * eventually. 3562 */ 3563 status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp, 3564 n_init_kick); 3565 if (status != HXGE_OK) { 3566 goto fail; 3567 } 3568 3569 /* 3570 * The DMA channel may disable itself automatically. 3571 * The following is a work-around. 3572 */ 3573 HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp); 3574 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 3575 if (rs != HPI_SUCCESS) { 3576 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3577 "hpi_rxdma_cfg_rdc_enable (channel %d)", channel)); 3578 } 3579 3580 MUTEX_EXIT(&rbrp->lock); 3581 MUTEX_EXIT(&rcrp->lock); 3582 3583 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3584 "Recovery Successful, RxDMAChannel#%d Restored", channel)); 3585 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover")); 3586 3587 return (HXGE_OK); 3588 3589 fail: 3590 MUTEX_EXIT(&rbrp->lock); 3591 MUTEX_EXIT(&rcrp->lock); 3592 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3593 3594 return (HXGE_ERROR | rs); 3595 } 3596 3597 static hxge_status_t 3598 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep) 3599 { 3600 hxge_status_t status = HXGE_OK; 3601 p_hxge_dma_common_t *dma_buf_p; 3602 uint16_t channel; 3603 int ndmas; 3604 int i; 3605 block_reset_t reset_reg; 3606 p_rx_rcr_ring_t rcrp; 3607 p_rx_rbr_ring_t rbrp; 3608 3609 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover")); 3610 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ...")); 3611 3612 /* Reset RDC block from PEU for this fatal error */ 3613 reset_reg.value = 0; 3614 reset_reg.bits.rdc_rst = 1; 3615 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 3616 3617 /* Disable RxMAC */ 3618 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n")); 3619 if (hxge_rx_vmac_disable(hxgep) != HXGE_OK) 3620 goto fail; 3621 3622 HXGE_DELAY(1000); 3623 3624 /* Restore any common settings after PEU reset */ 3625 if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK) 3626 goto fail; 3627 3628 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels...")); 3629 3630 ndmas = hxgep->rx_buf_pool_p->ndmas; 3631 dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p; 3632 3633 for (i = 0; i < ndmas; i++) { 3634 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 3635 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel]; 3636 rbrp = rcrp->rx_rbr_p; 3637 3638 MUTEX_ENTER(&rbrp->post_lock); 3639 /* This function needs to be inside the post_lock */ 3640 if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) { 3641 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3642 "Could not recover 
channel %d", channel)); 3643 } 3644 MUTEX_EXIT(&rbrp->post_lock); 3645 } 3646 3647 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC...")); 3648 3649 /* Reset RxMAC */ 3650 if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) { 3651 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3652 "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC")); 3653 goto fail; 3654 } 3655 3656 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC...")); 3657 3658 /* Re-Initialize RxMAC */ 3659 if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) { 3660 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3661 "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC")); 3662 goto fail; 3663 } 3664 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC...")); 3665 3666 /* Re-enable RxMAC */ 3667 if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) { 3668 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3669 "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC")); 3670 goto fail; 3671 } 3672 3673 /* Reset the error mask since PEU reset cleared it */ 3674 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3675 3676 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3677 "Recovery Successful, RxPort Restored")); 3678 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover")); 3679 3680 return (HXGE_OK); 3681 fail: 3682 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3683 return (status); 3684 } 3685 3686 static void 3687 hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p) 3688 { 3689 hpi_status_t hpi_status; 3690 hxge_status_t status; 3691 int i; 3692 p_hxge_rx_ring_stats_t rdc_stats; 3693 3694 rdc_stats = &hxgep->statsp->rdc_stats[rx_rbr_p->rdc]; 3695 rdc_stats->rbr_empty_restore++; 3696 rx_rbr_p->rbr_is_empty = B_FALSE; 3697 3698 /* 3699 * Complete the processing for the RBR Empty by: 3700 * 0) kicking back HXGE_RBR_EMPTY_THRESHOLD 3701 * packets. 3702 * 1) Disable the RX vmac. 3703 * 2) Re-enable the affected DMA channel. 3704 * 3) Re-enable the RX vmac. 3705 */ 3706 3707 /* 3708 * Disable the RX VMAC, but setting the framelength 3709 * to 0, since there is a hardware bug when disabling 3710 * the vmac. 3711 */ 3712 MUTEX_ENTER(hxgep->genlock); 3713 (void) hpi_vmac_rx_set_framesize( 3714 HXGE_DEV_HPI_HANDLE(hxgep), (uint16_t)0); 3715 3716 hpi_status = hpi_rxdma_cfg_rdc_enable( 3717 HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc); 3718 if (hpi_status != HPI_SUCCESS) { 3719 rdc_stats->rbr_empty_fail++; 3720 3721 /* Assume we are already inside the post_lock */ 3722 status = hxge_rxdma_fatal_err_recover(hxgep, rx_rbr_p->rdc); 3723 if (status != HXGE_OK) { 3724 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3725 "hxge(%d): channel(%d) is empty.", 3726 hxgep->instance, rx_rbr_p->rdc)); 3727 } 3728 } 3729 3730 for (i = 0; i < 1024; i++) { 3731 uint64_t value; 3732 RXDMA_REG_READ64(HXGE_DEV_HPI_HANDLE(hxgep), 3733 RDC_STAT, i & 3, &value); 3734 } 3735 3736 /* 3737 * Re-enable the RX VMAC. 3738 */ 3739 (void) hpi_vmac_rx_set_framesize(HXGE_DEV_HPI_HANDLE(hxgep), 3740 (uint16_t)hxgep->vmac.maxframesize); 3741 MUTEX_EXIT(hxgep->genlock); 3742 } 3743