/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_rxdma.h>
#include <hpi.h>
#include <hpi_vir.h>

/*
 * Number of blocks to accumulate before re-enabling the DMA
 * when we get RBR empty.
 */
#define	HXGE_RBR_EMPTY_THRESHOLD	64

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t hxge_rbr_size;
extern uint32_t hxge_rcr_size;
extern uint32_t hxge_rbr_spare_size;
extern uint32_t hxge_mblks_pending;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * hxge_rx_threshold_hi: copy all buffers.
 * hxge_rx_buf_size_type: receive buffer block size type.
 * hxge_rx_threshold_lo: copy only up to the tunable block size type.
 */
extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;

/*
 * Static local functions.
 */
static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
static void hxge_unmap_rxdma(p_hxge_t hxgep);
static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
    uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
    p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
    p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
    uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p,
    p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
    p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
    p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
    uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
    p_rx_rbr_ring_t rbr_p);
static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
    int n_init_kick);
static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
    p_rx_rcr_ring_t rcr_p, rdc_stat_t cs, int bytes_to_read);
static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcr_p,
    p_rcr_entry_t rcr_desc_rd_head_p, uint32_t num_rcrs);
static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
    p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
    mblk_t **mp, mblk_t **mp_cont, uint32_t *invalid_rcr_entry);
static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
    uint16_t channel);
static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
static void hxge_freeb(p_rx_msg_t);
static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
    p_hxge_ldv_t ldvp, rdc_stat_t cs);
static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
    p_rx_rbr_ring_t rx_dmap);
static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
    uint16_t channel);
static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);
static void hxge_rbr_empty_restore(p_hxge_t hxgep,
    p_rx_rbr_ring_t rx_rbr_p);

hxge_status_t
hxge_init_rxdma_channels(p_hxge_t hxgep)
{
	hxge_status_t	status = HXGE_OK;
	block_reset_t	reset_reg;
	int		i;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));

	for (i = 0; i < HXGE_MAX_RDCS; i++)
		hxgep->rdc_first_intr[i] = B_TRUE;

	/* Reset RDC block from PEU to clear any previous state */
	reset_reg.value = 0;
	reset_reg.bits.rdc_rst = 1;
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
	HXGE_DELAY(1000);

	status = hxge_map_rxdma(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_init_rxdma: status 0x%x", status));
		return (status);
	}

	status = hxge_rxdma_hw_start_common(hxgep);
	if (status != HXGE_OK) {
		hxge_unmap_rxdma(hxgep);
		return (status);
	}

	status = hxge_rxdma_hw_start(hxgep);
	if (status != HXGE_OK) {
		hxge_unmap_rxdma(hxgep);
	}

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "<== hxge_init_rxdma_channels: status 0x%x", status));
	return (status);
}

void
hxge_uninit_rxdma_channels(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));

	hxge_rxdma_hw_stop(hxgep);
	hxge_unmap_rxdma(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uninit_rxdma_channels"));
}

hxge_status_t
hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
    rdc_stat_t *cs_p)
{
	hpi_handle_t	handle;
	hpi_status_t	rs = HPI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_init_rxdma_channel_cntl_stat"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);

	if (rs != HPI_SUCCESS) {
		status = HXGE_ERROR | rs;
	}
	return (status);
}

hxge_status_t
hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
    int n_init_kick)
{
	hpi_handle_t	handle;
	rdc_desc_cfg_t	rdc_desc;
	rdc_rcr_cfg_b_t	*cfgb_p;
	hpi_status_t	rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * Use configuration data composed at init time. Write to hardware the
	 * receive ring configurations.
	 */
	rdc_desc.mbox_enable = 1;
	rdc_desc.mbox_addr = mbox_p->mbox_addr;
	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
	    mbox_p->mbox_addr, rdc_desc.mbox_addr));

	rdc_desc.rbr_len = rbr_p->rbb_max;
	rdc_desc.rbr_addr = rbr_p->rbr_addr;

	switch (hxgep->rx_bksize_code) {
	case RBR_BKSIZE_4K:
		rdc_desc.page_size = SIZE_4KB;
		break;
	case RBR_BKSIZE_8K:
		rdc_desc.page_size = SIZE_8KB;
		break;
	}

	rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
	rdc_desc.valid0 = 1;

	rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
	rdc_desc.valid1 = 1;

	rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
	rdc_desc.valid2 = 1;

	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
	rdc_desc.offset = rcr_p->sw_priv_hdr_len;

	rdc_desc.rcr_len = rcr_p->comp_size;
	rdc_desc.rcr_addr = rcr_p->rcr_addr;

	cfgb_p = &(rcr_p->rcr_cfgb);
	rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
	rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
	rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
	    "rbr_len qlen %d pagesize code %d rcr_len %d",
	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
	    "size 0 %d size 1 %d size 2 %d",
	    rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
	    rbr_p->hpi_pkt_buf_size2));

	rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/*
	 * Enable the timeout and threshold.
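	 * The threshold is the number of completed packets that will raise
	 * an interrupt, and the timeout raises one when packets have been
	 * waiting in the RCR longer than the programmed interval; together
	 * they bound the interrupt rate under load and the latency when
	 * traffic is light.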
250 */ 251 rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 252 rdc_desc.rcr_threshold); 253 if (rs != HPI_SUCCESS) { 254 return (HXGE_ERROR | rs); 255 } 256 257 rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 258 rdc_desc.rcr_timeout); 259 if (rs != HPI_SUCCESS) { 260 return (HXGE_ERROR | rs); 261 } 262 263 /* Kick the DMA engine */ 264 hpi_rxdma_rdc_rbr_kick(handle, channel, n_init_kick); 265 266 /* Clear the rbr empty bit */ 267 (void) hpi_rxdma_channel_rbr_empty_clear(handle, channel); 268 269 /* 270 * Enable the DMA 271 */ 272 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 273 if (rs != HPI_SUCCESS) { 274 return (HXGE_ERROR | rs); 275 } 276 277 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel")); 278 279 return (HXGE_OK); 280 } 281 282 static hxge_status_t 283 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel) 284 { 285 hpi_handle_t handle; 286 hpi_status_t rs = HPI_SUCCESS; 287 288 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel")); 289 290 handle = HXGE_DEV_HPI_HANDLE(hxgep); 291 292 /* disable the DMA */ 293 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 294 if (rs != HPI_SUCCESS) { 295 HXGE_DEBUG_MSG((hxgep, RX_CTL, 296 "<== hxge_disable_rxdma_channel:failed (0x%x)", rs)); 297 return (HXGE_ERROR | rs); 298 } 299 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel")); 300 return (HXGE_OK); 301 } 302 303 hxge_status_t 304 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel) 305 { 306 hpi_handle_t handle; 307 hxge_status_t status = HXGE_OK; 308 309 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 310 "==> hxge_rxdma_channel_rcrflush")); 311 312 handle = HXGE_DEV_HPI_HANDLE(hxgep); 313 hpi_rxdma_rdc_rcr_flush(handle, channel); 314 315 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 316 "<== hxge_rxdma_channel_rcrflush")); 317 return (status); 318 319 } 320 321 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 322 323 #define TO_LEFT -1 324 #define TO_RIGHT 1 325 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 326 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 327 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 328 #define NO_HINT 0xffffffff 329 330 /*ARGSUSED*/ 331 hxge_status_t 332 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p, 333 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 334 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 335 { 336 int bufsize; 337 uint64_t pktbuf_pp; 338 uint64_t dvma_addr; 339 rxring_info_t *ring_info; 340 int base_side, end_side; 341 int r_index, l_index, anchor_index; 342 int found, search_done; 343 uint32_t offset, chunk_size, block_size, page_size_mask; 344 uint32_t chunk_index, block_index, total_index; 345 int max_iterations, iteration; 346 rxbuf_index_info_t *bufinfo; 347 348 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp")); 349 350 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 351 "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 352 pkt_buf_addr_pp, pktbufsz_type)); 353 354 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 355 356 switch (pktbufsz_type) { 357 case 0: 358 bufsize = rbr_p->pkt_buf_size0; 359 break; 360 case 1: 361 bufsize = rbr_p->pkt_buf_size1; 362 break; 363 case 2: 364 bufsize = rbr_p->pkt_buf_size2; 365 break; 366 case RCR_SINGLE_BLOCK: 367 bufsize = 0; 368 anchor_index = 0; 369 break; 370 default: 371 return (HXGE_ERROR); 372 } 373 374 if (rbr_p->num_blocks == 1) { 375 anchor_index = 0; 376 ring_info = rbr_p->ring_info; 377 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 378 379 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 380 "==> hxge_rxbuf_pp_to_vp: (found, 1 block) " 381 "buf_pp $%p btype %d anchor_index %d 
bufinfo $%p",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));

		goto found_index;
	}

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
	    pkt_buf_addr_pp, pktbufsz_type, anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;

	/*
	 * First check whether this block has been seen recently. This is
	 * indicated by a hint which is initialized when the first buffer of
	 * the block is seen. The hint is reset when the last buffer of the
	 * block has been processed. As three block sizes are supported, three
	 * hints are kept. The idea behind the hints is that once the hardware
	 * uses a block for a buffer of that size, it will use it exclusively
	 * for that size until the block is exhausted. It is assumed that a
	 * single block is in use for a given buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, reset the hint for this size.
			 */
			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));

		/*
		 * This is the first buffer of a block of this size, so the
		 * whole information array has to be searched. The search uses
		 * a binary search and assumes that the array is already
		 * sorted in increasing order: info[0] < info[1] < info[2] ....
< info[n-1] where n is the size of 437 * the information array 438 */ 439 r_index = rbr_p->num_blocks - 1; 440 l_index = 0; 441 search_done = B_FALSE; 442 anchor_index = MID_INDEX(r_index, l_index); 443 while (search_done == B_FALSE) { 444 if ((r_index == l_index) || 445 (iteration >= max_iterations)) 446 search_done = B_TRUE; 447 448 end_side = TO_RIGHT; /* to the right */ 449 base_side = TO_LEFT; /* to the left */ 450 /* read the DVMA address information and sort it */ 451 dvma_addr = bufinfo[anchor_index].dvma_addr; 452 chunk_size = bufinfo[anchor_index].buf_size; 453 454 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 455 "==> hxge_rxbuf_pp_to_vp: (searching)" 456 "buf_pp $%p btype %d " 457 "anchor_index %d chunk_size %d dvmaaddr $%p", 458 pkt_buf_addr_pp, pktbufsz_type, anchor_index, 459 chunk_size, dvma_addr)); 460 461 if (pktbuf_pp >= dvma_addr) 462 base_side = TO_RIGHT; /* to the right */ 463 if (pktbuf_pp < (dvma_addr + chunk_size)) 464 end_side = TO_LEFT; /* to the left */ 465 466 switch (base_side + end_side) { 467 case IN_MIDDLE: 468 /* found */ 469 found = B_TRUE; 470 search_done = B_TRUE; 471 if ((pktbuf_pp + bufsize) < 472 (dvma_addr + chunk_size)) 473 ring_info->hint[pktbufsz_type] = 474 bufinfo[anchor_index].buf_index; 475 break; 476 case BOTH_RIGHT: 477 /* not found: go to the right */ 478 l_index = anchor_index + 1; 479 anchor_index = MID_INDEX(r_index, l_index); 480 break; 481 482 case BOTH_LEFT: 483 /* not found: go to the left */ 484 r_index = anchor_index - 1; 485 anchor_index = MID_INDEX(r_index, l_index); 486 break; 487 default: /* should not come here */ 488 return (HXGE_ERROR); 489 } 490 iteration++; 491 } 492 493 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 494 "==> hxge_rxbuf_pp_to_vp: (search done)" 495 "buf_pp $%p btype %d anchor_index %d", 496 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 497 } 498 499 if (found == B_FALSE) { 500 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 501 "==> hxge_rxbuf_pp_to_vp: (search failed)" 502 "buf_pp $%p btype %d anchor_index %d", 503 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 504 return (HXGE_ERROR); 505 } 506 507 found_index: 508 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 509 "==> hxge_rxbuf_pp_to_vp: (FOUND1)" 510 "buf_pp $%p btype %d bufsize %d anchor_index %d", 511 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index)); 512 513 /* index of the first block in this chunk */ 514 chunk_index = bufinfo[anchor_index].start_index; 515 dvma_addr = bufinfo[anchor_index].dvma_addr; 516 page_size_mask = ring_info->block_size_mask; 517 518 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 519 "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 520 "buf_pp $%p btype %d bufsize %d " 521 "anchor_index %d chunk_index %d dvma $%p", 522 pkt_buf_addr_pp, pktbufsz_type, bufsize, 523 anchor_index, chunk_index, dvma_addr)); 524 525 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 526 block_size = rbr_p->block_size; /* System block(page) size */ 527 528 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 529 "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 530 "buf_pp $%p btype %d bufsize %d " 531 "anchor_index %d chunk_index %d dvma $%p " 532 "offset %d block_size %d", 533 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index, 534 chunk_index, dvma_addr, offset, block_size)); 535 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index")); 536 537 block_index = (offset / block_size); /* index within chunk */ 538 total_index = chunk_index + block_index; 539 540 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 541 "==> hxge_rxbuf_pp_to_vp: " 542 "total_index %d dvma_addr $%p " 543 "offset %d block_size %d " 544 "block_index %d ", 545 
total_index, dvma_addr, offset, block_size, block_index)); 546 547 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 548 offset); 549 550 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 551 "==> hxge_rxbuf_pp_to_vp: " 552 "total_index %d dvma_addr $%p " 553 "offset %d block_size %d " 554 "block_index %d " 555 "*pkt_buf_addr_p $%p", 556 total_index, dvma_addr, offset, block_size, 557 block_index, *pkt_buf_addr_p)); 558 559 *msg_index = total_index; 560 *bufoffset = (offset & page_size_mask); 561 562 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 563 "==> hxge_rxbuf_pp_to_vp: get msg index: " 564 "msg_index %d bufoffset_index %d", 565 *msg_index, *bufoffset)); 566 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp")); 567 568 return (HXGE_OK); 569 } 570 571 572 /* 573 * used by quick sort (qsort) function 574 * to perform comparison 575 */ 576 static int 577 hxge_sort_compare(const void *p1, const void *p2) 578 { 579 580 rxbuf_index_info_t *a, *b; 581 582 a = (rxbuf_index_info_t *)p1; 583 b = (rxbuf_index_info_t *)p2; 584 585 if (a->dvma_addr > b->dvma_addr) 586 return (1); 587 if (a->dvma_addr < b->dvma_addr) 588 return (-1); 589 return (0); 590 } 591 592 /* 593 * Grabbed this sort implementation from common/syscall/avl.c 594 * 595 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 596 * v = Ptr to array/vector of objs 597 * n = # objs in the array 598 * s = size of each obj (must be multiples of a word size) 599 * f = ptr to function to compare two objs 600 * returns (-1 = less than, 0 = equal, 1 = greater than 601 */ 602 void 603 hxge_ksort(caddr_t v, int n, int s, int (*f) ()) 604 { 605 int g, i, j, ii; 606 unsigned int *p1, *p2; 607 unsigned int tmp; 608 609 /* No work to do */ 610 if (v == NULL || n <= 1) 611 return; 612 /* Sanity check on arguments */ 613 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 614 ASSERT(s > 0); 615 616 for (g = n / 2; g > 0; g /= 2) { 617 for (i = g; i < n; i++) { 618 for (j = i - g; j >= 0 && 619 (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) { 620 p1 = (unsigned *)(v + j * s); 621 p2 = (unsigned *)(v + (j + g) * s); 622 for (ii = 0; ii < s / 4; ii++) { 623 tmp = *p1; 624 *p1++ = *p2; 625 *p2++ = tmp; 626 } 627 } 628 } 629 } 630 } 631 632 /* 633 * Initialize data structures required for rxdma 634 * buffer dvma->vmem address lookup 635 */ 636 /*ARGSUSED*/ 637 static hxge_status_t 638 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp) 639 { 640 int index; 641 rxring_info_t *ring_info; 642 int max_iteration = 0, max_index = 0; 643 644 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init")); 645 646 ring_info = rbrp->ring_info; 647 ring_info->hint[0] = NO_HINT; 648 ring_info->hint[1] = NO_HINT; 649 ring_info->hint[2] = NO_HINT; 650 ring_info->hint[3] = NO_HINT; 651 max_index = rbrp->num_blocks; 652 653 /* read the DVMA address information and sort it */ 654 /* do init of the information array */ 655 656 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 657 " hxge_rxbuf_index_info_init Sort ptrs")); 658 659 /* sort the array */ 660 hxge_ksort((void *) ring_info->buffer, max_index, 661 sizeof (rxbuf_index_info_t), hxge_sort_compare); 662 663 for (index = 0; index < max_index; index++) { 664 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 665 " hxge_rxbuf_index_info_init: sorted chunk %d " 666 " ioaddr $%p kaddr $%p size %x", 667 index, ring_info->buffer[index].dvma_addr, 668 ring_info->buffer[index].kaddr, 669 ring_info->buffer[index].buf_size)); 670 } 671 672 max_iteration = 0; 673 while (max_index >= (1ULL << max_iteration)) 674 max_iteration++; 675 
ring_info->max_iterations = max_iteration + 1; 676 677 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 678 " hxge_rxbuf_index_info_init Find max iter %d", 679 ring_info->max_iterations)); 680 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init")); 681 682 return (HXGE_OK); 683 } 684 685 /*ARGSUSED*/ 686 void 687 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p) 688 { 689 #ifdef HXGE_DEBUG 690 691 uint32_t bptr; 692 uint64_t pp; 693 694 bptr = entry_p->bits.pkt_buf_addr; 695 696 HXGE_DEBUG_MSG((hxgep, RX_CTL, 697 "\trcr entry $%p " 698 "\trcr entry 0x%0llx " 699 "\trcr entry 0x%08x " 700 "\trcr entry 0x%08x " 701 "\tvalue 0x%0llx\n" 702 "\tmulti = %d\n" 703 "\tpkt_type = 0x%x\n" 704 "\terror = 0x%04x\n" 705 "\tl2_len = %d\n" 706 "\tpktbufsize = %d\n" 707 "\tpkt_buf_addr = $%p\n" 708 "\tpkt_buf_addr (<< 6) = $%p\n", 709 entry_p, 710 *(int64_t *)entry_p, 711 *(int32_t *)entry_p, 712 *(int32_t *)((char *)entry_p + 32), 713 entry_p->value, 714 entry_p->bits.multi, 715 entry_p->bits.pkt_type, 716 entry_p->bits.error, 717 entry_p->bits.l2_len, 718 entry_p->bits.pktbufsz, 719 bptr, 720 entry_p->bits.pkt_buf_addr_l)); 721 722 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 723 RCR_PKT_BUF_ADDR_SHIFT; 724 725 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 726 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 727 #endif 728 } 729 730 /*ARGSUSED*/ 731 void 732 hxge_rxdma_stop(p_hxge_t hxgep) 733 { 734 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop")); 735 736 MUTEX_ENTER(&hxgep->vmac_lock); 737 (void) hxge_rx_vmac_disable(hxgep); 738 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP); 739 MUTEX_EXIT(&hxgep->vmac_lock); 740 741 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop")); 742 } 743 744 void 745 hxge_rxdma_stop_reinit(p_hxge_t hxgep) 746 { 747 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit")); 748 749 (void) hxge_rxdma_stop(hxgep); 750 (void) hxge_uninit_rxdma_channels(hxgep); 751 (void) hxge_init_rxdma_channels(hxgep); 752 753 MUTEX_ENTER(&hxgep->vmac_lock); 754 (void) hxge_rx_vmac_enable(hxgep); 755 MUTEX_EXIT(&hxgep->vmac_lock); 756 757 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit")); 758 } 759 760 hxge_status_t 761 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable) 762 { 763 int i, ndmas; 764 uint16_t channel; 765 p_rx_rbr_rings_t rx_rbr_rings; 766 p_rx_rbr_ring_t *rbr_rings; 767 hpi_handle_t handle; 768 hpi_status_t rs = HPI_SUCCESS; 769 hxge_status_t status = HXGE_OK; 770 771 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 772 "==> hxge_rxdma_hw_mode: mode %d", enable)); 773 774 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 775 HXGE_DEBUG_MSG((hxgep, RX_CTL, 776 "<== hxge_rxdma_mode: not initialized")); 777 return (HXGE_ERROR); 778 } 779 780 rx_rbr_rings = hxgep->rx_rbr_rings; 781 if (rx_rbr_rings == NULL) { 782 HXGE_DEBUG_MSG((hxgep, RX_CTL, 783 "<== hxge_rxdma_mode: NULL ring pointer")); 784 return (HXGE_ERROR); 785 } 786 787 if (rx_rbr_rings->rbr_rings == NULL) { 788 HXGE_DEBUG_MSG((hxgep, RX_CTL, 789 "<== hxge_rxdma_mode: NULL rbr rings pointer")); 790 return (HXGE_ERROR); 791 } 792 793 ndmas = rx_rbr_rings->ndmas; 794 if (!ndmas) { 795 HXGE_DEBUG_MSG((hxgep, RX_CTL, 796 "<== hxge_rxdma_mode: no channel")); 797 return (HXGE_ERROR); 798 } 799 800 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 801 "==> hxge_rxdma_mode (ndmas %d)", ndmas)); 802 803 rbr_rings = rx_rbr_rings->rbr_rings; 804 805 handle = HXGE_DEV_HPI_HANDLE(hxgep); 806 807 for (i = 0; i < ndmas; i++) { 808 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 809 continue; 810 } 811 channel = 
rbr_rings[i]->rdc; 812 if (enable) { 813 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 814 "==> hxge_rxdma_hw_mode: channel %d (enable)", 815 channel)); 816 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 817 } else { 818 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 819 "==> hxge_rxdma_hw_mode: channel %d (disable)", 820 channel)); 821 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 822 } 823 } 824 825 status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs); 826 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 827 "<== hxge_rxdma_hw_mode: status 0x%x", status)); 828 829 return (status); 830 } 831 832 /* 833 * Static functions start here. 834 */ 835 static p_rx_msg_t 836 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p) 837 { 838 p_rx_msg_t hxge_mp = NULL; 839 p_hxge_dma_common_t dmamsg_p; 840 uchar_t *buffer; 841 842 hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 843 if (hxge_mp == NULL) { 844 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 845 "Allocation of a rx msg failed.")); 846 goto hxge_allocb_exit; 847 } 848 849 hxge_mp->use_buf_pool = B_FALSE; 850 if (dmabuf_p) { 851 hxge_mp->use_buf_pool = B_TRUE; 852 853 dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma; 854 *dmamsg_p = *dmabuf_p; 855 dmamsg_p->nblocks = 1; 856 dmamsg_p->block_size = size; 857 dmamsg_p->alength = size; 858 buffer = (uchar_t *)dmabuf_p->kaddrp; 859 860 dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size); 861 dmabuf_p->ioaddr_pp = (void *) 862 ((char *)dmabuf_p->ioaddr_pp + size); 863 864 dmabuf_p->alength -= size; 865 dmabuf_p->offset += size; 866 dmabuf_p->dma_cookie.dmac_laddress += size; 867 dmabuf_p->dma_cookie.dmac_size -= size; 868 } else { 869 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 870 if (buffer == NULL) { 871 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 872 "Allocation of a receive page failed.")); 873 goto hxge_allocb_fail1; 874 } 875 } 876 877 hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb); 878 if (hxge_mp->rx_mblk_p == NULL) { 879 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed.")); 880 goto hxge_allocb_fail2; 881 } 882 hxge_mp->buffer = buffer; 883 hxge_mp->block_size = size; 884 hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb; 885 hxge_mp->freeb.free_arg = (caddr_t)hxge_mp; 886 hxge_mp->ref_cnt = 1; 887 hxge_mp->free = B_TRUE; 888 hxge_mp->rx_use_bcopy = B_FALSE; 889 890 atomic_inc_32(&hxge_mblks_pending); 891 892 goto hxge_allocb_exit; 893 894 hxge_allocb_fail2: 895 if (!hxge_mp->use_buf_pool) { 896 KMEM_FREE(buffer, size); 897 } 898 hxge_allocb_fail1: 899 KMEM_FREE(hxge_mp, sizeof (rx_msg_t)); 900 hxge_mp = NULL; 901 902 hxge_allocb_exit: 903 return (hxge_mp); 904 } 905 906 p_mblk_t 907 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size) 908 { 909 p_mblk_t mp; 910 911 HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb")); 912 HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p " 913 "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size)); 914 915 mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb); 916 if (mp == NULL) { 917 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 918 goto hxge_dupb_exit; 919 } 920 921 atomic_inc_32(&hxge_mp->ref_cnt); 922 923 hxge_dupb_exit: 924 HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp)); 925 return (mp); 926 } 927 928 p_mblk_t 929 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size) 930 { 931 p_mblk_t mp; 932 uchar_t *dp; 933 934 mp = allocb(size + HXGE_RXBUF_EXTRA, 0); 935 if (mp == NULL) { 936 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 937 goto hxge_dupb_bcopy_exit; 938 } 939 dp = mp->b_rptr = 
mp->b_rptr + HXGE_RXBUF_EXTRA; 940 bcopy((void *) &hxge_mp->buffer[offset], dp, size); 941 mp->b_wptr = dp + size; 942 943 hxge_dupb_bcopy_exit: 944 945 HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp)); 946 947 return (mp); 948 } 949 950 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, 951 p_rx_msg_t rx_msg_p); 952 953 void 954 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 955 { 956 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page")); 957 958 /* Reuse this buffer */ 959 rx_msg_p->free = B_FALSE; 960 rx_msg_p->cur_usage_cnt = 0; 961 rx_msg_p->max_usage_cnt = 0; 962 rx_msg_p->pkt_buf_size = 0; 963 964 if (rx_rbr_p->rbr_use_bcopy) { 965 rx_msg_p->rx_use_bcopy = B_FALSE; 966 atomic_dec_32(&rx_rbr_p->rbr_consumed); 967 } 968 atomic_dec_32(&rx_rbr_p->rbr_used); 969 970 /* 971 * Get the rbr header pointer and its offset index. 972 */ 973 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 974 rx_rbr_p->rbr_wrap_mask); 975 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 976 977 /* 978 * Accumulate some buffers in the ring before re-enabling the 979 * DMA channel, if rbr empty was signaled. 980 */ 981 hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1); 982 if (rx_rbr_p->rbr_is_empty && (rx_rbr_p->rbb_max - 983 rx_rbr_p->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) { 984 hxge_rbr_empty_restore(hxgep, rx_rbr_p); 985 } 986 987 HXGE_DEBUG_MSG((hxgep, RX_CTL, 988 "<== hxge_post_page (channel %d post_next_index %d)", 989 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 990 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page")); 991 } 992 993 void 994 hxge_freeb(p_rx_msg_t rx_msg_p) 995 { 996 size_t size; 997 uchar_t *buffer = NULL; 998 int ref_cnt; 999 boolean_t free_state = B_FALSE; 1000 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1001 1002 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb")); 1003 HXGE_DEBUG_MSG((NULL, MEM2_CTL, 1004 "hxge_freeb:rx_msg_p = $%p (block pending %d)", 1005 rx_msg_p, hxge_mblks_pending)); 1006 1007 if (ring == NULL) 1008 return; 1009 1010 /* 1011 * This is to prevent posting activities while we are recovering 1012 * from fatal errors. This should not be a performance drag since 1013 * ref_cnt != 0 most times. 1014 */ 1015 if (ring->rbr_state == RBR_POSTING) 1016 MUTEX_ENTER(&ring->post_lock); 1017 1018 /* 1019 * First we need to get the free state, then 1020 * atomic decrement the reference count to prevent 1021 * the race condition with the interrupt thread that 1022 * is processing a loaned up buffer block. 1023 */ 1024 free_state = rx_msg_p->free; 1025 ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt); 1026 if (!ref_cnt) { 1027 atomic_dec_32(&hxge_mblks_pending); 1028 1029 buffer = rx_msg_p->buffer; 1030 size = rx_msg_p->block_size; 1031 1032 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: " 1033 "will free: rx_msg_p = $%p (block pending %d)", 1034 rx_msg_p, hxge_mblks_pending)); 1035 1036 if (!rx_msg_p->use_buf_pool) { 1037 KMEM_FREE(buffer, size); 1038 } 1039 1040 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1041 /* 1042 * Decrement the receive buffer ring's reference 1043 * count, too. 1044 */ 1045 atomic_dec_32(&ring->rbr_ref_cnt); 1046 1047 /* 1048 * Free the receive buffer ring, iff 1049 * 1. all the receive buffers have been freed 1050 * 2. and we are in the proper state (that is, 1051 * we are not UNMAPPING). 
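		 * Buffers loaned upstream can outlive ring teardown, so the
		 * final free of the ring structure happens here, in the last
		 * hxge_freeb() call after the state has become RBR_UNMAPPED.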
1052 */ 1053 if (ring->rbr_ref_cnt == 0 && 1054 ring->rbr_state == RBR_UNMAPPED) { 1055 KMEM_FREE(ring, sizeof (*ring)); 1056 /* post_lock has been destroyed already */ 1057 return; 1058 } 1059 } 1060 1061 /* 1062 * Repost buffer. 1063 */ 1064 if (free_state && (ref_cnt == 1)) { 1065 HXGE_DEBUG_MSG((NULL, RX_CTL, 1066 "hxge_freeb: post page $%p:", rx_msg_p)); 1067 if (ring->rbr_state == RBR_POSTING) 1068 hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p); 1069 } 1070 1071 if (ring->rbr_state == RBR_POSTING) 1072 MUTEX_EXIT(&ring->post_lock); 1073 1074 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb")); 1075 } 1076 1077 uint_t 1078 hxge_rx_intr(caddr_t arg1, caddr_t arg2) 1079 { 1080 p_hxge_ring_handle_t rhp; 1081 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 1082 p_hxge_t hxgep = (p_hxge_t)arg2; 1083 p_hxge_ldg_t ldgp; 1084 uint8_t channel; 1085 hpi_handle_t handle; 1086 rdc_stat_t cs; 1087 p_rx_rcr_ring_t ring; 1088 p_rx_rbr_ring_t rbrp; 1089 mblk_t *mp = NULL; 1090 1091 if (ldvp == NULL) { 1092 HXGE_DEBUG_MSG((NULL, RX_INT_CTL, 1093 "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1094 return (DDI_INTR_UNCLAIMED); 1095 } 1096 1097 if (arg2 == NULL || (void *) ldvp->hxgep != arg2) { 1098 hxgep = ldvp->hxgep; 1099 } 1100 1101 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1102 "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1103 1104 /* 1105 * This interrupt handler is for a specific receive dma channel. 1106 */ 1107 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1108 1109 /* 1110 * Get the control and status for this channel. 1111 */ 1112 channel = ldvp->vdma_index; 1113 ring = hxgep->rx_rcr_rings->rcr_rings[channel]; 1114 rhp = &hxgep->rx_ring_handles[channel]; 1115 ldgp = ldvp->ldgp; 1116 1117 ASSERT(ring != NULL); 1118 #if defined(DEBUG) 1119 if (rhp->started) { 1120 ASSERT(ring->ldgp == ldgp); 1121 ASSERT(ring->ldvp == ldvp); 1122 } 1123 #endif 1124 1125 MUTEX_ENTER(&ring->lock); 1126 1127 if (!ring->poll_flag) { 1128 RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value); 1129 cs.bits.ptrread = 0; 1130 cs.bits.pktread = 0; 1131 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1132 1133 /* 1134 * Process packets, if we are not in polling mode, the ring is 1135 * started and the interface is started. The MAC layer under 1136 * load will be operating in polling mode for RX traffic. 1137 */ 1138 if ((rhp->started) && 1139 (hxgep->hxge_mac_state == HXGE_MAC_STARTED)) { 1140 mp = hxge_rx_pkts(hxgep, ldvp->vdma_index, 1141 ldvp, ring, cs, -1); 1142 } 1143 1144 /* Process error events. */ 1145 if (cs.value & RDC_STAT_ERROR) { 1146 MUTEX_EXIT(&ring->lock); 1147 (void) hxge_rx_err_evnts(hxgep, channel, ldvp, cs); 1148 MUTEX_ENTER(&ring->lock); 1149 } 1150 1151 /* 1152 * Enable the mailbox update interrupt if we want to use 1153 * mailbox. We probably don't need to use mailbox as it only 1154 * saves us one pio read. Also write 1 to rcrthres and 1155 * rcrto to clear these two edge triggered bits. 1156 */ 1157 rbrp = hxgep->rx_rbr_rings->rbr_rings[channel]; 1158 MUTEX_ENTER(&rbrp->post_lock); 1159 if (!rbrp->rbr_is_empty) { 1160 cs.value = 0; 1161 cs.bits.mex = 1; 1162 cs.bits.ptrread = 0; 1163 cs.bits.pktread = 0; 1164 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1165 } 1166 MUTEX_EXIT(&rbrp->post_lock); 1167 1168 if (ldgp->nldvs == 1) { 1169 /* 1170 * Re-arm the group. 1171 */ 1172 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE, 1173 ldgp->ldg_timer); 1174 } 1175 } else if ((ldgp->nldvs == 1) && (ring->poll_flag)) { 1176 /* 1177 * Disarm the group, if we are not a shared interrupt. 
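		 * A group with more than one logical device must stay armed
		 * for the other devices, so in that case only this DMA's
		 * logical device is masked off in the branch below.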
1178 */ 1179 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_FALSE, 0); 1180 } else if (ring->poll_flag) { 1181 /* 1182 * Mask-off this device from the group. 1183 */ 1184 (void) hpi_intr_mask_set(handle, ldvp->ldv, 1); 1185 } 1186 1187 MUTEX_EXIT(&ring->lock); 1188 1189 /* 1190 * Send the packets up the stack. 1191 */ 1192 if (mp != NULL) { 1193 mac_rx_ring(hxgep->mach, ring->rcr_mac_handle, mp, 1194 ring->rcr_gen_num); 1195 } 1196 1197 HXGE_DEBUG_MSG((NULL, RX_INT_CTL, "<== hxge_rx_intr")); 1198 return (DDI_INTR_CLAIMED); 1199 } 1200 1201 /* 1202 * Enable polling for a ring. Interrupt for the ring is disabled when 1203 * the hxge interrupt comes (see hxge_rx_intr). 1204 */ 1205 int 1206 hxge_enable_poll(void *arg) 1207 { 1208 p_hxge_ring_handle_t ring_handle = (p_hxge_ring_handle_t)arg; 1209 p_rx_rcr_ring_t ringp; 1210 p_hxge_t hxgep; 1211 p_hxge_ldg_t ldgp; 1212 1213 if (ring_handle == NULL) { 1214 ASSERT(ring_handle != NULL); 1215 return (1); 1216 } 1217 1218 1219 hxgep = ring_handle->hxgep; 1220 ringp = hxgep->rx_rcr_rings->rcr_rings[ring_handle->index]; 1221 1222 MUTEX_ENTER(&ringp->lock); 1223 1224 /* 1225 * Are we already polling ? 1226 */ 1227 if (ringp->poll_flag) { 1228 MUTEX_EXIT(&ringp->lock); 1229 return (1); 1230 } 1231 1232 ldgp = ringp->ldgp; 1233 if (ldgp == NULL) { 1234 MUTEX_EXIT(&ringp->lock); 1235 return (1); 1236 } 1237 1238 /* 1239 * Enable polling 1240 */ 1241 ringp->poll_flag = B_TRUE; 1242 1243 MUTEX_EXIT(&ringp->lock); 1244 return (0); 1245 } 1246 1247 /* 1248 * Disable polling for a ring and enable its interrupt. 1249 */ 1250 int 1251 hxge_disable_poll(void *arg) 1252 { 1253 p_hxge_ring_handle_t ring_handle = (p_hxge_ring_handle_t)arg; 1254 p_rx_rcr_ring_t ringp; 1255 p_hxge_t hxgep; 1256 1257 if (ring_handle == NULL) { 1258 ASSERT(ring_handle != NULL); 1259 return (0); 1260 } 1261 1262 hxgep = ring_handle->hxgep; 1263 ringp = hxgep->rx_rcr_rings->rcr_rings[ring_handle->index]; 1264 1265 MUTEX_ENTER(&ringp->lock); 1266 1267 /* 1268 * Disable polling: enable interrupt 1269 */ 1270 if (ringp->poll_flag) { 1271 hpi_handle_t handle; 1272 rdc_stat_t cs; 1273 p_hxge_ldg_t ldgp; 1274 1275 /* 1276 * Get the control and status for this channel. 1277 */ 1278 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1279 1280 /* 1281 * Rearm this logical group if this is a single device 1282 * group. 1283 */ 1284 ldgp = ringp->ldgp; 1285 if (ldgp == NULL) { 1286 MUTEX_EXIT(&ringp->lock); 1287 return (1); 1288 } 1289 1290 ringp->poll_flag = B_FALSE; 1291 1292 /* 1293 * Enable mailbox update, to start interrupts again. 1294 */ 1295 cs.value = 0ULL; 1296 cs.bits.mex = 1; 1297 cs.bits.pktread = 0; 1298 cs.bits.ptrread = 0; 1299 RXDMA_REG_WRITE64(handle, RDC_STAT, ringp->rdc, cs.value); 1300 1301 if (ldgp->nldvs == 1) { 1302 /* 1303 * Re-arm the group, since it is the only member 1304 * of the group. 1305 */ 1306 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE, 1307 ldgp->ldg_timer); 1308 } else { 1309 /* 1310 * Mask-on interrupts for the device and re-arm 1311 * the group. 1312 */ 1313 (void) hpi_intr_mask_set(handle, ringp->ldvp->ldv, 0); 1314 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE, 1315 ldgp->ldg_timer); 1316 } 1317 } 1318 MUTEX_EXIT(&ringp->lock); 1319 return (0); 1320 } 1321 1322 /* 1323 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
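 * This is the ring's polling entry point: it is called only after
 * hxge_enable_poll() has put the ring into polling mode, reads and clears
 * RDC_STAT, gathers packets through hxge_rx_pkts() up to the byte budget,
 * and handles any error bits before returning the mblk chain.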
1324 */ 1325 mblk_t * 1326 hxge_rx_poll(void *arg, int bytes_to_pickup) 1327 { 1328 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)arg; 1329 p_rx_rcr_ring_t ring; 1330 p_hxge_t hxgep; 1331 hpi_handle_t handle; 1332 rdc_stat_t cs; 1333 mblk_t *mblk; 1334 p_hxge_ldv_t ldvp; 1335 1336 hxgep = rhp->hxgep; 1337 1338 /* 1339 * Get the control and status for this channel. 1340 */ 1341 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1342 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index]; 1343 1344 MUTEX_ENTER(&ring->lock); 1345 ASSERT(ring->poll_flag == B_TRUE); 1346 ASSERT(rhp->started); 1347 1348 if (!ring->poll_flag) { 1349 MUTEX_EXIT(&ring->lock); 1350 return ((mblk_t *)NULL); 1351 } 1352 1353 /* 1354 * Get the control and status bits for the ring. 1355 */ 1356 RXDMA_REG_READ64(handle, RDC_STAT, rhp->index, &cs.value); 1357 cs.bits.ptrread = 0; 1358 cs.bits.pktread = 0; 1359 RXDMA_REG_WRITE64(handle, RDC_STAT, rhp->index, cs.value); 1360 1361 /* 1362 * Process packets. 1363 */ 1364 mblk = hxge_rx_pkts(hxgep, ring->ldvp->vdma_index, 1365 ring->ldvp, ring, cs, bytes_to_pickup); 1366 ldvp = ring->ldvp; 1367 1368 /* 1369 * Process Error Events. 1370 */ 1371 if (ldvp && (cs.value & RDC_STAT_ERROR)) { 1372 /* 1373 * Recovery routines will grab the RCR ring lock. 1374 */ 1375 MUTEX_EXIT(&ring->lock); 1376 (void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs); 1377 MUTEX_ENTER(&ring->lock); 1378 } 1379 1380 MUTEX_EXIT(&ring->lock); 1381 return (mblk); 1382 } 1383 1384 /*ARGSUSED*/ 1385 mblk_t * 1386 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1387 p_rx_rcr_ring_t rcrp, rdc_stat_t cs, int bytes_to_read) 1388 { 1389 hpi_handle_t handle; 1390 uint8_t channel; 1391 uint32_t comp_rd_index; 1392 p_rcr_entry_t rcr_desc_rd_head_p; 1393 p_rcr_entry_t rcr_desc_rd_head_pp; 1394 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1395 uint16_t qlen, nrcr_read, npkt_read; 1396 uint32_t qlen_hw, npkts, num_rcrs; 1397 uint32_t invalid_rcr_entry; 1398 boolean_t multi; 1399 rdc_stat_t pktcs; 1400 rdc_rcr_cfg_b_t rcr_cfg_b; 1401 uint64_t rcr_head_index, rcr_tail_index; 1402 uint64_t rcr_tail; 1403 rdc_rcr_tail_t rcr_tail_reg; 1404 p_hxge_rx_ring_stats_t rdc_stats; 1405 int totallen = 0; 1406 1407 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d " 1408 "channel %d", vindex, ldvp->channel)); 1409 1410 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1411 channel = rcrp->rdc; 1412 if (channel != ldvp->channel) { 1413 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d " 1414 "channel %d, and rcr channel %d not matched.", 1415 vindex, ldvp->channel, channel)); 1416 return (NULL); 1417 } 1418 1419 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1420 "==> hxge_rx_pkts: START: rcr channel %d " 1421 "head_p $%p head_pp $%p index %d ", 1422 channel, rcrp->rcr_desc_rd_head_p, 1423 rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index)); 1424 1425 (void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1426 RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value); 1427 rcr_tail = rcr_tail_reg.bits.tail; 1428 1429 if (!qlen) { 1430 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1431 "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)", 1432 channel, qlen)); 1433 return (NULL); 1434 } 1435 1436 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d " 1437 "qlen %d", channel, qlen)); 1438 1439 comp_rd_index = rcrp->comp_rd_index; 1440 1441 rcr_desc_rd_head_p = rcrp->rcr_desc_rd_head_p; 1442 rcr_desc_rd_head_pp = rcrp->rcr_desc_rd_head_pp; 1443 nrcr_read = npkt_read = 0; 1444 1445 if (hxgep->rdc_first_intr[channel]) 1446 
		qlen_hw = qlen;
	else
		qlen_hw = qlen - 1;

	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	rcr_head_index = rcrp->rcr_desc_rd_head_p - rcrp->rcr_desc_first_p;
	rcr_tail_index = rcr_tail - rcrp->rcr_tail_begin;

	if (rcr_tail_index >= rcr_head_index) {
		num_rcrs = rcr_tail_index - rcr_head_index;
	} else {
		/* rcr_tail has wrapped around */
		num_rcrs = (rcrp->comp_size - rcr_head_index) + rcr_tail_index;
	}

	npkts = hxge_scan_for_last_eop(rcrp, rcr_desc_rd_head_p, num_rcrs);
	if (!npkts)
		return (NULL);

	if (qlen_hw > npkts) {
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
		    channel, qlen_hw, npkts));
		qlen_hw = npkts;
	}

	while (qlen_hw) {
#ifdef HXGE_DEBUG
		hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
		 */
		invalid_rcr_entry = 0;
		hxge_receive_packet(hxgep,
		    rcrp, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont,
		    &invalid_rcr_entry);
		if (invalid_rcr_entry != 0) {
			rdc_stats = rcrp->rdc_stats;
			rdc_stats->rcr_invalids++;
			HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
			    "Channel %d could only read 0x%x packets, "
			    "but 0x%x pending\n", channel, npkt_read, qlen_hw));
			break;
		}

		/*
		 * Message chaining modes (nemo msg chaining).
		 */
		if (nmp) {
			nmp->b_next = NULL;
			if (!multi && !mp_cont) { /* frame fits a partition */
				*tail_mp = nmp;
				tail_mp = &nmp->b_next;
				nmp = NULL;
			} else if (multi && !mp_cont) { /* first segment */
				*tail_mp = nmp;
				tail_mp = &nmp->b_cont;
			} else if (multi && mp_cont) { /* mid of multi segs */
				*tail_mp = mp_cont;
				tail_mp = &mp_cont->b_cont;
			} else if (!multi && mp_cont) { /* last segment */
				*tail_mp = mp_cont;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(mp_cont);
				nmp = NULL;
			}
		}

		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "==> hxge_rx_pkts: loop: rcr channel %d "
		    "before updating: multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel, multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));

		if (!multi) {
			qlen_hw--;
			npkt_read++;
		}

		/*
		 * Update the next read entry.
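		 * The completion index wraps through comp_wrap_mask, and the
		 * descriptor pointer wraps from rcr_desc_last_p back to
		 * rcr_desc_first_p at the end of the ring.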
1535 */ 1536 comp_rd_index = NEXT_ENTRY(comp_rd_index, 1537 rcrp->comp_wrap_mask); 1538 1539 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 1540 rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p); 1541 1542 nrcr_read++; 1543 1544 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1545 "<== hxge_rx_pkts: (SAM, process one packet) " 1546 "nrcr_read %d", nrcr_read)); 1547 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1548 "==> hxge_rx_pkts: loop: rcr channel %d " 1549 "multi %d nrcr_read %d npk read %d head_pp $%p index %d ", 1550 channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp, 1551 comp_rd_index)); 1552 1553 if ((bytes_to_read != -1) && 1554 (totallen >= bytes_to_read)) { 1555 break; 1556 } 1557 } 1558 1559 rcrp->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 1560 rcrp->comp_rd_index = comp_rd_index; 1561 rcrp->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 1562 1563 if ((hxgep->intr_timeout != rcrp->intr_timeout) || 1564 (hxgep->intr_threshold != rcrp->intr_threshold)) { 1565 rcrp->intr_timeout = hxgep->intr_timeout; 1566 rcrp->intr_threshold = hxgep->intr_threshold; 1567 rcr_cfg_b.value = 0x0ULL; 1568 if (rcrp->intr_timeout) 1569 rcr_cfg_b.bits.entout = 1; 1570 rcr_cfg_b.bits.timeout = rcrp->intr_timeout; 1571 rcr_cfg_b.bits.pthres = rcrp->intr_threshold; 1572 RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, 1573 channel, rcr_cfg_b.value); 1574 } 1575 1576 pktcs.value = 0; 1577 if (hxgep->rdc_first_intr[channel] && (npkt_read > 0)) { 1578 hxgep->rdc_first_intr[channel] = B_FALSE; 1579 pktcs.bits.pktread = npkt_read - 1; 1580 } else 1581 pktcs.bits.pktread = npkt_read; 1582 pktcs.bits.ptrread = nrcr_read; 1583 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, pktcs.value); 1584 1585 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1586 "==> hxge_rx_pkts: EXIT: rcr channel %d " 1587 "head_pp $%p index %016llx ", 1588 channel, rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index)); 1589 1590 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts")); 1591 return (head_mp); 1592 } 1593 1594 #define RCR_ENTRY_PATTERN 0x5a5a6b6b7c7c8d8dULL 1595 #define NO_PORT_BIT 0x20 1596 #define L4_CS_EQ_BIT 0x40 1597 1598 static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcrp, 1599 p_rcr_entry_t rcr_desc_rd_head_p, uint32_t num_rcrs) 1600 { 1601 uint64_t rcr_entry; 1602 uint32_t rcrs = 0; 1603 uint32_t pkts = 0; 1604 1605 while (rcrs < num_rcrs) { 1606 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 1607 1608 if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) 1609 break; 1610 1611 if (!(rcr_entry & RCR_MULTI_MASK)) 1612 pkts++; 1613 1614 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 1615 rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p); 1616 1617 rcrs++; 1618 } 1619 1620 return (pkts); 1621 } 1622 1623 /*ARGSUSED*/ 1624 void 1625 hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p, 1626 p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p, mblk_t **mp, 1627 mblk_t **mp_cont, uint32_t *invalid_rcr_entry) 1628 { 1629 p_mblk_t nmp = NULL; 1630 uint64_t multi; 1631 uint8_t channel; 1632 boolean_t first_entry = B_TRUE; 1633 boolean_t is_tcp_udp = B_FALSE; 1634 boolean_t buffer_free = B_FALSE; 1635 boolean_t error_send_up = B_FALSE; 1636 uint8_t error_type; 1637 uint16_t l2_len; 1638 uint16_t skip_len; 1639 uint8_t pktbufsz_type; 1640 uint64_t rcr_entry; 1641 uint64_t *pkt_buf_addr_pp; 1642 uint64_t *pkt_buf_addr_p; 1643 uint32_t buf_offset; 1644 uint32_t bsize; 1645 uint32_t msg_index; 1646 p_rx_rbr_ring_t rx_rbr_p; 1647 p_rx_msg_t *rx_msg_ring_p; 1648 p_rx_msg_t rx_msg_p; 1649 uint16_t sw_offset_bytes = 0, hdr_size = 0; 1650 hxge_status_t 
status = HXGE_OK; 1651 boolean_t is_valid = B_FALSE; 1652 p_hxge_rx_ring_stats_t rdc_stats; 1653 uint32_t bytes_read; 1654 uint8_t header0 = 0; 1655 uint8_t header1 = 0; 1656 uint64_t pkt_type; 1657 uint8_t no_port_bit = 0; 1658 uint8_t l4_cs_eq_bit = 0; 1659 1660 channel = rcr_p->rdc; 1661 1662 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet")); 1663 1664 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 1665 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 1666 1667 /* Verify the content of the rcr_entry for a hardware bug workaround */ 1668 if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) { 1669 *invalid_rcr_entry = 1; 1670 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet " 1671 "Channel %d invalid RCR entry 0x%llx found, returning\n", 1672 channel, (long long) rcr_entry)); 1673 return; 1674 } 1675 *((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN; 1676 1677 multi = (rcr_entry & RCR_MULTI_MASK); 1678 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 1679 1680 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 1681 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 1682 1683 /* 1684 * Hardware does not strip the CRC due bug ID 11451 where 1685 * the hardware mis handles minimum size packets. 1686 */ 1687 l2_len -= ETHERFCSL; 1688 1689 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 1690 RCR_PKTBUFSZ_SHIFT); 1691 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 1692 RCR_PKT_BUF_ADDR_SHIFT); 1693 1694 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1695 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1696 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1697 "error_type 0x%x pktbufsz_type %d ", 1698 rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len, 1699 multi, error_type, pktbufsz_type)); 1700 1701 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1702 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1703 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1704 "error_type 0x%x ", rcr_desc_rd_head_p, 1705 rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type)); 1706 1707 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1708 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1709 "full pkt_buf_addr_pp $%p l2_len %d", 1710 rcr_entry, pkt_buf_addr_pp, l2_len)); 1711 1712 /* get the stats ptr */ 1713 rdc_stats = rcr_p->rdc_stats; 1714 1715 if (!l2_len) { 1716 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1717 "<== hxge_receive_packet: failed: l2 length is 0.")); 1718 return; 1719 } 1720 1721 /* shift 6 bits to get the full io address */ 1722 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 1723 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1724 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1725 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1726 "full pkt_buf_addr_pp $%p l2_len %d", 1727 rcr_entry, pkt_buf_addr_pp, l2_len)); 1728 1729 rx_rbr_p = rcr_p->rx_rbr_p; 1730 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 1731 1732 if (first_entry) { 1733 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 1734 RXDMA_HDR_SIZE_DEFAULT); 1735 1736 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1737 "==> hxge_receive_packet: first entry 0x%016llx " 1738 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 1739 rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size)); 1740 } 1741 1742 MUTEX_ENTER(&rx_rbr_p->lock); 1743 1744 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1745 "==> (rbr 1) hxge_receive_packet: entry 0x%0llx " 1746 "full pkt_buf_addr_pp $%p l2_len %d", 1747 rcr_entry, pkt_buf_addr_pp, l2_len)); 1748 1749 /* 1750 * Packet buffer address in the completion entry points to the starting 1751 * buffer address (offset 0). 
Use the starting buffer address to locate 1752 * the corresponding kernel address. 1753 */ 1754 status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p, 1755 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 1756 &buf_offset, &msg_index); 1757 1758 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1759 "==> (rbr 2) hxge_receive_packet: entry 0x%0llx " 1760 "full pkt_buf_addr_pp $%p l2_len %d", 1761 rcr_entry, pkt_buf_addr_pp, l2_len)); 1762 1763 if (status != HXGE_OK) { 1764 MUTEX_EXIT(&rx_rbr_p->lock); 1765 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1766 "<== hxge_receive_packet: found vaddr failed %d", status)); 1767 return; 1768 } 1769 1770 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1771 "==> (rbr 3) hxge_receive_packet: entry 0x%0llx " 1772 "full pkt_buf_addr_pp $%p l2_len %d", 1773 rcr_entry, pkt_buf_addr_pp, l2_len)); 1774 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1775 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1776 "full pkt_buf_addr_pp $%p l2_len %d", 1777 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1778 1779 if (msg_index >= rx_rbr_p->tnblocks) { 1780 MUTEX_EXIT(&rx_rbr_p->lock); 1781 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1782 "==> hxge_receive_packet: FATAL msg_index (%d) " 1783 "should be smaller than tnblocks (%d)\n", 1784 msg_index, rx_rbr_p->tnblocks)); 1785 return; 1786 } 1787 1788 rx_msg_p = rx_msg_ring_p[msg_index]; 1789 1790 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1791 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1792 "full pkt_buf_addr_pp $%p l2_len %d", 1793 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1794 1795 switch (pktbufsz_type) { 1796 case RCR_PKTBUFSZ_0: 1797 bsize = rx_rbr_p->pkt_buf_size0_bytes; 1798 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1799 "==> hxge_receive_packet: 0 buf %d", bsize)); 1800 break; 1801 case RCR_PKTBUFSZ_1: 1802 bsize = rx_rbr_p->pkt_buf_size1_bytes; 1803 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1804 "==> hxge_receive_packet: 1 buf %d", bsize)); 1805 break; 1806 case RCR_PKTBUFSZ_2: 1807 bsize = rx_rbr_p->pkt_buf_size2_bytes; 1808 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1809 "==> hxge_receive_packet: 2 buf %d", bsize)); 1810 break; 1811 case RCR_SINGLE_BLOCK: 1812 bsize = rx_msg_p->block_size; 1813 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1814 "==> hxge_receive_packet: single %d", bsize)); 1815 1816 break; 1817 default: 1818 MUTEX_EXIT(&rx_rbr_p->lock); 1819 return; 1820 } 1821 1822 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 1823 (buf_offset + sw_offset_bytes), (hdr_size + l2_len), 1824 DDI_DMA_SYNC_FORCPU); 1825 1826 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1827 "==> hxge_receive_packet: after first dump:usage count")); 1828 1829 if (rx_msg_p->cur_usage_cnt == 0) { 1830 atomic_inc_32(&rx_rbr_p->rbr_used); 1831 if (rx_rbr_p->rbr_use_bcopy) { 1832 atomic_inc_32(&rx_rbr_p->rbr_consumed); 1833 if (rx_rbr_p->rbr_consumed < 1834 rx_rbr_p->rbr_threshold_hi) { 1835 if (rx_rbr_p->rbr_threshold_lo == 0 || 1836 ((rx_rbr_p->rbr_consumed >= 1837 rx_rbr_p->rbr_threshold_lo) && 1838 (rx_rbr_p->rbr_bufsize_type >= 1839 pktbufsz_type))) { 1840 rx_msg_p->rx_use_bcopy = B_TRUE; 1841 } 1842 } else { 1843 rx_msg_p->rx_use_bcopy = B_TRUE; 1844 } 1845 } 1846 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1847 "==> hxge_receive_packet: buf %d (new block) ", bsize)); 1848 1849 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 1850 rx_msg_p->pkt_buf_size = bsize; 1851 rx_msg_p->cur_usage_cnt = 1; 1852 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 1853 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1854 "==> hxge_receive_packet: buf %d (single block) ", 1855 bsize)); 1856 /* 1857 * Buffer can be reused once the free function is 1858 * called. 
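			 * With RCR_SINGLE_BLOCK the packet occupies the whole
			 * buffer block, so one use exhausts it.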
1859 */ 1860 rx_msg_p->max_usage_cnt = 1; 1861 buffer_free = B_TRUE; 1862 } else { 1863 rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize; 1864 if (rx_msg_p->max_usage_cnt == 1) { 1865 buffer_free = B_TRUE; 1866 } 1867 } 1868 } else { 1869 rx_msg_p->cur_usage_cnt++; 1870 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 1871 buffer_free = B_TRUE; 1872 } 1873 } 1874 1875 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1876 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 1877 msg_index, l2_len, 1878 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 1879 1880 if (error_type) { 1881 rdc_stats->ierrors++; 1882 /* Update error stats */ 1883 rdc_stats->errlog.compl_err_type = error_type; 1884 HXGE_FM_REPORT_ERROR(hxgep, 0, HXGE_FM_EREPORT_RDMC_RCR_ERR); 1885 1886 if (error_type & RCR_CTRL_FIFO_DED) { 1887 rdc_stats->ctrl_fifo_ecc_err++; 1888 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1889 " hxge_receive_packet: " 1890 " channel %d RCR ctrl_fifo_ded error", channel)); 1891 } else if (error_type & RCR_DATA_FIFO_DED) { 1892 rdc_stats->data_fifo_ecc_err++; 1893 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1894 " hxge_receive_packet: channel %d" 1895 " RCR data_fifo_ded error", channel)); 1896 } 1897 1898 /* 1899 * Update and repost buffer block if max usage count is 1900 * reached. 1901 */ 1902 if (error_send_up == B_FALSE) { 1903 atomic_inc_32(&rx_msg_p->ref_cnt); 1904 if (buffer_free == B_TRUE) { 1905 rx_msg_p->free = B_TRUE; 1906 } 1907 1908 MUTEX_EXIT(&rx_rbr_p->lock); 1909 hxge_freeb(rx_msg_p); 1910 return; 1911 } 1912 } 1913 1914 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1915 "==> hxge_receive_packet: DMA sync second ")); 1916 1917 bytes_read = rcr_p->rcvd_pkt_bytes; 1918 skip_len = sw_offset_bytes + hdr_size; 1919 1920 if (first_entry) { 1921 header0 = rx_msg_p->buffer[buf_offset]; 1922 no_port_bit = header0 & NO_PORT_BIT; 1923 header1 = rx_msg_p->buffer[buf_offset + 1]; 1924 l4_cs_eq_bit = header1 & L4_CS_EQ_BIT; 1925 } 1926 1927 if (!rx_msg_p->rx_use_bcopy) { 1928 /* 1929 * For loaned up buffers, the driver reference count 1930 * will be incremented first and then the free state. 1931 */ 1932 if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 1933 if (first_entry) { 1934 nmp->b_rptr = &nmp->b_rptr[skip_len]; 1935 if (l2_len < bsize - skip_len) { 1936 nmp->b_wptr = &nmp->b_rptr[l2_len]; 1937 } else { 1938 nmp->b_wptr = &nmp->b_rptr[bsize 1939 - skip_len]; 1940 } 1941 } else { 1942 if (l2_len - bytes_read < bsize) { 1943 nmp->b_wptr = 1944 &nmp->b_rptr[l2_len - bytes_read]; 1945 } else { 1946 nmp->b_wptr = &nmp->b_rptr[bsize]; 1947 } 1948 } 1949 } 1950 } else { 1951 if (first_entry) { 1952 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 1953 l2_len < bsize - skip_len ? 1954 l2_len : bsize - skip_len); 1955 } else { 1956 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset, 1957 l2_len - bytes_read < bsize ? 
1958 l2_len - bytes_read : bsize); 1959 } 1960 } 1961 1962 if (nmp != NULL) { 1963 if (first_entry) 1964 bytes_read = nmp->b_wptr - nmp->b_rptr; 1965 else 1966 bytes_read += nmp->b_wptr - nmp->b_rptr; 1967 1968 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1969 "==> hxge_receive_packet after dupb: " 1970 "rbr consumed %d " 1971 "pktbufsz_type %d " 1972 "nmp $%p rptr $%p wptr $%p " 1973 "buf_offset %d bzise %d l2_len %d skip_len %d", 1974 rx_rbr_p->rbr_consumed, 1975 pktbufsz_type, 1976 nmp, nmp->b_rptr, nmp->b_wptr, 1977 buf_offset, bsize, l2_len, skip_len)); 1978 } else { 1979 cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)"); 1980 1981 atomic_inc_32(&rx_msg_p->ref_cnt); 1982 if (buffer_free == B_TRUE) { 1983 rx_msg_p->free = B_TRUE; 1984 } 1985 1986 MUTEX_EXIT(&rx_rbr_p->lock); 1987 hxge_freeb(rx_msg_p); 1988 return; 1989 } 1990 1991 if (buffer_free == B_TRUE) { 1992 rx_msg_p->free = B_TRUE; 1993 } 1994 1995 /* 1996 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a 1997 * packet is not fragmented and no error bit is set, then L4 checksum 1998 * is OK. 1999 */ 2000 is_valid = (nmp != NULL); 2001 if (first_entry) { 2002 rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 2003 if (l2_len > (STD_FRAME_SIZE - ETHERFCSL)) 2004 rdc_stats->jumbo_pkts++; 2005 rdc_stats->ibytes += skip_len + l2_len < bsize ? 2006 l2_len : bsize; 2007 } else { 2008 /* 2009 * Add the current portion of the packet to the kstats. 2010 * The current portion of the packet is calculated by using 2011 * length of the packet and the previously received portion. 2012 */ 2013 rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ? 2014 l2_len - rcr_p->rcvd_pkt_bytes : bsize; 2015 } 2016 2017 rcr_p->rcvd_pkt_bytes = bytes_read; 2018 2019 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2020 atomic_inc_32(&rx_msg_p->ref_cnt); 2021 MUTEX_EXIT(&rx_rbr_p->lock); 2022 hxge_freeb(rx_msg_p); 2023 } else 2024 MUTEX_EXIT(&rx_rbr_p->lock); 2025 2026 if (is_valid) { 2027 nmp->b_cont = NULL; 2028 if (first_entry) { 2029 *mp = nmp; 2030 *mp_cont = NULL; 2031 } else { 2032 *mp_cont = nmp; 2033 } 2034 } 2035 2036 /* 2037 * Update stats and hardware checksuming. 2038 */ 2039 if (is_valid && !multi) { 2040 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2041 pkt_type == RCR_PKT_IS_UDP) ? B_TRUE : B_FALSE); 2042 2043 if (!no_port_bit && l4_cs_eq_bit && is_tcp_udp && !error_type) { 2044 mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK); 2045 2046 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2047 "==> hxge_receive_packet: Full tcp/udp cksum " 2048 "is_valid 0x%x multi %d error %d", 2049 is_valid, multi, error_type)); 2050 } 2051 } 2052 2053 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 2054 "==> hxge_receive_packet: *mp 0x%016llx", *mp)); 2055 2056 *multi_p = (multi == RCR_MULTI_MASK); 2057 2058 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: " 2059 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2060 *multi_p, nmp, *mp, *mp_cont)); 2061 } 2062 2063 static void 2064 hxge_rx_rbr_empty_recover(p_hxge_t hxgep, uint8_t channel) 2065 { 2066 hpi_handle_t handle; 2067 p_rx_rcr_ring_t rcrp; 2068 p_rx_rbr_ring_t rbrp; 2069 2070 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel]; 2071 rbrp = rcrp->rx_rbr_p; 2072 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2073 2074 /* 2075 * Wait for the channel to be quiet 2076 */ 2077 (void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel); 2078 2079 /* 2080 * Post page will accumulate some buffers before re-enabling 2081 * the DMA channel. 
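 * The channel is restored here only if at least HXGE_RBR_EMPTY_THRESHOLD
 * buffers are free again; otherwise rbr_is_empty is left set and the
 * restore is expected to happen later from the buffer post path, once
 * enough blocks have been returned to the ring.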
2082 */ 2083 2084 MUTEX_ENTER(&rbrp->post_lock); 2085 if ((rbrp->rbb_max - rbrp->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) { 2086 hxge_rbr_empty_restore(hxgep, rbrp); 2087 } else { 2088 rbrp->rbr_is_empty = B_TRUE; 2089 } 2090 MUTEX_EXIT(&rbrp->post_lock); 2091 } 2092 2093 2094 /*ARGSUSED*/ 2095 static hxge_status_t 2096 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp, 2097 rdc_stat_t cs) 2098 { 2099 p_hxge_rx_ring_stats_t rdc_stats; 2100 hpi_handle_t handle; 2101 boolean_t rxchan_fatal = B_FALSE; 2102 uint8_t channel; 2103 hxge_status_t status = HXGE_OK; 2104 2105 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts")); 2106 2107 handle = HXGE_DEV_HPI_HANDLE(hxgep); 2108 channel = ldvp->channel; 2109 2110 rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index]; 2111 2112 if (cs.bits.rbr_cpl_to) { 2113 rdc_stats->rbr_tmout++; 2114 HXGE_FM_REPORT_ERROR(hxgep, channel, 2115 HXGE_FM_EREPORT_RDMC_RBR_CPL_TO); 2116 rxchan_fatal = B_TRUE; 2117 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2118 "==> hxge_rx_err_evnts(channel %d): " 2119 "fatal error: rx_rbr_timeout", channel)); 2120 } 2121 2122 if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) { 2123 (void) hpi_rxdma_ring_perr_stat_get(handle, 2124 &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par); 2125 } 2126 2127 if (cs.bits.rcr_shadow_par_err) { 2128 rdc_stats->rcr_sha_par++; 2129 HXGE_FM_REPORT_ERROR(hxgep, channel, 2130 HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2131 rxchan_fatal = B_TRUE; 2132 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2133 "==> hxge_rx_err_evnts(channel %d): " 2134 "fatal error: rcr_shadow_par_err", channel)); 2135 } 2136 2137 if (cs.bits.rbr_prefetch_par_err) { 2138 rdc_stats->rbr_pre_par++; 2139 HXGE_FM_REPORT_ERROR(hxgep, channel, 2140 HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2141 rxchan_fatal = B_TRUE; 2142 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2143 "==> hxge_rx_err_evnts(channel %d): " 2144 "fatal error: rbr_prefetch_par_err", channel)); 2145 } 2146 2147 if (cs.bits.rbr_pre_empty) { 2148 rdc_stats->rbr_pre_empty++; 2149 HXGE_FM_REPORT_ERROR(hxgep, channel, 2150 HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY); 2151 rxchan_fatal = B_TRUE; 2152 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2153 "==> hxge_rx_err_evnts(channel %d): " 2154 "fatal error: rbr_pre_empty", channel)); 2155 } 2156 2157 if (cs.bits.peu_resp_err) { 2158 rdc_stats->peu_resp_err++; 2159 HXGE_FM_REPORT_ERROR(hxgep, channel, 2160 HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR); 2161 rxchan_fatal = B_TRUE; 2162 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2163 "==> hxge_rx_err_evnts(channel %d): " 2164 "fatal error: peu_resp_err", channel)); 2165 } 2166 2167 if (cs.bits.rcr_thres) { 2168 rdc_stats->rcr_thres++; 2169 } 2170 2171 if (cs.bits.rcr_to) { 2172 rdc_stats->rcr_to++; 2173 } 2174 2175 if (cs.bits.rcr_shadow_full) { 2176 rdc_stats->rcr_shadow_full++; 2177 HXGE_FM_REPORT_ERROR(hxgep, channel, 2178 HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL); 2179 rxchan_fatal = B_TRUE; 2180 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2181 "==> hxge_rx_err_evnts(channel %d): " 2182 "fatal error: rcr_shadow_full", channel)); 2183 } 2184 2185 if (cs.bits.rcr_full) { 2186 rdc_stats->rcrfull++; 2187 HXGE_FM_REPORT_ERROR(hxgep, channel, 2188 HXGE_FM_EREPORT_RDMC_RCRFULL); 2189 rxchan_fatal = B_TRUE; 2190 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2191 "==> hxge_rx_err_evnts(channel %d): " 2192 "fatal error: rcrfull error", channel)); 2193 } 2194 2195 if (cs.bits.rbr_empty) { 2196 rdc_stats->rbr_empty++; 2197 hxge_rx_rbr_empty_recover(hxgep, channel); 2198 } 2199 2200 if (cs.bits.rbr_full) { 2201 rdc_stats->rbrfull++; 2202 
HXGE_FM_REPORT_ERROR(hxgep, channel, 2203 HXGE_FM_EREPORT_RDMC_RBRFULL); 2204 rxchan_fatal = B_TRUE; 2205 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2206 "==> hxge_rx_err_evnts(channel %d): " 2207 "fatal error: rbr_full error", channel)); 2208 } 2209 2210 if (rxchan_fatal) { 2211 p_rx_rcr_ring_t rcrp; 2212 p_rx_rbr_ring_t rbrp; 2213 2214 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel]; 2215 rbrp = rcrp->rx_rbr_p; 2216 2217 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2218 " hxge_rx_err_evnts: fatal error on Channel #%d\n", 2219 channel)); 2220 2221 MUTEX_ENTER(&rbrp->post_lock); 2222 /* This function needs to be inside the post_lock */ 2223 status = hxge_rxdma_fatal_err_recover(hxgep, channel); 2224 MUTEX_EXIT(&rbrp->post_lock); 2225 if (status == HXGE_OK) { 2226 FM_SERVICE_RESTORED(hxgep); 2227 } 2228 } 2229 2230 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts")); 2231 return (status); 2232 } 2233 2234 static hxge_status_t 2235 hxge_map_rxdma(p_hxge_t hxgep) 2236 { 2237 int i, ndmas; 2238 uint16_t channel; 2239 p_rx_rbr_rings_t rx_rbr_rings; 2240 p_rx_rbr_ring_t *rbr_rings; 2241 p_rx_rcr_rings_t rx_rcr_rings; 2242 p_rx_rcr_ring_t *rcr_rings; 2243 p_rx_mbox_areas_t rx_mbox_areas_p; 2244 p_rx_mbox_t *rx_mbox_p; 2245 p_hxge_dma_pool_t dma_buf_poolp; 2246 p_hxge_dma_common_t *dma_buf_p; 2247 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2248 p_hxge_dma_common_t *dma_rbr_cntl_p; 2249 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2250 p_hxge_dma_common_t *dma_rcr_cntl_p; 2251 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2252 p_hxge_dma_common_t *dma_mbox_cntl_p; 2253 uint32_t *num_chunks; 2254 hxge_status_t status = HXGE_OK; 2255 2256 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma")); 2257 2258 dma_buf_poolp = hxgep->rx_buf_pool_p; 2259 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2260 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2261 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2262 2263 if (!dma_buf_poolp->buf_allocated || 2264 !dma_rbr_cntl_poolp->buf_allocated || 2265 !dma_rcr_cntl_poolp->buf_allocated || 2266 !dma_mbox_cntl_poolp->buf_allocated) { 2267 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2268 "<== hxge_map_rxdma: buf not allocated")); 2269 return (HXGE_ERROR); 2270 } 2271 2272 ndmas = dma_buf_poolp->ndmas; 2273 if (!ndmas) { 2274 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2275 "<== hxge_map_rxdma: no dma allocated")); 2276 return (HXGE_ERROR); 2277 } 2278 2279 num_chunks = dma_buf_poolp->num_chunks; 2280 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2281 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p; 2282 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p; 2283 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p; 2284 2285 rx_rbr_rings = (p_rx_rbr_rings_t) 2286 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2287 rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC( 2288 sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2289 2290 rx_rcr_rings = (p_rx_rcr_rings_t) 2291 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2292 rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC( 2293 sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2294 2295 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2296 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2297 rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC( 2298 sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2299 2300 /* 2301 * Timeout should be set based on the system clock divider. 2302 * The following timeout value of 1 assumes that the 2303 * granularity (1000) is 3 microseconds running at 300MHz. 
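 * The defaults assigned below (RXDMA_RCR_PTHRES_DEFAULT and
 * RXDMA_RCR_TO_DEFAULT) are later copied into each RCR ring and
 * programmed into the RCR configuration B register as the packet
 * threshold and timeout for completion interrupts.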
2304 */ 2305 2306 hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2307 hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2308 2309 /* 2310 * Map descriptors from the buffer polls for each dam channel. 2311 */ 2312 for (i = 0; i < ndmas; i++) { 2313 if (((p_hxge_dma_common_t)dma_buf_p[i]) == NULL) { 2314 status = HXGE_ERROR; 2315 goto hxge_map_rxdma_fail1; 2316 } 2317 2318 /* 2319 * Set up and prepare buffer blocks, descriptors and mailbox. 2320 */ 2321 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2322 status = hxge_map_rxdma_channel(hxgep, channel, 2323 (p_hxge_dma_common_t *)&dma_buf_p[i], 2324 (p_rx_rbr_ring_t *)&rbr_rings[i], 2325 num_chunks[i], 2326 (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i], 2327 (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i], 2328 (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i], 2329 (p_rx_rcr_ring_t *)&rcr_rings[i], 2330 (p_rx_mbox_t *)&rx_mbox_p[i]); 2331 if (status != HXGE_OK) { 2332 goto hxge_map_rxdma_fail1; 2333 } 2334 rbr_rings[i]->index = (uint16_t)i; 2335 rcr_rings[i]->index = (uint16_t)i; 2336 rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i]; 2337 } 2338 2339 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2340 rx_rbr_rings->rbr_rings = rbr_rings; 2341 hxgep->rx_rbr_rings = rx_rbr_rings; 2342 rx_rcr_rings->rcr_rings = rcr_rings; 2343 hxgep->rx_rcr_rings = rx_rcr_rings; 2344 2345 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2346 hxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2347 2348 goto hxge_map_rxdma_exit; 2349 2350 hxge_map_rxdma_fail1: 2351 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2352 "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)", 2353 status, channel, i)); 2354 i--; 2355 for (; i >= 0; i--) { 2356 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2357 hxge_unmap_rxdma_channel(hxgep, channel, 2358 rbr_rings[i], rcr_rings[i], rx_mbox_p[i]); 2359 } 2360 2361 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2362 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2363 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2364 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2365 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2366 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2367 2368 hxge_map_rxdma_exit: 2369 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2370 "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 2371 2372 return (status); 2373 } 2374 2375 static void 2376 hxge_unmap_rxdma(p_hxge_t hxgep) 2377 { 2378 int i, ndmas; 2379 uint16_t channel; 2380 p_rx_rbr_rings_t rx_rbr_rings; 2381 p_rx_rbr_ring_t *rbr_rings; 2382 p_rx_rcr_rings_t rx_rcr_rings; 2383 p_rx_rcr_ring_t *rcr_rings; 2384 p_rx_mbox_areas_t rx_mbox_areas_p; 2385 p_rx_mbox_t *rx_mbox_p; 2386 p_hxge_dma_pool_t dma_buf_poolp; 2387 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2388 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2389 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2390 p_hxge_dma_common_t *dma_buf_p; 2391 2392 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma")); 2393 2394 dma_buf_poolp = hxgep->rx_buf_pool_p; 2395 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2396 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2397 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2398 2399 if (!dma_buf_poolp->buf_allocated || 2400 !dma_rbr_cntl_poolp->buf_allocated || 2401 !dma_rcr_cntl_poolp->buf_allocated || 2402 !dma_mbox_cntl_poolp->buf_allocated) { 2403 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2404 "<== hxge_unmap_rxdma: NULL buf pointers")); 2405 return; 2406 } 2407 2408 rx_rbr_rings = hxgep->rx_rbr_rings; 2409 rx_rcr_rings = hxgep->rx_rcr_rings; 2410 if 
(rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2411 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2412 "<== hxge_unmap_rxdma: NULL pointers")); 2413 return; 2414 } 2415 2416 ndmas = rx_rbr_rings->ndmas; 2417 if (!ndmas) { 2418 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2419 "<== hxge_unmap_rxdma: no channel")); 2420 return; 2421 } 2422 2423 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2424 "==> hxge_unmap_rxdma (ndmas %d)", ndmas)); 2425 2426 rbr_rings = rx_rbr_rings->rbr_rings; 2427 rcr_rings = rx_rcr_rings->rcr_rings; 2428 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 2429 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 2430 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2431 2432 for (i = 0; i < ndmas; i++) { 2433 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2434 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2435 "==> hxge_unmap_rxdma (ndmas %d) channel %d", 2436 ndmas, channel)); 2437 (void) hxge_unmap_rxdma_channel(hxgep, channel, 2438 (p_rx_rbr_ring_t)rbr_rings[i], 2439 (p_rx_rcr_ring_t)rcr_rings[i], 2440 (p_rx_mbox_t)rx_mbox_p[i]); 2441 } 2442 2443 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2444 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2445 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2446 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2447 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2448 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2449 2450 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma")); 2451 } 2452 2453 hxge_status_t 2454 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2455 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 2456 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 2457 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 2458 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2459 { 2460 int status = HXGE_OK; 2461 2462 /* 2463 * Set up and prepare buffer blocks, descriptors and mailbox. 2464 */ 2465 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2466 "==> hxge_map_rxdma_channel (channel %d)", channel)); 2467 2468 /* 2469 * Receive buffer blocks 2470 */ 2471 status = hxge_map_rxdma_channel_buf_ring(hxgep, channel, 2472 dma_buf_p, rbr_p, num_chunks); 2473 if (status != HXGE_OK) { 2474 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2475 "==> hxge_map_rxdma_channel (channel %d): " 2476 "map buffer failed 0x%x", channel, status)); 2477 goto hxge_map_rxdma_channel_exit; 2478 } 2479 2480 /* 2481 * Receive block ring, completion ring and mailbox. 
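 * If this step fails, the buffer ring mapped above is released through
 * the hxge_map_rxdma_channel_fail2 path before an error is returned.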
2482 */ 2483 status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel, 2484 dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p, 2485 rbr_p, rcr_p, rx_mbox_p); 2486 if (status != HXGE_OK) { 2487 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2488 "==> hxge_map_rxdma_channel (channel %d): " 2489 "map config failed 0x%x", channel, status)); 2490 goto hxge_map_rxdma_channel_fail2; 2491 } 2492 goto hxge_map_rxdma_channel_exit; 2493 2494 hxge_map_rxdma_channel_fail3: 2495 /* Free rbr, rcr */ 2496 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2497 "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)", 2498 status, channel)); 2499 hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p); 2500 2501 hxge_map_rxdma_channel_fail2: 2502 /* Free buffer blocks */ 2503 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2504 "==> hxge_map_rxdma_channel: free rx buffers" 2505 "(hxgep 0x%x status 0x%x channel %d)", 2506 hxgep, status, channel)); 2507 hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p); 2508 2509 status = HXGE_ERROR; 2510 2511 hxge_map_rxdma_channel_exit: 2512 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2513 "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)", 2514 hxgep, status, channel)); 2515 2516 return (status); 2517 } 2518 2519 /*ARGSUSED*/ 2520 static void 2521 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2522 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2523 { 2524 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2525 "==> hxge_unmap_rxdma_channel (channel %d)", channel)); 2526 2527 /* 2528 * unmap receive block ring, completion ring and mailbox. 2529 */ 2530 (void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p); 2531 2532 /* unmap buffer blocks */ 2533 (void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p); 2534 2535 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel")); 2536 } 2537 2538 /*ARGSUSED*/ 2539 static hxge_status_t 2540 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel, 2541 p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p, 2542 p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p, 2543 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2544 { 2545 p_rx_rbr_ring_t rbrp; 2546 p_rx_rcr_ring_t rcrp; 2547 p_rx_mbox_t mboxp; 2548 p_hxge_dma_common_t cntl_dmap; 2549 p_hxge_dma_common_t dmap; 2550 p_rx_msg_t *rx_msg_ring; 2551 p_rx_msg_t rx_msg_p; 2552 rdc_rbr_cfg_a_t *rcfga_p; 2553 rdc_rbr_cfg_b_t *rcfgb_p; 2554 rdc_rcr_cfg_a_t *cfga_p; 2555 rdc_rcr_cfg_b_t *cfgb_p; 2556 rdc_rx_cfg1_t *cfig1_p; 2557 rdc_rx_cfg2_t *cfig2_p; 2558 rdc_rbr_kick_t *kick_p; 2559 uint32_t dmaaddrp; 2560 uint32_t *rbr_vaddrp; 2561 uint32_t bkaddr; 2562 hxge_status_t status = HXGE_OK; 2563 int i; 2564 uint32_t hxge_port_rcr_size; 2565 2566 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2567 "==> hxge_map_rxdma_channel_cfg_ring")); 2568 2569 cntl_dmap = *dma_rbr_cntl_p; 2570 2571 /* 2572 * Map in the receive block ring 2573 */ 2574 rbrp = *rbr_p; 2575 dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc; 2576 hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 2577 2578 /* 2579 * Zero out buffer block ring descriptors. 
2580 */ 2581 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2582 2583 rcfga_p = &(rbrp->rbr_cfga); 2584 rcfgb_p = &(rbrp->rbr_cfgb); 2585 kick_p = &(rbrp->rbr_kick); 2586 rcfga_p->value = 0; 2587 rcfgb_p->value = 0; 2588 kick_p->value = 0; 2589 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 2590 rcfga_p->value = (rbrp->rbr_addr & 2591 (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK)); 2592 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 2593 2594 /* XXXX: how to choose packet buffer sizes */ 2595 rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0; 2596 rcfgb_p->bits.vld0 = 1; 2597 rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1; 2598 rcfgb_p->bits.vld1 = 1; 2599 rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2; 2600 rcfgb_p->bits.vld2 = 1; 2601 rcfgb_p->bits.bksize = hxgep->rx_bksize_code; 2602 2603 /* 2604 * For each buffer block, enter receive block address to the ring. 2605 */ 2606 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 2607 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 2608 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2609 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2610 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 2611 2612 rx_msg_ring = rbrp->rx_msg_ring; 2613 for (i = 0; i < rbrp->tnblocks; i++) { 2614 rx_msg_p = rx_msg_ring[i]; 2615 rx_msg_p->hxgep = hxgep; 2616 rx_msg_p->rx_rbr_p = rbrp; 2617 bkaddr = (uint32_t) 2618 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2619 RBR_BKADDR_SHIFT)); 2620 rx_msg_p->free = B_FALSE; 2621 rx_msg_p->max_usage_cnt = 0xbaddcafe; 2622 2623 *rbr_vaddrp++ = bkaddr; 2624 } 2625 2626 kick_p->bits.bkadd = rbrp->rbb_max; 2627 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 2628 2629 rbrp->rbr_rd_index = 0; 2630 2631 rbrp->rbr_consumed = 0; 2632 rbrp->rbr_used = 0; 2633 rbrp->rbr_use_bcopy = B_TRUE; 2634 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 2635 2636 /* 2637 * Do bcopy on packets greater than bcopy size once the lo threshold is 2638 * reached. This lo threshold should be less than the hi threshold. 2639 * 2640 * Do bcopy on every packet once the hi threshold is reached. 
2641 */ 2642 if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) { 2643 /* default it to use hi */ 2644 hxge_rx_threshold_lo = hxge_rx_threshold_hi; 2645 } 2646 if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) { 2647 hxge_rx_buf_size_type = HXGE_RBR_TYPE2; 2648 } 2649 rbrp->rbr_bufsize_type = hxge_rx_buf_size_type; 2650 2651 switch (hxge_rx_threshold_hi) { 2652 default: 2653 case HXGE_RX_COPY_NONE: 2654 /* Do not do bcopy at all */ 2655 rbrp->rbr_use_bcopy = B_FALSE; 2656 rbrp->rbr_threshold_hi = rbrp->rbb_max; 2657 break; 2658 2659 case HXGE_RX_COPY_1: 2660 case HXGE_RX_COPY_2: 2661 case HXGE_RX_COPY_3: 2662 case HXGE_RX_COPY_4: 2663 case HXGE_RX_COPY_5: 2664 case HXGE_RX_COPY_6: 2665 case HXGE_RX_COPY_7: 2666 rbrp->rbr_threshold_hi = 2667 rbrp->rbb_max * (hxge_rx_threshold_hi) / 2668 HXGE_RX_BCOPY_SCALE; 2669 break; 2670 2671 case HXGE_RX_COPY_ALL: 2672 rbrp->rbr_threshold_hi = 0; 2673 break; 2674 } 2675 2676 switch (hxge_rx_threshold_lo) { 2677 default: 2678 case HXGE_RX_COPY_NONE: 2679 /* Do not do bcopy at all */ 2680 if (rbrp->rbr_use_bcopy) { 2681 rbrp->rbr_use_bcopy = B_FALSE; 2682 } 2683 rbrp->rbr_threshold_lo = rbrp->rbb_max; 2684 break; 2685 2686 case HXGE_RX_COPY_1: 2687 case HXGE_RX_COPY_2: 2688 case HXGE_RX_COPY_3: 2689 case HXGE_RX_COPY_4: 2690 case HXGE_RX_COPY_5: 2691 case HXGE_RX_COPY_6: 2692 case HXGE_RX_COPY_7: 2693 rbrp->rbr_threshold_lo = 2694 rbrp->rbb_max * (hxge_rx_threshold_lo) / 2695 HXGE_RX_BCOPY_SCALE; 2696 break; 2697 2698 case HXGE_RX_COPY_ALL: 2699 rbrp->rbr_threshold_lo = 0; 2700 break; 2701 } 2702 2703 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2704 "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d " 2705 "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d " 2706 "rbb_threshold_lo %d", 2707 dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type, 2708 rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo)); 2709 2710 /* Map in the receive completion ring */ 2711 rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 2712 MUTEX_INIT(&rcrp->lock, NULL, MUTEX_DRIVER, 2713 (void *) hxgep->interrupt_cookie); 2714 rcrp->rdc = dma_channel; 2715 rcrp->hxgep = hxgep; 2716 2717 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 2718 rcrp->comp_size = hxge_port_rcr_size; 2719 rcrp->comp_wrap_mask = hxge_port_rcr_size - 1; 2720 2721 cntl_dmap = *dma_rcr_cntl_p; 2722 2723 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 2724 hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 2725 sizeof (rcr_entry_t)); 2726 rcrp->comp_rd_index = 0; 2727 rcrp->comp_wt_index = 0; 2728 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 2729 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 2730 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2731 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2732 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 2733 (hxge_port_rcr_size - 1); 2734 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 2735 (hxge_port_rcr_size - 1); 2736 2737 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 2738 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 2739 2740 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2741 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2742 "rbr_vaddrp $%p rcr_desc_rd_head_p $%p " 2743 "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p " 2744 "rcr_desc_rd_last_pp $%p ", 2745 dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p, 2746 rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p, 2747 rcrp->rcr_desc_last_pp)); 2748 2749 /* 2750 * Zero out buffer block ring descriptors. 
2751 */ 2752 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2753 rcrp->intr_timeout = hxgep->intr_timeout; 2754 rcrp->intr_threshold = hxgep->intr_threshold; 2755 rcrp->full_hdr_flag = B_FALSE; 2756 rcrp->sw_priv_hdr_len = 0; 2757 2758 cfga_p = &(rcrp->rcr_cfga); 2759 cfgb_p = &(rcrp->rcr_cfgb); 2760 cfga_p->value = 0; 2761 cfgb_p->value = 0; 2762 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 2763 2764 cfga_p->value = (rcrp->rcr_addr & 2765 (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK)); 2766 2767 cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF); 2768 2769 /* 2770 * Timeout should be set based on the system clock divider. The 2771 * following timeout value of 1 assumes that the granularity (1000) is 2772 * 3 microseconds running at 300MHz. 2773 */ 2774 cfgb_p->bits.pthres = rcrp->intr_threshold; 2775 cfgb_p->bits.timeout = rcrp->intr_timeout; 2776 cfgb_p->bits.entout = 1; 2777 2778 /* Map in the mailbox */ 2779 cntl_dmap = *dma_mbox_cntl_p; 2780 mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 2781 dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox; 2782 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 2783 cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1; 2784 cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2; 2785 cfig1_p->value = cfig2_p->value = 0; 2786 2787 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 2788 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2789 "==> hxge_map_rxdma_channel_cfg_ring: " 2790 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 2791 dma_channel, cfig1_p->value, cfig2_p->value, 2792 mboxp->mbox_addr)); 2793 2794 dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff); 2795 cfig1_p->bits.mbaddr_h = dmaaddrp; 2796 2797 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 2798 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 2799 RXDMA_CFIG2_MBADDR_L_MASK); 2800 2801 cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 2802 2803 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2804 "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p " 2805 "cfg1 0x%016llx cfig2 0x%016llx", 2806 dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value)); 2807 2808 cfig2_p->bits.full_hdr = rcrp->full_hdr_flag; 2809 cfig2_p->bits.offset = rcrp->sw_priv_hdr_len; 2810 2811 rbrp->rx_rcr_p = rcrp; 2812 rcrp->rx_rbr_p = rbrp; 2813 *rcr_p = rcrp; 2814 *rx_mbox_p = mboxp; 2815 2816 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2817 "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 2818 return (status); 2819 } 2820 2821 /*ARGSUSED*/ 2822 static void 2823 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 2824 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2825 { 2826 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2827 "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc)); 2828 2829 MUTEX_DESTROY(&rcr_p->lock); 2830 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 2831 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 2832 2833 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2834 "<== hxge_unmap_rxdma_channel_cfg_ring")); 2835 } 2836 2837 static hxge_status_t 2838 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel, 2839 p_hxge_dma_common_t *dma_buf_p, 2840 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 2841 { 2842 p_rx_rbr_ring_t rbrp; 2843 p_hxge_dma_common_t dma_bufp, tmp_bufp; 2844 p_rx_msg_t *rx_msg_ring; 2845 p_rx_msg_t rx_msg_p; 2846 p_mblk_t mblk_p; 2847 2848 rxring_info_t *ring_info; 2849 hxge_status_t status = HXGE_OK; 2850 int i, j, index; 2851 uint32_t size, bsize, nblocks, nmsgs; 2852 2853 HXGE_DEBUG_MSG((hxgep, 
MEM2_CTL, 2854 "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel)); 2855 2856 dma_bufp = tmp_bufp = *dma_buf_p; 2857 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2858 " hxge_map_rxdma_channel_buf_ring: channel %d to map %d " 2859 "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp)); 2860 2861 nmsgs = 0; 2862 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2863 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2864 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2865 "bufp 0x%016llx nblocks %d nmsgs %d", 2866 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2867 nmsgs += tmp_bufp->nblocks; 2868 } 2869 if (!nmsgs) { 2870 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2871 "<== hxge_map_rxdma_channel_buf_ring: channel %d " 2872 "no msg blocks", channel)); 2873 status = HXGE_ERROR; 2874 goto hxge_map_rxdma_channel_buf_ring_exit; 2875 } 2876 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 2877 2878 size = nmsgs * sizeof (p_rx_msg_t); 2879 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2880 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 2881 KM_SLEEP); 2882 2883 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 2884 (void *) hxgep->interrupt_cookie); 2885 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 2886 (void *) hxgep->interrupt_cookie); 2887 2888 rbrp->rdc = channel; 2889 rbrp->num_blocks = num_chunks; 2890 rbrp->tnblocks = nmsgs; 2891 rbrp->rbb_max = nmsgs; 2892 rbrp->rbr_max_size = nmsgs; 2893 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 2894 2895 /* 2896 * Buffer sizes: 256, 1K, and 2K. 2897 * 2898 * Blk 0 size. 2899 */ 2900 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 2901 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 2902 rbrp->hpi_pkt_buf_size0 = SIZE_256B; 2903 2904 /* 2905 * Blk 1 size. 2906 */ 2907 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 2908 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 2909 rbrp->hpi_pkt_buf_size1 = SIZE_1KB; 2910 2911 /* 2912 * Blk 2 size. 2913 */ 2914 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 2915 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 2916 rbrp->hpi_pkt_buf_size2 = SIZE_2KB; 2917 2918 rbrp->block_size = hxgep->rx_default_block_size; 2919 2920 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2921 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2922 "actual rbr max %d rbb_max %d nmsgs %d " 2923 "rbrp->block_size %d default_block_size %d " 2924 "(config hxge_rbr_size %d hxge_rbr_spare_size %d)", 2925 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 2926 rbrp->block_size, hxgep->rx_default_block_size, 2927 hxge_rbr_size, hxge_rbr_spare_size)); 2928 2929 /* 2930 * Map in buffers from the buffer pool. 2931 * Note that num_blocks is the num_chunks. For Sparc, there is likely 2932 * only one chunk. For x86, there will be many chunks. 2933 * Loop over chunks. 
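 * Each block within a chunk gets an rx_msg_t from hxge_allocb(); the
 * block's DMA address (shifted by RBR_BKADDR_SHIFT) is recorded in
 * shifted_addr so it can later be posted to the RBR, and rbr_ref_cnt
 * counts the blocks owned by the ring.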
2934 */ 2935 index = 0; 2936 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 2937 bsize = dma_bufp->block_size; 2938 nblocks = dma_bufp->nblocks; 2939 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 2940 ring_info->buffer[i].buf_index = i; 2941 ring_info->buffer[i].buf_size = dma_bufp->alength; 2942 ring_info->buffer[i].start_index = index; 2943 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 2944 2945 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2946 " hxge_map_rxdma_channel_buf_ring: map channel %d " 2947 "chunk %d nblocks %d chunk_size %x block_size 0x%x " 2948 "dma_bufp $%p dvma_addr $%p", channel, i, 2949 dma_bufp->nblocks, 2950 ring_info->buffer[i].buf_size, bsize, dma_bufp, 2951 ring_info->buffer[i].dvma_addr)); 2952 2953 /* loop over blocks within a chunk */ 2954 for (j = 0; j < nblocks; j++) { 2955 if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO, 2956 dma_bufp)) == NULL) { 2957 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2958 "allocb failed (index %d i %d j %d)", 2959 index, i, j)); 2960 goto hxge_map_rxdma_channel_buf_ring_fail1; 2961 } 2962 rx_msg_ring[index] = rx_msg_p; 2963 rx_msg_p->block_index = index; 2964 rx_msg_p->shifted_addr = (uint32_t) 2965 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2966 RBR_BKADDR_SHIFT)); 2967 /* 2968 * Too much output 2969 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2970 * "index %d j %d rx_msg_p $%p mblk %p", 2971 * index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 2972 */ 2973 mblk_p = rx_msg_p->rx_mblk_p; 2974 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 2975 2976 rbrp->rbr_ref_cnt++; 2977 index++; 2978 rx_msg_p->buf_dma.dma_channel = channel; 2979 } 2980 } 2981 if (i < rbrp->num_blocks) { 2982 goto hxge_map_rxdma_channel_buf_ring_fail1; 2983 } 2984 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2985 "hxge_map_rxdma_channel_buf_ring: done buf init " 2986 "channel %d msg block entries %d", channel, index)); 2987 ring_info->block_size_mask = bsize - 1; 2988 rbrp->rx_msg_ring = rx_msg_ring; 2989 rbrp->dma_bufp = dma_buf_p; 2990 rbrp->ring_info = ring_info; 2991 2992 status = hxge_rxbuf_index_info_init(hxgep, rbrp); 2993 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: " 2994 "channel %d done buf info init", channel)); 2995 2996 /* 2997 * Finally, permit hxge_freeb() to call hxge_post_page(). 
2998 */ 2999 rbrp->rbr_state = RBR_POSTING; 3000 3001 *rbr_p = rbrp; 3002 3003 goto hxge_map_rxdma_channel_buf_ring_exit; 3004 3005 hxge_map_rxdma_channel_buf_ring_fail1: 3006 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3007 " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3008 channel, status)); 3009 3010 index--; 3011 for (; index >= 0; index--) { 3012 rx_msg_p = rx_msg_ring[index]; 3013 if (rx_msg_p != NULL) { 3014 freeb(rx_msg_p->rx_mblk_p); 3015 rx_msg_ring[index] = NULL; 3016 } 3017 } 3018 3019 hxge_map_rxdma_channel_buf_ring_fail: 3020 MUTEX_DESTROY(&rbrp->post_lock); 3021 MUTEX_DESTROY(&rbrp->lock); 3022 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3023 KMEM_FREE(rx_msg_ring, size); 3024 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3025 3026 status = HXGE_ERROR; 3027 3028 hxge_map_rxdma_channel_buf_ring_exit: 3029 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3030 "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3031 3032 return (status); 3033 } 3034 3035 /*ARGSUSED*/ 3036 static void 3037 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 3038 p_rx_rbr_ring_t rbr_p) 3039 { 3040 p_rx_msg_t *rx_msg_ring; 3041 p_rx_msg_t rx_msg_p; 3042 rxring_info_t *ring_info; 3043 int i; 3044 uint32_t size; 3045 3046 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3047 "==> hxge_unmap_rxdma_channel_buf_ring")); 3048 if (rbr_p == NULL) { 3049 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3050 "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3051 return; 3052 } 3053 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3054 "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc)); 3055 3056 rx_msg_ring = rbr_p->rx_msg_ring; 3057 ring_info = rbr_p->ring_info; 3058 3059 if (rx_msg_ring == NULL || ring_info == NULL) { 3060 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3061 "<== hxge_unmap_rxdma_channel_buf_ring: " 3062 "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info)); 3063 return; 3064 } 3065 3066 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3067 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3068 " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3069 "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks, 3070 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3071 3072 for (i = 0; i < rbr_p->tnblocks; i++) { 3073 rx_msg_p = rx_msg_ring[i]; 3074 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3075 " hxge_unmap_rxdma_channel_buf_ring: " 3076 "rx_msg_p $%p", rx_msg_p)); 3077 if (rx_msg_p != NULL) { 3078 freeb(rx_msg_p->rx_mblk_p); 3079 rx_msg_ring[i] = NULL; 3080 } 3081 } 3082 3083 /* 3084 * We no longer may use the mutex <post_lock>. By setting 3085 * <rbr_state> to anything but POSTING, we prevent 3086 * hxge_post_page() from accessing a dead mutex. 3087 */ 3088 rbr_p->rbr_state = RBR_UNMAPPING; 3089 MUTEX_DESTROY(&rbr_p->post_lock); 3090 3091 MUTEX_DESTROY(&rbr_p->lock); 3092 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3093 KMEM_FREE(rx_msg_ring, size); 3094 3095 if (rbr_p->rbr_ref_cnt == 0) { 3096 /* This is the normal state of affairs. */ 3097 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3098 } else { 3099 /* 3100 * Some of our buffers are still being used. 3101 * Therefore, tell hxge_freeb() this ring is 3102 * unmapped, so it may free <rbr_p> for us. 3103 */ 3104 rbr_p->rbr_state = RBR_UNMAPPED; 3105 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3106 "unmap_rxdma_buf_ring: %d %s outstanding.", 3107 rbr_p->rbr_ref_cnt, 3108 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 3109 } 3110 3111 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3112 "<== hxge_unmap_rxdma_channel_buf_ring")); 3113 } 3114 3115 static hxge_status_t 3116 hxge_rxdma_hw_start_common(p_hxge_t hxgep) 3117 { 3118 hxge_status_t status = HXGE_OK; 3119 3120 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 3121 3122 /* 3123 * Load the sharable parameters by writing to the function zero control 3124 * registers. These FZC registers should be initialized only once for 3125 * the entire chip. 3126 */ 3127 (void) hxge_init_fzc_rx_common(hxgep); 3128 3129 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 3130 3131 return (status); 3132 } 3133 3134 static hxge_status_t 3135 hxge_rxdma_hw_start(p_hxge_t hxgep) 3136 { 3137 int i, ndmas; 3138 uint16_t channel; 3139 p_rx_rbr_rings_t rx_rbr_rings; 3140 p_rx_rbr_ring_t *rbr_rings; 3141 p_rx_rcr_rings_t rx_rcr_rings; 3142 p_rx_rcr_ring_t *rcr_rings; 3143 p_rx_mbox_areas_t rx_mbox_areas_p; 3144 p_rx_mbox_t *rx_mbox_p; 3145 hxge_status_t status = HXGE_OK; 3146 3147 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start")); 3148 3149 rx_rbr_rings = hxgep->rx_rbr_rings; 3150 rx_rcr_rings = hxgep->rx_rcr_rings; 3151 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3152 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3153 "<== hxge_rxdma_hw_start: NULL ring pointers")); 3154 return (HXGE_ERROR); 3155 } 3156 3157 ndmas = rx_rbr_rings->ndmas; 3158 if (ndmas == 0) { 3159 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3160 "<== hxge_rxdma_hw_start: no dma channel allocated")); 3161 return (HXGE_ERROR); 3162 } 3163 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3164 "==> hxge_rxdma_hw_start (ndmas %d)", ndmas)); 3165 3166 /* 3167 * Scrub the RDC Rx DMA Prefetch Buffer Command. 3168 */ 3169 for (i = 0; i < 128; i++) { 3170 HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i); 3171 } 3172 3173 /* 3174 * Scrub Rx DMA Shadow Tail Command. 3175 */ 3176 for (i = 0; i < 64; i++) { 3177 HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i); 3178 } 3179 3180 /* 3181 * Scrub Rx DMA Control Fifo Command. 3182 */ 3183 for (i = 0; i < 512; i++) { 3184 HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i); 3185 } 3186 3187 /* 3188 * Scrub Rx DMA Data Fifo Command. 3189 */ 3190 for (i = 0; i < 1536; i++) { 3191 HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i); 3192 } 3193 3194 /* 3195 * Reset the FIFO Error Stat. 
3196 */ 3197 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF); 3198 3199 /* Set the error mask to receive interrupts */ 3200 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3201 3202 rbr_rings = rx_rbr_rings->rbr_rings; 3203 rcr_rings = rx_rcr_rings->rcr_rings; 3204 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 3205 if (rx_mbox_areas_p) { 3206 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3207 } 3208 3209 for (i = 0; i < ndmas; i++) { 3210 channel = rbr_rings[i]->rdc; 3211 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3212 "==> hxge_rxdma_hw_start (ndmas %d) channel %d", 3213 ndmas, channel)); 3214 status = hxge_rxdma_start_channel(hxgep, channel, 3215 (p_rx_rbr_ring_t)rbr_rings[i], 3216 (p_rx_rcr_ring_t)rcr_rings[i], 3217 (p_rx_mbox_t)rx_mbox_p[i], rbr_rings[i]->rbb_max); 3218 if (status != HXGE_OK) { 3219 goto hxge_rxdma_hw_start_fail1; 3220 } 3221 } 3222 3223 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: " 3224 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3225 rx_rbr_rings, rx_rcr_rings)); 3226 goto hxge_rxdma_hw_start_exit; 3227 3228 hxge_rxdma_hw_start_fail1: 3229 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3230 "==> hxge_rxdma_hw_start: disable " 3231 "(status 0x%x channel %d i %d)", status, channel, i)); 3232 for (; i >= 0; i--) { 3233 channel = rbr_rings[i]->rdc; 3234 (void) hxge_rxdma_stop_channel(hxgep, channel); 3235 } 3236 3237 hxge_rxdma_hw_start_exit: 3238 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3239 "==> hxge_rxdma_hw_start: (status 0x%x)", status)); 3240 return (status); 3241 } 3242 3243 static void 3244 hxge_rxdma_hw_stop(p_hxge_t hxgep) 3245 { 3246 int i, ndmas; 3247 uint16_t channel; 3248 p_rx_rbr_rings_t rx_rbr_rings; 3249 p_rx_rbr_ring_t *rbr_rings; 3250 p_rx_rcr_rings_t rx_rcr_rings; 3251 3252 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop")); 3253 3254 rx_rbr_rings = hxgep->rx_rbr_rings; 3255 rx_rcr_rings = hxgep->rx_rcr_rings; 3256 3257 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3258 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3259 "<== hxge_rxdma_hw_stop: NULL ring pointers")); 3260 return; 3261 } 3262 3263 ndmas = rx_rbr_rings->ndmas; 3264 if (!ndmas) { 3265 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3266 "<== hxge_rxdma_hw_stop: no dma channel allocated")); 3267 return; 3268 } 3269 3270 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3271 "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3272 3273 rbr_rings = rx_rbr_rings->rbr_rings; 3274 for (i = 0; i < ndmas; i++) { 3275 channel = rbr_rings[i]->rdc; 3276 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3277 "==> hxge_rxdma_hw_stop (ndmas %d) channel %d", 3278 ndmas, channel)); 3279 (void) hxge_rxdma_stop_channel(hxgep, channel); 3280 } 3281 3282 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: " 3283 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3284 rx_rbr_rings, rx_rcr_rings)); 3285 3286 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop")); 3287 } 3288 3289 static hxge_status_t 3290 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 3291 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 3292 int n_init_kick) 3293 { 3294 hpi_handle_t handle; 3295 hpi_status_t rs = HPI_SUCCESS; 3296 rdc_stat_t cs; 3297 rdc_int_mask_t ent_mask; 3298 hxge_status_t status = HXGE_OK; 3299 3300 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel")); 3301 3302 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3303 3304 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: " 3305 "hpi handle addr $%p acc $%p", 3306 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3307 3308 /* Reset RXDMA channel */ 3309 rs = 
hpi_rxdma_cfg_rdc_reset(handle, channel); 3310 if (rs != HPI_SUCCESS) { 3311 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3312 "==> hxge_rxdma_start_channel: " 3313 "reset rxdma failed (0x%08x channel %d)", 3314 status, channel)); 3315 return (HXGE_ERROR | rs); 3316 } 3317 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3318 "==> hxge_rxdma_start_channel: reset done: channel %d", channel)); 3319 3320 /* 3321 * Initialize the RXDMA channel specific FZC control configurations. 3322 * These FZC registers are pertaining to each RX channel (logical 3323 * pages). 3324 */ 3325 status = hxge_init_fzc_rxdma_channel(hxgep, 3326 channel, rbr_p, rcr_p, mbox_p); 3327 if (status != HXGE_OK) { 3328 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3329 "==> hxge_rxdma_start_channel: " 3330 "init fzc rxdma failed (0x%08x channel %d)", 3331 status, channel)); 3332 return (status); 3333 } 3334 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3335 "==> hxge_rxdma_start_channel: fzc done")); 3336 3337 /* 3338 * Zero out the shadow and prefetch ram. 3339 */ 3340 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3341 "==> hxge_rxdma_start_channel: ram done")); 3342 3343 /* Set up the interrupt event masks. */ 3344 ent_mask.value = 0; 3345 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3346 if (rs != HPI_SUCCESS) { 3347 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3348 "==> hxge_rxdma_start_channel: " 3349 "init rxdma event masks failed (0x%08x channel %d)", 3350 status, channel)); 3351 return (HXGE_ERROR | rs); 3352 } 3353 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3354 "event done: channel %d (mask 0x%016llx)", 3355 channel, ent_mask.value)); 3356 3357 /* 3358 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA 3359 * channels and enable each DMA channel. 3360 */ 3361 status = hxge_enable_rxdma_channel(hxgep, 3362 channel, rbr_p, rcr_p, mbox_p, n_init_kick); 3363 if (status != HXGE_OK) { 3364 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3365 " hxge_rxdma_start_channel: " 3366 " init enable rxdma failed (0x%08x channel %d)", 3367 status, channel)); 3368 return (status); 3369 } 3370 3371 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3372 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3373 3374 /* 3375 * Initialize the receive DMA control and status register 3376 * Note that rdc_stat HAS to be set after RBR and RCR rings are set 3377 */ 3378 cs.value = 0; 3379 cs.bits.mex = 1; 3380 cs.bits.rcr_thres = 1; 3381 cs.bits.rcr_to = 1; 3382 cs.bits.rbr_empty = 1; 3383 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3384 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3385 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 3386 if (status != HXGE_OK) { 3387 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3388 "==> hxge_rxdma_start_channel: " 3389 "init rxdma control register failed (0x%08x channel %d", 3390 status, channel)); 3391 return (status); 3392 } 3393 3394 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3395 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3396 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3397 "==> hxge_rxdma_start_channel: enable done")); 3398 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel")); 3399 return (HXGE_OK); 3400 } 3401 3402 static hxge_status_t 3403 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel) 3404 { 3405 hpi_handle_t handle; 3406 hpi_status_t rs = HPI_SUCCESS; 3407 rdc_stat_t cs; 3408 rdc_int_mask_t ent_mask; 3409 hxge_status_t status = HXGE_OK; 3410 3411 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> 
hxge_rxdma_stop_channel")); 3412 3413 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3414 3415 HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: " 3416 "hpi handle addr $%p acc $%p", 3417 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3418 3419 /* Reset RXDMA channel */ 3420 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3421 if (rs != HPI_SUCCESS) { 3422 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3423 " hxge_rxdma_stop_channel: " 3424 " reset rxdma failed (0x%08x channel %d)", 3425 rs, channel)); 3426 return (HXGE_ERROR | rs); 3427 } 3428 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3429 "==> hxge_rxdma_stop_channel: reset done")); 3430 3431 /* Set up the interrupt event masks. */ 3432 ent_mask.value = RDC_INT_MASK_ALL; 3433 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3434 if (rs != HPI_SUCCESS) { 3435 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3436 "==> hxge_rxdma_stop_channel: " 3437 "set rxdma event masks failed (0x%08x channel %d)", 3438 rs, channel)); 3439 return (HXGE_ERROR | rs); 3440 } 3441 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3442 "==> hxge_rxdma_stop_channel: event done")); 3443 3444 /* Initialize the receive DMA control and status register */ 3445 cs.value = 0; 3446 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3447 3448 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control " 3449 " to default (all 0s) 0x%08x", cs.value)); 3450 3451 if (status != HXGE_OK) { 3452 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3453 " hxge_rxdma_stop_channel: init rxdma" 3454 " control register failed (0x%08x channel %d", 3455 status, channel)); 3456 return (status); 3457 } 3458 3459 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3460 "==> hxge_rxdma_stop_channel: control done")); 3461 3462 /* disable dma channel */ 3463 status = hxge_disable_rxdma_channel(hxgep, channel); 3464 3465 if (status != HXGE_OK) { 3466 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3467 " hxge_rxdma_stop_channel: " 3468 " init enable rxdma failed (0x%08x channel %d)", 3469 status, channel)); 3470 return (status); 3471 } 3472 3473 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3474 "==> hxge_rxdma_stop_channel: disable done")); 3475 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel")); 3476 3477 return (HXGE_OK); 3478 } 3479 3480 hxge_status_t 3481 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep) 3482 { 3483 hpi_handle_t handle; 3484 p_hxge_rdc_sys_stats_t statsp; 3485 rdc_fifo_err_stat_t stat; 3486 hxge_status_t status = HXGE_OK; 3487 3488 handle = hxgep->hpi_handle; 3489 statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats; 3490 3491 /* Get the error status and clear the register */ 3492 HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value); 3493 HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value); 3494 3495 if (stat.bits.rx_ctrl_fifo_sec) { 3496 statsp->ctrl_fifo_sec++; 3497 if (statsp->ctrl_fifo_sec == 1) 3498 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3499 "==> hxge_rxdma_handle_sys_errors: " 3500 "rx_ctrl_fifo_sec")); 3501 } 3502 3503 if (stat.bits.rx_ctrl_fifo_ded) { 3504 /* Global fatal error encountered */ 3505 statsp->ctrl_fifo_ded++; 3506 HXGE_FM_REPORT_ERROR(hxgep, 0, 3507 HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED); 3508 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3509 "==> hxge_rxdma_handle_sys_errors: " 3510 "fatal error: rx_ctrl_fifo_ded error")); 3511 } 3512 3513 if (stat.bits.rx_data_fifo_sec) { 3514 statsp->data_fifo_sec++; 3515 if (statsp->data_fifo_sec == 1) 3516 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3517 "==> hxge_rxdma_handle_sys_errors: " 3518 "rx_data_fifo_sec")); 3519 } 3520 3521 if (stat.bits.rx_data_fifo_ded) { 3522 /* Global 
fatal error encountered */ 3523 statsp->data_fifo_ded++; 3524 HXGE_FM_REPORT_ERROR(hxgep, 0, 3525 HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED); 3526 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3527 "==> hxge_rxdma_handle_sys_errors: " 3528 "fatal error: rx_data_fifo_ded error")); 3529 } 3530 3531 if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) { 3532 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3533 " hxge_rxdma_handle_sys_errors: fatal error\n")); 3534 status = hxge_rx_port_fatal_err_recover(hxgep); 3535 if (status == HXGE_OK) { 3536 FM_SERVICE_RESTORED(hxgep); 3537 } 3538 } 3539 3540 return (HXGE_OK); 3541 } 3542 3543 static hxge_status_t 3544 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel) 3545 { 3546 hpi_handle_t handle; 3547 hpi_status_t rs = HPI_SUCCESS; 3548 p_rx_rbr_ring_t rbrp; 3549 p_rx_rcr_ring_t rcrp; 3550 p_rx_mbox_t mboxp; 3551 rdc_int_mask_t ent_mask; 3552 p_hxge_dma_common_t dmap; 3553 p_rx_msg_t rx_msg_p; 3554 int i; 3555 uint32_t hxge_port_rcr_size; 3556 uint64_t tmp; 3557 int n_init_kick = 0; 3558 3559 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover")); 3560 3561 /* 3562 * Stop the dma channel waits for the stop done. If the stop done bit 3563 * is not set, then create an error. 3564 */ 3565 3566 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3567 3568 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop...")); 3569 3570 rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[channel]; 3571 rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[channel]; 3572 3573 MUTEX_ENTER(&rcrp->lock); 3574 MUTEX_ENTER(&rbrp->lock); 3575 3576 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel...")); 3577 3578 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 3579 if (rs != HPI_SUCCESS) { 3580 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3581 "hxge_disable_rxdma_channel:failed")); 3582 goto fail; 3583 } 3584 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt...")); 3585 3586 /* Disable interrupt */ 3587 ent_mask.value = RDC_INT_MASK_ALL; 3588 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3589 if (rs != HPI_SUCCESS) { 3590 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3591 "Set rxdma event masks failed (channel %d)", channel)); 3592 } 3593 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset...")); 3594 3595 /* Reset RXDMA channel */ 3596 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3597 if (rs != HPI_SUCCESS) { 3598 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3599 "Reset rxdma failed (channel %d)", channel)); 3600 goto fail; 3601 } 3602 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 3603 mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3604 3605 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3606 rbrp->rbr_rd_index = 0; 3607 3608 rcrp->comp_rd_index = 0; 3609 rcrp->comp_wt_index = 0; 3610 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3611 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3612 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3613 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3614 3615 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3616 (hxge_port_rcr_size - 1); 3617 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3618 (hxge_port_rcr_size - 1); 3619 3620 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 3621 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 3622 3623 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 3624 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3625 3626 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n", 3627 rbrp->rbr_max_size)); 3628 3629 /* Count the number of buffers owned by the 
hardware at this moment */ 3630 for (i = 0; i < rbrp->rbr_max_size; i++) { 3631 rx_msg_p = rbrp->rx_msg_ring[i]; 3632 if (rx_msg_p->ref_cnt == 1) { 3633 n_init_kick++; 3634 } 3635 } 3636 3637 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start...")); 3638 3639 /* 3640 * This is error recover! Some buffers are owned by the hardware and 3641 * the rest are owned by the apps. We should only kick in those 3642 * owned by the hardware initially. The apps will post theirs 3643 * eventually. 3644 */ 3645 (void) hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp, 3646 n_init_kick); 3647 3648 /* 3649 * The DMA channel may disable itself automatically. 3650 * The following is a work-around. 3651 */ 3652 HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp); 3653 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 3654 if (rs != HPI_SUCCESS) { 3655 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3656 "hpi_rxdma_cfg_rdc_enable (channel %d)", channel)); 3657 } 3658 3659 /* 3660 * Delay a bit of time by doing reads. 3661 */ 3662 for (i = 0; i < 1024; i++) { 3663 uint64_t value; 3664 RXDMA_REG_READ64(HXGE_DEV_HPI_HANDLE(hxgep), 3665 RDC_INT_MASK, i & 3, &value); 3666 } 3667 3668 MUTEX_EXIT(&rbrp->lock); 3669 MUTEX_EXIT(&rcrp->lock); 3670 3671 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover")); 3672 return (HXGE_OK); 3673 3674 fail: 3675 MUTEX_EXIT(&rbrp->lock); 3676 MUTEX_EXIT(&rcrp->lock); 3677 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3678 "Error Recovery failed for channel(%d)", channel)); 3679 return (HXGE_ERROR | rs); 3680 } 3681 3682 static hxge_status_t 3683 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep) 3684 { 3685 hxge_status_t status = HXGE_OK; 3686 p_hxge_dma_common_t *dma_buf_p; 3687 uint16_t channel; 3688 int ndmas; 3689 int i; 3690 block_reset_t reset_reg; 3691 p_rx_rcr_ring_t rcrp; 3692 p_rx_rbr_ring_t rbrp; 3693 3694 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover")); 3695 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ...")); 3696 3697 /* Disable RxMAC */ 3698 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n")); 3699 MUTEX_ENTER(&hxgep->vmac_lock); 3700 if (hxge_rx_vmac_disable(hxgep) != HXGE_OK) 3701 goto fail; 3702 3703 HXGE_DELAY(1000); 3704 3705 /* 3706 * Reset RDC block from PEU for this fatal error 3707 */ 3708 reset_reg.value = 0; 3709 reset_reg.bits.rdc_rst = 1; 3710 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 3711 3712 HXGE_DELAY(1000); 3713 3714 /* Restore any common settings after PEU reset */ 3715 if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK) 3716 goto fail; 3717 3718 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels...")); 3719 3720 ndmas = hxgep->rx_buf_pool_p->ndmas; 3721 dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p; 3722 3723 for (i = 0; i < ndmas; i++) { 3724 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 3725 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel]; 3726 rbrp = rcrp->rx_rbr_p; 3727 3728 MUTEX_ENTER(&rbrp->post_lock); 3729 3730 /* 3731 * This function needs to be inside the post_lock 3732 */ 3733 if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) { 3734 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3735 "Could not recover channel %d", channel)); 3736 } 3737 MUTEX_EXIT(&rbrp->post_lock); 3738 } 3739 3740 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC...")); 3741 3742 /* Reset RxMAC */ 3743 if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) { 3744 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3745 "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC")); 3746 goto fail; 3747 } 3748 3749 
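	/*
	 * With the RDC block and each channel recovered, bring the VMAC
	 * back up and restore the FIFO error interrupt mask that the
	 * PEU reset cleared.
	 */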
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_rx_port_fatal_err_recover: "
		    "Failed to initialize RxMAC"));
		goto fail;
	}
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC..."));

	/* Re-enable RxMAC */
	if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
		goto fail;
	}
	MUTEX_EXIT(&hxgep->vmac_lock);

	/* Reset the error mask since PEU reset cleared it */
	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover"));
	return (HXGE_OK);

fail:
	MUTEX_EXIT(&hxgep->vmac_lock);
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Error Recovery failed for hxge(%d)", hxgep->instance));
	return (status);
}

static void
hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p)
{
	hpi_status_t		hpi_status;
	hxge_status_t		status;
	rdc_stat_t		cs;
	p_hxge_rx_ring_stats_t	rdc_stats;

	rdc_stats = &hxgep->statsp->rdc_stats[rx_rbr_p->rdc];

	/*
	 * Complete the processing for the RBR Empty by:
	 *	0) kicking back HXGE_RBR_EMPTY_THRESHOLD
	 *	   buffer blocks.
	 *	1) Disable the RX vmac.
	 *	2) Re-enable the affected DMA channel.
	 *	3) Re-enable the RX vmac.
	 */

	/*
	 * Disable the RX VMAC by setting the framelength
	 * to 0, since there is a hardware bug when disabling
	 * the vmac.
	 */
	MUTEX_ENTER(&hxgep->vmac_lock);
	(void) hxge_rx_vmac_disable(hxgep);

	/*
	 * Re-arm the mex bit for interrupts to be enabled.
	 */
	cs.value = 0;
	cs.bits.mex = 1;
	RXDMA_REG_WRITE64(HXGE_DEV_HPI_HANDLE(hxgep), RDC_STAT,
	    rx_rbr_p->rdc, cs.value);

	hpi_status = hpi_rxdma_cfg_rdc_enable(
	    HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc);
	if (hpi_status != HPI_SUCCESS) {
		rdc_stats->rbr_empty_fail++;

		/* Assume we are already inside the post_lock */
		status = hxge_rxdma_fatal_err_recover(hxgep, rx_rbr_p->rdc);
		if (status != HXGE_OK) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "hxge(%d): channel(%d) is empty.",
			    hxgep->instance, rx_rbr_p->rdc));
		}
	}

	/*
	 * Re-enable the RX VMAC.
	 */
	(void) hxge_rx_vmac_enable(hxgep);
	MUTEX_EXIT(&hxgep->vmac_lock);

	rdc_stats->rbr_empty_restore++;
	rx_rbr_p->rbr_is_empty = B_FALSE;
}