/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>
#include <hxge_rxdma.h>

/*
 * Number of blocks to accumulate before re-enabling DMA
 * when we get RBR empty.
 */
#define	HXGE_RBR_EMPTY_THRESHOLD	64

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t hxge_rbr_size;
extern uint32_t hxge_rcr_size;
extern uint32_t hxge_rbr_spare_size;
extern uint32_t hxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t hxge_max_rx_pkts;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * hxge_rx_threshold_hi: copy all buffers.
 * hxge_rx_buf_size_type: receive buffer block size type.
 * hxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;

/*
 * Static local functions.
63 */ 64 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep); 65 static void hxge_unmap_rxdma(p_hxge_t hxgep); 66 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep); 67 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep); 68 static void hxge_rxdma_hw_stop(p_hxge_t hxgep); 69 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 70 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 71 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 72 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 73 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p); 74 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 75 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p); 76 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, 77 uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p, 78 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 79 p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p); 80 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 81 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p); 82 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, 83 uint16_t channel, p_hxge_dma_common_t *dma_buf_p, 84 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks); 85 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 86 p_rx_rbr_ring_t rbr_p); 87 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 88 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 89 int n_init_kick); 90 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel); 91 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 92 p_rx_rcr_ring_t *rcr_p, rdc_stat_t cs, 93 uint16_t *nptrs, uint16_t *npkts); 94 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p, 95 p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p, 96 mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry); 97 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep, 98 uint16_t channel); 99 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t); 100 static void hxge_freeb(p_rx_msg_t); 101 static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, 102 p_hxge_ldv_t ldvp, rdc_stat_t cs, 103 uint16_t *nptrs, uint16_t *npkts); 104 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, 105 p_hxge_ldv_t ldvp, rdc_stat_t cs); 106 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep, 107 p_rx_rbr_ring_t rx_dmap); 108 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, 109 uint16_t channel); 110 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep); 111 static void hxge_rbr_empty_restore(p_hxge_t hxgep, 112 p_rx_rbr_ring_t rx_rbr_p); 113 114 hxge_status_t 115 hxge_init_rxdma_channels(p_hxge_t hxgep) 116 { 117 hxge_status_t status = HXGE_OK; 118 block_reset_t reset_reg; 119 120 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels")); 121 122 /* Reset RDC block from PEU to clear any previous state */ 123 reset_reg.value = 0; 124 reset_reg.bits.rdc_rst = 1; 125 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 126 HXGE_DELAY(1000); 127 128 status = hxge_map_rxdma(hxgep); 129 if (status != HXGE_OK) { 130 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 131 "<== hxge_init_rxdma: status 0x%x", status)); 132 return (status); 133 } 134 135 status = hxge_rxdma_hw_start_common(hxgep); 136 if (status != HXGE_OK) { 
137 hxge_unmap_rxdma(hxgep); 138 } 139 140 status = hxge_rxdma_hw_start(hxgep); 141 if (status != HXGE_OK) { 142 hxge_unmap_rxdma(hxgep); 143 } 144 145 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 146 "<== hxge_init_rxdma_channels: status 0x%x", status)); 147 return (status); 148 } 149 150 void 151 hxge_uninit_rxdma_channels(p_hxge_t hxgep) 152 { 153 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels")); 154 155 hxge_rxdma_hw_stop(hxgep); 156 hxge_unmap_rxdma(hxgep); 157 158 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uinit_rxdma_channels")); 159 } 160 161 hxge_status_t 162 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel, 163 rdc_stat_t *cs_p) 164 { 165 hpi_handle_t handle; 166 hpi_status_t rs = HPI_SUCCESS; 167 hxge_status_t status = HXGE_OK; 168 169 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 170 "<== hxge_init_rxdma_channel_cntl_stat")); 171 172 handle = HXGE_DEV_HPI_HANDLE(hxgep); 173 rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p); 174 175 if (rs != HPI_SUCCESS) { 176 status = HXGE_ERROR | rs; 177 } 178 return (status); 179 } 180 181 182 hxge_status_t 183 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 184 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 185 int n_init_kick) 186 { 187 hpi_handle_t handle; 188 rdc_desc_cfg_t rdc_desc; 189 rdc_rcr_cfg_b_t *cfgb_p; 190 hpi_status_t rs = HPI_SUCCESS; 191 192 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel")); 193 handle = HXGE_DEV_HPI_HANDLE(hxgep); 194 195 /* 196 * Use configuration data composed at init time. Write to hardware the 197 * receive ring configurations. 198 */ 199 rdc_desc.mbox_enable = 1; 200 rdc_desc.mbox_addr = mbox_p->mbox_addr; 201 HXGE_DEBUG_MSG((hxgep, RX_CTL, 202 "==> hxge_enable_rxdma_channel: mboxp $%p($%p)", 203 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 204 205 rdc_desc.rbr_len = rbr_p->rbb_max; 206 rdc_desc.rbr_addr = rbr_p->rbr_addr; 207 208 switch (hxgep->rx_bksize_code) { 209 case RBR_BKSIZE_4K: 210 rdc_desc.page_size = SIZE_4KB; 211 break; 212 case RBR_BKSIZE_8K: 213 rdc_desc.page_size = SIZE_8KB; 214 break; 215 } 216 217 rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0; 218 rdc_desc.valid0 = 1; 219 220 rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1; 221 rdc_desc.valid1 = 1; 222 223 rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2; 224 rdc_desc.valid2 = 1; 225 226 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 227 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 228 229 rdc_desc.rcr_len = rcr_p->comp_size; 230 rdc_desc.rcr_addr = rcr_p->rcr_addr; 231 232 cfgb_p = &(rcr_p->rcr_cfgb); 233 rdc_desc.rcr_threshold = cfgb_p->bits.pthres; 234 rdc_desc.rcr_timeout = cfgb_p->bits.timeout; 235 rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout; 236 237 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: " 238 "rbr_len qlen %d pagesize code %d rcr_len %d", 239 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 240 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: " 241 "size 0 %d size 1 %d size 2 %d", 242 rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1, 243 rbr_p->hpi_pkt_buf_size2)); 244 245 rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 246 if (rs != HPI_SUCCESS) { 247 return (HXGE_ERROR | rs); 248 } 249 250 /* 251 * Enable the timeout and threshold. 
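	 *
	 * Added commentary (not part of the original source): rcr_threshold
	 * and rcr_timeout were copied above from rcr_p->rcr_cfgb (the pthres,
	 * timeout and entout fields composed at init time).  They control
	 * interrupt coalescing for the completion ring: pthres is the number
	 * of queued completions that forces an interrupt, and timeout bounds
	 * how long completions may sit before one is raised.  For example, a
	 * hypothetical pthres of 8 batches up to eight packets per interrupt
	 * unless the timer expires first.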
	 */
	rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
	    rdc_desc.rcr_threshold);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
	    rdc_desc.rcr_timeout);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/* Enable the DMA */
	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/* Kick the DMA engine */
	hpi_rxdma_rdc_rbr_kick(handle, channel, n_init_kick);

	/* Clear the rbr empty bit */
	(void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));

	return (HXGE_OK);
}

static hxge_status_t
hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
{
	hpi_handle_t handle;
	hpi_status_t rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/* disable the DMA */
	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != HPI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
		return (HXGE_ERROR | rs);
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
	return (HXGE_OK);
}

hxge_status_t
hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
{
	hpi_handle_t handle;
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_rxdma_channel_rcrflush"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	hpi_rxdma_rdc_rcr_flush(handle, channel);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_rxdma_channel_rcrflush"));
	return (status);
}

#define	MID_INDEX(l, r) ((r + l + 1) >> 1)

#define	TO_LEFT -1
#define	TO_RIGHT 1
#define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
#define	BOTH_LEFT (TO_LEFT + TO_LEFT)
#define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
#define	NO_HINT 0xffffffff

/*ARGSUSED*/
hxge_status_t
hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
    uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
    uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
	int bufsize;
	uint64_t pktbuf_pp;
	uint64_t dvma_addr;
	rxring_info_t *ring_info;
	int base_side, end_side;
	int r_index, l_index, anchor_index;
	int found, search_done;
	uint32_t offset, chunk_size, block_size, page_size_mask;
	uint32_t chunk_index, block_index, total_index;
	int max_iterations, iteration;
	rxbuf_index_info_t *bufinfo;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
	    pkt_buf_addr_pp, pktbufsz_type));

#if defined(__i386)
	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

	switch (pktbufsz_type) {
	case 0:
		bufsize = rbr_p->pkt_buf_size0;
		break;
	case 1:
		bufsize = rbr_p->pkt_buf_size1;
		break;
	case 2:
		bufsize = rbr_p->pkt_buf_size2;
		break;
	case RCR_SINGLE_BLOCK:
		bufsize = 0;
		anchor_index = 0;
		break;
	default:
		return (HXGE_ERROR);
	}

	if (rbr_p->num_blocks == 1) {
		anchor_index = 0;
		ring_info = rbr_p->ring_info;
		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;

		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
		    "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));

		goto found_index;
	}

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
	    pkt_buf_addr_pp, pktbufsz_type, anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;

	/*
	 * First check if this block has been seen recently. This is indicated
	 * by a hint which is initialized when the first buffer of the block is
	 * seen. The hint is reset when the last buffer of the block has been
	 * processed. As three block sizes are supported, three hints are kept.
	 * The idea behind the hints is that once the hardware uses a block
	 * for a buffer of that size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed that there
	 * would be a single block being used for the same buffer size at any
	 * given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block. If
			 * so, then reset the hint for the size.
			 */
			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));

		/*
		 * This is the first buffer of the block of this size. Need to
		 * search the whole information array. The search uses a
		 * binary search algorithm. It assumes that the
		 * information is already sorted in increasing order: info[0]
		 * < info[1] < info[2] ....
< info[n-1] where n is the size of 441 * the information array 442 */ 443 r_index = rbr_p->num_blocks - 1; 444 l_index = 0; 445 search_done = B_FALSE; 446 anchor_index = MID_INDEX(r_index, l_index); 447 while (search_done == B_FALSE) { 448 if ((r_index == l_index) || 449 (iteration >= max_iterations)) 450 search_done = B_TRUE; 451 452 end_side = TO_RIGHT; /* to the right */ 453 base_side = TO_LEFT; /* to the left */ 454 /* read the DVMA address information and sort it */ 455 dvma_addr = bufinfo[anchor_index].dvma_addr; 456 chunk_size = bufinfo[anchor_index].buf_size; 457 458 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 459 "==> hxge_rxbuf_pp_to_vp: (searching)" 460 "buf_pp $%p btype %d " 461 "anchor_index %d chunk_size %d dvmaaddr $%p", 462 pkt_buf_addr_pp, pktbufsz_type, anchor_index, 463 chunk_size, dvma_addr)); 464 465 if (pktbuf_pp >= dvma_addr) 466 base_side = TO_RIGHT; /* to the right */ 467 if (pktbuf_pp < (dvma_addr + chunk_size)) 468 end_side = TO_LEFT; /* to the left */ 469 470 switch (base_side + end_side) { 471 case IN_MIDDLE: 472 /* found */ 473 found = B_TRUE; 474 search_done = B_TRUE; 475 if ((pktbuf_pp + bufsize) < 476 (dvma_addr + chunk_size)) 477 ring_info->hint[pktbufsz_type] = 478 bufinfo[anchor_index].buf_index; 479 break; 480 case BOTH_RIGHT: 481 /* not found: go to the right */ 482 l_index = anchor_index + 1; 483 anchor_index = MID_INDEX(r_index, l_index); 484 break; 485 486 case BOTH_LEFT: 487 /* not found: go to the left */ 488 r_index = anchor_index - 1; 489 anchor_index = MID_INDEX(r_index, l_index); 490 break; 491 default: /* should not come here */ 492 return (HXGE_ERROR); 493 } 494 iteration++; 495 } 496 497 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 498 "==> hxge_rxbuf_pp_to_vp: (search done)" 499 "buf_pp $%p btype %d anchor_index %d", 500 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 501 } 502 503 if (found == B_FALSE) { 504 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 505 "==> hxge_rxbuf_pp_to_vp: (search failed)" 506 "buf_pp $%p btype %d anchor_index %d", 507 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 508 return (HXGE_ERROR); 509 } 510 511 found_index: 512 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 513 "==> hxge_rxbuf_pp_to_vp: (FOUND1)" 514 "buf_pp $%p btype %d bufsize %d anchor_index %d", 515 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index)); 516 517 /* index of the first block in this chunk */ 518 chunk_index = bufinfo[anchor_index].start_index; 519 dvma_addr = bufinfo[anchor_index].dvma_addr; 520 page_size_mask = ring_info->block_size_mask; 521 522 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 523 "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 524 "buf_pp $%p btype %d bufsize %d " 525 "anchor_index %d chunk_index %d dvma $%p", 526 pkt_buf_addr_pp, pktbufsz_type, bufsize, 527 anchor_index, chunk_index, dvma_addr)); 528 529 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 530 block_size = rbr_p->block_size; /* System block(page) size */ 531 532 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 533 "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 534 "buf_pp $%p btype %d bufsize %d " 535 "anchor_index %d chunk_index %d dvma $%p " 536 "offset %d block_size %d", 537 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index, 538 chunk_index, dvma_addr, offset, block_size)); 539 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index")); 540 541 block_index = (offset / block_size); /* index within chunk */ 542 total_index = chunk_index + block_index; 543 544 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 545 "==> hxge_rxbuf_pp_to_vp: " 546 "total_index %d dvma_addr $%p " 547 "offset %d block_size %d " 548 "block_index %d ", 549 
	    total_index, dvma_addr, offset, block_size, block_index));

#if defined(__i386)
	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
	    (uint32_t)offset);
#else
	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
	    offset);
#endif

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d "
	    "*pkt_buf_addr_p $%p",
	    total_index, dvma_addr, offset, block_size,
	    block_index, *pkt_buf_addr_p));

	*msg_index = total_index;
	*bufoffset = (offset & page_size_mask);

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: get msg index: "
	    "msg_index %d bufoffset_index %d",
	    *msg_index, *bufoffset));
	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));

	return (HXGE_OK);
}

/*
 * Used by the quick sort (qsort) function
 * to perform the comparison.
 */
static int
hxge_sort_compare(const void *p1, const void *p2)
{
	rxbuf_index_info_t *a, *b;

	a = (rxbuf_index_info_t *)p1;
	b = (rxbuf_index_info_t *)p2;

	if (a->dvma_addr > b->dvma_addr)
		return (1);
	if (a->dvma_addr < b->dvma_addr)
		return (-1);
	return (0);
}

/*
 * Grabbed this sort implementation from common/syscall/avl.c
 *
 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
 * v = Ptr to array/vector of objs
 * n = # objs in the array
 * s = size of each obj (must be multiples of a word size)
 * f = ptr to function to compare two objs
 * returns (-1 = less than, 0 = equal, 1 = greater than)
 */
void
hxge_ksort(caddr_t v, int n, int s, int (*f) ())
{
	int g, i, j, ii;
	unsigned int *p1, *p2;
	unsigned int tmp;

	/* No work to do */
	if (v == NULL || n <= 1)
		return;
	/* Sanity check on arguments */
	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
	ASSERT(s > 0);

	for (g = n / 2; g > 0; g /= 2) {
		for (i = g; i < n; i++) {
			for (j = i - g; j >= 0 &&
			    (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
				p1 = (unsigned *)(v + j * s);
				p2 = (unsigned *)(v + (j + g) * s);
				for (ii = 0; ii < s / 4; ii++) {
					tmp = *p1;
					*p1++ = *p2;
					*p2++ = tmp;
				}
			}
		}
	}
}

/*
 * Initialize data structures required for rxdma
 * buffer dvma->vmem address lookup
 */
/*ARGSUSED*/
static hxge_status_t
hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
{
	int index;
	rxring_info_t *ring_info;
	int max_iteration = 0, max_index = 0;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));

	ring_info = rbrp->ring_info;
	ring_info->hint[0] = NO_HINT;
	ring_info->hint[1] = NO_HINT;
	ring_info->hint[2] = NO_HINT;
	max_index = rbrp->num_blocks;

	/* read the DVMA address information and sort it */
	/* do init of the information array */

	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
	    " hxge_rxbuf_index_info_init Sort ptrs"));

	/* sort the array */
	hxge_ksort((void *) ring_info->buffer, max_index,
	    sizeof (rxbuf_index_info_t), hxge_sort_compare);

	for (index = 0; index < max_index; index++) {
		HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
		    " hxge_rxbuf_index_info_init: sorted chunk %d "
		    " ioaddr $%p kaddr $%p size %x",
		    index, ring_info->buffer[index].dvma_addr,
		    ring_info->buffer[index].kaddr,
		    ring_info->buffer[index].buf_size));
678 } 679 680 max_iteration = 0; 681 while (max_index >= (1ULL << max_iteration)) 682 max_iteration++; 683 ring_info->max_iterations = max_iteration + 1; 684 685 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 686 " hxge_rxbuf_index_info_init Find max iter %d", 687 ring_info->max_iterations)); 688 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init")); 689 690 return (HXGE_OK); 691 } 692 693 /*ARGSUSED*/ 694 void 695 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p) 696 { 697 #ifdef HXGE_DEBUG 698 699 uint32_t bptr; 700 uint64_t pp; 701 702 bptr = entry_p->bits.pkt_buf_addr; 703 704 HXGE_DEBUG_MSG((hxgep, RX_CTL, 705 "\trcr entry $%p " 706 "\trcr entry 0x%0llx " 707 "\trcr entry 0x%08x " 708 "\trcr entry 0x%08x " 709 "\tvalue 0x%0llx\n" 710 "\tmulti = %d\n" 711 "\tpkt_type = 0x%x\n" 712 "\terror = 0x%04x\n" 713 "\tl2_len = %d\n" 714 "\tpktbufsize = %d\n" 715 "\tpkt_buf_addr = $%p\n" 716 "\tpkt_buf_addr (<< 6) = $%p\n", 717 entry_p, 718 *(int64_t *)entry_p, 719 *(int32_t *)entry_p, 720 *(int32_t *)((char *)entry_p + 32), 721 entry_p->value, 722 entry_p->bits.multi, 723 entry_p->bits.pkt_type, 724 entry_p->bits.error, 725 entry_p->bits.l2_len, 726 entry_p->bits.pktbufsz, 727 bptr, 728 entry_p->bits.pkt_buf_addr_l)); 729 730 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 731 RCR_PKT_BUF_ADDR_SHIFT; 732 733 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 734 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 735 #endif 736 } 737 738 /*ARGSUSED*/ 739 void 740 hxge_rxdma_stop(p_hxge_t hxgep) 741 { 742 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop")); 743 744 (void) hxge_rx_vmac_disable(hxgep); 745 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP); 746 747 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop")); 748 } 749 750 void 751 hxge_rxdma_stop_reinit(p_hxge_t hxgep) 752 { 753 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit")); 754 755 (void) hxge_rxdma_stop(hxgep); 756 (void) hxge_uninit_rxdma_channels(hxgep); 757 (void) hxge_init_rxdma_channels(hxgep); 758 759 (void) hxge_rx_vmac_enable(hxgep); 760 761 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit")); 762 } 763 764 hxge_status_t 765 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable) 766 { 767 int i, ndmas; 768 uint16_t channel; 769 p_rx_rbr_rings_t rx_rbr_rings; 770 p_rx_rbr_ring_t *rbr_rings; 771 hpi_handle_t handle; 772 hpi_status_t rs = HPI_SUCCESS; 773 hxge_status_t status = HXGE_OK; 774 775 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 776 "==> hxge_rxdma_hw_mode: mode %d", enable)); 777 778 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 779 HXGE_DEBUG_MSG((hxgep, RX_CTL, 780 "<== hxge_rxdma_mode: not initialized")); 781 return (HXGE_ERROR); 782 } 783 784 rx_rbr_rings = hxgep->rx_rbr_rings; 785 if (rx_rbr_rings == NULL) { 786 HXGE_DEBUG_MSG((hxgep, RX_CTL, 787 "<== hxge_rxdma_mode: NULL ring pointer")); 788 return (HXGE_ERROR); 789 } 790 791 if (rx_rbr_rings->rbr_rings == NULL) { 792 HXGE_DEBUG_MSG((hxgep, RX_CTL, 793 "<== hxge_rxdma_mode: NULL rbr rings pointer")); 794 return (HXGE_ERROR); 795 } 796 797 ndmas = rx_rbr_rings->ndmas; 798 if (!ndmas) { 799 HXGE_DEBUG_MSG((hxgep, RX_CTL, 800 "<== hxge_rxdma_mode: no channel")); 801 return (HXGE_ERROR); 802 } 803 804 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 805 "==> hxge_rxdma_mode (ndmas %d)", ndmas)); 806 807 rbr_rings = rx_rbr_rings->rbr_rings; 808 809 handle = HXGE_DEV_HPI_HANDLE(hxgep); 810 811 for (i = 0; i < ndmas; i++) { 812 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 813 continue; 814 } 815 channel = rbr_rings[i]->rdc; 816 if (enable) { 817 
HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 818 "==> hxge_rxdma_hw_mode: channel %d (enable)", 819 channel)); 820 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 821 } else { 822 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 823 "==> hxge_rxdma_hw_mode: channel %d (disable)", 824 channel)); 825 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 826 } 827 } 828 829 status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs); 830 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 831 "<== hxge_rxdma_hw_mode: status 0x%x", status)); 832 833 return (status); 834 } 835 836 int 837 hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel) 838 { 839 int i, ndmas; 840 uint16_t rdc; 841 p_rx_rbr_rings_t rx_rbr_rings; 842 p_rx_rbr_ring_t *rbr_rings; 843 844 HXGE_DEBUG_MSG((hxgep, RX_CTL, 845 "==> hxge_rxdma_get_ring_index: channel %d", channel)); 846 847 rx_rbr_rings = hxgep->rx_rbr_rings; 848 if (rx_rbr_rings == NULL) { 849 HXGE_DEBUG_MSG((hxgep, RX_CTL, 850 "<== hxge_rxdma_get_ring_index: NULL ring pointer")); 851 return (-1); 852 } 853 854 ndmas = rx_rbr_rings->ndmas; 855 if (!ndmas) { 856 HXGE_DEBUG_MSG((hxgep, RX_CTL, 857 "<== hxge_rxdma_get_ring_index: no channel")); 858 return (-1); 859 } 860 861 HXGE_DEBUG_MSG((hxgep, RX_CTL, 862 "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 863 864 rbr_rings = rx_rbr_rings->rbr_rings; 865 for (i = 0; i < ndmas; i++) { 866 rdc = rbr_rings[i]->rdc; 867 if (channel == rdc) { 868 HXGE_DEBUG_MSG((hxgep, RX_CTL, 869 "==> hxge_rxdma_get_rbr_ring: " 870 "channel %d (index %d) " 871 "ring %d", channel, i, rbr_rings[i])); 872 873 return (i); 874 } 875 } 876 877 HXGE_DEBUG_MSG((hxgep, RX_CTL, 878 "<== hxge_rxdma_get_rbr_ring_index: not found")); 879 880 return (-1); 881 } 882 883 /* 884 * Static functions start here. 885 */ 886 static p_rx_msg_t 887 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p) 888 { 889 p_rx_msg_t hxge_mp = NULL; 890 p_hxge_dma_common_t dmamsg_p; 891 uchar_t *buffer; 892 893 hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 894 if (hxge_mp == NULL) { 895 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 896 "Allocation of a rx msg failed.")); 897 goto hxge_allocb_exit; 898 } 899 900 hxge_mp->use_buf_pool = B_FALSE; 901 if (dmabuf_p) { 902 hxge_mp->use_buf_pool = B_TRUE; 903 904 dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma; 905 *dmamsg_p = *dmabuf_p; 906 dmamsg_p->nblocks = 1; 907 dmamsg_p->block_size = size; 908 dmamsg_p->alength = size; 909 buffer = (uchar_t *)dmabuf_p->kaddrp; 910 911 dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size); 912 dmabuf_p->ioaddr_pp = (void *) 913 ((char *)dmabuf_p->ioaddr_pp + size); 914 915 dmabuf_p->alength -= size; 916 dmabuf_p->offset += size; 917 dmabuf_p->dma_cookie.dmac_laddress += size; 918 dmabuf_p->dma_cookie.dmac_size -= size; 919 } else { 920 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 921 if (buffer == NULL) { 922 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 923 "Allocation of a receive page failed.")); 924 goto hxge_allocb_fail1; 925 } 926 } 927 928 hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb); 929 if (hxge_mp->rx_mblk_p == NULL) { 930 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed.")); 931 goto hxge_allocb_fail2; 932 } 933 hxge_mp->buffer = buffer; 934 hxge_mp->block_size = size; 935 hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb; 936 hxge_mp->freeb.free_arg = (caddr_t)hxge_mp; 937 hxge_mp->ref_cnt = 1; 938 hxge_mp->free = B_TRUE; 939 hxge_mp->rx_use_bcopy = B_FALSE; 940 941 atomic_inc_32(&hxge_mblks_pending); 942 943 goto hxge_allocb_exit; 944 945 hxge_allocb_fail2: 946 if 
(!hxge_mp->use_buf_pool) {
		KMEM_FREE(buffer, size);
	}
hxge_allocb_fail1:
	KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
	hxge_mp = NULL;

hxge_allocb_exit:
	return (hxge_mp);
}

p_mblk_t
hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;

	HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
	HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
	    "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));

	mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
	if (mp == NULL) {
		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto hxge_dupb_exit;
	}

	atomic_inc_32(&hxge_mp->ref_cnt);

hxge_dupb_exit:
	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
	return (mp);
}

p_mblk_t
hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;
	uchar_t *dp;

	mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
	if (mp == NULL) {
		HXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
		goto hxge_dupb_bcopy_exit;
	}
	dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
	bcopy((void *) &hxge_mp->buffer[offset], dp, size);
	mp->b_wptr = dp + size;

hxge_dupb_bcopy_exit:

	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb_bcopy mp = $%p",
	    hxge_mp));

	return (mp);
}

void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
	p_rx_msg_t rx_msg_p);

void
hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));

	/* Reuse this buffer */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	if (rx_rbr_p->rbr_use_bcopy) {
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}

	/*
	 * Get the rbr header pointer and its offset index.
	 */
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;

	/*
	 * Accumulate some buffers in the ring before re-enabling the
	 * DMA channel, if rbr empty was signaled.
	 */
	hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1);
	if (rx_rbr_p->rbr_is_empty &&
	    rx_rbr_p->rbr_consumed < rx_rbr_p->rbb_max / 16) {
		hxge_rbr_empty_restore(hxgep, rx_rbr_p);
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "<== hxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
}

void
hxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t size;
	uchar_t *buffer = NULL;
	int ref_cnt;
	boolean_t free_state = B_FALSE;
	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;

	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
	HXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "hxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, hxge_mblks_pending));

	if (ring == NULL)
		return;

	/*
	 * This is to prevent posting activities while we are recovering
	 * from fatal errors. This should not be a performance drag since
	 * ref_cnt != 0 most times.
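	 *
	 * Added commentary (not part of the original source): after the
	 * atomic decrement below, ref_cnt == 0 means the last reference was
	 * just dropped and the block (and, once unmapped, the ring) is
	 * freed, while free_state with ref_cnt == 1 means a loaned-up buffer
	 * has come back and only the driver's reference remains, so the
	 * block is reposted to the RBR via hxge_post_page().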
1065 */ 1066 if (ring->rbr_state == RBR_POSTING) 1067 MUTEX_ENTER(&ring->post_lock); 1068 1069 /* 1070 * First we need to get the free state, then 1071 * atomic decrement the reference count to prevent 1072 * the race condition with the interrupt thread that 1073 * is processing a loaned up buffer block. 1074 */ 1075 free_state = rx_msg_p->free; 1076 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1077 if (!ref_cnt) { 1078 atomic_dec_32(&hxge_mblks_pending); 1079 1080 buffer = rx_msg_p->buffer; 1081 size = rx_msg_p->block_size; 1082 1083 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: " 1084 "will free: rx_msg_p = $%p (block pending %d)", 1085 rx_msg_p, hxge_mblks_pending)); 1086 1087 if (!rx_msg_p->use_buf_pool) { 1088 KMEM_FREE(buffer, size); 1089 } 1090 1091 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1092 /* 1093 * Decrement the receive buffer ring's reference 1094 * count, too. 1095 */ 1096 atomic_dec_32(&ring->rbr_ref_cnt); 1097 1098 /* 1099 * Free the receive buffer ring, iff 1100 * 1. all the receive buffers have been freed 1101 * 2. and we are in the proper state (that is, 1102 * we are not UNMAPPING). 1103 */ 1104 if (ring->rbr_ref_cnt == 0 && 1105 ring->rbr_state == RBR_UNMAPPED) { 1106 KMEM_FREE(ring, sizeof (*ring)); 1107 /* post_lock has been destroyed already */ 1108 return; 1109 } 1110 } 1111 1112 /* 1113 * Repost buffer. 1114 */ 1115 if (free_state && (ref_cnt == 1)) { 1116 HXGE_DEBUG_MSG((NULL, RX_CTL, 1117 "hxge_freeb: post page $%p:", rx_msg_p)); 1118 if (ring->rbr_state == RBR_POSTING) 1119 hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p); 1120 } 1121 1122 if (ring->rbr_state == RBR_POSTING) 1123 MUTEX_EXIT(&ring->post_lock); 1124 1125 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb")); 1126 } 1127 1128 uint_t 1129 hxge_rx_intr(caddr_t arg1, caddr_t arg2) 1130 { 1131 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 1132 p_hxge_t hxgep = (p_hxge_t)arg2; 1133 p_hxge_ldg_t ldgp; 1134 uint8_t channel; 1135 hpi_handle_t handle; 1136 rdc_stat_t cs; 1137 uint16_t nptrs = 0, npkts = 0; 1138 uint_t serviced = DDI_INTR_UNCLAIMED; 1139 1140 if (ldvp == NULL) { 1141 HXGE_DEBUG_MSG((NULL, RX_INT_CTL, 1142 "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1143 return (DDI_INTR_UNCLAIMED); 1144 } 1145 1146 if (arg2 == NULL || (void *) ldvp->hxgep != arg2) { 1147 hxgep = ldvp->hxgep; 1148 } 1149 1150 /* 1151 * If the interface is not started, just swallow the interrupt 1152 * for the logical device and don't rearm it. 1153 */ 1154 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) 1155 return (DDI_INTR_CLAIMED); 1156 1157 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1158 "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1159 1160 /* 1161 * This interrupt handler is for a specific receive dma channel. 1162 */ 1163 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1164 1165 /* 1166 * Get the control and status for this channel. 1167 */ 1168 channel = ldvp->channel; 1169 ldgp = ldvp->ldgp; 1170 RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value); 1171 cs.bits.ptrread = 0; 1172 cs.bits.pktread = 0; 1173 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1174 1175 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d " 1176 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1177 channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres)); 1178 1179 hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs, 1180 &nptrs, &npkts); 1181 serviced = DDI_INTR_CLAIMED; 1182 1183 /* error events. 
*/ 1184 if (cs.value & RDC_STAT_ERROR) { 1185 (void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs); 1186 } 1187 1188 hxge_intr_exit: 1189 /* 1190 * Enable the mailbox update interrupt if we want to use mailbox. We 1191 * probably don't need to use mailbox as it only saves us one pio read. 1192 * Also write 1 to rcrthres and rcrto to clear these two edge triggered 1193 * bits. 1194 */ 1195 cs.value &= RDC_STAT_WR1C; 1196 cs.bits.mex = 1; 1197 cs.bits.ptrread = nptrs; 1198 cs.bits.pktread = (npkts > 1) ? (npkts - 1) : 0; 1199 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1200 1201 hpi_rxdma_rdc_rcr_flush(handle, channel); 1202 1203 /* 1204 * Rearm this logical group if this is a single device group. 1205 */ 1206 if (ldgp->nldvs == 1) { 1207 ld_intr_mgmt_t mgm; 1208 1209 mgm.value = 0; 1210 mgm.bits.arm = 1; 1211 mgm.bits.timer = ldgp->ldg_timer; 1212 HXGE_REG_WR32(handle, 1213 LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value); 1214 } 1215 1216 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1217 "<== hxge_rx_intr: serviced %d", serviced)); 1218 1219 return (serviced); 1220 } 1221 1222 static void 1223 hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1224 rdc_stat_t cs, uint16_t *nptrs, uint16_t *npkts) 1225 { 1226 p_mblk_t mp; 1227 p_rx_rcr_ring_t rcrp; 1228 1229 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring")); 1230 if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs, 1231 nptrs, npkts)) == NULL) { 1232 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1233 "<== hxge_rx_pkts_vring: no mp")); 1234 return; 1235 } 1236 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp)); 1237 1238 #ifdef HXGE_DEBUG 1239 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1240 "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) " 1241 "LEN %d mp $%p mp->b_next $%p rcrp $%p", 1242 (mp->b_wptr - mp->b_rptr), mp, mp->b_next, rcrp)); 1243 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1244 "==> hxge_rx_pkts_vring: dump packets " 1245 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1246 mp, mp->b_rptr, mp->b_wptr, 1247 hxge_dump_packet((char *)mp->b_rptr, 64))); 1248 1249 if (mp->b_cont) { 1250 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1251 "==> hxge_rx_pkts_vring: dump b_cont packets " 1252 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1253 mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr, 1254 hxge_dump_packet((char *)mp->b_cont->b_rptr, 1255 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1256 } 1257 if (mp->b_next) { 1258 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1259 "==> hxge_rx_pkts_vring: dump next packets " 1260 "(b_rptr $%p): %s", 1261 mp->b_next->b_rptr, 1262 hxge_dump_packet((char *)mp->b_next->b_rptr, 64))); 1263 } 1264 #endif 1265 1266 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1267 "==> hxge_rx_pkts_vring: send packet to stack")); 1268 mac_rx(hxgep->mach, NULL, mp); 1269 1270 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring")); 1271 } 1272 1273 /*ARGSUSED*/ 1274 mblk_t * 1275 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1276 p_rx_rcr_ring_t *rcrp, rdc_stat_t cs, uint16_t *nptrs, uint16_t *npkts) 1277 { 1278 hpi_handle_t handle; 1279 uint8_t channel; 1280 p_rx_rcr_rings_t rx_rcr_rings; 1281 p_rx_rcr_ring_t rcr_p; 1282 uint32_t comp_rd_index; 1283 p_rcr_entry_t rcr_desc_rd_head_p; 1284 p_rcr_entry_t rcr_desc_rd_head_pp; 1285 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1286 uint16_t qlen, nrcr_read, npkt_read; 1287 uint32_t qlen_hw, qlen_sw; 1288 uint32_t invalid_rcr_entry; 1289 boolean_t multi; 1290 rdc_rcr_cfg_b_t rcr_cfg_b; 1291 p_rx_mbox_t rx_mboxp; 1292 p_rxdma_mailbox_t mboxp; 1293 uint64_t rcr_head_index, rcr_tail_index; 
	uint64_t rcr_tail;
	rdc_rcr_tail_t rcr_tail_reg;
	p_hxge_rx_ring_stats_t rdc_stats;

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
	    "channel %d", vindex, ldvp->channel));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	rx_rcr_rings = hxgep->rx_rcr_rings;
	rcr_p = rx_rcr_rings->rcr_rings[vindex];
	channel = rcr_p->rdc;
	if (channel != ldvp->channel) {
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
		    "channel %d, and rcr channel %d not matched.",
		    vindex, ldvp->channel, channel));
		return (NULL);
	}

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
	    "==> hxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index));

	rx_mboxp = hxgep->rx_mbox_areas_p->rxmbox_areas[channel];
	mboxp = (p_rxdma_mailbox_t)rx_mboxp->rx_mbox.kaddrp;

	(void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value);
	rcr_tail = rcr_tail_reg.bits.tail;

	if (!qlen) {
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
		    channel, qlen));
		return (NULL);
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued (a jumbo or multi-buffer packet is
	 * counted as only one packet even though it may take up more than
	 * one completion entry).
	 */
	qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	rcr_head_index = rcr_p->rcr_desc_rd_head_p - rcr_p->rcr_desc_first_p;
	rcr_tail_index = rcr_tail - rcr_p->rcr_tail_begin;

	if (rcr_tail_index >= rcr_head_index) {
		qlen_sw = rcr_tail_index - rcr_head_index;
	} else {
		/* rcr_tail has wrapped around */
		qlen_sw = (rcr_p->comp_size - rcr_head_index) + rcr_tail_index;
	}

	if (qlen_hw > qlen_sw) {
		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
		    "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
		    channel, qlen_hw, qlen_sw));
		qlen_hw = qlen_sw;
	}

	while (qlen_hw) {
#ifdef HXGE_DEBUG
		hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
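		 *
		 * (Added worked example, not part of the original source,
		 * for the qlen_sw computation above: with a hypothetical
		 * comp_size of 256 entries, rcr_head_index = 250 and
		 * rcr_tail_index = 10, the tail has wrapped, so
		 * qlen_sw = (256 - 250) + 10 = 16 entries are ready.)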
1379 */ 1380 invalid_rcr_entry = 0; 1381 hxge_receive_packet(hxgep, 1382 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont, 1383 &invalid_rcr_entry); 1384 if (invalid_rcr_entry != 0) { 1385 rdc_stats = rcr_p->rdc_stats; 1386 rdc_stats->rcr_invalids++; 1387 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1388 "Channel %d could only read 0x%x packets, " 1389 "but 0x%x pending\n", channel, npkt_read, qlen_hw)); 1390 break; 1391 } 1392 1393 /* 1394 * message chaining modes (nemo msg chaining) 1395 */ 1396 if (nmp) { 1397 nmp->b_next = NULL; 1398 if (!multi && !mp_cont) { /* frame fits a partition */ 1399 *tail_mp = nmp; 1400 tail_mp = &nmp->b_next; 1401 nmp = NULL; 1402 } else if (multi && !mp_cont) { /* first segment */ 1403 *tail_mp = nmp; 1404 tail_mp = &nmp->b_cont; 1405 } else if (multi && mp_cont) { /* mid of multi segs */ 1406 *tail_mp = mp_cont; 1407 tail_mp = &mp_cont->b_cont; 1408 } else if (!multi && mp_cont) { /* last segment */ 1409 *tail_mp = mp_cont; 1410 tail_mp = &nmp->b_next; 1411 nmp = NULL; 1412 } 1413 } 1414 1415 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1416 "==> hxge_rx_pkts: loop: rcr channel %d " 1417 "before updating: multi %d " 1418 "nrcr_read %d " 1419 "npk read %d " 1420 "head_pp $%p index %d ", 1421 channel, multi, 1422 nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index)); 1423 1424 if (!multi) { 1425 qlen_hw--; 1426 npkt_read++; 1427 } 1428 1429 /* 1430 * Update the next read entry. 1431 */ 1432 comp_rd_index = NEXT_ENTRY(comp_rd_index, 1433 rcr_p->comp_wrap_mask); 1434 1435 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 1436 rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p); 1437 1438 nrcr_read++; 1439 1440 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1441 "<== hxge_rx_pkts: (SAM, process one packet) " 1442 "nrcr_read %d", nrcr_read)); 1443 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1444 "==> hxge_rx_pkts: loop: rcr channel %d " 1445 "multi %d nrcr_read %d npk read %d head_pp $%p index %d ", 1446 channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp, 1447 comp_rd_index)); 1448 } 1449 1450 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 1451 rcr_p->comp_rd_index = comp_rd_index; 1452 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 1453 1454 /* Adjust the mailbox queue length for a hardware bug workaround */ 1455 mboxp->rcrstat_a.bits.qlen -= npkt_read; 1456 1457 if ((hxgep->intr_timeout != rcr_p->intr_timeout) || 1458 (hxgep->intr_threshold != rcr_p->intr_threshold)) { 1459 rcr_p->intr_timeout = hxgep->intr_timeout; 1460 rcr_p->intr_threshold = hxgep->intr_threshold; 1461 rcr_cfg_b.value = 0x0ULL; 1462 if (rcr_p->intr_timeout) 1463 rcr_cfg_b.bits.entout = 1; 1464 rcr_cfg_b.bits.timeout = rcr_p->intr_timeout; 1465 rcr_cfg_b.bits.pthres = rcr_p->intr_threshold; 1466 RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, 1467 channel, rcr_cfg_b.value); 1468 } 1469 1470 *nptrs = nrcr_read; 1471 *npkts = npkt_read; 1472 1473 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1474 "==> hxge_rx_pkts: EXIT: rcr channel %d " 1475 "head_pp $%p index %016llx ", 1476 channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index)); 1477 1478 /* 1479 * Update RCR buffer pointer read and number of packets read. 
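	 *
	 * Added commentary (not part of the original source) on the message
	 * chaining done in the loop above: complete frames are linked through
	 * b_next, while the pieces of a multi-entry (jumbo) frame are linked
	 * through b_cont.  For example, two frames where the second spans two
	 * buffers are returned as:
	 *
	 *	head_mp -> frame1 -(b_next)-> frame2 -(b_cont)-> frame2 part 2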
	 */
	*rcrp = rcr_p;

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));

	return (head_mp);
}

#define	RCR_ENTRY_PATTERN	0x5a5a6b6b7c7c8d8dULL
#define	NO_PORT_BIT		0x20

/*ARGSUSED*/
void
hxge_receive_packet(p_hxge_t hxgep,
    p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
    boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont,
    uint32_t *invalid_rcr_entry)
{
	p_mblk_t nmp = NULL;
	uint64_t multi;
	uint8_t channel;

	boolean_t first_entry = B_TRUE;
	boolean_t buffer_free = B_FALSE;
	boolean_t error_send_up = B_FALSE;
	uint8_t error_type;
	uint16_t l2_len;
	uint16_t skip_len;
	uint8_t pktbufsz_type;
	uint64_t rcr_entry;
	uint64_t *pkt_buf_addr_pp;
	uint64_t *pkt_buf_addr_p;
	uint32_t buf_offset;
	uint32_t bsize;
	uint32_t msg_index;
	p_rx_rbr_ring_t rx_rbr_p;
	p_rx_msg_t *rx_msg_ring_p;
	p_rx_msg_t rx_msg_p;

	uint16_t sw_offset_bytes = 0, hdr_size = 0;
	hxge_status_t status = HXGE_OK;
	boolean_t is_valid = B_FALSE;
	p_hxge_rx_ring_stats_t rdc_stats;
	uint32_t bytes_read;
	uint8_t header = 0;

	channel = rcr_p->rdc;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));

	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);

	/* Verify the content of the rcr_entry for a hardware bug workaround */
	if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) {
		*invalid_rcr_entry = 1;
		HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet "
		    "Channel %d invalid RCR entry 0x%llx found, returning\n",
		    channel, (long long) rcr_entry));
		return;
	}
	*((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN;

	multi = (rcr_entry & RCR_MULTI_MASK);

	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);

	/*
	 * The hardware does not strip the CRC, due to bug ID 11451, where
	 * the hardware mishandles minimum-size packets.
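	 *
	 * (Added illustration, not part of the original source: for a
	 * hypothetical 1518-byte frame on the wire, l2_len as reported in
	 * the completion entry includes the 4-byte FCS, so the adjustment
	 * below hands a 1514-byte frame up to the stack.)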
1552 */ 1553 l2_len -= ETHERFCSL; 1554 1555 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 1556 RCR_PKTBUFSZ_SHIFT); 1557 #if defined(__i386) 1558 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 1559 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 1560 #else 1561 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 1562 RCR_PKT_BUF_ADDR_SHIFT); 1563 #endif 1564 1565 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1566 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1567 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1568 "error_type 0x%x pktbufsz_type %d ", 1569 rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len, 1570 multi, error_type, pktbufsz_type)); 1571 1572 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1573 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1574 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1575 "error_type 0x%x ", rcr_desc_rd_head_p, 1576 rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type)); 1577 1578 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1579 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1580 "full pkt_buf_addr_pp $%p l2_len %d", 1581 rcr_entry, pkt_buf_addr_pp, l2_len)); 1582 1583 /* get the stats ptr */ 1584 rdc_stats = rcr_p->rdc_stats; 1585 1586 if (!l2_len) { 1587 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1588 "<== hxge_receive_packet: failed: l2 length is 0.")); 1589 return; 1590 } 1591 1592 /* shift 6 bits to get the full io address */ 1593 #if defined(__i386) 1594 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 1595 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1596 #else 1597 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 1598 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1599 #endif 1600 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1601 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1602 "full pkt_buf_addr_pp $%p l2_len %d", 1603 rcr_entry, pkt_buf_addr_pp, l2_len)); 1604 1605 rx_rbr_p = rcr_p->rx_rbr_p; 1606 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 1607 1608 if (first_entry) { 1609 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 1610 RXDMA_HDR_SIZE_DEFAULT); 1611 1612 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1613 "==> hxge_receive_packet: first entry 0x%016llx " 1614 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 1615 rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size)); 1616 } 1617 1618 MUTEX_ENTER(&rcr_p->lock); 1619 MUTEX_ENTER(&rx_rbr_p->lock); 1620 1621 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1622 "==> (rbr 1) hxge_receive_packet: entry 0x%0llx " 1623 "full pkt_buf_addr_pp $%p l2_len %d", 1624 rcr_entry, pkt_buf_addr_pp, l2_len)); 1625 1626 /* 1627 * Packet buffer address in the completion entry points to the starting 1628 * buffer address (offset 0). Use the starting buffer address to locate 1629 * the corresponding kernel address. 
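	 *
	 * Added commentary (not part of the original source):
	 * hxge_rxbuf_pp_to_vp() maps that DMA (IO) address back to the
	 * buffer's kernel virtual address by finding the owning chunk
	 * (via the per-size hint or a binary search of the sorted
	 * rxbuf_index_info_t array) and also returns the block index within
	 * the ring (msg_index) and the offset within the block (buf_offset).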
1630 */ 1631 status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p, 1632 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 1633 &buf_offset, &msg_index); 1634 1635 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1636 "==> (rbr 2) hxge_receive_packet: entry 0x%0llx " 1637 "full pkt_buf_addr_pp $%p l2_len %d", 1638 rcr_entry, pkt_buf_addr_pp, l2_len)); 1639 1640 if (status != HXGE_OK) { 1641 MUTEX_EXIT(&rx_rbr_p->lock); 1642 MUTEX_EXIT(&rcr_p->lock); 1643 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1644 "<== hxge_receive_packet: found vaddr failed %d", status)); 1645 return; 1646 } 1647 1648 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1649 "==> (rbr 3) hxge_receive_packet: entry 0x%0llx " 1650 "full pkt_buf_addr_pp $%p l2_len %d", 1651 rcr_entry, pkt_buf_addr_pp, l2_len)); 1652 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1653 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1654 "full pkt_buf_addr_pp $%p l2_len %d", 1655 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1656 1657 if (msg_index >= rx_rbr_p->tnblocks) { 1658 MUTEX_EXIT(&rx_rbr_p->lock); 1659 MUTEX_EXIT(&rcr_p->lock); 1660 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1661 "==> hxge_receive_packet: FATAL msg_index (%d) " 1662 "should be smaller than tnblocks (%d)\n", 1663 msg_index, rx_rbr_p->tnblocks)); 1664 return; 1665 } 1666 1667 rx_msg_p = rx_msg_ring_p[msg_index]; 1668 1669 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1670 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1671 "full pkt_buf_addr_pp $%p l2_len %d", 1672 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1673 1674 switch (pktbufsz_type) { 1675 case RCR_PKTBUFSZ_0: 1676 bsize = rx_rbr_p->pkt_buf_size0_bytes; 1677 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1678 "==> hxge_receive_packet: 0 buf %d", bsize)); 1679 break; 1680 case RCR_PKTBUFSZ_1: 1681 bsize = rx_rbr_p->pkt_buf_size1_bytes; 1682 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1683 "==> hxge_receive_packet: 1 buf %d", bsize)); 1684 break; 1685 case RCR_PKTBUFSZ_2: 1686 bsize = rx_rbr_p->pkt_buf_size2_bytes; 1687 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1688 "==> hxge_receive_packet: 2 buf %d", bsize)); 1689 break; 1690 case RCR_SINGLE_BLOCK: 1691 bsize = rx_msg_p->block_size; 1692 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1693 "==> hxge_receive_packet: single %d", bsize)); 1694 1695 break; 1696 default: 1697 MUTEX_EXIT(&rx_rbr_p->lock); 1698 MUTEX_EXIT(&rcr_p->lock); 1699 return; 1700 } 1701 1702 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 1703 (buf_offset + sw_offset_bytes), (hdr_size + l2_len), 1704 DDI_DMA_SYNC_FORCPU); 1705 1706 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1707 "==> hxge_receive_packet: after first dump:usage count")); 1708 1709 if (rx_msg_p->cur_usage_cnt == 0) { 1710 if (rx_rbr_p->rbr_use_bcopy) { 1711 atomic_inc_32(&rx_rbr_p->rbr_consumed); 1712 if (rx_rbr_p->rbr_consumed > 1713 rx_rbr_p->rbr_threshold_hi) { 1714 rx_msg_p->rx_use_bcopy = B_TRUE; 1715 } 1716 } 1717 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1718 "==> hxge_receive_packet: buf %d (new block) ", bsize)); 1719 1720 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 1721 rx_msg_p->pkt_buf_size = bsize; 1722 rx_msg_p->cur_usage_cnt = 1; 1723 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 1724 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1725 "==> hxge_receive_packet: buf %d (single block) ", 1726 bsize)); 1727 /* 1728 * Buffer can be reused once the free function is 1729 * called. 
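			 *
			 * (Added worked example, not part of the original
			 * source: in the non-single-block case below, a
			 * hypothetical 4096-byte block carved into 1024-byte
			 * packet buffers yields max_usage_cnt = 4096 / 1024 =
			 * 4, so the block is only reposted after all four
			 * buffers have been consumed and freed.)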
1730 */ 1731 rx_msg_p->max_usage_cnt = 1; 1732 buffer_free = B_TRUE; 1733 } else { 1734 rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize; 1735 if (rx_msg_p->max_usage_cnt == 1) { 1736 buffer_free = B_TRUE; 1737 } 1738 } 1739 } else { 1740 rx_msg_p->cur_usage_cnt++; 1741 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 1742 buffer_free = B_TRUE; 1743 } 1744 } 1745 1746 if (rx_msg_p->rx_use_bcopy) { 1747 rdc_stats->pkt_drop++; 1748 atomic_inc_32(&rx_msg_p->ref_cnt); 1749 if (buffer_free == B_TRUE) { 1750 rx_msg_p->free = B_TRUE; 1751 } 1752 1753 MUTEX_EXIT(&rx_rbr_p->lock); 1754 MUTEX_EXIT(&rcr_p->lock); 1755 hxge_freeb(rx_msg_p); 1756 return; 1757 } 1758 1759 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1760 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 1761 msg_index, l2_len, 1762 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 1763 1764 if (error_type) { 1765 rdc_stats->ierrors++; 1766 /* Update error stats */ 1767 rdc_stats->errlog.compl_err_type = error_type; 1768 HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR); 1769 1770 if (error_type & RCR_CTRL_FIFO_DED) { 1771 rdc_stats->ctrl_fifo_ecc_err++; 1772 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1773 " hxge_receive_packet: " 1774 " channel %d RCR ctrl_fifo_ded error", channel)); 1775 } else if (error_type & RCR_DATA_FIFO_DED) { 1776 rdc_stats->data_fifo_ecc_err++; 1777 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1778 " hxge_receive_packet: channel %d" 1779 " RCR data_fifo_ded error", channel)); 1780 } 1781 1782 /* 1783 * Update and repost buffer block if max usage count is 1784 * reached. 1785 */ 1786 if (error_send_up == B_FALSE) { 1787 atomic_inc_32(&rx_msg_p->ref_cnt); 1788 if (buffer_free == B_TRUE) { 1789 rx_msg_p->free = B_TRUE; 1790 } 1791 1792 MUTEX_EXIT(&rx_rbr_p->lock); 1793 MUTEX_EXIT(&rcr_p->lock); 1794 hxge_freeb(rx_msg_p); 1795 return; 1796 } 1797 } 1798 1799 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1800 "==> hxge_receive_packet: DMA sync second ")); 1801 1802 bytes_read = rcr_p->rcvd_pkt_bytes; 1803 skip_len = sw_offset_bytes + hdr_size; 1804 1805 if (first_entry) { 1806 header = rx_msg_p->buffer[buf_offset]; 1807 } 1808 1809 if (!rx_msg_p->rx_use_bcopy) { 1810 /* 1811 * For loaned up buffers, the driver reference count 1812 * will be incremented first and then the free state. 1813 */ 1814 if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 1815 if (first_entry) { 1816 nmp->b_rptr = &nmp->b_rptr[skip_len]; 1817 if (l2_len < bsize - skip_len) { 1818 nmp->b_wptr = &nmp->b_rptr[l2_len]; 1819 } else { 1820 nmp->b_wptr = &nmp->b_rptr[bsize 1821 - skip_len]; 1822 } 1823 } else { 1824 if (l2_len - bytes_read < bsize) { 1825 nmp->b_wptr = 1826 &nmp->b_rptr[l2_len - bytes_read]; 1827 } else { 1828 nmp->b_wptr = &nmp->b_rptr[bsize]; 1829 } 1830 } 1831 } 1832 } else { 1833 if (first_entry) { 1834 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 1835 l2_len < bsize - skip_len ? 1836 l2_len : bsize - skip_len); 1837 } else { 1838 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset, 1839 l2_len - bytes_read < bsize ? 
			    l2_len - bytes_read : bsize);
		}
	}

	if (nmp != NULL) {
		if (first_entry)
			bytes_read = nmp->b_wptr - nmp->b_rptr;
		else
			bytes_read += nmp->b_wptr - nmp->b_rptr;

		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "==> hxge_receive_packet after dupb: "
		    "rbr consumed %d "
		    "pktbufsz_type %d "
		    "nmp $%p rptr $%p wptr $%p "
		    "buf_offset %d bsize %d l2_len %d skip_len %d",
		    rx_rbr_p->rbr_consumed,
		    pktbufsz_type,
		    nmp, nmp->b_rptr, nmp->b_wptr,
		    buf_offset, bsize, l2_len, skip_len));
	} else {
		cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");

		atomic_inc_32(&rx_msg_p->ref_cnt);
		if (buffer_free == B_TRUE) {
			rx_msg_p->free = B_TRUE;
		}

		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
		hxge_freeb(rx_msg_p);
		return;
	}

	if (buffer_free == B_TRUE) {
		rx_msg_p->free = B_TRUE;
	}

	/*
	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
	 * packet is not fragmented and no error bit is set, then the L4
	 * checksum is OK.
	 */
	is_valid = (nmp != NULL);
	if (first_entry) {
		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
		if (l2_len > (STD_FRAME_SIZE - ETHERFCSL))
			rdc_stats->jumbo_pkts++;
		rdc_stats->ibytes += skip_len + l2_len < bsize ?
		    l2_len : bsize;
	} else {
		/*
		 * Add the current portion of the packet to the kstats.
		 * The current portion of the packet is calculated by using
		 * length of the packet and the previously received portion.
		 */
		rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ?
		    l2_len - rcr_p->rcvd_pkt_bytes : bsize;
	}

	rcr_p->rcvd_pkt_bytes = bytes_read;

	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
		atomic_inc_32(&rx_msg_p->ref_cnt);
		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
		hxge_freeb(rx_msg_p);
	} else {
		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
	}

	if (is_valid) {
		nmp->b_cont = NULL;
		if (first_entry) {
			*mp = nmp;
			*mp_cont = NULL;
		} else {
			*mp_cont = nmp;
		}
	}

	/*
	 * Update stats and hardware checksumming.
	 */
	if (is_valid && !multi) {
		if (!(header & NO_PORT_BIT) && !error_type) {
			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
			    HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);

			HXGE_DEBUG_MSG((hxgep, RX_CTL,
			    "==> hxge_receive_packet: Full tcp/udp cksum "
			    "is_valid 0x%x multi %d error %d",
			    is_valid, multi, error_type));
		}
	}

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_receive_packet: *mp 0x%016llx", *mp));

	*multi_p = (multi == RCR_MULTI_MASK);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: "
	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
	    *multi_p, nmp, *mp, *mp_cont));
}

static void
hxge_rx_rbr_empty_recover(p_hxge_t hxgep, uint8_t channel)
{
	hpi_handle_t handle;
	p_rx_rcr_ring_t rcrp;
	p_rx_rbr_ring_t rbrp;

	rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
	rbrp = rcrp->rx_rbr_p;
	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * Wait for the channel to be quiet.
	 */
	(void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel);

	/*
	 * Post page will accumulate some buffers before re-enabling
	 * the DMA channel.
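	 *
	 * Added commentary (not part of the original source): the channel is
	 * restored immediately below only when fewer than rbb_max / 32
	 * buffers are still outstanding; otherwise rbr_is_empty is set and
	 * hxge_post_page() restores the channel later, once the count of
	 * consumed buffers drops below rbb_max / 16.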
1966 */ 1967 MUTEX_ENTER(&rbrp->post_lock); 1968 if (rbrp->rbr_consumed < rbrp->rbb_max / 32) { 1969 hxge_rbr_empty_restore(hxgep, rbrp); 1970 } else { 1971 rbrp->rbr_is_empty = B_TRUE; 1972 } 1973 MUTEX_EXIT(&rbrp->post_lock); 1974 } 1975 1976 /*ARGSUSED*/ 1977 static hxge_status_t 1978 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp, 1979 rdc_stat_t cs) 1980 { 1981 p_hxge_rx_ring_stats_t rdc_stats; 1982 hpi_handle_t handle; 1983 boolean_t rxchan_fatal = B_FALSE; 1984 uint8_t channel; 1985 hxge_status_t status = HXGE_OK; 1986 uint64_t cs_val; 1987 1988 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts")); 1989 1990 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1991 channel = ldvp->channel; 1992 1993 /* Clear the interrupts */ 1994 cs.bits.pktread = 0; 1995 cs.bits.ptrread = 0; 1996 cs_val = cs.value & RDC_STAT_WR1C; 1997 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val); 1998 1999 rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index]; 2000 2001 if (cs.bits.rbr_cpl_to) { 2002 rdc_stats->rbr_tmout++; 2003 HXGE_FM_REPORT_ERROR(hxgep, channel, 2004 HXGE_FM_EREPORT_RDMC_RBR_CPL_TO); 2005 rxchan_fatal = B_TRUE; 2006 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2007 "==> hxge_rx_err_evnts(channel %d): " 2008 "fatal error: rx_rbr_timeout", channel)); 2009 } 2010 2011 if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) { 2012 (void) hpi_rxdma_ring_perr_stat_get(handle, 2013 &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par); 2014 } 2015 2016 if (cs.bits.rcr_shadow_par_err) { 2017 rdc_stats->rcr_sha_par++; 2018 HXGE_FM_REPORT_ERROR(hxgep, channel, 2019 HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2020 rxchan_fatal = B_TRUE; 2021 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2022 "==> hxge_rx_err_evnts(channel %d): " 2023 "fatal error: rcr_shadow_par_err", channel)); 2024 } 2025 2026 if (cs.bits.rbr_prefetch_par_err) { 2027 rdc_stats->rbr_pre_par++; 2028 HXGE_FM_REPORT_ERROR(hxgep, channel, 2029 HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2030 rxchan_fatal = B_TRUE; 2031 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2032 "==> hxge_rx_err_evnts(channel %d): " 2033 "fatal error: rbr_prefetch_par_err", channel)); 2034 } 2035 2036 if (cs.bits.rbr_pre_empty) { 2037 rdc_stats->rbr_pre_empty++; 2038 HXGE_FM_REPORT_ERROR(hxgep, channel, 2039 HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY); 2040 rxchan_fatal = B_TRUE; 2041 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2042 "==> hxge_rx_err_evnts(channel %d): " 2043 "fatal error: rbr_pre_empty", channel)); 2044 } 2045 2046 if (cs.bits.peu_resp_err) { 2047 rdc_stats->peu_resp_err++; 2048 HXGE_FM_REPORT_ERROR(hxgep, channel, 2049 HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR); 2050 rxchan_fatal = B_TRUE; 2051 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2052 "==> hxge_rx_err_evnts(channel %d): " 2053 "fatal error: peu_resp_err", channel)); 2054 } 2055 2056 if (cs.bits.rcr_thres) { 2057 rdc_stats->rcr_thres++; 2058 } 2059 2060 if (cs.bits.rcr_to) { 2061 rdc_stats->rcr_to++; 2062 } 2063 2064 if (cs.bits.rcr_shadow_full) { 2065 rdc_stats->rcr_shadow_full++; 2066 HXGE_FM_REPORT_ERROR(hxgep, channel, 2067 HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL); 2068 rxchan_fatal = B_TRUE; 2069 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2070 "==> hxge_rx_err_evnts(channel %d): " 2071 "fatal error: rcr_shadow_full", channel)); 2072 } 2073 2074 if (cs.bits.rcr_full) { 2075 rdc_stats->rcrfull++; 2076 HXGE_FM_REPORT_ERROR(hxgep, channel, 2077 HXGE_FM_EREPORT_RDMC_RCRFULL); 2078 rxchan_fatal = B_TRUE; 2079 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2080 "==> hxge_rx_err_evnts(channel %d): " 2081 "fatal error: rcrfull error", channel)); 2082 } 2083 
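	/*
	 * RBR empty is not treated as a fatal channel error: the channel
	 * is recovered in line by hxge_rx_rbr_empty_recover(), which
	 * waits for the channel to quiesce and either re-enables it or
	 * defers to the buffer posting path.
	 */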
2084 if (cs.bits.rbr_empty) { 2085 rdc_stats->rbr_empty++; 2086 hxge_rx_rbr_empty_recover(hxgep, channel); 2087 } 2088 2089 if (cs.bits.rbr_full) { 2090 rdc_stats->rbrfull++; 2091 HXGE_FM_REPORT_ERROR(hxgep, channel, 2092 HXGE_FM_EREPORT_RDMC_RBRFULL); 2093 rxchan_fatal = B_TRUE; 2094 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2095 "==> hxge_rx_err_evnts(channel %d): " 2096 "fatal error: rbr_full error", channel)); 2097 } 2098 2099 if (rxchan_fatal) { 2100 p_rx_rcr_ring_t rcrp; 2101 p_rx_rbr_ring_t rbrp; 2102 2103 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel]; 2104 rbrp = rcrp->rx_rbr_p; 2105 2106 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2107 " hxge_rx_err_evnts: fatal error on Channel #%d\n", 2108 channel)); 2109 MUTEX_ENTER(&rbrp->post_lock); 2110 /* This function needs to be inside the post_lock */ 2111 status = hxge_rxdma_fatal_err_recover(hxgep, channel); 2112 MUTEX_EXIT(&rbrp->post_lock); 2113 if (status == HXGE_OK) { 2114 FM_SERVICE_RESTORED(hxgep); 2115 } 2116 } 2117 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts")); 2118 2119 return (status); 2120 } 2121 2122 static hxge_status_t 2123 hxge_map_rxdma(p_hxge_t hxgep) 2124 { 2125 int i, ndmas; 2126 uint16_t channel; 2127 p_rx_rbr_rings_t rx_rbr_rings; 2128 p_rx_rbr_ring_t *rbr_rings; 2129 p_rx_rcr_rings_t rx_rcr_rings; 2130 p_rx_rcr_ring_t *rcr_rings; 2131 p_rx_mbox_areas_t rx_mbox_areas_p; 2132 p_rx_mbox_t *rx_mbox_p; 2133 p_hxge_dma_pool_t dma_buf_poolp; 2134 p_hxge_dma_common_t *dma_buf_p; 2135 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2136 p_hxge_dma_common_t *dma_rbr_cntl_p; 2137 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2138 p_hxge_dma_common_t *dma_rcr_cntl_p; 2139 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2140 p_hxge_dma_common_t *dma_mbox_cntl_p; 2141 uint32_t *num_chunks; 2142 hxge_status_t status = HXGE_OK; 2143 2144 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma")); 2145 2146 dma_buf_poolp = hxgep->rx_buf_pool_p; 2147 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2148 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2149 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2150 2151 if (!dma_buf_poolp->buf_allocated || 2152 !dma_rbr_cntl_poolp->buf_allocated || 2153 !dma_rcr_cntl_poolp->buf_allocated || 2154 !dma_mbox_cntl_poolp->buf_allocated) { 2155 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2156 "<== hxge_map_rxdma: buf not allocated")); 2157 return (HXGE_ERROR); 2158 } 2159 2160 ndmas = dma_buf_poolp->ndmas; 2161 if (!ndmas) { 2162 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2163 "<== hxge_map_rxdma: no dma allocated")); 2164 return (HXGE_ERROR); 2165 } 2166 2167 num_chunks = dma_buf_poolp->num_chunks; 2168 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2169 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p; 2170 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p; 2171 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p; 2172 2173 rx_rbr_rings = (p_rx_rbr_rings_t) 2174 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2175 rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC( 2176 sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2177 2178 rx_rcr_rings = (p_rx_rcr_rings_t) 2179 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2180 rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC( 2181 sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2182 2183 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2184 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2185 rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC( 2186 sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2187 2188 /* 2189 * Timeout should be set based on the system clock divider. 
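	 * The defaults set here (RXDMA_RCR_PTHRES_DEFAULT and
	 * RXDMA_RCR_TO_DEFAULT) are copied into each RCR ring below and
	 * programmed into that channel's RCR configuration B register
	 * (pthres/timeout fields).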
2190 * The following timeout value of 1 assumes that the 2191 * granularity (1000) is 3 microseconds running at 300MHz. 2192 */ 2193 2194 hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2195 hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2196 2197 /* 2198 * Map descriptors from the buffer pools for each dma channel. 2199 */ 2200 for (i = 0; i < ndmas; i++) { 2201 /* 2202 * Set up and prepare buffer blocks, descriptors and mailbox. 2203 */ 2204 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2205 status = hxge_map_rxdma_channel(hxgep, channel, 2206 (p_hxge_dma_common_t *)&dma_buf_p[i], 2207 (p_rx_rbr_ring_t *)&rbr_rings[i], 2208 num_chunks[i], 2209 (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i], 2210 (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i], 2211 (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i], 2212 (p_rx_rcr_ring_t *)&rcr_rings[i], 2213 (p_rx_mbox_t *)&rx_mbox_p[i]); 2214 if (status != HXGE_OK) { 2215 goto hxge_map_rxdma_fail1; 2216 } 2217 rbr_rings[i]->index = (uint16_t)i; 2218 rcr_rings[i]->index = (uint16_t)i; 2219 rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i]; 2220 } 2221 2222 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2223 rx_rbr_rings->rbr_rings = rbr_rings; 2224 hxgep->rx_rbr_rings = rx_rbr_rings; 2225 rx_rcr_rings->rcr_rings = rcr_rings; 2226 hxgep->rx_rcr_rings = rx_rcr_rings; 2227 2228 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2229 hxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2230 2231 goto hxge_map_rxdma_exit; 2232 2233 hxge_map_rxdma_fail1: 2234 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2235 "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)", 2236 status, channel, i)); 2237 i--; 2238 for (; i >= 0; i--) { 2239 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2240 hxge_unmap_rxdma_channel(hxgep, channel, 2241 rbr_rings[i], rcr_rings[i], rx_mbox_p[i]); 2242 } 2243 2244 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2245 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2246 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2247 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2248 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2249 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2250 2251 hxge_map_rxdma_exit: 2252 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2253 "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 2254 2255 return (status); 2256 } 2257 2258 static void 2259 hxge_unmap_rxdma(p_hxge_t hxgep) 2260 { 2261 int i, ndmas; 2262 uint16_t channel; 2263 p_rx_rbr_rings_t rx_rbr_rings; 2264 p_rx_rbr_ring_t *rbr_rings; 2265 p_rx_rcr_rings_t rx_rcr_rings; 2266 p_rx_rcr_ring_t *rcr_rings; 2267 p_rx_mbox_areas_t rx_mbox_areas_p; 2268 p_rx_mbox_t *rx_mbox_p; 2269 p_hxge_dma_pool_t dma_buf_poolp; 2270 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2271 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2272 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2273 p_hxge_dma_common_t *dma_buf_p; 2274 2275 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma")); 2276 2277 dma_buf_poolp = hxgep->rx_buf_pool_p; 2278 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2279 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2280 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2281 2282 if (!dma_buf_poolp->buf_allocated || 2283 !dma_rbr_cntl_poolp->buf_allocated || 2284 !dma_rcr_cntl_poolp->buf_allocated || 2285 !dma_mbox_cntl_poolp->buf_allocated) { 2286 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2287 "<== hxge_unmap_rxdma: NULL buf pointers")); 2288 return; 2289 } 2290 2291 rx_rbr_rings = hxgep->rx_rbr_rings; 2292 rx_rcr_rings = hxgep->rx_rcr_rings; 2293 if
(rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2294 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2295 "<== hxge_unmap_rxdma: NULL pointers")); 2296 return; 2297 } 2298 2299 ndmas = rx_rbr_rings->ndmas; 2300 if (!ndmas) { 2301 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2302 "<== hxge_unmap_rxdma: no channel")); 2303 return; 2304 } 2305 2306 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2307 "==> hxge_unmap_rxdma (ndmas %d)", ndmas)); 2308 2309 rbr_rings = rx_rbr_rings->rbr_rings; 2310 rcr_rings = rx_rcr_rings->rcr_rings; 2311 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 2312 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 2313 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2314 2315 for (i = 0; i < ndmas; i++) { 2316 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2317 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2318 "==> hxge_unmap_rxdma (ndmas %d) channel %d", 2319 ndmas, channel)); 2320 (void) hxge_unmap_rxdma_channel(hxgep, channel, 2321 (p_rx_rbr_ring_t)rbr_rings[i], 2322 (p_rx_rcr_ring_t)rcr_rings[i], 2323 (p_rx_mbox_t)rx_mbox_p[i]); 2324 } 2325 2326 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2327 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2328 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2329 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2330 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2331 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2332 2333 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma")); 2334 } 2335 2336 hxge_status_t 2337 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2338 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 2339 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 2340 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 2341 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2342 { 2343 int status = HXGE_OK; 2344 2345 /* 2346 * Set up and prepare buffer blocks, descriptors and mailbox. 2347 */ 2348 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2349 "==> hxge_map_rxdma_channel (channel %d)", channel)); 2350 2351 /* 2352 * Receive buffer blocks 2353 */ 2354 status = hxge_map_rxdma_channel_buf_ring(hxgep, channel, 2355 dma_buf_p, rbr_p, num_chunks); 2356 if (status != HXGE_OK) { 2357 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2358 "==> hxge_map_rxdma_channel (channel %d): " 2359 "map buffer failed 0x%x", channel, status)); 2360 goto hxge_map_rxdma_channel_exit; 2361 } 2362 2363 /* 2364 * Receive block ring, completion ring and mailbox. 
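	 * These are laid out in the per-channel control DMA areas
	 * (dma_rbr_cntl_p, dma_rcr_cntl_p and dma_mbox_cntl_p) by
	 * hxge_map_rxdma_channel_cfg_ring() below.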
2365 */ 2366 status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel, 2367 dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p, 2368 rbr_p, rcr_p, rx_mbox_p); 2369 if (status != HXGE_OK) { 2370 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2371 "==> hxge_map_rxdma_channel (channel %d): " 2372 "map config failed 0x%x", channel, status)); 2373 goto hxge_map_rxdma_channel_fail2; 2374 } 2375 goto hxge_map_rxdma_channel_exit; 2376 2377 hxge_map_rxdma_channel_fail3: 2378 /* Free rbr, rcr */ 2379 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2380 "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)", 2381 status, channel)); 2382 hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p); 2383 2384 hxge_map_rxdma_channel_fail2: 2385 /* Free buffer blocks */ 2386 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2387 "==> hxge_map_rxdma_channel: free rx buffers" 2388 "(hxgep 0x%x status 0x%x channel %d)", 2389 hxgep, status, channel)); 2390 hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p); 2391 2392 status = HXGE_ERROR; 2393 2394 hxge_map_rxdma_channel_exit: 2395 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2396 "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)", 2397 hxgep, status, channel)); 2398 2399 return (status); 2400 } 2401 2402 /*ARGSUSED*/ 2403 static void 2404 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2405 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2406 { 2407 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2408 "==> hxge_unmap_rxdma_channel (channel %d)", channel)); 2409 2410 /* 2411 * unmap receive block ring, completion ring and mailbox. 2412 */ 2413 (void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p); 2414 2415 /* unmap buffer blocks */ 2416 (void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p); 2417 2418 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel")); 2419 } 2420 2421 /*ARGSUSED*/ 2422 static hxge_status_t 2423 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel, 2424 p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p, 2425 p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p, 2426 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2427 { 2428 p_rx_rbr_ring_t rbrp; 2429 p_rx_rcr_ring_t rcrp; 2430 p_rx_mbox_t mboxp; 2431 p_hxge_dma_common_t cntl_dmap; 2432 p_hxge_dma_common_t dmap; 2433 p_rx_msg_t *rx_msg_ring; 2434 p_rx_msg_t rx_msg_p; 2435 rdc_rbr_cfg_a_t *rcfga_p; 2436 rdc_rbr_cfg_b_t *rcfgb_p; 2437 rdc_rcr_cfg_a_t *cfga_p; 2438 rdc_rcr_cfg_b_t *cfgb_p; 2439 rdc_rx_cfg1_t *cfig1_p; 2440 rdc_rx_cfg2_t *cfig2_p; 2441 rdc_rbr_kick_t *kick_p; 2442 uint32_t dmaaddrp; 2443 uint32_t *rbr_vaddrp; 2444 uint32_t bkaddr; 2445 hxge_status_t status = HXGE_OK; 2446 int i; 2447 uint32_t hxge_port_rcr_size; 2448 2449 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2450 "==> hxge_map_rxdma_channel_cfg_ring")); 2451 2452 cntl_dmap = *dma_rbr_cntl_p; 2453 2454 /* 2455 * Map in the receive block ring 2456 */ 2457 rbrp = *rbr_p; 2458 dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc; 2459 hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 2460 2461 /* 2462 * Zero out buffer block ring descriptors. 
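	 * Each RBR descriptor is a 32-bit entry (the ring is set up above
	 * with an element size of 4 and walked with a uint32_t pointer)
	 * holding a buffer block address right-shifted by
	 * RBR_BKADDR_SHIFT; the loop below fills one entry per buffer
	 * block.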
2463 */ 2464 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2465 2466 rcfga_p = &(rbrp->rbr_cfga); 2467 rcfgb_p = &(rbrp->rbr_cfgb); 2468 kick_p = &(rbrp->rbr_kick); 2469 rcfga_p->value = 0; 2470 rcfgb_p->value = 0; 2471 kick_p->value = 0; 2472 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 2473 rcfga_p->value = (rbrp->rbr_addr & 2474 (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK)); 2475 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 2476 2477 /* XXXX: how to choose packet buffer sizes */ 2478 rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0; 2479 rcfgb_p->bits.vld0 = 1; 2480 rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1; 2481 rcfgb_p->bits.vld1 = 1; 2482 rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2; 2483 rcfgb_p->bits.vld2 = 1; 2484 rcfgb_p->bits.bksize = hxgep->rx_bksize_code; 2485 2486 /* 2487 * For each buffer block, enter receive block address to the ring. 2488 */ 2489 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 2490 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 2491 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2492 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2493 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 2494 2495 rx_msg_ring = rbrp->rx_msg_ring; 2496 for (i = 0; i < rbrp->tnblocks; i++) { 2497 rx_msg_p = rx_msg_ring[i]; 2498 rx_msg_p->hxgep = hxgep; 2499 rx_msg_p->rx_rbr_p = rbrp; 2500 bkaddr = (uint32_t) 2501 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2502 RBR_BKADDR_SHIFT)); 2503 rx_msg_p->free = B_FALSE; 2504 rx_msg_p->max_usage_cnt = 0xbaddcafe; 2505 2506 *rbr_vaddrp++ = bkaddr; 2507 } 2508 2509 kick_p->bits.bkadd = rbrp->rbb_max; 2510 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 2511 2512 rbrp->rbr_rd_index = 0; 2513 2514 rbrp->rbr_consumed = 0; 2515 rbrp->rbr_use_bcopy = B_TRUE; 2516 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 2517 2518 /* 2519 * Do bcopy on packets greater than bcopy size once the lo threshold is 2520 * reached. This lo threshold should be less than the hi threshold. 2521 * 2522 * Do bcopy on every packet once the hi threshold is reached. 
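	 * The thresholds below are expressed as RBR occupancy levels: a
	 * value of rbb_max effectively disables copying, 0 forces a copy
	 * for every packet, and the intermediate HXGE_RX_COPY_* settings
	 * scale as rbb_max * threshold / HXGE_RX_BCOPY_SCALE (see the
	 * switch statements below).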
2523 */ 2524 if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) { 2525 /* default it to use hi */ 2526 hxge_rx_threshold_lo = hxge_rx_threshold_hi; 2527 } 2528 if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) { 2529 hxge_rx_buf_size_type = HXGE_RBR_TYPE2; 2530 } 2531 rbrp->rbr_bufsize_type = hxge_rx_buf_size_type; 2532 2533 switch (hxge_rx_threshold_hi) { 2534 default: 2535 case HXGE_RX_COPY_NONE: 2536 /* Do not do bcopy at all */ 2537 rbrp->rbr_use_bcopy = B_FALSE; 2538 rbrp->rbr_threshold_hi = rbrp->rbb_max; 2539 break; 2540 2541 case HXGE_RX_COPY_1: 2542 case HXGE_RX_COPY_2: 2543 case HXGE_RX_COPY_3: 2544 case HXGE_RX_COPY_4: 2545 case HXGE_RX_COPY_5: 2546 case HXGE_RX_COPY_6: 2547 case HXGE_RX_COPY_7: 2548 rbrp->rbr_threshold_hi = 2549 rbrp->rbb_max * (hxge_rx_threshold_hi) / 2550 HXGE_RX_BCOPY_SCALE; 2551 break; 2552 2553 case HXGE_RX_COPY_ALL: 2554 rbrp->rbr_threshold_hi = 0; 2555 break; 2556 } 2557 2558 switch (hxge_rx_threshold_lo) { 2559 default: 2560 case HXGE_RX_COPY_NONE: 2561 /* Do not do bcopy at all */ 2562 if (rbrp->rbr_use_bcopy) { 2563 rbrp->rbr_use_bcopy = B_FALSE; 2564 } 2565 rbrp->rbr_threshold_lo = rbrp->rbb_max; 2566 break; 2567 2568 case HXGE_RX_COPY_1: 2569 case HXGE_RX_COPY_2: 2570 case HXGE_RX_COPY_3: 2571 case HXGE_RX_COPY_4: 2572 case HXGE_RX_COPY_5: 2573 case HXGE_RX_COPY_6: 2574 case HXGE_RX_COPY_7: 2575 rbrp->rbr_threshold_lo = 2576 rbrp->rbb_max * (hxge_rx_threshold_lo) / 2577 HXGE_RX_BCOPY_SCALE; 2578 break; 2579 2580 case HXGE_RX_COPY_ALL: 2581 rbrp->rbr_threshold_lo = 0; 2582 break; 2583 } 2584 2585 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2586 "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d " 2587 "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d " 2588 "rbb_threshold_lo %d", 2589 dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type, 2590 rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo)); 2591 2592 /* Map in the receive completion ring */ 2593 rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 2594 rcrp->rdc = dma_channel; 2595 rcrp->hxgep = hxgep; 2596 2597 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 2598 rcrp->comp_size = hxge_port_rcr_size; 2599 rcrp->comp_wrap_mask = hxge_port_rcr_size - 1; 2600 2601 rcrp->max_receive_pkts = hxge_max_rx_pkts; 2602 2603 cntl_dmap = *dma_rcr_cntl_p; 2604 2605 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 2606 hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 2607 sizeof (rcr_entry_t)); 2608 rcrp->comp_rd_index = 0; 2609 rcrp->comp_wt_index = 0; 2610 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 2611 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 2612 #if defined(__i386) 2613 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2614 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2615 #else 2616 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2617 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2618 #endif 2619 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 2620 (hxge_port_rcr_size - 1); 2621 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 2622 (hxge_port_rcr_size - 1); 2623 2624 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 2625 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 2626 2627 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2628 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2629 "rbr_vaddrp $%p rcr_desc_rd_head_p $%p " 2630 "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p " 2631 "rcr_desc_rd_last_pp $%p ", 2632 dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p, 2633 rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p, 2634 
rcrp->rcr_desc_last_pp)); 2635 2636 /* 2637 * Zero out buffer block ring descriptors. 2638 */ 2639 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2640 rcrp->intr_timeout = hxgep->intr_timeout; 2641 rcrp->intr_threshold = hxgep->intr_threshold; 2642 rcrp->full_hdr_flag = B_FALSE; 2643 rcrp->sw_priv_hdr_len = 0; 2644 2645 cfga_p = &(rcrp->rcr_cfga); 2646 cfgb_p = &(rcrp->rcr_cfgb); 2647 cfga_p->value = 0; 2648 cfgb_p->value = 0; 2649 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 2650 2651 cfga_p->value = (rcrp->rcr_addr & 2652 (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK)); 2653 2654 cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF); 2655 2656 /* 2657 * Timeout should be set based on the system clock divider. The 2658 * following timeout value of 1 assumes that the granularity (1000) is 2659 * 3 microseconds running at 300MHz. 2660 */ 2661 cfgb_p->bits.pthres = rcrp->intr_threshold; 2662 cfgb_p->bits.timeout = rcrp->intr_timeout; 2663 cfgb_p->bits.entout = 1; 2664 2665 /* Map in the mailbox */ 2666 cntl_dmap = *dma_mbox_cntl_p; 2667 mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 2668 dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox; 2669 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 2670 cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1; 2671 cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2; 2672 cfig1_p->value = cfig2_p->value = 0; 2673 2674 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 2675 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2676 "==> hxge_map_rxdma_channel_cfg_ring: " 2677 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 2678 dma_channel, cfig1_p->value, cfig2_p->value, 2679 mboxp->mbox_addr)); 2680 2681 dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff); 2682 cfig1_p->bits.mbaddr_h = dmaaddrp; 2683 2684 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 2685 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 2686 RXDMA_CFIG2_MBADDR_L_MASK); 2687 2688 cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 2689 2690 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2691 "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p " 2692 "cfg1 0x%016llx cfig2 0x%016llx", 2693 dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value)); 2694 2695 cfig2_p->bits.full_hdr = rcrp->full_hdr_flag; 2696 cfig2_p->bits.offset = rcrp->sw_priv_hdr_len; 2697 2698 rbrp->rx_rcr_p = rcrp; 2699 rcrp->rx_rbr_p = rbrp; 2700 *rcr_p = rcrp; 2701 *rx_mbox_p = mboxp; 2702 2703 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2704 "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 2705 return (status); 2706 } 2707 2708 /*ARGSUSED*/ 2709 static void 2710 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 2711 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2712 { 2713 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2714 "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc)); 2715 2716 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 2717 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 2718 2719 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2720 "<== hxge_unmap_rxdma_channel_cfg_ring")); 2721 } 2722 2723 static hxge_status_t 2724 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel, 2725 p_hxge_dma_common_t *dma_buf_p, 2726 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 2727 { 2728 p_rx_rbr_ring_t rbrp; 2729 p_hxge_dma_common_t dma_bufp, tmp_bufp; 2730 p_rx_msg_t *rx_msg_ring; 2731 p_rx_msg_t rx_msg_p; 2732 p_mblk_t mblk_p; 2733 2734 rxring_info_t *ring_info; 2735 hxge_status_t status = HXGE_OK; 2736 int i, j, index; 2737 uint32_t size, bsize, 
nblocks, nmsgs; 2738 2739 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2740 "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel)); 2741 2742 dma_bufp = tmp_bufp = *dma_buf_p; 2743 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2744 " hxge_map_rxdma_channel_buf_ring: channel %d to map %d " 2745 "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp)); 2746 2747 nmsgs = 0; 2748 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2749 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2750 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2751 "bufp 0x%016llx nblocks %d nmsgs %d", 2752 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2753 nmsgs += tmp_bufp->nblocks; 2754 } 2755 if (!nmsgs) { 2756 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2757 "<== hxge_map_rxdma_channel_buf_ring: channel %d " 2758 "no msg blocks", channel)); 2759 status = HXGE_ERROR; 2760 goto hxge_map_rxdma_channel_buf_ring_exit; 2761 } 2762 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 2763 2764 size = nmsgs * sizeof (p_rx_msg_t); 2765 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2766 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 2767 KM_SLEEP); 2768 2769 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 2770 (void *) hxgep->interrupt_cookie); 2771 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 2772 (void *) hxgep->interrupt_cookie); 2773 2774 rbrp->rdc = channel; 2775 rbrp->num_blocks = num_chunks; 2776 rbrp->tnblocks = nmsgs; 2777 rbrp->rbb_max = nmsgs; 2778 rbrp->rbr_max_size = nmsgs; 2779 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 2780 2781 /* 2782 * Buffer sizes suggested by NIU architect. 256, 512 and 2K. 2783 */ 2784 2785 switch (hxgep->rx_bksize_code) { 2786 case RBR_BKSIZE_4K: 2787 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 2788 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 2789 rbrp->hpi_pkt_buf_size0 = SIZE_256B; 2790 break; 2791 case RBR_BKSIZE_8K: 2792 /* Use 512 to avoid possible rcr_full condition */ 2793 rbrp->pkt_buf_size0 = RBR_BUFSZ0_512B; 2794 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_512_BYTES; 2795 rbrp->hpi_pkt_buf_size0 = SIZE_512B; 2796 break; 2797 } 2798 2799 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 2800 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 2801 rbrp->hpi_pkt_buf_size1 = SIZE_1KB; 2802 2803 rbrp->block_size = hxgep->rx_default_block_size; 2804 2805 if (!hxgep->param_arr[param_accept_jumbo].value) { 2806 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 2807 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 2808 rbrp->hpi_pkt_buf_size2 = SIZE_2KB; 2809 } else { 2810 rbrp->hpi_pkt_buf_size2 = SIZE_4KB; 2811 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 2812 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 2813 } 2814 2815 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2816 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2817 "actual rbr max %d rbb_max %d nmsgs %d " 2818 "rbrp->block_size %d default_block_size %d " 2819 "(config hxge_rbr_size %d hxge_rbr_spare_size %d)", 2820 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 2821 rbrp->block_size, hxgep->rx_default_block_size, 2822 hxge_rbr_size, hxge_rbr_spare_size)); 2823 2824 /* 2825 * Map in buffers from the buffer pool. 2826 * Note that num_blocks is the num_chunks. For Sparc, there is likely 2827 * only one chunk. For x86, there will be many chunks. 2828 * Loop over chunks. 
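	 * A chunk is one contiguous DMA allocation; each chunk is carved
	 * into fixed-size blocks and every block gets its own rx_msg_t
	 * (allocated with hxge_allocb()) whose shifted DMA address is
	 * later posted to the RBR.  <index> counts blocks across all
	 * chunks.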
2829 */ 2830 index = 0; 2831 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 2832 bsize = dma_bufp->block_size; 2833 nblocks = dma_bufp->nblocks; 2834 #if defined(__i386) 2835 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 2836 #else 2837 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 2838 #endif 2839 ring_info->buffer[i].buf_index = i; 2840 ring_info->buffer[i].buf_size = dma_bufp->alength; 2841 ring_info->buffer[i].start_index = index; 2842 #if defined(__i386) 2843 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 2844 #else 2845 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 2846 #endif 2847 2848 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2849 " hxge_map_rxdma_channel_buf_ring: map channel %d " 2850 "chunk %d nblocks %d chunk_size %x block_size 0x%x " 2851 "dma_bufp $%p dvma_addr $%p", channel, i, 2852 dma_bufp->nblocks, 2853 ring_info->buffer[i].buf_size, bsize, dma_bufp, 2854 ring_info->buffer[i].dvma_addr)); 2855 2856 /* loop over blocks within a chunk */ 2857 for (j = 0; j < nblocks; j++) { 2858 if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO, 2859 dma_bufp)) == NULL) { 2860 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2861 "allocb failed (index %d i %d j %d)", 2862 index, i, j)); 2863 goto hxge_map_rxdma_channel_buf_ring_fail1; 2864 } 2865 rx_msg_ring[index] = rx_msg_p; 2866 rx_msg_p->block_index = index; 2867 rx_msg_p->shifted_addr = (uint32_t) 2868 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2869 RBR_BKADDR_SHIFT)); 2870 /* 2871 * Too much output 2872 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2873 * "index %d j %d rx_msg_p $%p mblk %p", 2874 * index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 2875 */ 2876 mblk_p = rx_msg_p->rx_mblk_p; 2877 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 2878 2879 rbrp->rbr_ref_cnt++; 2880 index++; 2881 rx_msg_p->buf_dma.dma_channel = channel; 2882 } 2883 } 2884 if (i < rbrp->num_blocks) { 2885 goto hxge_map_rxdma_channel_buf_ring_fail1; 2886 } 2887 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2888 "hxge_map_rxdma_channel_buf_ring: done buf init " 2889 "channel %d msg block entries %d", channel, index)); 2890 ring_info->block_size_mask = bsize - 1; 2891 rbrp->rx_msg_ring = rx_msg_ring; 2892 rbrp->dma_bufp = dma_buf_p; 2893 rbrp->ring_info = ring_info; 2894 2895 status = hxge_rxbuf_index_info_init(hxgep, rbrp); 2896 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: " 2897 "channel %d done buf info init", channel)); 2898 2899 /* 2900 * Finally, permit hxge_freeb() to call hxge_post_page(). 
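	 * <rbr_state> acts as the gate: buffers freed before this point
	 * are not re-posted to the hardware.  The unmap path later moves
	 * the ring to RBR_UNMAPPING/RBR_UNMAPPED so that hxge_post_page()
	 * never touches a ring that is being torn down.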
2901 */ 2902 rbrp->rbr_state = RBR_POSTING; 2903 2904 *rbr_p = rbrp; 2905 2906 goto hxge_map_rxdma_channel_buf_ring_exit; 2907 2908 hxge_map_rxdma_channel_buf_ring_fail1: 2909 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2910 " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 2911 channel, status)); 2912 2913 index--; 2914 for (; index >= 0; index--) { 2915 rx_msg_p = rx_msg_ring[index]; 2916 if (rx_msg_p != NULL) { 2917 freeb(rx_msg_p->rx_mblk_p); 2918 rx_msg_ring[index] = NULL; 2919 } 2920 } 2921 2922 hxge_map_rxdma_channel_buf_ring_fail: 2923 MUTEX_DESTROY(&rbrp->post_lock); 2924 MUTEX_DESTROY(&rbrp->lock); 2925 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2926 KMEM_FREE(rx_msg_ring, size); 2927 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 2928 2929 status = HXGE_ERROR; 2930 2931 hxge_map_rxdma_channel_buf_ring_exit: 2932 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2933 "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 2934 2935 return (status); 2936 } 2937 2938 /*ARGSUSED*/ 2939 static void 2940 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 2941 p_rx_rbr_ring_t rbr_p) 2942 { 2943 p_rx_msg_t *rx_msg_ring; 2944 p_rx_msg_t rx_msg_p; 2945 rxring_info_t *ring_info; 2946 int i; 2947 uint32_t size; 2948 2949 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2950 "==> hxge_unmap_rxdma_channel_buf_ring")); 2951 if (rbr_p == NULL) { 2952 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2953 "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 2954 return; 2955 } 2956 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2957 "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc)); 2958 2959 rx_msg_ring = rbr_p->rx_msg_ring; 2960 ring_info = rbr_p->ring_info; 2961 2962 if (rx_msg_ring == NULL || ring_info == NULL) { 2963 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2964 "<== hxge_unmap_rxdma_channel_buf_ring: " 2965 "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info)); 2966 return; 2967 } 2968 2969 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 2970 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2971 " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 2972 "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks, 2973 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 2974 2975 for (i = 0; i < rbr_p->tnblocks; i++) { 2976 rx_msg_p = rx_msg_ring[i]; 2977 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2978 " hxge_unmap_rxdma_channel_buf_ring: " 2979 "rx_msg_p $%p", rx_msg_p)); 2980 if (rx_msg_p != NULL) { 2981 freeb(rx_msg_p->rx_mblk_p); 2982 rx_msg_ring[i] = NULL; 2983 } 2984 } 2985 2986 /* 2987 * We no longer may use the mutex <post_lock>. By setting 2988 * <rbr_state> to anything but POSTING, we prevent 2989 * hxge_post_page() from accessing a dead mutex. 2990 */ 2991 rbr_p->rbr_state = RBR_UNMAPPING; 2992 MUTEX_DESTROY(&rbr_p->post_lock); 2993 2994 MUTEX_DESTROY(&rbr_p->lock); 2995 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2996 KMEM_FREE(rx_msg_ring, size); 2997 2998 if (rbr_p->rbr_ref_cnt == 0) { 2999 /* This is the normal state of affairs. */ 3000 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3001 } else { 3002 /* 3003 * Some of our buffers are still being used. 3004 * Therefore, tell hxge_freeb() this ring is 3005 * unmapped, so it may free <rbr_p> for us. 3006 */ 3007 rbr_p->rbr_state = RBR_UNMAPPED; 3008 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3009 "unmap_rxdma_buf_ring: %d %s outstanding.", 3010 rbr_p->rbr_ref_cnt, 3011 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 3012 } 3013 3014 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3015 "<== hxge_unmap_rxdma_channel_buf_ring")); 3016 } 3017 3018 static hxge_status_t 3019 hxge_rxdma_hw_start_common(p_hxge_t hxgep) 3020 { 3021 hxge_status_t status = HXGE_OK; 3022 3023 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 3024 3025 /* 3026 * Load the sharable parameters by writing to the function zero control 3027 * registers. These FZC registers should be initialized only once for 3028 * the entire chip. 3029 */ 3030 (void) hxge_init_fzc_rx_common(hxgep); 3031 3032 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 3033 3034 return (status); 3035 } 3036 3037 static hxge_status_t 3038 hxge_rxdma_hw_start(p_hxge_t hxgep) 3039 { 3040 int i, ndmas; 3041 uint16_t channel; 3042 p_rx_rbr_rings_t rx_rbr_rings; 3043 p_rx_rbr_ring_t *rbr_rings; 3044 p_rx_rcr_rings_t rx_rcr_rings; 3045 p_rx_rcr_ring_t *rcr_rings; 3046 p_rx_mbox_areas_t rx_mbox_areas_p; 3047 p_rx_mbox_t *rx_mbox_p; 3048 hxge_status_t status = HXGE_OK; 3049 3050 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start")); 3051 3052 rx_rbr_rings = hxgep->rx_rbr_rings; 3053 rx_rcr_rings = hxgep->rx_rcr_rings; 3054 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3055 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3056 "<== hxge_rxdma_hw_start: NULL ring pointers")); 3057 return (HXGE_ERROR); 3058 } 3059 3060 ndmas = rx_rbr_rings->ndmas; 3061 if (ndmas == 0) { 3062 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3063 "<== hxge_rxdma_hw_start: no dma channel allocated")); 3064 return (HXGE_ERROR); 3065 } 3066 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3067 "==> hxge_rxdma_hw_start (ndmas %d)", ndmas)); 3068 3069 /* 3070 * Scrub the RDC Rx DMA Prefetch Buffer Command. 3071 */ 3072 for (i = 0; i < 128; i++) { 3073 HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i); 3074 } 3075 3076 /* 3077 * Scrub Rx DMA Shadow Tail Command. 3078 */ 3079 for (i = 0; i < 64; i++) { 3080 HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i); 3081 } 3082 3083 /* 3084 * Scrub Rx DMA Control Fifo Command. 3085 */ 3086 for (i = 0; i < 512; i++) { 3087 HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i); 3088 } 3089 3090 /* 3091 * Scrub Rx DMA Data Fifo Command. 3092 */ 3093 for (i = 0; i < 1536; i++) { 3094 HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i); 3095 } 3096 3097 /* 3098 * Reset the FIFO Error Stat. 
3099 */ 3100 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF); 3101 3102 /* Set the error mask to receive interrupts */ 3103 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3104 3105 rbr_rings = rx_rbr_rings->rbr_rings; 3106 rcr_rings = rx_rcr_rings->rcr_rings; 3107 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 3108 if (rx_mbox_areas_p) { 3109 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3110 } 3111 3112 for (i = 0; i < ndmas; i++) { 3113 channel = rbr_rings[i]->rdc; 3114 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3115 "==> hxge_rxdma_hw_start (ndmas %d) channel %d", 3116 ndmas, channel)); 3117 status = hxge_rxdma_start_channel(hxgep, channel, 3118 (p_rx_rbr_ring_t)rbr_rings[i], 3119 (p_rx_rcr_ring_t)rcr_rings[i], 3120 (p_rx_mbox_t)rx_mbox_p[i], rbr_rings[i]->rbb_max); 3121 if (status != HXGE_OK) { 3122 goto hxge_rxdma_hw_start_fail1; 3123 } 3124 } 3125 3126 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: " 3127 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3128 rx_rbr_rings, rx_rcr_rings)); 3129 goto hxge_rxdma_hw_start_exit; 3130 3131 hxge_rxdma_hw_start_fail1: 3132 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3133 "==> hxge_rxdma_hw_start: disable " 3134 "(status 0x%x channel %d i %d)", status, channel, i)); 3135 for (; i >= 0; i--) { 3136 channel = rbr_rings[i]->rdc; 3137 (void) hxge_rxdma_stop_channel(hxgep, channel); 3138 } 3139 3140 hxge_rxdma_hw_start_exit: 3141 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3142 "==> hxge_rxdma_hw_start: (status 0x%x)", status)); 3143 return (status); 3144 } 3145 3146 static void 3147 hxge_rxdma_hw_stop(p_hxge_t hxgep) 3148 { 3149 int i, ndmas; 3150 uint16_t channel; 3151 p_rx_rbr_rings_t rx_rbr_rings; 3152 p_rx_rbr_ring_t *rbr_rings; 3153 p_rx_rcr_rings_t rx_rcr_rings; 3154 3155 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop")); 3156 3157 rx_rbr_rings = hxgep->rx_rbr_rings; 3158 rx_rcr_rings = hxgep->rx_rcr_rings; 3159 3160 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3161 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3162 "<== hxge_rxdma_hw_stop: NULL ring pointers")); 3163 return; 3164 } 3165 3166 ndmas = rx_rbr_rings->ndmas; 3167 if (!ndmas) { 3168 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3169 "<== hxge_rxdma_hw_stop: no dma channel allocated")); 3170 return; 3171 } 3172 3173 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3174 "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3175 3176 rbr_rings = rx_rbr_rings->rbr_rings; 3177 for (i = 0; i < ndmas; i++) { 3178 channel = rbr_rings[i]->rdc; 3179 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3180 "==> hxge_rxdma_hw_stop (ndmas %d) channel %d", 3181 ndmas, channel)); 3182 (void) hxge_rxdma_stop_channel(hxgep, channel); 3183 } 3184 3185 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: " 3186 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3187 rx_rbr_rings, rx_rcr_rings)); 3188 3189 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop")); 3190 } 3191 3192 static hxge_status_t 3193 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 3194 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p, 3195 int n_init_kick) 3196 { 3197 hpi_handle_t handle; 3198 hpi_status_t rs = HPI_SUCCESS; 3199 rdc_stat_t cs; 3200 rdc_int_mask_t ent_mask; 3201 hxge_status_t status = HXGE_OK; 3202 3203 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel")); 3204 3205 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3206 3207 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: " 3208 "hpi handle addr $%p acc $%p", 3209 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3210 3211 /* Reset RXDMA channel */ 3212 rs = 
hpi_rxdma_cfg_rdc_reset(handle, channel); 3213 if (rs != HPI_SUCCESS) { 3214 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3215 "==> hxge_rxdma_start_channel: " 3216 "reset rxdma failed (0x%08x channel %d)", 3217 status, channel)); 3218 return (HXGE_ERROR | rs); 3219 } 3220 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3221 "==> hxge_rxdma_start_channel: reset done: channel %d", channel)); 3222 3223 /* 3224 * Initialize the RXDMA channel specific FZC control configurations. 3225 * These FZC registers are pertaining to each RX channel (logical 3226 * pages). 3227 */ 3228 status = hxge_init_fzc_rxdma_channel(hxgep, 3229 channel, rbr_p, rcr_p, mbox_p); 3230 if (status != HXGE_OK) { 3231 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3232 "==> hxge_rxdma_start_channel: " 3233 "init fzc rxdma failed (0x%08x channel %d)", 3234 status, channel)); 3235 return (status); 3236 } 3237 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3238 "==> hxge_rxdma_start_channel: fzc done")); 3239 3240 /* 3241 * Zero out the shadow and prefetch ram. 3242 */ 3243 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3244 "==> hxge_rxdma_start_channel: ram done")); 3245 3246 /* Set up the interrupt event masks. */ 3247 ent_mask.value = 0; 3248 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3249 if (rs != HPI_SUCCESS) { 3250 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3251 "==> hxge_rxdma_start_channel: " 3252 "init rxdma event masks failed (0x%08x channel %d)", 3253 status, channel)); 3254 return (HXGE_ERROR | rs); 3255 } 3256 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3257 "event done: channel %d (mask 0x%016llx)", 3258 channel, ent_mask.value)); 3259 3260 /* 3261 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA 3262 * channels and enable each DMA channel. 3263 */ 3264 status = hxge_enable_rxdma_channel(hxgep, 3265 channel, rbr_p, rcr_p, mbox_p, n_init_kick); 3266 if (status != HXGE_OK) { 3267 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3268 " hxge_rxdma_start_channel: " 3269 " init enable rxdma failed (0x%08x channel %d)", 3270 status, channel)); 3271 return (status); 3272 } 3273 3274 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3275 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3276 3277 /* 3278 * Initialize the receive DMA control and status register 3279 * Note that rdc_stat HAS to be set after RBR and RCR rings are set 3280 */ 3281 cs.value = 0; 3282 cs.bits.mex = 1; 3283 cs.bits.rcr_thres = 1; 3284 cs.bits.rcr_to = 1; 3285 cs.bits.rbr_empty = 1; 3286 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3287 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3288 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 3289 if (status != HXGE_OK) { 3290 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3291 "==> hxge_rxdma_start_channel: " 3292 "init rxdma control register failed (0x%08x channel %d", 3293 status, channel)); 3294 return (status); 3295 } 3296 3297 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3298 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3299 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3300 "==> hxge_rxdma_start_channel: enable done")); 3301 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel")); 3302 3303 return (HXGE_OK); 3304 } 3305 3306 static hxge_status_t 3307 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel) 3308 { 3309 hpi_handle_t handle; 3310 hpi_status_t rs = HPI_SUCCESS; 3311 rdc_stat_t cs; 3312 rdc_int_mask_t ent_mask; 3313 hxge_status_t status = HXGE_OK; 3314 3315 HXGE_DEBUG_MSG((hxgep, RX_CTL, 
"==> hxge_rxdma_stop_channel")); 3316 3317 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3318 3319 HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: " 3320 "hpi handle addr $%p acc $%p", 3321 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3322 3323 /* Reset RXDMA channel */ 3324 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3325 if (rs != HPI_SUCCESS) { 3326 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3327 " hxge_rxdma_stop_channel: " 3328 " reset rxdma failed (0x%08x channel %d)", 3329 rs, channel)); 3330 return (HXGE_ERROR | rs); 3331 } 3332 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3333 "==> hxge_rxdma_stop_channel: reset done")); 3334 3335 /* Set up the interrupt event masks. */ 3336 ent_mask.value = RDC_INT_MASK_ALL; 3337 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3338 if (rs != HPI_SUCCESS) { 3339 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3340 "==> hxge_rxdma_stop_channel: " 3341 "set rxdma event masks failed (0x%08x channel %d)", 3342 rs, channel)); 3343 return (HXGE_ERROR | rs); 3344 } 3345 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3346 "==> hxge_rxdma_stop_channel: event done")); 3347 3348 /* Initialize the receive DMA control and status register */ 3349 cs.value = 0; 3350 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3351 3352 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control " 3353 " to default (all 0s) 0x%08x", cs.value)); 3354 3355 if (status != HXGE_OK) { 3356 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3357 " hxge_rxdma_stop_channel: init rxdma" 3358 " control register failed (0x%08x channel %d", 3359 status, channel)); 3360 return (status); 3361 } 3362 3363 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3364 "==> hxge_rxdma_stop_channel: control done")); 3365 3366 /* disable dma channel */ 3367 status = hxge_disable_rxdma_channel(hxgep, channel); 3368 3369 if (status != HXGE_OK) { 3370 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3371 " hxge_rxdma_stop_channel: " 3372 " init enable rxdma failed (0x%08x channel %d)", 3373 status, channel)); 3374 return (status); 3375 } 3376 3377 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3378 "==> hxge_rxdma_stop_channel: disable done")); 3379 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel")); 3380 3381 return (HXGE_OK); 3382 } 3383 3384 hxge_status_t 3385 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep) 3386 { 3387 hpi_handle_t handle; 3388 p_hxge_rdc_sys_stats_t statsp; 3389 rdc_fifo_err_stat_t stat; 3390 hxge_status_t status = HXGE_OK; 3391 3392 handle = hxgep->hpi_handle; 3393 statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats; 3394 3395 /* Clear the int_dbg register in case it is an injected err */ 3396 HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0); 3397 3398 /* Get the error status and clear the register */ 3399 HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value); 3400 HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value); 3401 3402 if (stat.bits.rx_ctrl_fifo_sec) { 3403 statsp->ctrl_fifo_sec++; 3404 if (statsp->ctrl_fifo_sec == 1) 3405 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3406 "==> hxge_rxdma_handle_sys_errors: " 3407 "rx_ctrl_fifo_sec")); 3408 } 3409 3410 if (stat.bits.rx_ctrl_fifo_ded) { 3411 /* Global fatal error encountered */ 3412 statsp->ctrl_fifo_ded++; 3413 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3414 HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED); 3415 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3416 "==> hxge_rxdma_handle_sys_errors: " 3417 "fatal error: rx_ctrl_fifo_ded error")); 3418 } 3419 3420 if (stat.bits.rx_data_fifo_sec) { 3421 statsp->data_fifo_sec++; 3422 if (statsp->data_fifo_sec == 1) 3423 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 
3424 "==> hxge_rxdma_handle_sys_errors: " 3425 "rx_data_fifo_sec")); 3426 } 3427 3428 if (stat.bits.rx_data_fifo_ded) { 3429 /* Global fatal error encountered */ 3430 statsp->data_fifo_ded++; 3431 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3432 HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED); 3433 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3434 "==> hxge_rxdma_handle_sys_errors: " 3435 "fatal error: rx_data_fifo_ded error")); 3436 } 3437 3438 if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) { 3439 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3440 " hxge_rxdma_handle_sys_errors: fatal error\n")); 3441 status = hxge_rx_port_fatal_err_recover(hxgep); 3442 if (status == HXGE_OK) { 3443 FM_SERVICE_RESTORED(hxgep); 3444 } 3445 } 3446 3447 return (HXGE_OK); 3448 } 3449 3450 static hxge_status_t 3451 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel) 3452 { 3453 hpi_handle_t handle; 3454 hpi_status_t rs = HPI_SUCCESS; 3455 hxge_status_t status = HXGE_OK; 3456 p_rx_rbr_ring_t rbrp; 3457 p_rx_rcr_ring_t rcrp; 3458 p_rx_mbox_t mboxp; 3459 rdc_int_mask_t ent_mask; 3460 p_hxge_dma_common_t dmap; 3461 int ring_idx; 3462 p_rx_msg_t rx_msg_p; 3463 int i; 3464 uint32_t hxge_port_rcr_size; 3465 uint64_t tmp; 3466 int n_init_kick = 0; 3467 3468 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover")); 3469 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3470 "Recovering from RxDMAChannel#%d error...", channel)); 3471 3472 /* 3473 * Stop the dma channel waits for the stop done. If the stop done bit 3474 * is not set, then create an error. 3475 */ 3476 3477 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3478 3479 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop...")); 3480 3481 ring_idx = hxge_rxdma_get_ring_index(hxgep, channel); 3482 rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx]; 3483 rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx]; 3484 3485 MUTEX_ENTER(&rcrp->lock); 3486 MUTEX_ENTER(&rbrp->lock); 3487 3488 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel...")); 3489 3490 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 3491 if (rs != HPI_SUCCESS) { 3492 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3493 "hxge_disable_rxdma_channel:failed")); 3494 goto fail; 3495 } 3496 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt...")); 3497 3498 /* Disable interrupt */ 3499 ent_mask.value = RDC_INT_MASK_ALL; 3500 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3501 if (rs != HPI_SUCCESS) { 3502 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3503 "Set rxdma event masks failed (channel %d)", channel)); 3504 } 3505 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset...")); 3506 3507 /* Reset RXDMA channel */ 3508 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3509 if (rs != HPI_SUCCESS) { 3510 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3511 "Reset rxdma failed (channel %d)", channel)); 3512 goto fail; 3513 } 3514 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 3515 mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 3516 3517 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3518 rbrp->rbr_rd_index = 0; 3519 3520 rcrp->comp_rd_index = 0; 3521 rcrp->comp_wt_index = 0; 3522 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3523 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3524 #if defined(__i386) 3525 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3526 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3527 #else 3528 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3529 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3530 #endif 3531 3532 rcrp->rcr_desc_last_p = 
rcrp->rcr_desc_rd_head_p + 3533 (hxge_port_rcr_size - 1); 3534 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3535 (hxge_port_rcr_size - 1); 3536 3537 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 3538 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 3539 3540 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 3541 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3542 3543 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n", 3544 rbrp->rbr_max_size)); 3545 3546 /* Count the number of buffers owned by the hardware at this moment */ 3547 for (i = 0; i < rbrp->rbr_max_size; i++) { 3548 rx_msg_p = rbrp->rx_msg_ring[i]; 3549 if (rx_msg_p->ref_cnt == 1) { 3550 n_init_kick++; 3551 } 3552 } 3553 3554 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start...")); 3555 3556 /* 3557 * This is error recover! Some buffers are owned by the hardware and 3558 * the rest are owned by the apps. We should only kick in those 3559 * owned by the hardware initially. The apps will post theirs 3560 * eventually. 3561 */ 3562 status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp, 3563 n_init_kick); 3564 if (status != HXGE_OK) { 3565 goto fail; 3566 } 3567 3568 /* 3569 * The DMA channel may disable itself automatically. 3570 * The following is a work-around. 3571 */ 3572 HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp); 3573 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 3574 if (rs != HPI_SUCCESS) { 3575 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3576 "hpi_rxdma_cfg_rdc_enable (channel %d)", channel)); 3577 } 3578 3579 MUTEX_EXIT(&rbrp->lock); 3580 MUTEX_EXIT(&rcrp->lock); 3581 3582 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3583 "Recovery Successful, RxDMAChannel#%d Restored", channel)); 3584 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover")); 3585 3586 return (HXGE_OK); 3587 3588 fail: 3589 MUTEX_EXIT(&rbrp->lock); 3590 MUTEX_EXIT(&rcrp->lock); 3591 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3592 3593 return (HXGE_ERROR | rs); 3594 } 3595 3596 static hxge_status_t 3597 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep) 3598 { 3599 hxge_status_t status = HXGE_OK; 3600 p_hxge_dma_common_t *dma_buf_p; 3601 uint16_t channel; 3602 int ndmas; 3603 int i; 3604 block_reset_t reset_reg; 3605 p_rx_rcr_ring_t rcrp; 3606 p_rx_rbr_ring_t rbrp; 3607 3608 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover")); 3609 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ...")); 3610 3611 /* Reset RDC block from PEU for this fatal error */ 3612 reset_reg.value = 0; 3613 reset_reg.bits.rdc_rst = 1; 3614 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 3615 3616 /* Disable RxMAC */ 3617 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n")); 3618 if (hxge_rx_vmac_disable(hxgep) != HXGE_OK) 3619 goto fail; 3620 3621 HXGE_DELAY(1000); 3622 3623 /* Restore any common settings after PEU reset */ 3624 if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK) 3625 goto fail; 3626 3627 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels...")); 3628 3629 ndmas = hxgep->rx_buf_pool_p->ndmas; 3630 dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p; 3631 3632 for (i = 0; i < ndmas; i++) { 3633 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 3634 rcrp = hxgep->rx_rcr_rings->rcr_rings[channel]; 3635 rbrp = rcrp->rx_rbr_p; 3636 3637 MUTEX_ENTER(&rbrp->post_lock); 3638 /* This function needs to be inside the post_lock */ 3639 if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) { 3640 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3641 "Could not recover 
channel %d", channel)); 3642 } 3643 MUTEX_EXIT(&rbrp->post_lock); 3644 } 3645 3646 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC...")); 3647 3648 /* Reset RxMAC */ 3649 if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) { 3650 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3651 "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC")); 3652 goto fail; 3653 } 3654 3655 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC...")); 3656 3657 /* Re-Initialize RxMAC */ 3658 if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) { 3659 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3660 "hxge_rx_port_fatal_err_recover: Failed to re-initialize RxMAC")); 3661 goto fail; 3662 } 3663 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC...")); 3664 3665 /* Re-enable RxMAC */ 3666 if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) { 3667 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3668 "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC")); 3669 goto fail; 3670 } 3671 3672 /* Reset the error mask since PEU reset cleared it */ 3673 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3674 3675 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3676 "Recovery Successful, RxPort Restored")); 3677 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover")); 3678 3679 return (HXGE_OK); 3680 fail: 3681 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3682 return (status); 3683 } 3684 3685 static void 3686 hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p) 3687 { 3688 hpi_status_t hpi_status; 3689 hxge_status_t status; 3690 int i; 3691 p_hxge_rx_ring_stats_t rdc_stats; 3692 3693 rdc_stats = &hxgep->statsp->rdc_stats[rx_rbr_p->rdc]; 3694 rdc_stats->rbr_empty_restore++; 3695 rx_rbr_p->rbr_is_empty = B_FALSE; 3696 3697 /* 3698 * Complete the processing for the RBR Empty by: 3699 * 0) kicking back HXGE_RBR_EMPTY_THRESHOLD 3700 * packets. 3701 * 1) Disable the RX vmac. 3702 * 2) Re-enable the affected DMA channel. 3703 * 3) Re-enable the RX vmac. 3704 */ 3705 3706 /* 3707 * Disable the RX VMAC by setting the framelength 3708 * to 0, since there is a hardware bug when disabling 3709 * the vmac. 3710 */ 3711 MUTEX_ENTER(hxgep->genlock); 3712 (void) hpi_vmac_rx_set_framesize( 3713 HXGE_DEV_HPI_HANDLE(hxgep), (uint16_t)0); 3714 3715 hpi_status = hpi_rxdma_cfg_rdc_enable( 3716 HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc); 3717 if (hpi_status != HPI_SUCCESS) { 3718 rdc_stats->rbr_empty_fail++; 3719 3720 /* Assume we are already inside the post_lock */ 3721 status = hxge_rxdma_fatal_err_recover(hxgep, rx_rbr_p->rdc); 3722 if (status != HXGE_OK) { 3723 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3724 "hxge(%d): channel(%d) RBR empty recovery failed.", 3725 hxgep->instance, rx_rbr_p->rdc)); 3726 } 3727 } 3728 3729 for (i = 0; i < 1024; i++) { 3730 uint64_t value; 3731 RXDMA_REG_READ64(HXGE_DEV_HPI_HANDLE(hxgep), 3732 RDC_STAT, i & 3, &value); 3733 } 3734 3735 /* 3736 * Re-enable the RX VMAC. 3737 */ 3738 (void) hpi_vmac_rx_set_framesize(HXGE_DEV_HPI_HANDLE(hxgep), 3739 (uint16_t)hxgep->vmac.maxframesize); 3740 MUTEX_EXIT(hxgep->genlock); 3741 } 3742
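/*
 * Locking note: hxge_rbr_empty_restore() expects the ring's <post_lock>
 * to be held by the caller (see hxge_rx_rbr_empty_recover() and the
 * "Assume we are already inside the post_lock" path above), and it
 * acquires <genlock> itself around the VMAC frame-size toggling.
 */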