1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <hxge_impl.h> 27 #include <hxge_rxdma.h> 28 29 /* 30 * Globals: tunable parameters (/etc/system or adb) 31 * 32 */ 33 extern uint32_t hxge_rbr_size; 34 extern uint32_t hxge_rcr_size; 35 extern uint32_t hxge_rbr_spare_size; 36 extern uint32_t hxge_mblks_pending; 37 38 /* 39 * Tunable to reduce the amount of time spent in the 40 * ISR doing Rx Processing. 41 */ 42 extern uint32_t hxge_max_rx_pkts; 43 44 /* 45 * Tunables to manage the receive buffer blocks. 46 * 47 * hxge_rx_threshold_hi: copy all buffers. 48 * hxge_rx_bcopy_size_type: receive buffer block size type. 49 * hxge_rx_threshold_lo: copy only up to tunable block size type. 50 */ 51 extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi; 52 extern hxge_rxbuf_type_t hxge_rx_buf_size_type; 53 extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo; 54 55 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep); 56 static void hxge_unmap_rxdma(p_hxge_t hxgep); 57 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep); 58 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep); 59 static void hxge_rxdma_hw_stop(p_hxge_t hxgep); 60 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 61 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 62 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 63 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 64 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p); 65 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 66 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p); 67 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, 68 uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p, 69 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 70 p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p); 71 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 72 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p); 73 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, 74 uint16_t channel, p_hxge_dma_common_t *dma_buf_p, 75 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks); 76 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 77 p_rx_rbr_ring_t rbr_p); 78 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 79 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p); 80 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel); 81 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 82 p_rx_rcr_ring_t *rcr_p, rdc_stat_t cs); 83 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p, 84 p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p, 85 mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry); 86 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep, 87 uint16_t channel); 88 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t); 89 static void hxge_freeb(p_rx_msg_t); 90 static void hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, 91 p_hxge_ldv_t ldvp, rdc_stat_t cs); 92 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, 93 p_hxge_ldv_t ldvp, rdc_stat_t cs); 94 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep, 95 p_rx_rbr_ring_t rx_dmap); 96 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, 97 uint16_t channel); 98 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep); 99 100 #define HXGE_RXDMA_RBB_MAX(x) (((x) >> 4) * 15) 101 #define HXGE_RXDMA_RBB_MIN(x) ((x) >> 4) 102 #define HXGE_RXDMA_RBB_THRESHOLD(x) (((x) >> 4) * 14) 103 104 hxge_status_t 105 hxge_init_rxdma_channels(p_hxge_t hxgep) 106 { 107 hxge_status_t status = HXGE_OK; 108 block_reset_t reset_reg; 109 110 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels")); 111 112 /* Reset RDC block from PEU to clear any previous state */ 113 reset_reg.value = 0; 114 reset_reg.bits.rdc_rst = 1; 115 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 116 HXGE_DELAY(1000); 117 118 status = hxge_map_rxdma(hxgep); 119 if (status != HXGE_OK) { 120 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 121 "<== hxge_init_rxdma: status 0x%x", status)); 122 return (status); 123 } 124 125 status = hxge_rxdma_hw_start_common(hxgep); 126 if (status != HXGE_OK) { 127 hxge_unmap_rxdma(hxgep); 128 } 129 130 status = hxge_rxdma_hw_start(hxgep); 131 if (status != HXGE_OK) { 132 hxge_unmap_rxdma(hxgep); 133 } 134 135 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 136 "<== hxge_init_rxdma_channels: status 0x%x", status)); 137 return (status); 138 } 139 140 void 141 hxge_uninit_rxdma_channels(p_hxge_t hxgep) 142 { 143 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels")); 144 145 hxge_rxdma_hw_stop(hxgep); 146 hxge_unmap_rxdma(hxgep); 147 148 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uinit_rxdma_channels")); 149 } 150 151 hxge_status_t 152 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel, 153 rdc_stat_t *cs_p) 154 { 155 hpi_handle_t handle; 156 hpi_status_t rs = HPI_SUCCESS; 157 hxge_status_t status = HXGE_OK; 158 159 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 160 "<== hxge_init_rxdma_channel_cntl_stat")); 161 162 handle = HXGE_DEV_HPI_HANDLE(hxgep); 163 rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p); 164 165 if (rs != HPI_SUCCESS) { 166 status = HXGE_ERROR | rs; 167 } 168 return (status); 169 } 170 171 172 hxge_status_t 173 hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 174 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 175 { 176 hpi_handle_t handle; 177 rdc_desc_cfg_t rdc_desc; 178 rdc_rcr_cfg_b_t *cfgb_p; 179 hpi_status_t rs = HPI_SUCCESS; 180 181 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel")); 182 handle = HXGE_DEV_HPI_HANDLE(hxgep); 183 184 /* 185 * Use configuration data composed at init time. Write to hardware the 186 * receive ring configurations. 187 */ 188 rdc_desc.mbox_enable = 1; 189 rdc_desc.mbox_addr = mbox_p->mbox_addr; 190 HXGE_DEBUG_MSG((hxgep, RX_CTL, 191 "==> hxge_enable_rxdma_channel: mboxp $%p($%p)", 192 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 193 194 rdc_desc.rbr_len = rbr_p->rbb_max; 195 rdc_desc.rbr_addr = rbr_p->rbr_addr; 196 197 switch (hxgep->rx_bksize_code) { 198 case RBR_BKSIZE_4K: 199 rdc_desc.page_size = SIZE_4KB; 200 break; 201 case RBR_BKSIZE_8K: 202 rdc_desc.page_size = SIZE_8KB; 203 break; 204 } 205 206 rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0; 207 rdc_desc.valid0 = 1; 208 209 rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1; 210 rdc_desc.valid1 = 1; 211 212 rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2; 213 rdc_desc.valid2 = 1; 214 215 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 216 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 217 218 rdc_desc.rcr_len = rcr_p->comp_size; 219 rdc_desc.rcr_addr = rcr_p->rcr_addr; 220 221 cfgb_p = &(rcr_p->rcr_cfgb); 222 rdc_desc.rcr_threshold = cfgb_p->bits.pthres; 223 rdc_desc.rcr_timeout = cfgb_p->bits.timeout; 224 rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout; 225 226 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: " 227 "rbr_len qlen %d pagesize code %d rcr_len %d", 228 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 229 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: " 230 "size 0 %d size 1 %d size 2 %d", 231 rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1, 232 rbr_p->hpi_pkt_buf_size2)); 233 234 rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 235 if (rs != HPI_SUCCESS) { 236 return (HXGE_ERROR | rs); 237 } 238 239 /* 240 * Enable the timeout and threshold. 241 */ 242 rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 243 rdc_desc.rcr_threshold); 244 if (rs != HPI_SUCCESS) { 245 return (HXGE_ERROR | rs); 246 } 247 248 rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 249 rdc_desc.rcr_timeout); 250 if (rs != HPI_SUCCESS) { 251 return (HXGE_ERROR | rs); 252 } 253 254 /* Enable the DMA */ 255 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 256 if (rs != HPI_SUCCESS) { 257 return (HXGE_ERROR | rs); 258 } 259 260 /* 261 * Kick the DMA engine with the initial kick and indicate 262 * that we have remaining blocks to post. 263 */ 264 rbr_p->pages_to_post = HXGE_RXDMA_RBB_MIN(rbr_p->rbb_max); 265 hpi_rxdma_rdc_rbr_kick(handle, channel, 266 HXGE_RXDMA_RBB_MAX(rbr_p->rbb_max)); 267 268 /* Clear the rbr empty bit */ 269 (void) hpi_rxdma_channel_rbr_empty_clear(handle, channel); 270 271 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel")); 272 273 return (HXGE_OK); 274 } 275 276 static hxge_status_t 277 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel) 278 { 279 hpi_handle_t handle; 280 hpi_status_t rs = HPI_SUCCESS; 281 282 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel")); 283 284 handle = HXGE_DEV_HPI_HANDLE(hxgep); 285 286 /* disable the DMA */ 287 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 288 if (rs != HPI_SUCCESS) { 289 HXGE_DEBUG_MSG((hxgep, RX_CTL, 290 "<== hxge_disable_rxdma_channel:failed (0x%x)", rs)); 291 return (HXGE_ERROR | rs); 292 } 293 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel")); 294 return (HXGE_OK); 295 } 296 297 hxge_status_t 298 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel) 299 { 300 hpi_handle_t handle; 301 hxge_status_t status = HXGE_OK; 302 303 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 304 "==> hxge_rxdma_channel_rcrflush")); 305 306 handle = HXGE_DEV_HPI_HANDLE(hxgep); 307 hpi_rxdma_rdc_rcr_flush(handle, channel); 308 309 HXGE_DEBUG_MSG((hxgep, DMA_CTL, 310 "<== hxge_rxdma_channel_rcrflush")); 311 return (status); 312 313 } 314 315 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 316 317 #define TO_LEFT -1 318 #define TO_RIGHT 1 319 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 320 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 321 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 322 #define NO_HINT 0xffffffff 323 324 /*ARGSUSED*/ 325 hxge_status_t 326 hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p, 327 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 328 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 329 { 330 int bufsize; 331 uint64_t pktbuf_pp; 332 uint64_t dvma_addr; 333 rxring_info_t *ring_info; 334 int base_side, end_side; 335 int r_index, l_index, anchor_index; 336 int found, search_done; 337 uint32_t offset, chunk_size, block_size, page_size_mask; 338 uint32_t chunk_index, block_index, total_index; 339 int max_iterations, iteration; 340 rxbuf_index_info_t *bufinfo; 341 342 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp")); 343 344 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 345 "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 346 pkt_buf_addr_pp, pktbufsz_type)); 347 348 #if defined(__i386) 349 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 350 #else 351 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 352 #endif 353 354 switch (pktbufsz_type) { 355 case 0: 356 bufsize = rbr_p->pkt_buf_size0; 357 break; 358 case 1: 359 bufsize = rbr_p->pkt_buf_size1; 360 break; 361 case 2: 362 bufsize = rbr_p->pkt_buf_size2; 363 break; 364 case RCR_SINGLE_BLOCK: 365 bufsize = 0; 366 anchor_index = 0; 367 break; 368 default: 369 return (HXGE_ERROR); 370 } 371 372 if (rbr_p->num_blocks == 1) { 373 anchor_index = 0; 374 ring_info = rbr_p->ring_info; 375 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 376 377 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 378 "==> hxge_rxbuf_pp_to_vp: (found, 1 block) " 379 "buf_pp $%p btype %d anchor_index %d bufinfo $%p", 380 pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo)); 381 382 goto found_index; 383 } 384 385 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 386 "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d", 387 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 388 389 ring_info = rbr_p->ring_info; 390 found = B_FALSE; 391 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 392 iteration = 0; 393 max_iterations = ring_info->max_iterations; 394 395 /* 396 * First check if this block have been seen recently. This is indicated 397 * by a hint which is initialized when the first buffer of the block is 398 * seen. The hint is reset when the last buffer of the block has been 399 * processed. As three block sizes are supported, three hints are kept. 400 * The idea behind the hints is that once the hardware uses a block 401 * for a buffer of that size, it will use it exclusively for that size 402 * and will use it until it is exhausted. It is assumed that there 403 * would a single block being used for the same buffer sizes at any 404 * given time. 405 */ 406 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 407 anchor_index = ring_info->hint[pktbufsz_type]; 408 dvma_addr = bufinfo[anchor_index].dvma_addr; 409 chunk_size = bufinfo[anchor_index].buf_size; 410 if ((pktbuf_pp >= dvma_addr) && 411 (pktbuf_pp < (dvma_addr + chunk_size))) { 412 found = B_TRUE; 413 /* 414 * check if this is the last buffer in the block If so, 415 * then reset the hint for the size; 416 */ 417 418 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 419 ring_info->hint[pktbufsz_type] = NO_HINT; 420 } 421 } 422 423 if (found == B_FALSE) { 424 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 425 "==> hxge_rxbuf_pp_to_vp: (!found)" 426 "buf_pp $%p btype %d anchor_index %d", 427 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 428 429 /* 430 * This is the first buffer of the block of this size. Need to 431 * search the whole information array. the search algorithm 432 * uses a binary tree search algorithm. It assumes that the 433 * information is already sorted with increasing order info[0] 434 * < info[1] < info[2] .... < info[n-1] where n is the size of 435 * the information array 436 */ 437 r_index = rbr_p->num_blocks - 1; 438 l_index = 0; 439 search_done = B_FALSE; 440 anchor_index = MID_INDEX(r_index, l_index); 441 while (search_done == B_FALSE) { 442 if ((r_index == l_index) || 443 (iteration >= max_iterations)) 444 search_done = B_TRUE; 445 446 end_side = TO_RIGHT; /* to the right */ 447 base_side = TO_LEFT; /* to the left */ 448 /* read the DVMA address information and sort it */ 449 dvma_addr = bufinfo[anchor_index].dvma_addr; 450 chunk_size = bufinfo[anchor_index].buf_size; 451 452 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 453 "==> hxge_rxbuf_pp_to_vp: (searching)" 454 "buf_pp $%p btype %d " 455 "anchor_index %d chunk_size %d dvmaaddr $%p", 456 pkt_buf_addr_pp, pktbufsz_type, anchor_index, 457 chunk_size, dvma_addr)); 458 459 if (pktbuf_pp >= dvma_addr) 460 base_side = TO_RIGHT; /* to the right */ 461 if (pktbuf_pp < (dvma_addr + chunk_size)) 462 end_side = TO_LEFT; /* to the left */ 463 464 switch (base_side + end_side) { 465 case IN_MIDDLE: 466 /* found */ 467 found = B_TRUE; 468 search_done = B_TRUE; 469 if ((pktbuf_pp + bufsize) < 470 (dvma_addr + chunk_size)) 471 ring_info->hint[pktbufsz_type] = 472 bufinfo[anchor_index].buf_index; 473 break; 474 case BOTH_RIGHT: 475 /* not found: go to the right */ 476 l_index = anchor_index + 1; 477 anchor_index = MID_INDEX(r_index, l_index); 478 break; 479 480 case BOTH_LEFT: 481 /* not found: go to the left */ 482 r_index = anchor_index - 1; 483 anchor_index = MID_INDEX(r_index, l_index); 484 break; 485 default: /* should not come here */ 486 return (HXGE_ERROR); 487 } 488 iteration++; 489 } 490 491 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 492 "==> hxge_rxbuf_pp_to_vp: (search done)" 493 "buf_pp $%p btype %d anchor_index %d", 494 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 495 } 496 497 if (found == B_FALSE) { 498 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 499 "==> hxge_rxbuf_pp_to_vp: (search failed)" 500 "buf_pp $%p btype %d anchor_index %d", 501 pkt_buf_addr_pp, pktbufsz_type, anchor_index)); 502 return (HXGE_ERROR); 503 } 504 505 found_index: 506 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 507 "==> hxge_rxbuf_pp_to_vp: (FOUND1)" 508 "buf_pp $%p btype %d bufsize %d anchor_index %d", 509 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index)); 510 511 /* index of the first block in this chunk */ 512 chunk_index = bufinfo[anchor_index].start_index; 513 dvma_addr = bufinfo[anchor_index].dvma_addr; 514 page_size_mask = ring_info->block_size_mask; 515 516 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 517 "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 518 "buf_pp $%p btype %d bufsize %d " 519 "anchor_index %d chunk_index %d dvma $%p", 520 pkt_buf_addr_pp, pktbufsz_type, bufsize, 521 anchor_index, chunk_index, dvma_addr)); 522 523 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 524 block_size = rbr_p->block_size; /* System block(page) size */ 525 526 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 527 "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 528 "buf_pp $%p btype %d bufsize %d " 529 "anchor_index %d chunk_index %d dvma $%p " 530 "offset %d block_size %d", 531 pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index, 532 chunk_index, dvma_addr, offset, block_size)); 533 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index")); 534 535 block_index = (offset / block_size); /* index within chunk */ 536 total_index = chunk_index + block_index; 537 538 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 539 "==> hxge_rxbuf_pp_to_vp: " 540 "total_index %d dvma_addr $%p " 541 "offset %d block_size %d " 542 "block_index %d ", 543 total_index, dvma_addr, offset, block_size, block_index)); 544 545 #if defined(__i386) 546 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 547 (uint32_t)offset); 548 #else 549 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 550 offset); 551 #endif 552 553 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 554 "==> hxge_rxbuf_pp_to_vp: " 555 "total_index %d dvma_addr $%p " 556 "offset %d block_size %d " 557 "block_index %d " 558 "*pkt_buf_addr_p $%p", 559 total_index, dvma_addr, offset, block_size, 560 block_index, *pkt_buf_addr_p)); 561 562 *msg_index = total_index; 563 *bufoffset = (offset & page_size_mask); 564 565 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 566 "==> hxge_rxbuf_pp_to_vp: get msg index: " 567 "msg_index %d bufoffset_index %d", 568 *msg_index, *bufoffset)); 569 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp")); 570 571 return (HXGE_OK); 572 } 573 574 575 /* 576 * used by quick sort (qsort) function 577 * to perform comparison 578 */ 579 static int 580 hxge_sort_compare(const void *p1, const void *p2) 581 { 582 583 rxbuf_index_info_t *a, *b; 584 585 a = (rxbuf_index_info_t *)p1; 586 b = (rxbuf_index_info_t *)p2; 587 588 if (a->dvma_addr > b->dvma_addr) 589 return (1); 590 if (a->dvma_addr < b->dvma_addr) 591 return (-1); 592 return (0); 593 } 594 595 /* 596 * Grabbed this sort implementation from common/syscall/avl.c 597 * 598 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 599 * v = Ptr to array/vector of objs 600 * n = # objs in the array 601 * s = size of each obj (must be multiples of a word size) 602 * f = ptr to function to compare two objs 603 * returns (-1 = less than, 0 = equal, 1 = greater than 604 */ 605 void 606 hxge_ksort(caddr_t v, int n, int s, int (*f) ()) 607 { 608 int g, i, j, ii; 609 unsigned int *p1, *p2; 610 unsigned int tmp; 611 612 /* No work to do */ 613 if (v == NULL || n <= 1) 614 return; 615 /* Sanity check on arguments */ 616 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 617 ASSERT(s > 0); 618 619 for (g = n / 2; g > 0; g /= 2) { 620 for (i = g; i < n; i++) { 621 for (j = i - g; j >= 0 && 622 (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) { 623 p1 = (unsigned *)(v + j * s); 624 p2 = (unsigned *)(v + (j + g) * s); 625 for (ii = 0; ii < s / 4; ii++) { 626 tmp = *p1; 627 *p1++ = *p2; 628 *p2++ = tmp; 629 } 630 } 631 } 632 } 633 } 634 635 /* 636 * Initialize data structures required for rxdma 637 * buffer dvma->vmem address lookup 638 */ 639 /*ARGSUSED*/ 640 static hxge_status_t 641 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp) 642 { 643 int index; 644 rxring_info_t *ring_info; 645 int max_iteration = 0, max_index = 0; 646 647 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init")); 648 649 ring_info = rbrp->ring_info; 650 ring_info->hint[0] = NO_HINT; 651 ring_info->hint[1] = NO_HINT; 652 ring_info->hint[2] = NO_HINT; 653 max_index = rbrp->num_blocks; 654 655 /* read the DVMA address information and sort it */ 656 /* do init of the information array */ 657 658 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 659 " hxge_rxbuf_index_info_init Sort ptrs")); 660 661 /* sort the array */ 662 hxge_ksort((void *) ring_info->buffer, max_index, 663 sizeof (rxbuf_index_info_t), hxge_sort_compare); 664 665 for (index = 0; index < max_index; index++) { 666 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 667 " hxge_rxbuf_index_info_init: sorted chunk %d " 668 " ioaddr $%p kaddr $%p size %x", 669 index, ring_info->buffer[index].dvma_addr, 670 ring_info->buffer[index].kaddr, 671 ring_info->buffer[index].buf_size)); 672 } 673 674 max_iteration = 0; 675 while (max_index >= (1ULL << max_iteration)) 676 max_iteration++; 677 ring_info->max_iterations = max_iteration + 1; 678 679 HXGE_DEBUG_MSG((hxgep, DMA2_CTL, 680 " hxge_rxbuf_index_info_init Find max iter %d", 681 ring_info->max_iterations)); 682 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init")); 683 684 return (HXGE_OK); 685 } 686 687 /*ARGSUSED*/ 688 void 689 hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p) 690 { 691 #ifdef HXGE_DEBUG 692 693 uint32_t bptr; 694 uint64_t pp; 695 696 bptr = entry_p->bits.pkt_buf_addr; 697 698 HXGE_DEBUG_MSG((hxgep, RX_CTL, 699 "\trcr entry $%p " 700 "\trcr entry 0x%0llx " 701 "\trcr entry 0x%08x " 702 "\trcr entry 0x%08x " 703 "\tvalue 0x%0llx\n" 704 "\tmulti = %d\n" 705 "\tpkt_type = 0x%x\n" 706 "\terror = 0x%04x\n" 707 "\tl2_len = %d\n" 708 "\tpktbufsize = %d\n" 709 "\tpkt_buf_addr = $%p\n" 710 "\tpkt_buf_addr (<< 6) = $%p\n", 711 entry_p, 712 *(int64_t *)entry_p, 713 *(int32_t *)entry_p, 714 *(int32_t *)((char *)entry_p + 32), 715 entry_p->value, 716 entry_p->bits.multi, 717 entry_p->bits.pkt_type, 718 entry_p->bits.error, 719 entry_p->bits.l2_len, 720 entry_p->bits.pktbufsz, 721 bptr, 722 entry_p->bits.pkt_buf_addr_l)); 723 724 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 725 RCR_PKT_BUF_ADDR_SHIFT; 726 727 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 728 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 729 #endif 730 } 731 732 /*ARGSUSED*/ 733 void 734 hxge_rxdma_stop(p_hxge_t hxgep) 735 { 736 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop")); 737 738 (void) hxge_rx_vmac_disable(hxgep); 739 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP); 740 741 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop")); 742 } 743 744 void 745 hxge_rxdma_stop_reinit(p_hxge_t hxgep) 746 { 747 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit")); 748 749 (void) hxge_rxdma_stop(hxgep); 750 (void) hxge_uninit_rxdma_channels(hxgep); 751 (void) hxge_init_rxdma_channels(hxgep); 752 753 (void) hxge_rx_vmac_enable(hxgep); 754 755 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit")); 756 } 757 758 hxge_status_t 759 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable) 760 { 761 int i, ndmas; 762 uint16_t channel; 763 p_rx_rbr_rings_t rx_rbr_rings; 764 p_rx_rbr_ring_t *rbr_rings; 765 hpi_handle_t handle; 766 hpi_status_t rs = HPI_SUCCESS; 767 hxge_status_t status = HXGE_OK; 768 769 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 770 "==> hxge_rxdma_hw_mode: mode %d", enable)); 771 772 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 773 HXGE_DEBUG_MSG((hxgep, RX_CTL, 774 "<== hxge_rxdma_mode: not initialized")); 775 return (HXGE_ERROR); 776 } 777 778 rx_rbr_rings = hxgep->rx_rbr_rings; 779 if (rx_rbr_rings == NULL) { 780 HXGE_DEBUG_MSG((hxgep, RX_CTL, 781 "<== hxge_rxdma_mode: NULL ring pointer")); 782 return (HXGE_ERROR); 783 } 784 785 if (rx_rbr_rings->rbr_rings == NULL) { 786 HXGE_DEBUG_MSG((hxgep, RX_CTL, 787 "<== hxge_rxdma_mode: NULL rbr rings pointer")); 788 return (HXGE_ERROR); 789 } 790 791 ndmas = rx_rbr_rings->ndmas; 792 if (!ndmas) { 793 HXGE_DEBUG_MSG((hxgep, RX_CTL, 794 "<== hxge_rxdma_mode: no channel")); 795 return (HXGE_ERROR); 796 } 797 798 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 799 "==> hxge_rxdma_mode (ndmas %d)", ndmas)); 800 801 rbr_rings = rx_rbr_rings->rbr_rings; 802 803 handle = HXGE_DEV_HPI_HANDLE(hxgep); 804 805 for (i = 0; i < ndmas; i++) { 806 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 807 continue; 808 } 809 channel = rbr_rings[i]->rdc; 810 if (enable) { 811 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 812 "==> hxge_rxdma_hw_mode: channel %d (enable)", 813 channel)); 814 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 815 } else { 816 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 817 "==> hxge_rxdma_hw_mode: channel %d (disable)", 818 channel)); 819 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 820 } 821 } 822 823 status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs); 824 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 825 "<== hxge_rxdma_hw_mode: status 0x%x", status)); 826 827 return (status); 828 } 829 830 int 831 hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel) 832 { 833 int i, ndmas; 834 uint16_t rdc; 835 p_rx_rbr_rings_t rx_rbr_rings; 836 p_rx_rbr_ring_t *rbr_rings; 837 838 HXGE_DEBUG_MSG((hxgep, RX_CTL, 839 "==> hxge_rxdma_get_ring_index: channel %d", channel)); 840 841 rx_rbr_rings = hxgep->rx_rbr_rings; 842 if (rx_rbr_rings == NULL) { 843 HXGE_DEBUG_MSG((hxgep, RX_CTL, 844 "<== hxge_rxdma_get_ring_index: NULL ring pointer")); 845 return (-1); 846 } 847 848 ndmas = rx_rbr_rings->ndmas; 849 if (!ndmas) { 850 HXGE_DEBUG_MSG((hxgep, RX_CTL, 851 "<== hxge_rxdma_get_ring_index: no channel")); 852 return (-1); 853 } 854 855 HXGE_DEBUG_MSG((hxgep, RX_CTL, 856 "==> hxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 857 858 rbr_rings = rx_rbr_rings->rbr_rings; 859 for (i = 0; i < ndmas; i++) { 860 rdc = rbr_rings[i]->rdc; 861 if (channel == rdc) { 862 HXGE_DEBUG_MSG((hxgep, RX_CTL, 863 "==> hxge_rxdma_get_rbr_ring: " 864 "channel %d (index %d) " 865 "ring %d", channel, i, rbr_rings[i])); 866 867 return (i); 868 } 869 } 870 871 HXGE_DEBUG_MSG((hxgep, RX_CTL, 872 "<== hxge_rxdma_get_rbr_ring_index: not found")); 873 874 return (-1); 875 } 876 877 /* 878 * Static functions start here. 879 */ 880 static p_rx_msg_t 881 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p) 882 { 883 p_rx_msg_t hxge_mp = NULL; 884 p_hxge_dma_common_t dmamsg_p; 885 uchar_t *buffer; 886 887 hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 888 if (hxge_mp == NULL) { 889 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 890 "Allocation of a rx msg failed.")); 891 goto hxge_allocb_exit; 892 } 893 894 hxge_mp->use_buf_pool = B_FALSE; 895 if (dmabuf_p) { 896 hxge_mp->use_buf_pool = B_TRUE; 897 898 dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma; 899 *dmamsg_p = *dmabuf_p; 900 dmamsg_p->nblocks = 1; 901 dmamsg_p->block_size = size; 902 dmamsg_p->alength = size; 903 buffer = (uchar_t *)dmabuf_p->kaddrp; 904 905 dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size); 906 dmabuf_p->ioaddr_pp = (void *) 907 ((char *)dmabuf_p->ioaddr_pp + size); 908 909 dmabuf_p->alength -= size; 910 dmabuf_p->offset += size; 911 dmabuf_p->dma_cookie.dmac_laddress += size; 912 dmabuf_p->dma_cookie.dmac_size -= size; 913 } else { 914 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 915 if (buffer == NULL) { 916 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, 917 "Allocation of a receive page failed.")); 918 goto hxge_allocb_fail1; 919 } 920 } 921 922 hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb); 923 if (hxge_mp->rx_mblk_p == NULL) { 924 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed.")); 925 goto hxge_allocb_fail2; 926 } 927 hxge_mp->buffer = buffer; 928 hxge_mp->block_size = size; 929 hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb; 930 hxge_mp->freeb.free_arg = (caddr_t)hxge_mp; 931 hxge_mp->ref_cnt = 1; 932 hxge_mp->free = B_TRUE; 933 hxge_mp->rx_use_bcopy = B_FALSE; 934 935 atomic_inc_32(&hxge_mblks_pending); 936 937 goto hxge_allocb_exit; 938 939 hxge_allocb_fail2: 940 if (!hxge_mp->use_buf_pool) { 941 KMEM_FREE(buffer, size); 942 } 943 hxge_allocb_fail1: 944 KMEM_FREE(hxge_mp, sizeof (rx_msg_t)); 945 hxge_mp = NULL; 946 947 hxge_allocb_exit: 948 return (hxge_mp); 949 } 950 951 p_mblk_t 952 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size) 953 { 954 p_mblk_t mp; 955 956 HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb")); 957 HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p " 958 "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size)); 959 960 mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb); 961 if (mp == NULL) { 962 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 963 goto hxge_dupb_exit; 964 } 965 966 atomic_inc_32(&hxge_mp->ref_cnt); 967 968 hxge_dupb_exit: 969 HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp)); 970 return (mp); 971 } 972 973 p_mblk_t 974 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size) 975 { 976 p_mblk_t mp; 977 uchar_t *dp; 978 979 mp = allocb(size + HXGE_RXBUF_EXTRA, 0); 980 if (mp == NULL) { 981 HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 982 goto hxge_dupb_bcopy_exit; 983 } 984 dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA; 985 bcopy((void *) &hxge_mp->buffer[offset], dp, size); 986 mp->b_wptr = dp + size; 987 988 hxge_dupb_bcopy_exit: 989 990 HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp)); 991 992 return (mp); 993 } 994 995 void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, 996 p_rx_msg_t rx_msg_p); 997 998 void 999 hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1000 { 1001 hpi_handle_t handle; 1002 uint64_t rbr_qlen, blocks_to_post = 0ULL; 1003 1004 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page")); 1005 1006 /* Reuse this buffer */ 1007 rx_msg_p->free = B_FALSE; 1008 rx_msg_p->cur_usage_cnt = 0; 1009 rx_msg_p->max_usage_cnt = 0; 1010 rx_msg_p->pkt_buf_size = 0; 1011 1012 if (rx_rbr_p->rbr_use_bcopy) { 1013 rx_msg_p->rx_use_bcopy = B_FALSE; 1014 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1015 } 1016 1017 /* 1018 * Get the rbr header pointer and its offset index. 1019 */ 1020 MUTEX_ENTER(&rx_rbr_p->post_lock); 1021 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1022 rx_rbr_p->rbr_wrap_mask); 1023 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1024 1025 /* 1026 * Don't post when index is close to 0 or near the max to reduce the 1027 * number rbr_emepty errors 1028 */ 1029 rx_rbr_p->pages_to_post++; 1030 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1031 1032 /* 1033 * False RBR Empty Workaround 1034 */ 1035 RXDMA_REG_READ64(handle, RDC_RBR_QLEN, rx_rbr_p->rdc, &rbr_qlen); 1036 rbr_qlen = rbr_qlen & 0xffff; 1037 1038 if ((rbr_qlen > 0) && 1039 (rbr_qlen < HXGE_RXDMA_RBB_THRESHOLD(rx_rbr_p->rbb_max))) { 1040 blocks_to_post = 1041 HXGE_RXDMA_RBB_MAX(rx_rbr_p->rbb_max) - rbr_qlen; 1042 } 1043 1044 /* 1045 * Clamp posting to what we have available. 1046 */ 1047 if ((blocks_to_post > 0) && 1048 (blocks_to_post > rx_rbr_p->pages_to_post)) { 1049 blocks_to_post = rx_rbr_p->pages_to_post; 1050 } 1051 1052 /* 1053 * Post blocks to the hardware, if any is available. 1054 */ 1055 if (blocks_to_post > 0) { 1056 hpi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, blocks_to_post); 1057 rx_rbr_p->pages_to_post -= blocks_to_post; 1058 } 1059 MUTEX_EXIT(&rx_rbr_p->post_lock); 1060 1061 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1062 "<== hxge_post_page (channel %d post_next_index %d)", 1063 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1064 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page")); 1065 } 1066 1067 void 1068 hxge_freeb(p_rx_msg_t rx_msg_p) 1069 { 1070 size_t size; 1071 uchar_t *buffer = NULL; 1072 int ref_cnt; 1073 boolean_t free_state = B_FALSE; 1074 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1075 1076 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb")); 1077 HXGE_DEBUG_MSG((NULL, MEM2_CTL, 1078 "hxge_freeb:rx_msg_p = $%p (block pending %d)", 1079 rx_msg_p, hxge_mblks_pending)); 1080 1081 /* 1082 * First we need to get the free state, then 1083 * atomic decrement the reference count to prevent 1084 * the race condition with the interrupt thread that 1085 * is processing a loaned up buffer block. 1086 */ 1087 free_state = rx_msg_p->free; 1088 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1089 if (!ref_cnt) { 1090 atomic_dec_32(&hxge_mblks_pending); 1091 1092 buffer = rx_msg_p->buffer; 1093 size = rx_msg_p->block_size; 1094 1095 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: " 1096 "will free: rx_msg_p = $%p (block pending %d)", 1097 rx_msg_p, hxge_mblks_pending)); 1098 1099 if (!rx_msg_p->use_buf_pool) { 1100 KMEM_FREE(buffer, size); 1101 } 1102 1103 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1104 if (ring) { 1105 /* 1106 * Decrement the receive buffer ring's reference 1107 * count, too. 1108 */ 1109 atomic_dec_32(&ring->rbr_ref_cnt); 1110 1111 /* 1112 * Free the receive buffer ring, iff 1113 * 1. all the receive buffers have been freed 1114 * 2. and we are in the proper state (that is, 1115 * we are not UNMAPPING). 1116 */ 1117 if (ring->rbr_ref_cnt == 0 && 1118 ring->rbr_state == RBR_UNMAPPED) { 1119 KMEM_FREE(ring, sizeof (*ring)); 1120 } 1121 } 1122 goto hxge_freeb_exit; 1123 } 1124 1125 /* 1126 * Repost buffer. 1127 */ 1128 if ((ring != NULL) && free_state && (ref_cnt == 1)) { 1129 HXGE_DEBUG_MSG((NULL, RX_CTL, 1130 "hxge_freeb: post page $%p:", rx_msg_p)); 1131 if (ring->rbr_state == RBR_POSTING) 1132 hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p); 1133 } 1134 1135 hxge_freeb_exit: 1136 HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb")); 1137 } 1138 1139 uint_t 1140 hxge_rx_intr(caddr_t arg1, caddr_t arg2) 1141 { 1142 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 1143 p_hxge_t hxgep = (p_hxge_t)arg2; 1144 p_hxge_ldg_t ldgp; 1145 uint8_t channel; 1146 hpi_handle_t handle; 1147 rdc_stat_t cs; 1148 uint_t serviced = DDI_INTR_UNCLAIMED; 1149 1150 if (ldvp == NULL) { 1151 HXGE_DEBUG_MSG((NULL, RX_INT_CTL, 1152 "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1153 return (DDI_INTR_UNCLAIMED); 1154 } 1155 1156 if (arg2 == NULL || (void *) ldvp->hxgep != arg2) { 1157 hxgep = ldvp->hxgep; 1158 } 1159 1160 /* 1161 * If the interface is not started, just swallow the interrupt 1162 * for the logical device and don't rearm it. 1163 */ 1164 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) 1165 return (DDI_INTR_CLAIMED); 1166 1167 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1168 "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 1169 1170 /* 1171 * This interrupt handler is for a specific receive dma channel. 1172 */ 1173 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1174 1175 /* 1176 * Get the control and status for this channel. 1177 */ 1178 channel = ldvp->channel; 1179 ldgp = ldvp->ldgp; 1180 RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value); 1181 1182 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_intr:channel %d " 1183 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1184 channel, cs.value, cs.bits.rcr_to, cs.bits.rcr_thres)); 1185 1186 hxge_rx_pkts_vring(hxgep, ldvp->vdma_index, ldvp, cs); 1187 serviced = DDI_INTR_CLAIMED; 1188 1189 /* error events. */ 1190 if (cs.value & RDC_STAT_ERROR) { 1191 (void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs); 1192 } 1193 1194 hxge_intr_exit: 1195 /* 1196 * Enable the mailbox update interrupt if we want to use mailbox. We 1197 * probably don't need to use mailbox as it only saves us one pio read. 1198 * Also write 1 to rcrthres and rcrto to clear these two edge triggered 1199 * bits. 1200 */ 1201 cs.value &= RDC_STAT_WR1C; 1202 cs.bits.mex = 1; 1203 cs.bits.ptrread = 0; 1204 cs.bits.pktread = 0; 1205 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1206 1207 /* 1208 * Rearm this logical group if this is a single device group. 1209 */ 1210 if (ldgp->nldvs == 1) { 1211 ld_intr_mgmt_t mgm; 1212 1213 mgm.value = 0; 1214 mgm.bits.arm = 1; 1215 mgm.bits.timer = ldgp->ldg_timer; 1216 HXGE_REG_WR32(handle, 1217 LD_INTR_MGMT + LDSV_OFFSET(ldgp->ldg), mgm.value); 1218 } 1219 1220 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1221 "<== hxge_rx_intr: serviced %d", serviced)); 1222 1223 return (serviced); 1224 } 1225 1226 static void 1227 hxge_rx_pkts_vring(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1228 rdc_stat_t cs) 1229 { 1230 p_mblk_t mp; 1231 p_rx_rcr_ring_t rcrp; 1232 1233 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts_vring")); 1234 if ((mp = hxge_rx_pkts(hxgep, vindex, ldvp, &rcrp, cs)) == NULL) { 1235 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1236 "<== hxge_rx_pkts_vring: no mp")); 1237 return; 1238 } 1239 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts_vring: $%p", mp)); 1240 1241 #ifdef HXGE_DEBUG 1242 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1243 "==> hxge_rx_pkts_vring:calling mac_rx (NEMO) " 1244 "LEN %d mp $%p mp->b_next $%p rcrp $%p " 1245 "mac_handle $%p", 1246 (mp->b_wptr - mp->b_rptr), mp, mp->b_next, 1247 rcrp, rcrp->rcr_mac_handle)); 1248 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1249 "==> hxge_rx_pkts_vring: dump packets " 1250 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1251 mp, mp->b_rptr, mp->b_wptr, 1252 hxge_dump_packet((char *)mp->b_rptr, 64))); 1253 1254 if (mp->b_cont) { 1255 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1256 "==> hxge_rx_pkts_vring: dump b_cont packets " 1257 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1258 mp->b_cont, mp->b_cont->b_rptr, mp->b_cont->b_wptr, 1259 hxge_dump_packet((char *)mp->b_cont->b_rptr, 1260 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1261 } 1262 if (mp->b_next) { 1263 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1264 "==> hxge_rx_pkts_vring: dump next packets " 1265 "(b_rptr $%p): %s", 1266 mp->b_next->b_rptr, 1267 hxge_dump_packet((char *)mp->b_next->b_rptr, 64))); 1268 } 1269 #endif 1270 1271 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1272 "==> hxge_rx_pkts_vring: send packet to stack")); 1273 mac_rx(hxgep->mach, rcrp->rcr_mac_handle, mp); 1274 1275 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_pkts_vring")); 1276 } 1277 1278 /*ARGSUSED*/ 1279 mblk_t * 1280 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp, 1281 p_rx_rcr_ring_t *rcrp, rdc_stat_t cs) 1282 { 1283 hpi_handle_t handle; 1284 uint8_t channel; 1285 p_rx_rcr_rings_t rx_rcr_rings; 1286 p_rx_rcr_ring_t rcr_p; 1287 uint32_t comp_rd_index; 1288 p_rcr_entry_t rcr_desc_rd_head_p; 1289 p_rcr_entry_t rcr_desc_rd_head_pp; 1290 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1291 uint16_t qlen, nrcr_read, npkt_read; 1292 uint32_t qlen_hw, qlen_sw; 1293 uint32_t invalid_rcr_entry; 1294 boolean_t multi; 1295 rdc_rcr_cfg_b_t rcr_cfg_b; 1296 p_rx_mbox_t rx_mboxp; 1297 p_rxdma_mailbox_t mboxp; 1298 uint64_t rcr_head_index, rcr_tail_index; 1299 uint64_t rcr_tail; 1300 uint64_t value; 1301 rdc_rcr_tail_t rcr_tail_reg; 1302 p_hxge_rx_ring_stats_t rdc_stats; 1303 1304 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d " 1305 "channel %d", vindex, ldvp->channel)); 1306 1307 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 1308 return (NULL); 1309 } 1310 1311 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1312 rx_rcr_rings = hxgep->rx_rcr_rings; 1313 rcr_p = rx_rcr_rings->rcr_rings[vindex]; 1314 channel = rcr_p->rdc; 1315 if (channel != ldvp->channel) { 1316 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d " 1317 "channel %d, and rcr channel %d not matched.", 1318 vindex, ldvp->channel, channel)); 1319 return (NULL); 1320 } 1321 1322 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1323 "==> hxge_rx_pkts: START: rcr channel %d " 1324 "head_p $%p head_pp $%p index %d ", 1325 channel, rcr_p->rcr_desc_rd_head_p, 1326 rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index)); 1327 1328 rx_mboxp = hxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 1329 mboxp = (p_rxdma_mailbox_t)rx_mboxp->rx_mbox.kaddrp; 1330 1331 (void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1332 RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value); 1333 rcr_tail = rcr_tail_reg.bits.tail; 1334 1335 if (!qlen) { 1336 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1337 "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)", 1338 channel, qlen)); 1339 return (NULL); 1340 } 1341 1342 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d " 1343 "qlen %d", channel, qlen)); 1344 1345 comp_rd_index = rcr_p->comp_rd_index; 1346 1347 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 1348 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 1349 nrcr_read = npkt_read = 0; 1350 1351 /* 1352 * Number of packets queued (The jumbo or multi packet will be counted 1353 * as only one paccket and it may take up more than one completion 1354 * entry). 1355 */ 1356 qlen_hw = (qlen < hxge_max_rx_pkts) ? qlen : hxge_max_rx_pkts; 1357 head_mp = NULL; 1358 tail_mp = &head_mp; 1359 nmp = mp_cont = NULL; 1360 multi = B_FALSE; 1361 1362 rcr_head_index = rcr_p->rcr_desc_rd_head_p - rcr_p->rcr_desc_first_p; 1363 rcr_tail_index = rcr_tail - rcr_p->rcr_tail_begin; 1364 1365 if (rcr_tail_index >= rcr_head_index) { 1366 qlen_sw = rcr_tail_index - rcr_head_index; 1367 } else { 1368 /* rcr_tail has wrapped around */ 1369 qlen_sw = (rcr_p->comp_size - rcr_head_index) + rcr_tail_index; 1370 } 1371 1372 if (qlen_hw > qlen_sw) { 1373 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1374 "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n", 1375 channel, qlen_hw, qlen_sw)); 1376 qlen_hw = qlen_sw; 1377 } 1378 1379 while (qlen_hw) { 1380 #ifdef HXGE_DEBUG 1381 hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p); 1382 #endif 1383 /* 1384 * Process one completion ring entry. 1385 */ 1386 invalid_rcr_entry = 0; 1387 hxge_receive_packet(hxgep, 1388 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont, 1389 &invalid_rcr_entry); 1390 if (invalid_rcr_entry != 0) { 1391 rdc_stats = rcr_p->rdc_stats; 1392 rdc_stats->rcr_invalids++; 1393 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1394 "Channel %d could only read 0x%x packets, " 1395 "but 0x%x pending\n", channel, npkt_read, qlen_hw)); 1396 break; 1397 } 1398 1399 /* 1400 * message chaining modes (nemo msg chaining) 1401 */ 1402 if (nmp) { 1403 nmp->b_next = NULL; 1404 if (!multi && !mp_cont) { /* frame fits a partition */ 1405 *tail_mp = nmp; 1406 tail_mp = &nmp->b_next; 1407 nmp = NULL; 1408 } else if (multi && !mp_cont) { /* first segment */ 1409 *tail_mp = nmp; 1410 tail_mp = &nmp->b_cont; 1411 } else if (multi && mp_cont) { /* mid of multi segs */ 1412 *tail_mp = mp_cont; 1413 tail_mp = &mp_cont->b_cont; 1414 } else if (!multi && mp_cont) { /* last segment */ 1415 *tail_mp = mp_cont; 1416 tail_mp = &nmp->b_next; 1417 nmp = NULL; 1418 } 1419 } 1420 1421 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1422 "==> hxge_rx_pkts: loop: rcr channel %d " 1423 "before updating: multi %d " 1424 "nrcr_read %d " 1425 "npk read %d " 1426 "head_pp $%p index %d ", 1427 channel, multi, 1428 nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index)); 1429 1430 if (!multi) { 1431 qlen_hw--; 1432 npkt_read++; 1433 } 1434 1435 /* 1436 * Update the next read entry. 1437 */ 1438 comp_rd_index = NEXT_ENTRY(comp_rd_index, 1439 rcr_p->comp_wrap_mask); 1440 1441 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 1442 rcr_p->rcr_desc_first_p, rcr_p->rcr_desc_last_p); 1443 1444 nrcr_read++; 1445 1446 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1447 "<== hxge_rx_pkts: (SAM, process one packet) " 1448 "nrcr_read %d", nrcr_read)); 1449 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1450 "==> hxge_rx_pkts: loop: rcr channel %d " 1451 "multi %d nrcr_read %d npk read %d head_pp $%p index %d ", 1452 channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp, 1453 comp_rd_index)); 1454 } 1455 1456 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 1457 rcr_p->comp_rd_index = comp_rd_index; 1458 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 1459 1460 /* Adjust the mailbox queue length for a hardware bug workaround */ 1461 mboxp->rcrstat_a.bits.qlen -= npkt_read; 1462 1463 if ((hxgep->intr_timeout != rcr_p->intr_timeout) || 1464 (hxgep->intr_threshold != rcr_p->intr_threshold)) { 1465 rcr_p->intr_timeout = hxgep->intr_timeout; 1466 rcr_p->intr_threshold = hxgep->intr_threshold; 1467 rcr_cfg_b.value = 0x0ULL; 1468 if (rcr_p->intr_timeout) 1469 rcr_cfg_b.bits.entout = 1; 1470 rcr_cfg_b.bits.timeout = rcr_p->intr_timeout; 1471 rcr_cfg_b.bits.pthres = rcr_p->intr_threshold; 1472 RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B, 1473 channel, rcr_cfg_b.value); 1474 } 1475 1476 cs.bits.pktread = npkt_read; 1477 cs.bits.ptrread = nrcr_read; 1478 value = cs.value; 1479 cs.value &= 0xffffffffULL; 1480 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1481 1482 cs.value = value & ~0xffffffffULL; 1483 cs.bits.pktread = 0; 1484 cs.bits.ptrread = 0; 1485 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value); 1486 1487 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, 1488 "==> hxge_rx_pkts: EXIT: rcr channel %d " 1489 "head_pp $%p index %016llx ", 1490 channel, rcr_p->rcr_desc_rd_head_pp, rcr_p->comp_rd_index)); 1491 1492 /* 1493 * Update RCR buffer pointer read and number of packets read. 1494 */ 1495 1496 *rcrp = rcr_p; 1497 1498 HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts")); 1499 1500 return (head_mp); 1501 } 1502 1503 #define RCR_ENTRY_PATTERN 0x5a5a6b6b7c7c8d8dULL 1504 1505 /*ARGSUSED*/ 1506 void 1507 hxge_receive_packet(p_hxge_t hxgep, 1508 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 1509 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont, 1510 uint32_t *invalid_rcr_entry) 1511 { 1512 p_mblk_t nmp = NULL; 1513 uint64_t multi; 1514 uint8_t channel; 1515 1516 boolean_t first_entry = B_TRUE; 1517 boolean_t is_tcp_udp = B_FALSE; 1518 boolean_t buffer_free = B_FALSE; 1519 boolean_t error_send_up = B_FALSE; 1520 uint8_t error_type; 1521 uint16_t l2_len; 1522 uint16_t skip_len; 1523 uint8_t pktbufsz_type; 1524 uint64_t rcr_entry; 1525 uint64_t *pkt_buf_addr_pp; 1526 uint64_t *pkt_buf_addr_p; 1527 uint32_t buf_offset; 1528 uint32_t bsize; 1529 uint32_t msg_index; 1530 p_rx_rbr_ring_t rx_rbr_p; 1531 p_rx_msg_t *rx_msg_ring_p; 1532 p_rx_msg_t rx_msg_p; 1533 1534 uint16_t sw_offset_bytes = 0, hdr_size = 0; 1535 hxge_status_t status = HXGE_OK; 1536 boolean_t is_valid = B_FALSE; 1537 p_hxge_rx_ring_stats_t rdc_stats; 1538 uint32_t bytes_read; 1539 1540 uint64_t pkt_type; 1541 1542 channel = rcr_p->rdc; 1543 1544 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet")); 1545 1546 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 1547 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 1548 1549 /* Verify the content of the rcr_entry for a hardware bug workaround */ 1550 if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) { 1551 *invalid_rcr_entry = 1; 1552 HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet " 1553 "Channel %d invalid RCR entry 0x%llx found, returning\n", 1554 channel, (long long) rcr_entry)); 1555 return; 1556 } 1557 *((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN; 1558 1559 multi = (rcr_entry & RCR_MULTI_MASK); 1560 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 1561 1562 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 1563 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 1564 1565 /* 1566 * Hardware does not strip the CRC due bug ID 11451 where 1567 * the hardware mis handles minimum size packets. 1568 */ 1569 l2_len -= ETHERFCSL; 1570 1571 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 1572 RCR_PKTBUFSZ_SHIFT); 1573 #if defined(__i386) 1574 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 1575 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 1576 #else 1577 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 1578 RCR_PKT_BUF_ADDR_SHIFT); 1579 #endif 1580 1581 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1582 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1583 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1584 "error_type 0x%x pkt_type 0x%x " 1585 "pktbufsz_type %d ", 1586 rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len, 1587 multi, error_type, pkt_type, pktbufsz_type)); 1588 1589 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1590 "==> hxge_receive_packet: entryp $%p entry 0x%0llx " 1591 "pkt_buf_addr_pp $%p l2_len %d multi %d " 1592 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 1593 rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type, pkt_type)); 1594 1595 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1596 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1597 "full pkt_buf_addr_pp $%p l2_len %d", 1598 rcr_entry, pkt_buf_addr_pp, l2_len)); 1599 1600 /* get the stats ptr */ 1601 rdc_stats = rcr_p->rdc_stats; 1602 1603 if (!l2_len) { 1604 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1605 "<== hxge_receive_packet: failed: l2 length is 0.")); 1606 return; 1607 } 1608 1609 /* shift 6 bits to get the full io address */ 1610 #if defined(__i386) 1611 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 1612 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1613 #else 1614 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 1615 RCR_PKT_BUF_ADDR_SHIFT_FULL); 1616 #endif 1617 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1618 "==> (rbr) hxge_receive_packet: entry 0x%0llx " 1619 "full pkt_buf_addr_pp $%p l2_len %d", 1620 rcr_entry, pkt_buf_addr_pp, l2_len)); 1621 1622 rx_rbr_p = rcr_p->rx_rbr_p; 1623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 1624 1625 if (first_entry) { 1626 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 1627 RXDMA_HDR_SIZE_DEFAULT); 1628 1629 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1630 "==> hxge_receive_packet: first entry 0x%016llx " 1631 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 1632 rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size)); 1633 } 1634 1635 MUTEX_ENTER(&rcr_p->lock); 1636 MUTEX_ENTER(&rx_rbr_p->lock); 1637 1638 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1639 "==> (rbr 1) hxge_receive_packet: entry 0x%0llx " 1640 "full pkt_buf_addr_pp $%p l2_len %d", 1641 rcr_entry, pkt_buf_addr_pp, l2_len)); 1642 1643 /* 1644 * Packet buffer address in the completion entry points to the starting 1645 * buffer address (offset 0). Use the starting buffer address to locate 1646 * the corresponding kernel address. 1647 */ 1648 status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p, 1649 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 1650 &buf_offset, &msg_index); 1651 1652 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1653 "==> (rbr 2) hxge_receive_packet: entry 0x%0llx " 1654 "full pkt_buf_addr_pp $%p l2_len %d", 1655 rcr_entry, pkt_buf_addr_pp, l2_len)); 1656 1657 if (status != HXGE_OK) { 1658 MUTEX_EXIT(&rx_rbr_p->lock); 1659 MUTEX_EXIT(&rcr_p->lock); 1660 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1661 "<== hxge_receive_packet: found vaddr failed %d", status)); 1662 return; 1663 } 1664 1665 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1666 "==> (rbr 3) hxge_receive_packet: entry 0x%0llx " 1667 "full pkt_buf_addr_pp $%p l2_len %d", 1668 rcr_entry, pkt_buf_addr_pp, l2_len)); 1669 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1670 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1671 "full pkt_buf_addr_pp $%p l2_len %d", 1672 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1673 1674 if (msg_index >= rx_rbr_p->tnblocks) { 1675 MUTEX_EXIT(&rx_rbr_p->lock); 1676 MUTEX_EXIT(&rcr_p->lock); 1677 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1678 "==> hxge_receive_packet: FATAL msg_index (%d) " 1679 "should be smaller than tnblocks (%d)\n", 1680 msg_index, rx_rbr_p->tnblocks)); 1681 return; 1682 } 1683 1684 rx_msg_p = rx_msg_ring_p[msg_index]; 1685 1686 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1687 "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx " 1688 "full pkt_buf_addr_pp $%p l2_len %d", 1689 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 1690 1691 switch (pktbufsz_type) { 1692 case RCR_PKTBUFSZ_0: 1693 bsize = rx_rbr_p->pkt_buf_size0_bytes; 1694 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1695 "==> hxge_receive_packet: 0 buf %d", bsize)); 1696 break; 1697 case RCR_PKTBUFSZ_1: 1698 bsize = rx_rbr_p->pkt_buf_size1_bytes; 1699 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1700 "==> hxge_receive_packet: 1 buf %d", bsize)); 1701 break; 1702 case RCR_PKTBUFSZ_2: 1703 bsize = rx_rbr_p->pkt_buf_size2_bytes; 1704 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1705 "==> hxge_receive_packet: 2 buf %d", bsize)); 1706 break; 1707 case RCR_SINGLE_BLOCK: 1708 bsize = rx_msg_p->block_size; 1709 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1710 "==> hxge_receive_packet: single %d", bsize)); 1711 1712 break; 1713 default: 1714 MUTEX_EXIT(&rx_rbr_p->lock); 1715 MUTEX_EXIT(&rcr_p->lock); 1716 return; 1717 } 1718 1719 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 1720 (buf_offset + sw_offset_bytes), (hdr_size + l2_len), 1721 DDI_DMA_SYNC_FORCPU); 1722 1723 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1724 "==> hxge_receive_packet: after first dump:usage count")); 1725 1726 if (rx_msg_p->cur_usage_cnt == 0) { 1727 if (rx_rbr_p->rbr_use_bcopy) { 1728 atomic_inc_32(&rx_rbr_p->rbr_consumed); 1729 if (rx_rbr_p->rbr_consumed < 1730 rx_rbr_p->rbr_threshold_hi) { 1731 if (rx_rbr_p->rbr_threshold_lo == 0 || 1732 ((rx_rbr_p->rbr_consumed >= 1733 rx_rbr_p->rbr_threshold_lo) && 1734 (rx_rbr_p->rbr_bufsize_type >= 1735 pktbufsz_type))) { 1736 rx_msg_p->rx_use_bcopy = B_TRUE; 1737 } 1738 } else { 1739 rx_msg_p->rx_use_bcopy = B_TRUE; 1740 } 1741 } 1742 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1743 "==> hxge_receive_packet: buf %d (new block) ", bsize)); 1744 1745 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 1746 rx_msg_p->pkt_buf_size = bsize; 1747 rx_msg_p->cur_usage_cnt = 1; 1748 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 1749 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1750 "==> hxge_receive_packet: buf %d (single block) ", 1751 bsize)); 1752 /* 1753 * Buffer can be reused once the free function is 1754 * called. 1755 */ 1756 rx_msg_p->max_usage_cnt = 1; 1757 buffer_free = B_TRUE; 1758 } else { 1759 rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize; 1760 if (rx_msg_p->max_usage_cnt == 1) { 1761 buffer_free = B_TRUE; 1762 } 1763 } 1764 } else { 1765 rx_msg_p->cur_usage_cnt++; 1766 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 1767 buffer_free = B_TRUE; 1768 } 1769 } 1770 1771 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1772 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 1773 msg_index, l2_len, 1774 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 1775 1776 if (error_type) { 1777 rdc_stats->ierrors++; 1778 /* Update error stats */ 1779 rdc_stats->errlog.compl_err_type = error_type; 1780 HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR); 1781 1782 if (error_type & RCR_CTRL_FIFO_DED) { 1783 rdc_stats->ctrl_fifo_ecc_err++; 1784 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1785 " hxge_receive_packet: " 1786 " channel %d RCR ctrl_fifo_ded error", channel)); 1787 } else if (error_type & RCR_DATA_FIFO_DED) { 1788 rdc_stats->data_fifo_ecc_err++; 1789 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1790 " hxge_receive_packet: channel %d" 1791 " RCR data_fifo_ded error", channel)); 1792 } 1793 1794 /* 1795 * Update and repost buffer block if max usage count is 1796 * reached. 1797 */ 1798 if (error_send_up == B_FALSE) { 1799 atomic_inc_32(&rx_msg_p->ref_cnt); 1800 if (buffer_free == B_TRUE) { 1801 rx_msg_p->free = B_TRUE; 1802 } 1803 1804 MUTEX_EXIT(&rx_rbr_p->lock); 1805 MUTEX_EXIT(&rcr_p->lock); 1806 hxge_freeb(rx_msg_p); 1807 return; 1808 } 1809 } 1810 1811 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1812 "==> hxge_receive_packet: DMA sync second ")); 1813 1814 bytes_read = rcr_p->rcvd_pkt_bytes; 1815 skip_len = sw_offset_bytes + hdr_size; 1816 if (!rx_msg_p->rx_use_bcopy) { 1817 /* 1818 * For loaned up buffers, the driver reference count 1819 * will be incremented first and then the free state. 1820 */ 1821 if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 1822 if (first_entry) { 1823 nmp->b_rptr = &nmp->b_rptr[skip_len]; 1824 if (l2_len < bsize - skip_len) { 1825 nmp->b_wptr = &nmp->b_rptr[l2_len]; 1826 } else { 1827 nmp->b_wptr = &nmp->b_rptr[bsize 1828 - skip_len]; 1829 } 1830 } else { 1831 if (l2_len - bytes_read < bsize) { 1832 nmp->b_wptr = 1833 &nmp->b_rptr[l2_len - bytes_read]; 1834 } else { 1835 nmp->b_wptr = &nmp->b_rptr[bsize]; 1836 } 1837 } 1838 } 1839 } else { 1840 if (first_entry) { 1841 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 1842 l2_len < bsize - skip_len ? 1843 l2_len : bsize - skip_len); 1844 } else { 1845 nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset, 1846 l2_len - bytes_read < bsize ? 1847 l2_len - bytes_read : bsize); 1848 } 1849 } 1850 1851 if (nmp != NULL) { 1852 if (first_entry) 1853 bytes_read = nmp->b_wptr - nmp->b_rptr; 1854 else 1855 bytes_read += nmp->b_wptr - nmp->b_rptr; 1856 1857 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1858 "==> hxge_receive_packet after dupb: " 1859 "rbr consumed %d " 1860 "pktbufsz_type %d " 1861 "nmp $%p rptr $%p wptr $%p " 1862 "buf_offset %d bzise %d l2_len %d skip_len %d", 1863 rx_rbr_p->rbr_consumed, 1864 pktbufsz_type, 1865 nmp, nmp->b_rptr, nmp->b_wptr, 1866 buf_offset, bsize, l2_len, skip_len)); 1867 } else { 1868 cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)"); 1869 1870 atomic_inc_32(&rx_msg_p->ref_cnt); 1871 if (buffer_free == B_TRUE) { 1872 rx_msg_p->free = B_TRUE; 1873 } 1874 1875 MUTEX_EXIT(&rx_rbr_p->lock); 1876 MUTEX_EXIT(&rcr_p->lock); 1877 hxge_freeb(rx_msg_p); 1878 return; 1879 } 1880 1881 if (buffer_free == B_TRUE) { 1882 rx_msg_p->free = B_TRUE; 1883 } 1884 1885 /* 1886 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a 1887 * packet is not fragmented and no error bit is set, then L4 checksum 1888 * is OK. 1889 */ 1890 is_valid = (nmp != NULL); 1891 if (first_entry) { 1892 rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 1893 if (l2_len > (STD_FRAME_SIZE - ETHERFCSL)) 1894 rdc_stats->jumbo_pkts++; 1895 rdc_stats->ibytes += skip_len + l2_len < bsize ? 1896 l2_len : bsize; 1897 } else { 1898 /* 1899 * Add the current portion of the packet to the kstats. 1900 * The current portion of the packet is calculated by using 1901 * length of the packet and the previously received portion. 1902 */ 1903 rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ? 1904 l2_len - rcr_p->rcvd_pkt_bytes : bsize; 1905 } 1906 1907 rcr_p->rcvd_pkt_bytes = bytes_read; 1908 1909 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 1910 atomic_inc_32(&rx_msg_p->ref_cnt); 1911 MUTEX_EXIT(&rx_rbr_p->lock); 1912 MUTEX_EXIT(&rcr_p->lock); 1913 hxge_freeb(rx_msg_p); 1914 } else { 1915 MUTEX_EXIT(&rx_rbr_p->lock); 1916 MUTEX_EXIT(&rcr_p->lock); 1917 } 1918 1919 if (is_valid) { 1920 nmp->b_cont = NULL; 1921 if (first_entry) { 1922 *mp = nmp; 1923 *mp_cont = NULL; 1924 } else { 1925 *mp_cont = nmp; 1926 } 1927 } 1928 1929 /* 1930 * Update stats and hardware checksuming. 1931 */ 1932 if (is_valid && !multi) { 1933 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 1934 pkt_type == RCR_PKT_IS_UDP) ? B_TRUE : B_FALSE); 1935 1936 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_receive_packet: " 1937 "is_valid 0x%x multi %d pkt %d d error %d", 1938 is_valid, multi, is_tcp_udp, error_type)); 1939 1940 if (is_tcp_udp && !error_type) { 1941 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 1942 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 1943 1944 HXGE_DEBUG_MSG((hxgep, RX_CTL, 1945 "==> hxge_receive_packet: Full tcp/udp cksum " 1946 "is_valid 0x%x multi %d pkt %d " 1947 "error %d", 1948 is_valid, multi, is_tcp_udp, error_type)); 1949 } 1950 } 1951 1952 HXGE_DEBUG_MSG((hxgep, RX2_CTL, 1953 "==> hxge_receive_packet: *mp 0x%016llx", *mp)); 1954 1955 *multi_p = (multi == RCR_MULTI_MASK); 1956 1957 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: " 1958 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 1959 *multi_p, nmp, *mp, *mp_cont)); 1960 } 1961 1962 /*ARGSUSED*/ 1963 static hxge_status_t 1964 hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp, 1965 rdc_stat_t cs) 1966 { 1967 p_hxge_rx_ring_stats_t rdc_stats; 1968 hpi_handle_t handle; 1969 boolean_t rxchan_fatal = B_FALSE; 1970 uint8_t channel; 1971 hxge_status_t status = HXGE_OK; 1972 uint64_t cs_val; 1973 1974 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts")); 1975 1976 handle = HXGE_DEV_HPI_HANDLE(hxgep); 1977 channel = ldvp->channel; 1978 1979 /* Clear the interrupts */ 1980 cs.bits.pktread = 0; 1981 cs.bits.ptrread = 0; 1982 cs_val = cs.value & RDC_STAT_WR1C; 1983 RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs_val); 1984 1985 rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index]; 1986 1987 if (cs.bits.rbr_cpl_to) { 1988 rdc_stats->rbr_tmout++; 1989 HXGE_FM_REPORT_ERROR(hxgep, channel, 1990 HXGE_FM_EREPORT_RDMC_RBR_CPL_TO); 1991 rxchan_fatal = B_TRUE; 1992 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 1993 "==> hxge_rx_err_evnts(channel %d): " 1994 "fatal error: rx_rbr_timeout", channel)); 1995 } 1996 1997 if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) { 1998 (void) hpi_rxdma_ring_perr_stat_get(handle, 1999 &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par); 2000 } 2001 2002 if (cs.bits.rcr_shadow_par_err) { 2003 rdc_stats->rcr_sha_par++; 2004 HXGE_FM_REPORT_ERROR(hxgep, channel, 2005 HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2006 rxchan_fatal = B_TRUE; 2007 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2008 "==> hxge_rx_err_evnts(channel %d): " 2009 "fatal error: rcr_shadow_par_err", channel)); 2010 } 2011 2012 if (cs.bits.rbr_prefetch_par_err) { 2013 rdc_stats->rbr_pre_par++; 2014 HXGE_FM_REPORT_ERROR(hxgep, channel, 2015 HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2016 rxchan_fatal = B_TRUE; 2017 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2018 "==> hxge_rx_err_evnts(channel %d): " 2019 "fatal error: rbr_prefetch_par_err", channel)); 2020 } 2021 2022 if (cs.bits.rbr_pre_empty) { 2023 rdc_stats->rbr_pre_empty++; 2024 HXGE_FM_REPORT_ERROR(hxgep, channel, 2025 HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY); 2026 rxchan_fatal = B_TRUE; 2027 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2028 "==> hxge_rx_err_evnts(channel %d): " 2029 "fatal error: rbr_pre_empty", channel)); 2030 } 2031 2032 if (cs.bits.peu_resp_err) { 2033 rdc_stats->peu_resp_err++; 2034 HXGE_FM_REPORT_ERROR(hxgep, channel, 2035 HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR); 2036 rxchan_fatal = B_TRUE; 2037 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2038 "==> hxge_rx_err_evnts(channel %d): " 2039 "fatal error: peu_resp_err", channel)); 2040 } 2041 2042 if (cs.bits.rcr_thres) { 2043 rdc_stats->rcr_thres++; 2044 } 2045 2046 if (cs.bits.rcr_to) { 2047 rdc_stats->rcr_to++; 2048 } 2049 2050 if (cs.bits.rcr_shadow_full) { 2051 rdc_stats->rcr_shadow_full++; 2052 HXGE_FM_REPORT_ERROR(hxgep, channel, 2053 HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL); 2054 rxchan_fatal = B_TRUE; 2055 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2056 "==> hxge_rx_err_evnts(channel %d): " 2057 "fatal error: rcr_shadow_full", channel)); 2058 } 2059 2060 if (cs.bits.rcr_full) { 2061 rdc_stats->rcrfull++; 2062 HXGE_FM_REPORT_ERROR(hxgep, channel, 2063 HXGE_FM_EREPORT_RDMC_RCRFULL); 2064 rxchan_fatal = B_TRUE; 2065 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2066 "==> hxge_rx_err_evnts(channel %d): " 2067 "fatal error: rcrfull error", channel)); 2068 } 2069 2070 if (cs.bits.rbr_empty) { 2071 rdc_stats->rbr_empty++; 2072 if (rdc_stats->rbr_empty == 1) 2073 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2074 "==> hxge_rx_err_evnts(channel %d): " 2075 "rbr empty error", channel)); 2076 /* 2077 * DMA channel is disabled due to rbr_empty bit is set 2078 * although it is not fatal. Enable the DMA channel here 2079 * to work-around the hardware bug. 2080 */ 2081 (void) hpi_rxdma_cfg_rdc_enable(handle, channel); 2082 } 2083 2084 if (cs.bits.rbr_full) { 2085 rdc_stats->rbrfull++; 2086 HXGE_FM_REPORT_ERROR(hxgep, channel, 2087 HXGE_FM_EREPORT_RDMC_RBRFULL); 2088 rxchan_fatal = B_TRUE; 2089 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2090 "==> hxge_rx_err_evnts(channel %d): " 2091 "fatal error: rbr_full error", channel)); 2092 } 2093 2094 if (rxchan_fatal) { 2095 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2096 " hxge_rx_err_evnts: fatal error on Channel #%d\n", 2097 channel)); 2098 status = hxge_rxdma_fatal_err_recover(hxgep, channel); 2099 if (status == HXGE_OK) { 2100 FM_SERVICE_RESTORED(hxgep); 2101 } 2102 } 2103 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts")); 2104 2105 return (status); 2106 } 2107 2108 static hxge_status_t 2109 hxge_map_rxdma(p_hxge_t hxgep) 2110 { 2111 int i, ndmas; 2112 uint16_t channel; 2113 p_rx_rbr_rings_t rx_rbr_rings; 2114 p_rx_rbr_ring_t *rbr_rings; 2115 p_rx_rcr_rings_t rx_rcr_rings; 2116 p_rx_rcr_ring_t *rcr_rings; 2117 p_rx_mbox_areas_t rx_mbox_areas_p; 2118 p_rx_mbox_t *rx_mbox_p; 2119 p_hxge_dma_pool_t dma_buf_poolp; 2120 p_hxge_dma_common_t *dma_buf_p; 2121 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2122 p_hxge_dma_common_t *dma_rbr_cntl_p; 2123 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2124 p_hxge_dma_common_t *dma_rcr_cntl_p; 2125 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2126 p_hxge_dma_common_t *dma_mbox_cntl_p; 2127 uint32_t *num_chunks; 2128 hxge_status_t status = HXGE_OK; 2129 2130 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma")); 2131 2132 dma_buf_poolp = hxgep->rx_buf_pool_p; 2133 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2134 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2135 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2136 2137 if (!dma_buf_poolp->buf_allocated || 2138 !dma_rbr_cntl_poolp->buf_allocated || 2139 !dma_rcr_cntl_poolp->buf_allocated || 2140 !dma_mbox_cntl_poolp->buf_allocated) { 2141 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2142 "<== hxge_map_rxdma: buf not allocated")); 2143 return (HXGE_ERROR); 2144 } 2145 2146 ndmas = dma_buf_poolp->ndmas; 2147 if (!ndmas) { 2148 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2149 "<== hxge_map_rxdma: no dma allocated")); 2150 return (HXGE_ERROR); 2151 } 2152 2153 num_chunks = dma_buf_poolp->num_chunks; 2154 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2155 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p; 2156 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p; 2157 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p; 2158 2159 rx_rbr_rings = (p_rx_rbr_rings_t) 2160 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2161 rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC( 2162 sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2163 2164 rx_rcr_rings = (p_rx_rcr_rings_t) 2165 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2166 rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC( 2167 sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2168 2169 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2170 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2171 rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC( 2172 sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2173 2174 /* 2175 * Timeout should be set based on the system clock divider. 2176 * The following timeout value of 1 assumes that the 2177 * granularity (1000) is 3 microseconds running at 300MHz. 2178 */ 2179 2180 hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2181 hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2182 2183 /* 2184 * Map descriptors from the buffer polls for each dam channel. 2185 */ 2186 for (i = 0; i < ndmas; i++) { 2187 /* 2188 * Set up and prepare buffer blocks, descriptors and mailbox. 2189 */ 2190 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2191 status = hxge_map_rxdma_channel(hxgep, channel, 2192 (p_hxge_dma_common_t *)&dma_buf_p[i], 2193 (p_rx_rbr_ring_t *)&rbr_rings[i], 2194 num_chunks[i], 2195 (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i], 2196 (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i], 2197 (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i], 2198 (p_rx_rcr_ring_t *)&rcr_rings[i], 2199 (p_rx_mbox_t *)&rx_mbox_p[i]); 2200 if (status != HXGE_OK) { 2201 goto hxge_map_rxdma_fail1; 2202 } 2203 rbr_rings[i]->index = (uint16_t)i; 2204 rcr_rings[i]->index = (uint16_t)i; 2205 rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i]; 2206 } 2207 2208 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2209 rx_rbr_rings->rbr_rings = rbr_rings; 2210 hxgep->rx_rbr_rings = rx_rbr_rings; 2211 rx_rcr_rings->rcr_rings = rcr_rings; 2212 hxgep->rx_rcr_rings = rx_rcr_rings; 2213 2214 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2215 hxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2216 2217 goto hxge_map_rxdma_exit; 2218 2219 hxge_map_rxdma_fail1: 2220 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2221 "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)", 2222 status, channel, i)); 2223 i--; 2224 for (; i >= 0; i--) { 2225 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2226 hxge_unmap_rxdma_channel(hxgep, channel, 2227 rbr_rings[i], rcr_rings[i], rx_mbox_p[i]); 2228 } 2229 2230 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2231 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2232 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2233 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2234 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2235 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2236 2237 hxge_map_rxdma_exit: 2238 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2239 "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 2240 2241 return (status); 2242 } 2243 2244 static void 2245 hxge_unmap_rxdma(p_hxge_t hxgep) 2246 { 2247 int i, ndmas; 2248 uint16_t channel; 2249 p_rx_rbr_rings_t rx_rbr_rings; 2250 p_rx_rbr_ring_t *rbr_rings; 2251 p_rx_rcr_rings_t rx_rcr_rings; 2252 p_rx_rcr_ring_t *rcr_rings; 2253 p_rx_mbox_areas_t rx_mbox_areas_p; 2254 p_rx_mbox_t *rx_mbox_p; 2255 p_hxge_dma_pool_t dma_buf_poolp; 2256 p_hxge_dma_pool_t dma_rbr_cntl_poolp; 2257 p_hxge_dma_pool_t dma_rcr_cntl_poolp; 2258 p_hxge_dma_pool_t dma_mbox_cntl_poolp; 2259 p_hxge_dma_common_t *dma_buf_p; 2260 2261 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma")); 2262 2263 dma_buf_poolp = hxgep->rx_buf_pool_p; 2264 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p; 2265 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p; 2266 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p; 2267 2268 if (!dma_buf_poolp->buf_allocated || 2269 !dma_rbr_cntl_poolp->buf_allocated || 2270 !dma_rcr_cntl_poolp->buf_allocated || 2271 !dma_mbox_cntl_poolp->buf_allocated) { 2272 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2273 "<== hxge_unmap_rxdma: NULL buf pointers")); 2274 return; 2275 } 2276 2277 rx_rbr_rings = hxgep->rx_rbr_rings; 2278 rx_rcr_rings = hxgep->rx_rcr_rings; 2279 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2280 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2281 "<== hxge_unmap_rxdma: NULL pointers")); 2282 return; 2283 } 2284 2285 ndmas = rx_rbr_rings->ndmas; 2286 if (!ndmas) { 2287 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2288 "<== hxge_unmap_rxdma: no channel")); 2289 return; 2290 } 2291 2292 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2293 "==> hxge_unmap_rxdma (ndmas %d)", ndmas)); 2294 2295 rbr_rings = rx_rbr_rings->rbr_rings; 2296 rcr_rings = rx_rcr_rings->rcr_rings; 2297 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 2298 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 2299 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2300 2301 for (i = 0; i < ndmas; i++) { 2302 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 2303 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2304 "==> hxge_unmap_rxdma (ndmas %d) channel %d", 2305 ndmas, channel)); 2306 (void) hxge_unmap_rxdma_channel(hxgep, channel, 2307 (p_rx_rbr_ring_t)rbr_rings[i], 2308 (p_rx_rcr_ring_t)rcr_rings[i], 2309 (p_rx_mbox_t)rx_mbox_p[i]); 2310 } 2311 2312 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2313 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2314 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2315 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2316 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2317 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2318 2319 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma")); 2320 } 2321 2322 hxge_status_t 2323 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2324 p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 2325 uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p, 2326 p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p, 2327 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2328 { 2329 int status = HXGE_OK; 2330 2331 /* 2332 * Set up and prepare buffer blocks, descriptors and mailbox. 2333 */ 2334 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2335 "==> hxge_map_rxdma_channel (channel %d)", channel)); 2336 2337 /* 2338 * Receive buffer blocks 2339 */ 2340 status = hxge_map_rxdma_channel_buf_ring(hxgep, channel, 2341 dma_buf_p, rbr_p, num_chunks); 2342 if (status != HXGE_OK) { 2343 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2344 "==> hxge_map_rxdma_channel (channel %d): " 2345 "map buffer failed 0x%x", channel, status)); 2346 goto hxge_map_rxdma_channel_exit; 2347 } 2348 2349 /* 2350 * Receive block ring, completion ring and mailbox. 2351 */ 2352 status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel, 2353 dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p, 2354 rbr_p, rcr_p, rx_mbox_p); 2355 if (status != HXGE_OK) { 2356 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2357 "==> hxge_map_rxdma_channel (channel %d): " 2358 "map config failed 0x%x", channel, status)); 2359 goto hxge_map_rxdma_channel_fail2; 2360 } 2361 goto hxge_map_rxdma_channel_exit; 2362 2363 hxge_map_rxdma_channel_fail3: 2364 /* Free rbr, rcr */ 2365 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2366 "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)", 2367 status, channel)); 2368 hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p); 2369 2370 hxge_map_rxdma_channel_fail2: 2371 /* Free buffer blocks */ 2372 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2373 "==> hxge_map_rxdma_channel: free rx buffers" 2374 "(hxgep 0x%x status 0x%x channel %d)", 2375 hxgep, status, channel)); 2376 hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p); 2377 2378 status = HXGE_ERROR; 2379 2380 hxge_map_rxdma_channel_exit: 2381 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2382 "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)", 2383 hxgep, status, channel)); 2384 2385 return (status); 2386 } 2387 2388 /*ARGSUSED*/ 2389 static void 2390 hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel, 2391 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2392 { 2393 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2394 "==> hxge_unmap_rxdma_channel (channel %d)", channel)); 2395 2396 /* 2397 * unmap receive block ring, completion ring and mailbox. 2398 */ 2399 (void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p); 2400 2401 /* unmap buffer blocks */ 2402 (void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p); 2403 2404 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel")); 2405 } 2406 2407 /*ARGSUSED*/ 2408 static hxge_status_t 2409 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel, 2410 p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p, 2411 p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p, 2412 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 2413 { 2414 p_rx_rbr_ring_t rbrp; 2415 p_rx_rcr_ring_t rcrp; 2416 p_rx_mbox_t mboxp; 2417 p_hxge_dma_common_t cntl_dmap; 2418 p_hxge_dma_common_t dmap; 2419 p_rx_msg_t *rx_msg_ring; 2420 p_rx_msg_t rx_msg_p; 2421 rdc_rbr_cfg_a_t *rcfga_p; 2422 rdc_rbr_cfg_b_t *rcfgb_p; 2423 rdc_rcr_cfg_a_t *cfga_p; 2424 rdc_rcr_cfg_b_t *cfgb_p; 2425 rdc_rx_cfg1_t *cfig1_p; 2426 rdc_rx_cfg2_t *cfig2_p; 2427 rdc_rbr_kick_t *kick_p; 2428 uint32_t dmaaddrp; 2429 uint32_t *rbr_vaddrp; 2430 uint32_t bkaddr; 2431 hxge_status_t status = HXGE_OK; 2432 int i; 2433 uint32_t hxge_port_rcr_size; 2434 2435 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2436 "==> hxge_map_rxdma_channel_cfg_ring")); 2437 2438 cntl_dmap = *dma_rbr_cntl_p; 2439 2440 /* 2441 * Map in the receive block ring 2442 */ 2443 rbrp = *rbr_p; 2444 dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc; 2445 hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 2446 2447 /* 2448 * Zero out buffer block ring descriptors. 2449 */ 2450 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2451 2452 rcfga_p = &(rbrp->rbr_cfga); 2453 rcfgb_p = &(rbrp->rbr_cfgb); 2454 kick_p = &(rbrp->rbr_kick); 2455 rcfga_p->value = 0; 2456 rcfgb_p->value = 0; 2457 kick_p->value = 0; 2458 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 2459 rcfga_p->value = (rbrp->rbr_addr & 2460 (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK)); 2461 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 2462 2463 /* XXXX: how to choose packet buffer sizes */ 2464 rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0; 2465 rcfgb_p->bits.vld0 = 1; 2466 rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1; 2467 rcfgb_p->bits.vld1 = 1; 2468 rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2; 2469 rcfgb_p->bits.vld2 = 1; 2470 rcfgb_p->bits.bksize = hxgep->rx_bksize_code; 2471 2472 /* 2473 * For each buffer block, enter receive block address to the ring. 2474 */ 2475 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 2476 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 2477 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2478 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2479 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 2480 2481 rx_msg_ring = rbrp->rx_msg_ring; 2482 for (i = 0; i < rbrp->tnblocks; i++) { 2483 rx_msg_p = rx_msg_ring[i]; 2484 rx_msg_p->hxgep = hxgep; 2485 rx_msg_p->rx_rbr_p = rbrp; 2486 bkaddr = (uint32_t) 2487 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2488 RBR_BKADDR_SHIFT)); 2489 rx_msg_p->free = B_FALSE; 2490 rx_msg_p->max_usage_cnt = 0xbaddcafe; 2491 2492 *rbr_vaddrp++ = bkaddr; 2493 } 2494 2495 kick_p->bits.bkadd = rbrp->rbb_max; 2496 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 2497 2498 rbrp->rbr_rd_index = 0; 2499 2500 rbrp->rbr_consumed = 0; 2501 rbrp->rbr_use_bcopy = B_TRUE; 2502 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 2503 2504 /* 2505 * Do bcopy on packets greater than bcopy size once the lo threshold is 2506 * reached. This lo threshold should be less than the hi threshold. 2507 * 2508 * Do bcopy on every packet once the hi threshold is reached. 2509 */ 2510 if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) { 2511 /* default it to use hi */ 2512 hxge_rx_threshold_lo = hxge_rx_threshold_hi; 2513 } 2514 if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) { 2515 hxge_rx_buf_size_type = HXGE_RBR_TYPE2; 2516 } 2517 rbrp->rbr_bufsize_type = hxge_rx_buf_size_type; 2518 2519 switch (hxge_rx_threshold_hi) { 2520 default: 2521 case HXGE_RX_COPY_NONE: 2522 /* Do not do bcopy at all */ 2523 rbrp->rbr_use_bcopy = B_FALSE; 2524 rbrp->rbr_threshold_hi = rbrp->rbb_max; 2525 break; 2526 2527 case HXGE_RX_COPY_1: 2528 case HXGE_RX_COPY_2: 2529 case HXGE_RX_COPY_3: 2530 case HXGE_RX_COPY_4: 2531 case HXGE_RX_COPY_5: 2532 case HXGE_RX_COPY_6: 2533 case HXGE_RX_COPY_7: 2534 rbrp->rbr_threshold_hi = 2535 rbrp->rbb_max * (hxge_rx_threshold_hi) / 2536 HXGE_RX_BCOPY_SCALE; 2537 break; 2538 2539 case HXGE_RX_COPY_ALL: 2540 rbrp->rbr_threshold_hi = 0; 2541 break; 2542 } 2543 2544 switch (hxge_rx_threshold_lo) { 2545 default: 2546 case HXGE_RX_COPY_NONE: 2547 /* Do not do bcopy at all */ 2548 if (rbrp->rbr_use_bcopy) { 2549 rbrp->rbr_use_bcopy = B_FALSE; 2550 } 2551 rbrp->rbr_threshold_lo = rbrp->rbb_max; 2552 break; 2553 2554 case HXGE_RX_COPY_1: 2555 case HXGE_RX_COPY_2: 2556 case HXGE_RX_COPY_3: 2557 case HXGE_RX_COPY_4: 2558 case HXGE_RX_COPY_5: 2559 case HXGE_RX_COPY_6: 2560 case HXGE_RX_COPY_7: 2561 rbrp->rbr_threshold_lo = 2562 rbrp->rbb_max * (hxge_rx_threshold_lo) / 2563 HXGE_RX_BCOPY_SCALE; 2564 break; 2565 2566 case HXGE_RX_COPY_ALL: 2567 rbrp->rbr_threshold_lo = 0; 2568 break; 2569 } 2570 2571 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2572 "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d " 2573 "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d " 2574 "rbb_threshold_lo %d", 2575 dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type, 2576 rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo)); 2577 2578 /* Map in the receive completion ring */ 2579 rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 2580 rcrp->rdc = dma_channel; 2581 2582 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 2583 rcrp->comp_size = hxge_port_rcr_size; 2584 rcrp->comp_wrap_mask = hxge_port_rcr_size - 1; 2585 2586 rcrp->max_receive_pkts = hxge_max_rx_pkts; 2587 2588 cntl_dmap = *dma_rcr_cntl_p; 2589 2590 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 2591 hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 2592 sizeof (rcr_entry_t)); 2593 rcrp->comp_rd_index = 0; 2594 rcrp->comp_wt_index = 0; 2595 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 2596 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 2597 #if defined(__i386) 2598 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2599 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2600 #else 2601 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 2602 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 2603 #endif 2604 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 2605 (hxge_port_rcr_size - 1); 2606 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 2607 (hxge_port_rcr_size - 1); 2608 2609 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 2610 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 2611 2612 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2613 "==> hxge_map_rxdma_channel_cfg_ring: channel %d " 2614 "rbr_vaddrp $%p rcr_desc_rd_head_p $%p " 2615 "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p " 2616 "rcr_desc_rd_last_pp $%p ", 2617 dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p, 2618 rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p, 2619 rcrp->rcr_desc_last_pp)); 2620 2621 /* 2622 * Zero out buffer block ring descriptors. 2623 */ 2624 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2625 rcrp->intr_timeout = hxgep->intr_timeout; 2626 rcrp->intr_threshold = hxgep->intr_threshold; 2627 rcrp->full_hdr_flag = B_FALSE; 2628 rcrp->sw_priv_hdr_len = 0; 2629 2630 cfga_p = &(rcrp->rcr_cfga); 2631 cfgb_p = &(rcrp->rcr_cfgb); 2632 cfga_p->value = 0; 2633 cfgb_p->value = 0; 2634 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 2635 2636 cfga_p->value = (rcrp->rcr_addr & 2637 (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK)); 2638 2639 cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF); 2640 2641 /* 2642 * Timeout should be set based on the system clock divider. The 2643 * following timeout value of 1 assumes that the granularity (1000) is 2644 * 3 microseconds running at 300MHz. 2645 */ 2646 cfgb_p->bits.pthres = rcrp->intr_threshold; 2647 cfgb_p->bits.timeout = rcrp->intr_timeout; 2648 cfgb_p->bits.entout = 1; 2649 2650 /* Map in the mailbox */ 2651 cntl_dmap = *dma_mbox_cntl_p; 2652 mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 2653 dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox; 2654 hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 2655 cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1; 2656 cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2; 2657 cfig1_p->value = cfig2_p->value = 0; 2658 2659 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 2660 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2661 "==> hxge_map_rxdma_channel_cfg_ring: " 2662 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 2663 dma_channel, cfig1_p->value, cfig2_p->value, 2664 mboxp->mbox_addr)); 2665 2666 dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff); 2667 cfig1_p->bits.mbaddr_h = dmaaddrp; 2668 2669 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 2670 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 2671 RXDMA_CFIG2_MBADDR_L_MASK); 2672 2673 cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 2674 2675 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2676 "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p " 2677 "cfg1 0x%016llx cfig2 0x%016llx", 2678 dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value)); 2679 2680 cfig2_p->bits.full_hdr = rcrp->full_hdr_flag; 2681 cfig2_p->bits.offset = rcrp->sw_priv_hdr_len; 2682 2683 rbrp->rx_rcr_p = rcrp; 2684 rcrp->rx_rbr_p = rbrp; 2685 *rcr_p = rcrp; 2686 *rx_mbox_p = mboxp; 2687 2688 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2689 "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 2690 return (status); 2691 } 2692 2693 /*ARGSUSED*/ 2694 static void 2695 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep, 2696 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 2697 { 2698 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2699 "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc)); 2700 2701 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 2702 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 2703 2704 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2705 "<== hxge_unmap_rxdma_channel_cfg_ring")); 2706 } 2707 2708 static hxge_status_t 2709 hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel, 2710 p_hxge_dma_common_t *dma_buf_p, 2711 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 2712 { 2713 p_rx_rbr_ring_t rbrp; 2714 p_hxge_dma_common_t dma_bufp, tmp_bufp; 2715 p_rx_msg_t *rx_msg_ring; 2716 p_rx_msg_t rx_msg_p; 2717 p_mblk_t mblk_p; 2718 2719 rxring_info_t *ring_info; 2720 hxge_status_t status = HXGE_OK; 2721 int i, j, index; 2722 uint32_t size, bsize, nblocks, nmsgs; 2723 2724 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2725 "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel)); 2726 2727 dma_bufp = tmp_bufp = *dma_buf_p; 2728 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2729 " hxge_map_rxdma_channel_buf_ring: channel %d to map %d " 2730 "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp)); 2731 2732 nmsgs = 0; 2733 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2734 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2735 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2736 "bufp 0x%016llx nblocks %d nmsgs %d", 2737 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2738 nmsgs += tmp_bufp->nblocks; 2739 } 2740 if (!nmsgs) { 2741 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2742 "<== hxge_map_rxdma_channel_buf_ring: channel %d " 2743 "no msg blocks", channel)); 2744 status = HXGE_ERROR; 2745 goto hxge_map_rxdma_channel_buf_ring_exit; 2746 } 2747 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 2748 2749 size = nmsgs * sizeof (p_rx_msg_t); 2750 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2751 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 2752 KM_SLEEP); 2753 2754 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 2755 (void *) hxgep->interrupt_cookie); 2756 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 2757 (void *) hxgep->interrupt_cookie); 2758 rbrp->rdc = channel; 2759 rbrp->num_blocks = num_chunks; 2760 rbrp->tnblocks = nmsgs; 2761 rbrp->rbb_max = nmsgs; 2762 rbrp->rbr_max_size = nmsgs; 2763 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 2764 2765 rbrp->pages_to_post = 0; 2766 rbrp->pages_to_skip = 20; 2767 rbrp->pages_to_post_threshold = rbrp->rbb_max - rbrp->pages_to_skip / 2; 2768 2769 /* 2770 * Buffer sizes suggested by NIU architect. 256, 512 and 2K. 2771 */ 2772 2773 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 2774 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 2775 rbrp->hpi_pkt_buf_size0 = SIZE_256B; 2776 2777 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 2778 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 2779 rbrp->hpi_pkt_buf_size1 = SIZE_1KB; 2780 2781 rbrp->block_size = hxgep->rx_default_block_size; 2782 2783 if (!hxgep->param_arr[param_accept_jumbo].value) { 2784 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 2785 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 2786 rbrp->hpi_pkt_buf_size2 = SIZE_2KB; 2787 } else { 2788 if (rbrp->block_size >= 0x2000) { 2789 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2790 "<== hxge_map_rxdma_channel_buf_ring: channel %d " 2791 "no msg blocks", channel)); 2792 status = HXGE_ERROR; 2793 goto hxge_map_rxdma_channel_buf_ring_fail1; 2794 } else { 2795 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 2796 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 2797 rbrp->hpi_pkt_buf_size2 = SIZE_4KB; 2798 } 2799 } 2800 2801 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2802 "==> hxge_map_rxdma_channel_buf_ring: channel %d " 2803 "actual rbr max %d rbb_max %d nmsgs %d " 2804 "rbrp->block_size %d default_block_size %d " 2805 "(config hxge_rbr_size %d hxge_rbr_spare_size %d)", 2806 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 2807 rbrp->block_size, hxgep->rx_default_block_size, 2808 hxge_rbr_size, hxge_rbr_spare_size)); 2809 2810 /* 2811 * Map in buffers from the buffer pool. 2812 * Note that num_blocks is the num_chunks. For Sparc, there is likely 2813 * only one chunk. For x86, there will be many chunks. 2814 * Loop over chunks. 2815 */ 2816 index = 0; 2817 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 2818 bsize = dma_bufp->block_size; 2819 nblocks = dma_bufp->nblocks; 2820 #if defined(__i386) 2821 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 2822 #else 2823 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 2824 #endif 2825 ring_info->buffer[i].buf_index = i; 2826 ring_info->buffer[i].buf_size = dma_bufp->alength; 2827 ring_info->buffer[i].start_index = index; 2828 #if defined(__i386) 2829 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 2830 #else 2831 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 2832 #endif 2833 2834 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2835 " hxge_map_rxdma_channel_buf_ring: map channel %d " 2836 "chunk %d nblocks %d chunk_size %x block_size 0x%x " 2837 "dma_bufp $%p dvma_addr $%p", channel, i, 2838 dma_bufp->nblocks, 2839 ring_info->buffer[i].buf_size, bsize, dma_bufp, 2840 ring_info->buffer[i].dvma_addr)); 2841 2842 /* loop over blocks within a chunk */ 2843 for (j = 0; j < nblocks; j++) { 2844 if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO, 2845 dma_bufp)) == NULL) { 2846 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2847 "allocb failed (index %d i %d j %d)", 2848 index, i, j)); 2849 goto hxge_map_rxdma_channel_buf_ring_fail1; 2850 } 2851 rx_msg_ring[index] = rx_msg_p; 2852 rx_msg_p->block_index = index; 2853 rx_msg_p->shifted_addr = (uint32_t) 2854 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 2855 RBR_BKADDR_SHIFT)); 2856 /* 2857 * Too much output 2858 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2859 * "index %d j %d rx_msg_p $%p mblk %p", 2860 * index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 2861 */ 2862 mblk_p = rx_msg_p->rx_mblk_p; 2863 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 2864 2865 rbrp->rbr_ref_cnt++; 2866 index++; 2867 rx_msg_p->buf_dma.dma_channel = channel; 2868 } 2869 } 2870 if (i < rbrp->num_blocks) { 2871 goto hxge_map_rxdma_channel_buf_ring_fail1; 2872 } 2873 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2874 "hxge_map_rxdma_channel_buf_ring: done buf init " 2875 "channel %d msg block entries %d", channel, index)); 2876 ring_info->block_size_mask = bsize - 1; 2877 rbrp->rx_msg_ring = rx_msg_ring; 2878 rbrp->dma_bufp = dma_buf_p; 2879 rbrp->ring_info = ring_info; 2880 2881 status = hxge_rxbuf_index_info_init(hxgep, rbrp); 2882 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: " 2883 "channel %d done buf info init", channel)); 2884 2885 /* 2886 * Finally, permit hxge_freeb() to call hxge_post_page(). 2887 */ 2888 rbrp->rbr_state = RBR_POSTING; 2889 2890 *rbr_p = rbrp; 2891 2892 goto hxge_map_rxdma_channel_buf_ring_exit; 2893 2894 hxge_map_rxdma_channel_buf_ring_fail1: 2895 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2896 " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 2897 channel, status)); 2898 2899 index--; 2900 for (; index >= 0; index--) { 2901 rx_msg_p = rx_msg_ring[index]; 2902 if (rx_msg_p != NULL) { 2903 hxge_freeb(rx_msg_p); 2904 rx_msg_ring[index] = NULL; 2905 } 2906 } 2907 2908 hxge_map_rxdma_channel_buf_ring_fail: 2909 MUTEX_DESTROY(&rbrp->post_lock); 2910 MUTEX_DESTROY(&rbrp->lock); 2911 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2912 KMEM_FREE(rx_msg_ring, size); 2913 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 2914 2915 status = HXGE_ERROR; 2916 2917 hxge_map_rxdma_channel_buf_ring_exit: 2918 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2919 "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 2920 2921 return (status); 2922 } 2923 2924 /*ARGSUSED*/ 2925 static void 2926 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep, 2927 p_rx_rbr_ring_t rbr_p) 2928 { 2929 p_rx_msg_t *rx_msg_ring; 2930 p_rx_msg_t rx_msg_p; 2931 rxring_info_t *ring_info; 2932 int i; 2933 uint32_t size; 2934 2935 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2936 "==> hxge_unmap_rxdma_channel_buf_ring")); 2937 if (rbr_p == NULL) { 2938 HXGE_DEBUG_MSG((hxgep, RX_CTL, 2939 "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 2940 return; 2941 } 2942 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2943 "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc)); 2944 2945 rx_msg_ring = rbr_p->rx_msg_ring; 2946 ring_info = rbr_p->ring_info; 2947 2948 if (rx_msg_ring == NULL || ring_info == NULL) { 2949 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2950 "<== hxge_unmap_rxdma_channel_buf_ring: " 2951 "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info)); 2952 return; 2953 } 2954 2955 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 2956 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2957 " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 2958 "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks, 2959 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 2960 2961 for (i = 0; i < rbr_p->tnblocks; i++) { 2962 rx_msg_p = rx_msg_ring[i]; 2963 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 2964 " hxge_unmap_rxdma_channel_buf_ring: " 2965 "rx_msg_p $%p", rx_msg_p)); 2966 if (rx_msg_p != NULL) { 2967 hxge_freeb(rx_msg_p); 2968 rx_msg_ring[i] = NULL; 2969 } 2970 } 2971 2972 /* 2973 * We no longer may use the mutex <post_lock>. By setting 2974 * <rbr_state> to anything but POSTING, we prevent 2975 * hxge_post_page() from accessing a dead mutex. 2976 */ 2977 rbr_p->rbr_state = RBR_UNMAPPING; 2978 MUTEX_DESTROY(&rbr_p->post_lock); 2979 2980 MUTEX_DESTROY(&rbr_p->lock); 2981 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 2982 KMEM_FREE(rx_msg_ring, size); 2983 2984 if (rbr_p->rbr_ref_cnt == 0) { 2985 /* This is the normal state of affairs. */ 2986 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 2987 } else { 2988 /* 2989 * Some of our buffers are still being used. 2990 * Therefore, tell hxge_freeb() this ring is 2991 * unmapped, so it may free <rbr_p> for us. 2992 */ 2993 rbr_p->rbr_state = RBR_UNMAPPED; 2994 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 2995 "unmap_rxdma_buf_ring: %d %s outstanding.", 2996 rbr_p->rbr_ref_cnt, 2997 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 2998 } 2999 3000 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3001 "<== hxge_unmap_rxdma_channel_buf_ring")); 3002 } 3003 3004 static hxge_status_t 3005 hxge_rxdma_hw_start_common(p_hxge_t hxgep) 3006 { 3007 hxge_status_t status = HXGE_OK; 3008 3009 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 3010 3011 /* 3012 * Load the sharable parameters by writing to the function zero control 3013 * registers. These FZC registers should be initialized only once for 3014 * the entire chip. 3015 */ 3016 (void) hxge_init_fzc_rx_common(hxgep); 3017 3018 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common")); 3019 3020 return (status); 3021 } 3022 3023 static hxge_status_t 3024 hxge_rxdma_hw_start(p_hxge_t hxgep) 3025 { 3026 int i, ndmas; 3027 uint16_t channel; 3028 p_rx_rbr_rings_t rx_rbr_rings; 3029 p_rx_rbr_ring_t *rbr_rings; 3030 p_rx_rcr_rings_t rx_rcr_rings; 3031 p_rx_rcr_ring_t *rcr_rings; 3032 p_rx_mbox_areas_t rx_mbox_areas_p; 3033 p_rx_mbox_t *rx_mbox_p; 3034 hxge_status_t status = HXGE_OK; 3035 3036 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start")); 3037 3038 rx_rbr_rings = hxgep->rx_rbr_rings; 3039 rx_rcr_rings = hxgep->rx_rcr_rings; 3040 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3041 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3042 "<== hxge_rxdma_hw_start: NULL ring pointers")); 3043 return (HXGE_ERROR); 3044 } 3045 3046 ndmas = rx_rbr_rings->ndmas; 3047 if (ndmas == 0) { 3048 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3049 "<== hxge_rxdma_hw_start: no dma channel allocated")); 3050 return (HXGE_ERROR); 3051 } 3052 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3053 "==> hxge_rxdma_hw_start (ndmas %d)", ndmas)); 3054 3055 /* 3056 * Scrub the RDC Rx DMA Prefetch Buffer Command. 3057 */ 3058 for (i = 0; i < 128; i++) { 3059 HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i); 3060 } 3061 3062 /* 3063 * Scrub Rx DMA Shadow Tail Command. 3064 */ 3065 for (i = 0; i < 64; i++) { 3066 HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i); 3067 } 3068 3069 /* 3070 * Scrub Rx DMA Control Fifo Command. 3071 */ 3072 for (i = 0; i < 512; i++) { 3073 HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i); 3074 } 3075 3076 /* 3077 * Scrub Rx DMA Data Fifo Command. 3078 */ 3079 for (i = 0; i < 1536; i++) { 3080 HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i); 3081 } 3082 3083 /* 3084 * Reset the FIFO Error Stat. 3085 */ 3086 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF); 3087 3088 /* Set the error mask to receive interrupts */ 3089 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3090 3091 rbr_rings = rx_rbr_rings->rbr_rings; 3092 rcr_rings = rx_rcr_rings->rcr_rings; 3093 rx_mbox_areas_p = hxgep->rx_mbox_areas_p; 3094 if (rx_mbox_areas_p) { 3095 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3096 } 3097 3098 for (i = 0; i < ndmas; i++) { 3099 channel = rbr_rings[i]->rdc; 3100 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3101 "==> hxge_rxdma_hw_start (ndmas %d) channel %d", 3102 ndmas, channel)); 3103 status = hxge_rxdma_start_channel(hxgep, channel, 3104 (p_rx_rbr_ring_t)rbr_rings[i], 3105 (p_rx_rcr_ring_t)rcr_rings[i], 3106 (p_rx_mbox_t)rx_mbox_p[i]); 3107 if (status != HXGE_OK) { 3108 goto hxge_rxdma_hw_start_fail1; 3109 } 3110 } 3111 3112 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: " 3113 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3114 rx_rbr_rings, rx_rcr_rings)); 3115 goto hxge_rxdma_hw_start_exit; 3116 3117 hxge_rxdma_hw_start_fail1: 3118 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3119 "==> hxge_rxdma_hw_start: disable " 3120 "(status 0x%x channel %d i %d)", status, channel, i)); 3121 for (; i >= 0; i--) { 3122 channel = rbr_rings[i]->rdc; 3123 (void) hxge_rxdma_stop_channel(hxgep, channel); 3124 } 3125 3126 hxge_rxdma_hw_start_exit: 3127 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3128 "==> hxge_rxdma_hw_start: (status 0x%x)", status)); 3129 return (status); 3130 } 3131 3132 static void 3133 hxge_rxdma_hw_stop(p_hxge_t hxgep) 3134 { 3135 int i, ndmas; 3136 uint16_t channel; 3137 p_rx_rbr_rings_t rx_rbr_rings; 3138 p_rx_rbr_ring_t *rbr_rings; 3139 p_rx_rcr_rings_t rx_rcr_rings; 3140 3141 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop")); 3142 3143 rx_rbr_rings = hxgep->rx_rbr_rings; 3144 rx_rcr_rings = hxgep->rx_rcr_rings; 3145 3146 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3147 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3148 "<== hxge_rxdma_hw_stop: NULL ring pointers")); 3149 return; 3150 } 3151 3152 ndmas = rx_rbr_rings->ndmas; 3153 if (!ndmas) { 3154 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3155 "<== hxge_rxdma_hw_stop: no dma channel allocated")); 3156 return; 3157 } 3158 3159 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3160 "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3161 3162 rbr_rings = rx_rbr_rings->rbr_rings; 3163 for (i = 0; i < ndmas; i++) { 3164 channel = rbr_rings[i]->rdc; 3165 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3166 "==> hxge_rxdma_hw_stop (ndmas %d) channel %d", 3167 ndmas, channel)); 3168 (void) hxge_rxdma_stop_channel(hxgep, channel); 3169 } 3170 3171 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: " 3172 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3173 rx_rbr_rings, rx_rcr_rings)); 3174 3175 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop")); 3176 } 3177 3178 static hxge_status_t 3179 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel, 3180 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3181 { 3182 hpi_handle_t handle; 3183 hpi_status_t rs = HPI_SUCCESS; 3184 rdc_stat_t cs; 3185 rdc_int_mask_t ent_mask; 3186 hxge_status_t status = HXGE_OK; 3187 3188 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel")); 3189 3190 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3191 3192 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: " 3193 "hpi handle addr $%p acc $%p", 3194 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3195 3196 /* Reset RXDMA channel */ 3197 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3198 if (rs != HPI_SUCCESS) { 3199 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3200 "==> hxge_rxdma_start_channel: " 3201 "reset rxdma failed (0x%08x channel %d)", 3202 status, channel)); 3203 return (HXGE_ERROR | rs); 3204 } 3205 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3206 "==> hxge_rxdma_start_channel: reset done: channel %d", channel)); 3207 3208 /* 3209 * Initialize the RXDMA channel specific FZC control configurations. 3210 * These FZC registers are pertaining to each RX channel (logical 3211 * pages). 3212 */ 3213 status = hxge_init_fzc_rxdma_channel(hxgep, 3214 channel, rbr_p, rcr_p, mbox_p); 3215 if (status != HXGE_OK) { 3216 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3217 "==> hxge_rxdma_start_channel: " 3218 "init fzc rxdma failed (0x%08x channel %d)", 3219 status, channel)); 3220 return (status); 3221 } 3222 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3223 "==> hxge_rxdma_start_channel: fzc done")); 3224 3225 /* 3226 * Zero out the shadow and prefetch ram. 3227 */ 3228 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3229 "==> hxge_rxdma_start_channel: ram done")); 3230 3231 /* Set up the interrupt event masks. */ 3232 ent_mask.value = 0; 3233 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3234 if (rs != HPI_SUCCESS) { 3235 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3236 "==> hxge_rxdma_start_channel: " 3237 "init rxdma event masks failed (0x%08x channel %d)", 3238 status, channel)); 3239 return (HXGE_ERROR | rs); 3240 } 3241 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3242 "event done: channel %d (mask 0x%016llx)", 3243 channel, ent_mask.value)); 3244 3245 /* 3246 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA 3247 * channels and enable each DMA channel. 3248 */ 3249 status = hxge_enable_rxdma_channel(hxgep, 3250 channel, rbr_p, rcr_p, mbox_p); 3251 if (status != HXGE_OK) { 3252 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3253 " hxge_rxdma_start_channel: " 3254 " init enable rxdma failed (0x%08x channel %d)", 3255 status, channel)); 3256 return (status); 3257 } 3258 3259 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3260 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3261 3262 /* 3263 * Initialize the receive DMA control and status register 3264 * Note that rdc_stat HAS to be set after RBR and RCR rings are set 3265 */ 3266 cs.value = 0; 3267 cs.bits.mex = 1; 3268 cs.bits.rcr_thres = 1; 3269 cs.bits.rcr_to = 1; 3270 cs.bits.rbr_empty = 1; 3271 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3272 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3273 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 3274 if (status != HXGE_OK) { 3275 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3276 "==> hxge_rxdma_start_channel: " 3277 "init rxdma control register failed (0x%08x channel %d", 3278 status, channel)); 3279 return (status); 3280 } 3281 3282 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: " 3283 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3284 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, 3285 "==> hxge_rxdma_start_channel: enable done")); 3286 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel")); 3287 3288 return (HXGE_OK); 3289 } 3290 3291 static hxge_status_t 3292 hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel) 3293 { 3294 hpi_handle_t handle; 3295 hpi_status_t rs = HPI_SUCCESS; 3296 rdc_stat_t cs; 3297 rdc_int_mask_t ent_mask; 3298 hxge_status_t status = HXGE_OK; 3299 3300 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel")); 3301 3302 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3303 3304 HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: " 3305 "hpi handle addr $%p acc $%p", 3306 hxgep->hpi_handle.regp, hxgep->hpi_handle.regh)); 3307 3308 /* Reset RXDMA channel */ 3309 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3310 if (rs != HPI_SUCCESS) { 3311 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3312 " hxge_rxdma_stop_channel: " 3313 " reset rxdma failed (0x%08x channel %d)", 3314 rs, channel)); 3315 return (HXGE_ERROR | rs); 3316 } 3317 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3318 "==> hxge_rxdma_stop_channel: reset done")); 3319 3320 /* Set up the interrupt event masks. */ 3321 ent_mask.value = RDC_INT_MASK_ALL; 3322 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3323 if (rs != HPI_SUCCESS) { 3324 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3325 "==> hxge_rxdma_stop_channel: " 3326 "set rxdma event masks failed (0x%08x channel %d)", 3327 rs, channel)); 3328 return (HXGE_ERROR | rs); 3329 } 3330 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3331 "==> hxge_rxdma_stop_channel: event done")); 3332 3333 /* Initialize the receive DMA control and status register */ 3334 cs.value = 0; 3335 status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs); 3336 3337 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control " 3338 " to default (all 0s) 0x%08x", cs.value)); 3339 3340 if (status != HXGE_OK) { 3341 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3342 " hxge_rxdma_stop_channel: init rxdma" 3343 " control register failed (0x%08x channel %d", 3344 status, channel)); 3345 return (status); 3346 } 3347 3348 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3349 "==> hxge_rxdma_stop_channel: control done")); 3350 3351 /* disable dma channel */ 3352 status = hxge_disable_rxdma_channel(hxgep, channel); 3353 3354 if (status != HXGE_OK) { 3355 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3356 " hxge_rxdma_stop_channel: " 3357 " init enable rxdma failed (0x%08x channel %d)", 3358 status, channel)); 3359 return (status); 3360 } 3361 3362 HXGE_DEBUG_MSG((hxgep, RX_CTL, 3363 "==> hxge_rxdma_stop_channel: disable done")); 3364 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel")); 3365 3366 return (HXGE_OK); 3367 } 3368 3369 hxge_status_t 3370 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep) 3371 { 3372 hpi_handle_t handle; 3373 p_hxge_rdc_sys_stats_t statsp; 3374 rdc_fifo_err_stat_t stat; 3375 hxge_status_t status = HXGE_OK; 3376 3377 handle = hxgep->hpi_handle; 3378 statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats; 3379 3380 /* Clear the int_dbg register in case it is an injected err */ 3381 HXGE_REG_WR64(handle, RDC_FIFO_ERR_INT_DBG, 0x0); 3382 3383 /* Get the error status and clear the register */ 3384 HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value); 3385 HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value); 3386 3387 if (stat.bits.rx_ctrl_fifo_sec) { 3388 statsp->ctrl_fifo_sec++; 3389 if (statsp->ctrl_fifo_sec == 1) 3390 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3391 "==> hxge_rxdma_handle_sys_errors: " 3392 "rx_ctrl_fifo_sec")); 3393 } 3394 3395 if (stat.bits.rx_ctrl_fifo_ded) { 3396 /* Global fatal error encountered */ 3397 statsp->ctrl_fifo_ded++; 3398 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3399 HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED); 3400 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3401 "==> hxge_rxdma_handle_sys_errors: " 3402 "fatal error: rx_ctrl_fifo_ded error")); 3403 } 3404 3405 if (stat.bits.rx_data_fifo_sec) { 3406 statsp->data_fifo_sec++; 3407 if (statsp->data_fifo_sec == 1) 3408 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3409 "==> hxge_rxdma_handle_sys_errors: " 3410 "rx_data_fifo_sec")); 3411 } 3412 3413 if (stat.bits.rx_data_fifo_ded) { 3414 /* Global fatal error encountered */ 3415 statsp->data_fifo_ded++; 3416 HXGE_FM_REPORT_ERROR(hxgep, NULL, 3417 HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED); 3418 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3419 "==> hxge_rxdma_handle_sys_errors: " 3420 "fatal error: rx_data_fifo_ded error")); 3421 } 3422 3423 if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) { 3424 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3425 " hxge_rxdma_handle_sys_errors: fatal error\n")); 3426 status = hxge_rx_port_fatal_err_recover(hxgep); 3427 if (status == HXGE_OK) { 3428 FM_SERVICE_RESTORED(hxgep); 3429 } 3430 } 3431 3432 return (HXGE_OK); 3433 } 3434 3435 static hxge_status_t 3436 hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel) 3437 { 3438 hpi_handle_t handle; 3439 hpi_status_t rs = HPI_SUCCESS; 3440 hxge_status_t status = HXGE_OK; 3441 p_rx_rbr_ring_t rbrp; 3442 p_rx_rcr_ring_t rcrp; 3443 p_rx_mbox_t mboxp; 3444 rdc_int_mask_t ent_mask; 3445 p_hxge_dma_common_t dmap; 3446 int ring_idx; 3447 p_rx_msg_t rx_msg_p; 3448 int i; 3449 uint32_t hxge_port_rcr_size; 3450 uint64_t tmp; 3451 3452 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover")); 3453 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3454 "Recovering from RxDMAChannel#%d error...", channel)); 3455 3456 /* 3457 * Stop the dma channel waits for the stop done. If the stop done bit 3458 * is not set, then create an error. 3459 */ 3460 3461 handle = HXGE_DEV_HPI_HANDLE(hxgep); 3462 3463 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop...")); 3464 3465 ring_idx = hxge_rxdma_get_ring_index(hxgep, channel); 3466 rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[ring_idx]; 3467 rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[ring_idx]; 3468 3469 MUTEX_ENTER(&rcrp->lock); 3470 MUTEX_ENTER(&rbrp->lock); 3471 MUTEX_ENTER(&rbrp->post_lock); 3472 3473 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel...")); 3474 3475 rs = hpi_rxdma_cfg_rdc_disable(handle, channel); 3476 if (rs != HPI_SUCCESS) { 3477 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3478 "hxge_disable_rxdma_channel:failed")); 3479 goto fail; 3480 } 3481 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt...")); 3482 3483 /* Disable interrupt */ 3484 ent_mask.value = RDC_INT_MASK_ALL; 3485 rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 3486 if (rs != HPI_SUCCESS) { 3487 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3488 "Set rxdma event masks failed (channel %d)", channel)); 3489 } 3490 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset...")); 3491 3492 /* Reset RXDMA channel */ 3493 rs = hpi_rxdma_cfg_rdc_reset(handle, channel); 3494 if (rs != HPI_SUCCESS) { 3495 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3496 "Reset rxdma failed (channel %d)", channel)); 3497 goto fail; 3498 } 3499 hxge_port_rcr_size = hxgep->hxge_port_rcr_size; 3500 mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 3501 3502 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3503 rbrp->rbr_rd_index = 0; 3504 rbrp->pages_to_post = 0; 3505 3506 rcrp->comp_rd_index = 0; 3507 rcrp->comp_wt_index = 0; 3508 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3509 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3510 #if defined(__i386) 3511 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3512 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3513 #else 3514 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3515 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3516 #endif 3517 3518 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3519 (hxge_port_rcr_size - 1); 3520 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3521 (hxge_port_rcr_size - 1); 3522 3523 rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc); 3524 rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3; 3525 3526 dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc; 3527 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3528 3529 HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n", 3530 rbrp->rbr_max_size)); 3531 3532 for (i = 0; i < rbrp->rbr_max_size; i++) { 3533 /* Reset all the buffers */ 3534 rx_msg_p = rbrp->rx_msg_ring[i]; 3535 rx_msg_p->ref_cnt = 1; 3536 rx_msg_p->free = B_TRUE; 3537 rx_msg_p->cur_usage_cnt = 0; 3538 rx_msg_p->max_usage_cnt = 0; 3539 rx_msg_p->pkt_buf_size = 0; 3540 } 3541 3542 HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start...")); 3543 3544 status = hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp); 3545 if (status != HXGE_OK) { 3546 goto fail; 3547 } 3548 3549 /* 3550 * The DMA channel may disable itself automatically. 3551 * The following is a work-around. 3552 */ 3553 HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp); 3554 rs = hpi_rxdma_cfg_rdc_enable(handle, channel); 3555 if (rs != HPI_SUCCESS) { 3556 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3557 "hpi_rxdma_cfg_rdc_enable (channel %d)", channel)); 3558 } 3559 3560 MUTEX_EXIT(&rbrp->post_lock); 3561 MUTEX_EXIT(&rbrp->lock); 3562 MUTEX_EXIT(&rcrp->lock); 3563 3564 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3565 "Recovery Successful, RxDMAChannel#%d Restored", channel)); 3566 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover")); 3567 3568 return (HXGE_OK); 3569 3570 fail: 3571 MUTEX_EXIT(&rbrp->post_lock); 3572 MUTEX_EXIT(&rbrp->lock); 3573 MUTEX_EXIT(&rcrp->lock); 3574 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3575 3576 return (HXGE_ERROR | rs); 3577 } 3578 3579 static hxge_status_t 3580 hxge_rx_port_fatal_err_recover(p_hxge_t hxgep) 3581 { 3582 hxge_status_t status = HXGE_OK; 3583 p_hxge_dma_common_t *dma_buf_p; 3584 uint16_t channel; 3585 int ndmas; 3586 int i; 3587 block_reset_t reset_reg; 3588 3589 HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover")); 3590 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ...")); 3591 3592 /* Reset RDC block from PEU for this fatal error */ 3593 reset_reg.value = 0; 3594 reset_reg.bits.rdc_rst = 1; 3595 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value); 3596 3597 /* Disable RxMAC */ 3598 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n")); 3599 if (hxge_rx_vmac_disable(hxgep) != HXGE_OK) 3600 goto fail; 3601 3602 HXGE_DELAY(1000); 3603 3604 /* Restore any common settings after PEU reset */ 3605 if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK) 3606 goto fail; 3607 3608 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels...")); 3609 3610 ndmas = hxgep->rx_buf_pool_p->ndmas; 3611 dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p; 3612 3613 for (i = 0; i < ndmas; i++) { 3614 channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel; 3615 if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) { 3616 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3617 "Could not recover channel %d", channel)); 3618 } 3619 } 3620 3621 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC...")); 3622 3623 /* Reset RxMAC */ 3624 if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) { 3625 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3626 "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC")); 3627 goto fail; 3628 } 3629 3630 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC...")); 3631 3632 /* Re-Initialize RxMAC */ 3633 if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) { 3634 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3635 "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC")); 3636 goto fail; 3637 } 3638 HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC...")); 3639 3640 /* Re-enable RxMAC */ 3641 if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) { 3642 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3643 "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC")); 3644 goto fail; 3645 } 3646 3647 /* Reset the error mask since PEU reset cleared it */ 3648 HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0); 3649 3650 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 3651 "Recovery Successful, RxPort Restored")); 3652 HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover")); 3653 3654 return (HXGE_OK); 3655 fail: 3656 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovery failed")); 3657 return (status); 3658 } 3659