1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/nxge/nxge_impl.h> 29 #include <sys/nxge/nxge_rxdma.h> 30 #include <sys/nxge/nxge_hio.h> 31 32 #if !defined(_BIG_ENDIAN) 33 #include <npi_rx_rd32.h> 34 #endif 35 #include <npi_rx_rd64.h> 36 #include <npi_rx_wr64.h> 37 38 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 39 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 40 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 41 (rdc + nxgep->pt_config.hw_config.start_rdc) 42 43 /* 44 * Globals: tunable parameters (/etc/system or adb) 45 * 46 */ 47 extern uint32_t nxge_rbr_size; 48 extern uint32_t nxge_rcr_size; 49 extern uint32_t nxge_rbr_spare_size; 50 51 extern uint32_t nxge_mblks_pending; 52 53 /* 54 * Tunable to reduce the amount of time spent in the 55 * ISR doing Rx Processing. 56 */ 57 extern uint32_t nxge_max_rx_pkts; 58 boolean_t nxge_jumbo_enable; 59 60 /* 61 * Tunables to manage the receive buffer blocks. 62 * 63 * nxge_rx_threshold_hi: copy all buffers. 64 * nxge_rx_bcopy_size_type: receive buffer block size type. 65 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
66 */ 67 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 68 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 69 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 70 71 extern uint32_t nxge_cksum_offload; 72 73 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 74 static void nxge_unmap_rxdma(p_nxge_t, int); 75 76 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 77 78 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 79 static void nxge_rxdma_hw_stop(p_nxge_t, int); 80 81 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 82 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 83 uint32_t, 84 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 85 p_rx_mbox_t *); 86 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 87 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 88 89 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 90 uint16_t, 91 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 92 p_rx_rcr_ring_t *, p_rx_mbox_t *); 93 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 94 p_rx_rcr_ring_t, p_rx_mbox_t); 95 96 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 97 uint16_t, 98 p_nxge_dma_common_t *, 99 p_rx_rbr_ring_t *, uint32_t); 100 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 101 p_rx_rbr_ring_t); 102 103 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 104 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 105 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 106 107 static mblk_t * 108 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 109 110 static void nxge_receive_packet(p_nxge_t, 111 p_rx_rcr_ring_t, 112 p_rcr_entry_t, 113 boolean_t *, 114 mblk_t **, mblk_t **); 115 116 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 117 118 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 119 static void nxge_freeb(p_rx_msg_t); 120 static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 121 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 122 123 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 124 uint32_t, uint32_t); 125 126 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 127 p_rx_rbr_ring_t); 128 129 130 static nxge_status_t 131 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 132 133 nxge_status_t 134 nxge_rx_port_fatal_err_recover(p_nxge_t); 135 136 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 137 138 nxge_status_t 139 nxge_init_rxdma_channels(p_nxge_t nxgep) 140 { 141 nxge_grp_set_t *set = &nxgep->rx_set; 142 int i, count; 143 144 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 145 146 if (!isLDOMguest(nxgep)) { 147 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 148 cmn_err(CE_NOTE, "hw_start_common"); 149 return (NXGE_ERROR); 150 } 151 } 152 153 /* 154 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 155 * We only have 8 hardware RDC tables, but we may have 156 * up to 16 logical (software-defined) groups of RDCS, 157 * if we make use of layer 3 & 4 hardware classification. 
158 */ 159 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 160 if ((1 << i) & set->lg.map) { 161 int channel; 162 nxge_grp_t *group = set->group[i]; 163 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 164 if ((1 << channel) & group->map) { 165 if ((nxge_grp_dc_add(nxgep, 166 (vr_handle_t)group, 167 VP_BOUND_RX, channel))) 168 return (NXGE_ERROR); 169 } 170 } 171 } 172 if (++count == set->lg.count) 173 break; 174 } 175 176 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 177 178 return (NXGE_OK); 179 } 180 181 nxge_status_t 182 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 183 { 184 nxge_status_t status; 185 186 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 187 188 status = nxge_map_rxdma(nxge, channel); 189 if (status != NXGE_OK) { 190 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 191 "<== nxge_init_rxdma: status 0x%x", status)); 192 return (status); 193 } 194 195 status = nxge_rxdma_hw_start(nxge, channel); 196 if (status != NXGE_OK) { 197 nxge_unmap_rxdma(nxge, channel); 198 } 199 200 if (!nxge->statsp->rdc_ksp[channel]) 201 nxge_setup_rdc_kstats(nxge, channel); 202 203 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 204 "<== nxge_init_rxdma_channel: status 0x%x", status)); 205 206 return (status); 207 } 208 209 void 210 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 211 { 212 nxge_grp_set_t *set = &nxgep->rx_set; 213 int rdc; 214 215 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 216 217 if (set->owned.map == 0) { 218 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 219 "nxge_uninit_rxdma_channels: no channels")); 220 return; 221 } 222 223 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 224 if ((1 << rdc) & set->owned.map) { 225 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 226 } 227 } 228 229 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 230 } 231 232 void 233 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 234 { 235 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 236 237 if (nxgep->statsp->rdc_ksp[channel]) { 238 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 239 nxgep->statsp->rdc_ksp[channel] = 0; 240 } 241 242 nxge_rxdma_hw_stop(nxgep, channel); 243 nxge_unmap_rxdma(nxgep, channel); 244 245 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 246 } 247 248 nxge_status_t 249 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 250 { 251 npi_handle_t handle; 252 npi_status_t rs = NPI_SUCCESS; 253 nxge_status_t status = NXGE_OK; 254 255 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 256 257 handle = NXGE_DEV_NPI_HANDLE(nxgep); 258 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 259 260 if (rs != NPI_SUCCESS) { 261 status = NXGE_ERROR | rs; 262 } 263 264 return (status); 265 } 266 267 void 268 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 269 { 270 nxge_grp_set_t *set = &nxgep->rx_set; 271 int rdc; 272 273 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 274 275 if (!isLDOMguest(nxgep)) { 276 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 277 (void) npi_rxdma_dump_fzc_regs(handle); 278 } 279 280 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 281 NXGE_DEBUG_MSG((nxgep, TX_CTL, 282 "nxge_rxdma_regs_dump_channels: " 283 "NULL ring pointer(s)")); 284 return; 285 } 286 287 if (set->owned.map == 0) { 288 NXGE_DEBUG_MSG((nxgep, RX_CTL, 289 "nxge_rxdma_regs_dump_channels: no channels")); 290 return; 291 } 292 293 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 294 if ((1 << rdc) & set->owned.map) { 295 rx_rbr_ring_t *ring = 296 
nxgep->rx_rbr_rings->rbr_rings[rdc]; 297 if (ring) { 298 (void) nxge_dump_rxdma_channel(nxgep, rdc); 299 } 300 } 301 } 302 303 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 304 } 305 306 nxge_status_t 307 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 308 { 309 npi_handle_t handle; 310 npi_status_t rs = NPI_SUCCESS; 311 nxge_status_t status = NXGE_OK; 312 313 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 314 315 handle = NXGE_DEV_NPI_HANDLE(nxgep); 316 rs = npi_rxdma_dump_rdc_regs(handle, channel); 317 318 if (rs != NPI_SUCCESS) { 319 status = NXGE_ERROR | rs; 320 } 321 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 322 return (status); 323 } 324 325 nxge_status_t 326 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 327 p_rx_dma_ent_msk_t mask_p) 328 { 329 npi_handle_t handle; 330 npi_status_t rs = NPI_SUCCESS; 331 nxge_status_t status = NXGE_OK; 332 333 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 334 "<== nxge_init_rxdma_channel_event_mask")); 335 336 handle = NXGE_DEV_NPI_HANDLE(nxgep); 337 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 338 if (rs != NPI_SUCCESS) { 339 status = NXGE_ERROR | rs; 340 } 341 342 return (status); 343 } 344 345 nxge_status_t 346 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 347 p_rx_dma_ctl_stat_t cs_p) 348 { 349 npi_handle_t handle; 350 npi_status_t rs = NPI_SUCCESS; 351 nxge_status_t status = NXGE_OK; 352 353 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 354 "<== nxge_init_rxdma_channel_cntl_stat")); 355 356 handle = NXGE_DEV_NPI_HANDLE(nxgep); 357 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 358 359 if (rs != NPI_SUCCESS) { 360 status = NXGE_ERROR | rs; 361 } 362 363 return (status); 364 } 365 366 /* 367 * nxge_rxdma_cfg_rdcgrp_default_rdc 368 * 369 * Set the default RDC for an RDC Group (Table) 370 * 371 * Arguments: 372 * nxgep 373 * rdcgrp The group to modify 374 * rdc The new default RDC. 375 * 376 * Notes: 377 * 378 * NPI/NXGE function calls: 379 * npi_rxdma_cfg_rdc_table_default_rdc() 380 * 381 * Registers accessed: 382 * RDC_TBL_REG: FZC_ZCP + 0x10000 383 * 384 * Context: 385 * Service domain 386 */ 387 nxge_status_t 388 nxge_rxdma_cfg_rdcgrp_default_rdc( 389 p_nxge_t nxgep, 390 uint8_t rdcgrp, 391 uint8_t rdc) 392 { 393 npi_handle_t handle; 394 npi_status_t rs = NPI_SUCCESS; 395 p_nxge_dma_pt_cfg_t p_dma_cfgp; 396 p_nxge_rdc_grp_t rdc_grp_p; 397 uint8_t actual_rdcgrp, actual_rdc; 398 399 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 400 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 401 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 402 403 handle = NXGE_DEV_NPI_HANDLE(nxgep); 404 405 /* 406 * This has to be rewritten. Do we even allow this anymore? 
407 */ 408 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 409 RDC_MAP_IN(rdc_grp_p->map, rdc); 410 rdc_grp_p->def_rdc = rdc; 411 412 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 413 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 414 415 rs = npi_rxdma_cfg_rdc_table_default_rdc( 416 handle, actual_rdcgrp, actual_rdc); 417 418 if (rs != NPI_SUCCESS) { 419 return (NXGE_ERROR | rs); 420 } 421 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 422 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 423 return (NXGE_OK); 424 } 425 426 nxge_status_t 427 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 428 { 429 npi_handle_t handle; 430 431 uint8_t actual_rdc; 432 npi_status_t rs = NPI_SUCCESS; 433 434 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 435 " ==> nxge_rxdma_cfg_port_default_rdc")); 436 437 handle = NXGE_DEV_NPI_HANDLE(nxgep); 438 actual_rdc = rdc; /* XXX Hack! */ 439 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 440 441 442 if (rs != NPI_SUCCESS) { 443 return (NXGE_ERROR | rs); 444 } 445 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 446 " <== nxge_rxdma_cfg_port_default_rdc")); 447 448 return (NXGE_OK); 449 } 450 451 nxge_status_t 452 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 453 uint16_t pkts) 454 { 455 npi_status_t rs = NPI_SUCCESS; 456 npi_handle_t handle; 457 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 458 " ==> nxge_rxdma_cfg_rcr_threshold")); 459 handle = NXGE_DEV_NPI_HANDLE(nxgep); 460 461 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 462 463 if (rs != NPI_SUCCESS) { 464 return (NXGE_ERROR | rs); 465 } 466 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 467 return (NXGE_OK); 468 } 469 470 nxge_status_t 471 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 472 uint16_t tout, uint8_t enable) 473 { 474 npi_status_t rs = NPI_SUCCESS; 475 npi_handle_t handle; 476 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 477 handle = NXGE_DEV_NPI_HANDLE(nxgep); 478 if (enable == 0) { 479 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 480 } else { 481 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 482 tout); 483 } 484 485 if (rs != NPI_SUCCESS) { 486 return (NXGE_ERROR | rs); 487 } 488 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 489 return (NXGE_OK); 490 } 491 492 nxge_status_t 493 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 494 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 495 { 496 npi_handle_t handle; 497 rdc_desc_cfg_t rdc_desc; 498 p_rcrcfig_b_t cfgb_p; 499 npi_status_t rs = NPI_SUCCESS; 500 501 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 502 handle = NXGE_DEV_NPI_HANDLE(nxgep); 503 /* 504 * Use configuration data composed at init time. 505 * Write to hardware the receive ring configurations. 
506 */ 507 rdc_desc.mbox_enable = 1; 508 rdc_desc.mbox_addr = mbox_p->mbox_addr; 509 NXGE_DEBUG_MSG((nxgep, RX_CTL, 510 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 511 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 512 513 rdc_desc.rbr_len = rbr_p->rbb_max; 514 rdc_desc.rbr_addr = rbr_p->rbr_addr; 515 516 switch (nxgep->rx_bksize_code) { 517 case RBR_BKSIZE_4K: 518 rdc_desc.page_size = SIZE_4KB; 519 break; 520 case RBR_BKSIZE_8K: 521 rdc_desc.page_size = SIZE_8KB; 522 break; 523 case RBR_BKSIZE_16K: 524 rdc_desc.page_size = SIZE_16KB; 525 break; 526 case RBR_BKSIZE_32K: 527 rdc_desc.page_size = SIZE_32KB; 528 break; 529 } 530 531 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 532 rdc_desc.valid0 = 1; 533 534 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 535 rdc_desc.valid1 = 1; 536 537 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 538 rdc_desc.valid2 = 1; 539 540 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 541 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 542 543 rdc_desc.rcr_len = rcr_p->comp_size; 544 rdc_desc.rcr_addr = rcr_p->rcr_addr; 545 546 cfgb_p = &(rcr_p->rcr_cfgb); 547 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 548 /* For now, disable this timeout in a guest domain. */ 549 if (isLDOMguest(nxgep)) { 550 rdc_desc.rcr_timeout = 0; 551 rdc_desc.rcr_timeout_enable = 0; 552 } else { 553 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 554 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 555 } 556 557 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 558 "rbr_len qlen %d pagesize code %d rcr_len %d", 559 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 560 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 561 "size 0 %d size 1 %d size 2 %d", 562 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 563 rbr_p->npi_pkt_buf_size2)); 564 565 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 566 if (rs != NPI_SUCCESS) { 567 return (NXGE_ERROR | rs); 568 } 569 570 /* 571 * Enable the timeout and threshold. 572 */ 573 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 574 rdc_desc.rcr_threshold); 575 if (rs != NPI_SUCCESS) { 576 return (NXGE_ERROR | rs); 577 } 578 579 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 580 rdc_desc.rcr_timeout); 581 if (rs != NPI_SUCCESS) { 582 return (NXGE_ERROR | rs); 583 } 584 585 /* Enable the DMA */ 586 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 587 if (rs != NPI_SUCCESS) { 588 return (NXGE_ERROR | rs); 589 } 590 591 /* Kick the DMA engine. 
*/ 592 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 593 /* Clear the rbr empty bit */ 594 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 595 596 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 597 598 return (NXGE_OK); 599 } 600 601 nxge_status_t 602 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 603 { 604 npi_handle_t handle; 605 npi_status_t rs = NPI_SUCCESS; 606 607 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 608 handle = NXGE_DEV_NPI_HANDLE(nxgep); 609 610 /* disable the DMA */ 611 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 612 if (rs != NPI_SUCCESS) { 613 NXGE_DEBUG_MSG((nxgep, RX_CTL, 614 "<== nxge_disable_rxdma_channel:failed (0x%x)", 615 rs)); 616 return (NXGE_ERROR | rs); 617 } 618 619 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 620 return (NXGE_OK); 621 } 622 623 nxge_status_t 624 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 625 { 626 npi_handle_t handle; 627 nxge_status_t status = NXGE_OK; 628 629 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 630 "<== nxge_init_rxdma_channel_rcrflush")); 631 632 handle = NXGE_DEV_NPI_HANDLE(nxgep); 633 npi_rxdma_rdc_rcr_flush(handle, channel); 634 635 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 636 "<== nxge_init_rxdma_channel_rcrflsh")); 637 return (status); 638 639 } 640 641 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 642 643 #define TO_LEFT -1 644 #define TO_RIGHT 1 645 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 646 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 647 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 648 #define NO_HINT 0xffffffff 649 650 /*ARGSUSED*/ 651 nxge_status_t 652 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 653 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 654 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 655 { 656 int bufsize; 657 uint64_t pktbuf_pp; 658 uint64_t dvma_addr; 659 rxring_info_t *ring_info; 660 int base_side, end_side; 661 int r_index, l_index, anchor_index; 662 int found, search_done; 663 uint32_t offset, chunk_size, block_size, page_size_mask; 664 uint32_t chunk_index, block_index, total_index; 665 int max_iterations, iteration; 666 rxbuf_index_info_t *bufinfo; 667 668 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 669 670 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 671 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 672 pkt_buf_addr_pp, 673 pktbufsz_type)); 674 #if defined(__i386) 675 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 676 #else 677 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 678 #endif 679 680 switch (pktbufsz_type) { 681 case 0: 682 bufsize = rbr_p->pkt_buf_size0; 683 break; 684 case 1: 685 bufsize = rbr_p->pkt_buf_size1; 686 break; 687 case 2: 688 bufsize = rbr_p->pkt_buf_size2; 689 break; 690 case RCR_SINGLE_BLOCK: 691 bufsize = 0; 692 anchor_index = 0; 693 break; 694 default: 695 return (NXGE_ERROR); 696 } 697 698 if (rbr_p->num_blocks == 1) { 699 anchor_index = 0; 700 ring_info = rbr_p->ring_info; 701 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 702 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 703 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 704 "buf_pp $%p btype %d anchor_index %d " 705 "bufinfo $%p", 706 pkt_buf_addr_pp, 707 pktbufsz_type, 708 anchor_index, 709 bufinfo)); 710 711 goto found_index; 712 } 713 714 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 715 "==> nxge_rxbuf_pp_to_vp: " 716 "buf_pp $%p btype %d anchor_index %d", 717 pkt_buf_addr_pp, 718 pktbufsz_type, 719 anchor_index)); 720 721 ring_info = rbr_p->ring_info; 722 found = B_FALSE; 723 bufinfo = (rxbuf_index_info_t 
	    *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would be a single block in use for a given
	 * buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, then reset the hint for this size.
			 */
			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * The search uses a binary search algorithm and
		 * assumes that the information is already sorted
		 * in increasing order:
		 * info[0] < info[1] < info[2] ....
< info[n-1] 773 * where n is the size of the information array 774 */ 775 r_index = rbr_p->num_blocks - 1; 776 l_index = 0; 777 search_done = B_FALSE; 778 anchor_index = MID_INDEX(r_index, l_index); 779 while (search_done == B_FALSE) { 780 if ((r_index == l_index) || 781 (iteration >= max_iterations)) 782 search_done = B_TRUE; 783 end_side = TO_RIGHT; /* to the right */ 784 base_side = TO_LEFT; /* to the left */ 785 /* read the DVMA address information and sort it */ 786 dvma_addr = bufinfo[anchor_index].dvma_addr; 787 chunk_size = bufinfo[anchor_index].buf_size; 788 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 789 "==> nxge_rxbuf_pp_to_vp: (searching)" 790 "buf_pp $%p btype %d " 791 "anchor_index %d chunk_size %d dvmaaddr $%p", 792 pkt_buf_addr_pp, 793 pktbufsz_type, 794 anchor_index, 795 chunk_size, 796 dvma_addr)); 797 798 if (pktbuf_pp >= dvma_addr) 799 base_side = TO_RIGHT; /* to the right */ 800 if (pktbuf_pp < (dvma_addr + chunk_size)) 801 end_side = TO_LEFT; /* to the left */ 802 803 switch (base_side + end_side) { 804 case IN_MIDDLE: 805 /* found */ 806 found = B_TRUE; 807 search_done = B_TRUE; 808 if ((pktbuf_pp + bufsize) < 809 (dvma_addr + chunk_size)) 810 ring_info->hint[pktbufsz_type] = 811 bufinfo[anchor_index].buf_index; 812 break; 813 case BOTH_RIGHT: 814 /* not found: go to the right */ 815 l_index = anchor_index + 1; 816 anchor_index = MID_INDEX(r_index, l_index); 817 break; 818 819 case BOTH_LEFT: 820 /* not found: go to the left */ 821 r_index = anchor_index - 1; 822 anchor_index = MID_INDEX(r_index, l_index); 823 break; 824 default: /* should not come here */ 825 return (NXGE_ERROR); 826 } 827 iteration++; 828 } 829 830 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 831 "==> nxge_rxbuf_pp_to_vp: (search done)" 832 "buf_pp $%p btype %d anchor_index %d", 833 pkt_buf_addr_pp, 834 pktbufsz_type, 835 anchor_index)); 836 } 837 838 if (found == B_FALSE) { 839 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 840 "==> nxge_rxbuf_pp_to_vp: (search failed)" 841 "buf_pp $%p btype %d anchor_index %d", 842 pkt_buf_addr_pp, 843 pktbufsz_type, 844 anchor_index)); 845 return (NXGE_ERROR); 846 } 847 848 found_index: 849 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 850 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 851 "buf_pp $%p btype %d bufsize %d anchor_index %d", 852 pkt_buf_addr_pp, 853 pktbufsz_type, 854 bufsize, 855 anchor_index)); 856 857 /* index of the first block in this chunk */ 858 chunk_index = bufinfo[anchor_index].start_index; 859 dvma_addr = bufinfo[anchor_index].dvma_addr; 860 page_size_mask = ring_info->block_size_mask; 861 862 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 863 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 864 "buf_pp $%p btype %d bufsize %d " 865 "anchor_index %d chunk_index %d dvma $%p", 866 pkt_buf_addr_pp, 867 pktbufsz_type, 868 bufsize, 869 anchor_index, 870 chunk_index, 871 dvma_addr)); 872 873 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 874 block_size = rbr_p->block_size; /* System block(page) size */ 875 876 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 877 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 878 "buf_pp $%p btype %d bufsize %d " 879 "anchor_index %d chunk_index %d dvma $%p " 880 "offset %d block_size %d", 881 pkt_buf_addr_pp, 882 pktbufsz_type, 883 bufsize, 884 anchor_index, 885 chunk_index, 886 dvma_addr, 887 offset, 888 block_size)); 889 890 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 891 892 block_index = (offset / block_size); /* index within chunk */ 893 total_index = chunk_index + block_index; 894 895 896 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 897 "==> nxge_rxbuf_pp_to_vp: " 898 
"total_index %d dvma_addr $%p " 899 "offset %d block_size %d " 900 "block_index %d ", 901 total_index, dvma_addr, 902 offset, block_size, 903 block_index)); 904 #if defined(__i386) 905 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 906 (uint32_t)offset); 907 #else 908 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 909 (uint64_t)offset); 910 #endif 911 912 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 913 "==> nxge_rxbuf_pp_to_vp: " 914 "total_index %d dvma_addr $%p " 915 "offset %d block_size %d " 916 "block_index %d " 917 "*pkt_buf_addr_p $%p", 918 total_index, dvma_addr, 919 offset, block_size, 920 block_index, 921 *pkt_buf_addr_p)); 922 923 924 *msg_index = total_index; 925 *bufoffset = (offset & page_size_mask); 926 927 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 928 "==> nxge_rxbuf_pp_to_vp: get msg index: " 929 "msg_index %d bufoffset_index %d", 930 *msg_index, 931 *bufoffset)); 932 933 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 934 935 return (NXGE_OK); 936 } 937 938 /* 939 * used by quick sort (qsort) function 940 * to perform comparison 941 */ 942 static int 943 nxge_sort_compare(const void *p1, const void *p2) 944 { 945 946 rxbuf_index_info_t *a, *b; 947 948 a = (rxbuf_index_info_t *)p1; 949 b = (rxbuf_index_info_t *)p2; 950 951 if (a->dvma_addr > b->dvma_addr) 952 return (1); 953 if (a->dvma_addr < b->dvma_addr) 954 return (-1); 955 return (0); 956 } 957 958 959 960 /* 961 * grabbed this sort implementation from common/syscall/avl.c 962 * 963 */ 964 /* 965 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 966 * v = Ptr to array/vector of objs 967 * n = # objs in the array 968 * s = size of each obj (must be multiples of a word size) 969 * f = ptr to function to compare two objs 970 * returns (-1 = less than, 0 = equal, 1 = greater than 971 */ 972 void 973 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 974 { 975 int g, i, j, ii; 976 unsigned int *p1, *p2; 977 unsigned int tmp; 978 979 /* No work to do */ 980 if (v == NULL || n <= 1) 981 return; 982 /* Sanity check on arguments */ 983 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 984 ASSERT(s > 0); 985 986 for (g = n / 2; g > 0; g /= 2) { 987 for (i = g; i < n; i++) { 988 for (j = i - g; j >= 0 && 989 (*f)(v + j * s, v + (j + g) * s) == 1; 990 j -= g) { 991 p1 = (unsigned *)(v + j * s); 992 p2 = (unsigned *)(v + (j + g) * s); 993 for (ii = 0; ii < s / 4; ii++) { 994 tmp = *p1; 995 *p1++ = *p2; 996 *p2++ = tmp; 997 } 998 } 999 } 1000 } 1001 } 1002 1003 /* 1004 * Initialize data structures required for rxdma 1005 * buffer dvma->vmem address lookup 1006 */ 1007 /*ARGSUSED*/ 1008 static nxge_status_t 1009 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1010 { 1011 1012 int index; 1013 rxring_info_t *ring_info; 1014 int max_iteration = 0, max_index = 0; 1015 1016 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1017 1018 ring_info = rbrp->ring_info; 1019 ring_info->hint[0] = NO_HINT; 1020 ring_info->hint[1] = NO_HINT; 1021 ring_info->hint[2] = NO_HINT; 1022 max_index = rbrp->num_blocks; 1023 1024 /* read the DVMA address information and sort it */ 1025 /* do init of the information array */ 1026 1027 1028 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1029 " nxge_rxbuf_index_info_init Sort ptrs")); 1030 1031 /* sort the array */ 1032 nxge_ksort((void *)ring_info->buffer, max_index, 1033 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1034 1035 1036 1037 for (index = 0; index < max_index; index++) { 1038 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1039 " 
nxge_rxbuf_index_info_init: sorted chunk %d " 1040 " ioaddr $%p kaddr $%p size %x", 1041 index, ring_info->buffer[index].dvma_addr, 1042 ring_info->buffer[index].kaddr, 1043 ring_info->buffer[index].buf_size)); 1044 } 1045 1046 max_iteration = 0; 1047 while (max_index >= (1ULL << max_iteration)) 1048 max_iteration++; 1049 ring_info->max_iterations = max_iteration + 1; 1050 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1051 " nxge_rxbuf_index_info_init Find max iter %d", 1052 ring_info->max_iterations)); 1053 1054 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1055 return (NXGE_OK); 1056 } 1057 1058 /* ARGSUSED */ 1059 void 1060 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1061 { 1062 #ifdef NXGE_DEBUG 1063 1064 uint32_t bptr; 1065 uint64_t pp; 1066 1067 bptr = entry_p->bits.hdw.pkt_buf_addr; 1068 1069 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1070 "\trcr entry $%p " 1071 "\trcr entry 0x%0llx " 1072 "\trcr entry 0x%08x " 1073 "\trcr entry 0x%08x " 1074 "\tvalue 0x%0llx\n" 1075 "\tmulti = %d\n" 1076 "\tpkt_type = 0x%x\n" 1077 "\tzero_copy = %d\n" 1078 "\tnoport = %d\n" 1079 "\tpromis = %d\n" 1080 "\terror = 0x%04x\n" 1081 "\tdcf_err = 0x%01x\n" 1082 "\tl2_len = %d\n" 1083 "\tpktbufsize = %d\n" 1084 "\tpkt_buf_addr = $%p\n" 1085 "\tpkt_buf_addr (<< 6) = $%p\n", 1086 entry_p, 1087 *(int64_t *)entry_p, 1088 *(int32_t *)entry_p, 1089 *(int32_t *)((char *)entry_p + 32), 1090 entry_p->value, 1091 entry_p->bits.hdw.multi, 1092 entry_p->bits.hdw.pkt_type, 1093 entry_p->bits.hdw.zero_copy, 1094 entry_p->bits.hdw.noport, 1095 entry_p->bits.hdw.promis, 1096 entry_p->bits.hdw.error, 1097 entry_p->bits.hdw.dcf_err, 1098 entry_p->bits.hdw.l2_len, 1099 entry_p->bits.hdw.pktbufsz, 1100 bptr, 1101 entry_p->bits.ldw.pkt_buf_addr)); 1102 1103 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1104 RCR_PKT_BUF_ADDR_SHIFT; 1105 1106 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1107 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1108 #endif 1109 } 1110 1111 void 1112 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1113 { 1114 npi_handle_t handle; 1115 rbr_stat_t rbr_stat; 1116 addr44_t hd_addr; 1117 addr44_t tail_addr; 1118 uint16_t qlen; 1119 1120 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1121 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1122 1123 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1124 1125 /* RBR head */ 1126 hd_addr.addr = 0; 1127 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1128 #if defined(__i386) 1129 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1130 (void *)(uint32_t)hd_addr.addr); 1131 #else 1132 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1133 (void *)hd_addr.addr); 1134 #endif 1135 1136 /* RBR stats */ 1137 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1138 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1139 1140 /* RCR tail */ 1141 tail_addr.addr = 0; 1142 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1143 #if defined(__i386) 1144 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1145 (void *)(uint32_t)tail_addr.addr); 1146 #else 1147 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1148 (void *)tail_addr.addr); 1149 #endif 1150 1151 /* RCR qlen */ 1152 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1153 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1154 1155 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1156 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1157 } 1158 1159 void 1160 nxge_rxdma_stop(p_nxge_t nxgep) 1161 { 1162 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1163 1164 (void) 
	    nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_rx_mac_disable(nxgep);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
}

void
nxge_rxdma_stop_reinit(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));

	(void) nxge_rxdma_stop(nxgep);
	(void) nxge_uninit_rxdma_channels(nxgep);
	(void) nxge_init_rxdma_channels(nxgep);

#ifndef	AXIS_DEBUG_LB
	(void) nxge_xcvr_init(nxgep);
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
#endif
	(void) nxge_rx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
}

nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_status_t status;
	npi_status_t rs;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_mode: mode %d", enable));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_mode: not initialized"));
		return (NXGE_ERROR);
	}

	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_rxdma_hw_mode: "
		    "NULL ring pointer(s)"));
		return (NXGE_ERROR);
	}

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "nxge_rxdma_hw_mode: no channels"));
		return (NXGE_ERROR);
	}

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			rx_rbr_ring_t *ring =
			    nxgep->rx_rbr_rings->rbr_rings[rdc];
			npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
			if (ring) {
				if (enable) {
					NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
					    "==> nxge_rxdma_hw_mode: "
					    "channel %d (enable)", rdc));
					rs = npi_rxdma_cfg_rdc_enable
					    (handle, rdc);
				} else {
					NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
					    "==> nxge_rxdma_hw_mode: "
					    "channel %d (disable)", rdc));
					rs = npi_rxdma_cfg_rdc_disable
					    (handle, rdc);
				}
			}
		}
	}

	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
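	/*
	 * Note: rs reflects only the last RDC that was enabled or
	 * disabled in the loop above; a failure on an earlier channel
	 * is not preserved in the status returned to the caller.
	 */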

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_rxdma_hw_mode: status 0x%x", status));

	return (status);
}

void
nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_rxdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_cfg_rdc_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
}

void
nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_rxdma_disable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_cfg_rdc_disable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
}

void
nxge_hw_start_rx(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));

	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
}

/*ARGSUSED*/
void
nxge_fixup_rxdma_rings(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));

	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_rxdma_rings: "
		    "NULL ring pointer(s)"));
		return;
	}

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "nxge_fixup_rxdma_rings: no channels"));
		return;
	}

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			rx_rbr_ring_t *ring =
			    nxgep->rx_rbr_rings->rbr_rings[rdc];
			if (ring) {
				nxge_rxdma_hw_stop(nxgep, rdc);
				NXGE_DEBUG_MSG((nxgep, RX_CTL,
				    "==> nxge_fixup_rxdma_rings: "
				    "channel %d ring $%p",
				    rdc, ring));
				(void) nxge_rxdma_fixup_channel
				    (nxgep, rdc, rdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}

void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	int i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
	i = nxge_rxdma_get_ring_index(nxgep, channel);
	if (i < 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_fix_channel: no entry found"));
		return;
	}

	nxge_rxdma_fixup_channel(nxgep, channel, i);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
}

void
nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
{
	int ndmas;
	p_rx_rbr_rings_t rx_rbr_rings;
	p_rx_rbr_ring_t *rbr_rings;
	p_rx_rcr_rings_t rx_rcr_rings;
	p_rx_rcr_ring_t *rcr_rings;
	p_rx_mbox_areas_t rx_mbox_areas_p;
	p_rx_mbox_t *rx_mbox_p;
	p_nxge_dma_pool_t dma_buf_poolp;
	p_nxge_dma_pool_t dma_cntl_poolp;
	p_rx_rbr_ring_t rbrp;
	p_rx_rcr_ring_t rcrp;
	p_rx_mbox_t mboxp;
	p_nxge_dma_common_t dmap;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

	(void) nxge_rxdma_stop_channel(nxgep, channel);

	dma_buf_poolp = nxgep->rx_buf_pool_p;
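	/*
	 * Note: both the data buffer pool and the control (descriptor)
	 * pool must already be allocated; the checks below bail out
	 * before touching the rings otherwise.
	 */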
	dma_cntl_poolp = nxgep->rx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: buf not allocated"));
		return;
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: no dma allocated"));
		return;
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

	/* Reinitialize the receive block and completion rings */
	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	status = nxge_rxdma_start_channel(nxgep, channel,
	    rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

/* ARGSUSED */
int
nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
{
	return (channel);
}

p_rx_rbr_ring_t
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_channel_t rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_get_rbr_ring: channel %d", channel));

	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_rxdma_get_rbr_ring: "
		    "NULL ring pointer(s)"));
		return (NULL);
	}

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_get_rbr_ring: no channels"));
		return (NULL);
	}

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			rx_rbr_ring_t *ring =
			    nxgep->rx_rbr_rings->rbr_rings[rdc];
			if (ring) {
				if (channel == ring->rdc) {
					NXGE_DEBUG_MSG((nxgep, RX_CTL,
					    "==> nxge_rxdma_get_rbr_ring: "
					    "channel %d ring $%p", rdc, ring));
					return (ring);
				}
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_rxdma_get_rbr_ring: not found"));

	return (NULL);
}

p_rx_rcr_ring_t
nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_channel_t rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_get_rcr_ring: channel %d", channel));

	if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_rxdma_get_rcr_ring: "
		    "NULL ring pointer(s)"));
		return (NULL);
	}

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_get_rcr_ring: no channels"));
		return (NULL);
	}

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 <<
rdc) & set->owned.map) { 1496 rx_rcr_ring_t *ring = 1497 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1498 if (ring) { 1499 if (channel == ring->rdc) { 1500 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1501 "==> nxge_rxdma_get_rcr_ring: " 1502 "channel %d ring $%p", rdc, ring)); 1503 return (ring); 1504 } 1505 } 1506 } 1507 } 1508 1509 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1510 "<== nxge_rxdma_get_rcr_ring: not found")); 1511 1512 return (NULL); 1513 } 1514 1515 /* 1516 * Static functions start here. 1517 */ 1518 static p_rx_msg_t 1519 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1520 { 1521 p_rx_msg_t nxge_mp = NULL; 1522 p_nxge_dma_common_t dmamsg_p; 1523 uchar_t *buffer; 1524 1525 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1526 if (nxge_mp == NULL) { 1527 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1528 "Allocation of a rx msg failed.")); 1529 goto nxge_allocb_exit; 1530 } 1531 1532 nxge_mp->use_buf_pool = B_FALSE; 1533 if (dmabuf_p) { 1534 nxge_mp->use_buf_pool = B_TRUE; 1535 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1536 *dmamsg_p = *dmabuf_p; 1537 dmamsg_p->nblocks = 1; 1538 dmamsg_p->block_size = size; 1539 dmamsg_p->alength = size; 1540 buffer = (uchar_t *)dmabuf_p->kaddrp; 1541 1542 dmabuf_p->kaddrp = (void *) 1543 ((char *)dmabuf_p->kaddrp + size); 1544 dmabuf_p->ioaddr_pp = (void *) 1545 ((char *)dmabuf_p->ioaddr_pp + size); 1546 dmabuf_p->alength -= size; 1547 dmabuf_p->offset += size; 1548 dmabuf_p->dma_cookie.dmac_laddress += size; 1549 dmabuf_p->dma_cookie.dmac_size -= size; 1550 1551 } else { 1552 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1553 if (buffer == NULL) { 1554 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1555 "Allocation of a receive page failed.")); 1556 goto nxge_allocb_fail1; 1557 } 1558 } 1559 1560 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1561 if (nxge_mp->rx_mblk_p == NULL) { 1562 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1563 goto nxge_allocb_fail2; 1564 } 1565 1566 nxge_mp->buffer = buffer; 1567 nxge_mp->block_size = size; 1568 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1569 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1570 nxge_mp->ref_cnt = 1; 1571 nxge_mp->free = B_TRUE; 1572 nxge_mp->rx_use_bcopy = B_FALSE; 1573 1574 atomic_inc_32(&nxge_mblks_pending); 1575 1576 goto nxge_allocb_exit; 1577 1578 nxge_allocb_fail2: 1579 if (!nxge_mp->use_buf_pool) { 1580 KMEM_FREE(buffer, size); 1581 } 1582 1583 nxge_allocb_fail1: 1584 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1585 nxge_mp = NULL; 1586 1587 nxge_allocb_exit: 1588 return (nxge_mp); 1589 } 1590 1591 p_mblk_t 1592 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1593 { 1594 p_mblk_t mp; 1595 1596 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1597 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1598 "offset = 0x%08X " 1599 "size = 0x%08X", 1600 nxge_mp, offset, size)); 1601 1602 mp = desballoc(&nxge_mp->buffer[offset], size, 1603 0, &nxge_mp->freeb); 1604 if (mp == NULL) { 1605 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1606 goto nxge_dupb_exit; 1607 } 1608 atomic_inc_32(&nxge_mp->ref_cnt); 1609 1610 1611 nxge_dupb_exit: 1612 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1613 nxge_mp)); 1614 return (mp); 1615 } 1616 1617 p_mblk_t 1618 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1619 { 1620 p_mblk_t mp; 1621 uchar_t *dp; 1622 1623 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1624 if (mp == NULL) { 1625 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1626 goto nxge_dupb_bcopy_exit; 1627 } 1628 dp = mp->b_rptr = 
mp->b_rptr + NXGE_RXBUF_EXTRA; 1629 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1630 mp->b_wptr = dp + size; 1631 1632 nxge_dupb_bcopy_exit: 1633 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1634 nxge_mp)); 1635 return (mp); 1636 } 1637 1638 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1639 p_rx_msg_t rx_msg_p); 1640 1641 void 1642 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1643 { 1644 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1645 1646 /* Reuse this buffer */ 1647 rx_msg_p->free = B_FALSE; 1648 rx_msg_p->cur_usage_cnt = 0; 1649 rx_msg_p->max_usage_cnt = 0; 1650 rx_msg_p->pkt_buf_size = 0; 1651 1652 if (rx_rbr_p->rbr_use_bcopy) { 1653 rx_msg_p->rx_use_bcopy = B_FALSE; 1654 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1655 } 1656 1657 /* 1658 * Get the rbr header pointer and its offset index. 1659 */ 1660 MUTEX_ENTER(&rx_rbr_p->post_lock); 1661 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1662 rx_rbr_p->rbr_wrap_mask); 1663 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1664 MUTEX_EXIT(&rx_rbr_p->post_lock); 1665 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1666 rx_rbr_p->rdc, 1); 1667 1668 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1669 "<== nxge_post_page (channel %d post_next_index %d)", 1670 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1671 1672 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1673 } 1674 1675 void 1676 nxge_freeb(p_rx_msg_t rx_msg_p) 1677 { 1678 size_t size; 1679 uchar_t *buffer = NULL; 1680 int ref_cnt; 1681 boolean_t free_state = B_FALSE; 1682 1683 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1684 1685 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1686 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1687 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1688 rx_msg_p, nxge_mblks_pending)); 1689 1690 /* 1691 * First we need to get the free state, then 1692 * atomic decrement the reference count to prevent 1693 * the race condition with the interrupt thread that 1694 * is processing a loaned up buffer block. 1695 */ 1696 free_state = rx_msg_p->free; 1697 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1698 if (!ref_cnt) { 1699 atomic_dec_32(&nxge_mblks_pending); 1700 buffer = rx_msg_p->buffer; 1701 size = rx_msg_p->block_size; 1702 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1703 "will free: rx_msg_p = $%p (block pending %d)", 1704 rx_msg_p, nxge_mblks_pending)); 1705 1706 if (!rx_msg_p->use_buf_pool) { 1707 KMEM_FREE(buffer, size); 1708 } 1709 1710 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1711 1712 if (ring) { 1713 /* 1714 * Decrement the receive buffer ring's reference 1715 * count, too. 1716 */ 1717 atomic_dec_32(&ring->rbr_ref_cnt); 1718 1719 /* 1720 * Free the receive buffer ring, if 1721 * 1. all the receive buffers have been freed 1722 * 2. and we are in the proper state (that is, 1723 * we are not UNMAPPING). 1724 */ 1725 if (ring->rbr_ref_cnt == 0 && 1726 ring->rbr_state == RBR_UNMAPPED) { 1727 /* 1728 * Free receive data buffers, 1729 * buffer index information 1730 * (rxring_info) and 1731 * the message block ring. 
1732 */ 1733 NXGE_DEBUG_MSG((NULL, RX_CTL, 1734 "nxge_freeb:rx_msg_p = $%p " 1735 "(block pending %d) free buffers", 1736 rx_msg_p, nxge_mblks_pending)); 1737 nxge_rxdma_databuf_free(ring); 1738 if (ring->ring_info) { 1739 KMEM_FREE(ring->ring_info, 1740 sizeof (rxring_info_t)); 1741 } 1742 1743 if (ring->rx_msg_ring) { 1744 KMEM_FREE(ring->rx_msg_ring, 1745 ring->tnblocks * 1746 sizeof (p_rx_msg_t)); 1747 } 1748 KMEM_FREE(ring, sizeof (*ring)); 1749 } 1750 } 1751 return; 1752 } 1753 1754 /* 1755 * Repost buffer. 1756 */ 1757 if (free_state && (ref_cnt == 1) && ring) { 1758 NXGE_DEBUG_MSG((NULL, RX_CTL, 1759 "nxge_freeb: post page $%p:", rx_msg_p)); 1760 if (ring->rbr_state == RBR_POSTING) 1761 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1762 } 1763 1764 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1765 } 1766 1767 uint_t 1768 nxge_rx_intr(void *arg1, void *arg2) 1769 { 1770 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1771 p_nxge_t nxgep = (p_nxge_t)arg2; 1772 p_nxge_ldg_t ldgp; 1773 uint8_t channel; 1774 npi_handle_t handle; 1775 rx_dma_ctl_stat_t cs; 1776 1777 #ifdef NXGE_DEBUG 1778 rxdma_cfig1_t cfg; 1779 #endif 1780 uint_t serviced = DDI_INTR_UNCLAIMED; 1781 1782 if (ldvp == NULL) { 1783 NXGE_DEBUG_MSG((NULL, INT_CTL, 1784 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1785 nxgep, ldvp)); 1786 1787 return (DDI_INTR_CLAIMED); 1788 } 1789 1790 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1791 nxgep = ldvp->nxgep; 1792 } 1793 1794 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1795 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1796 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1797 "<== nxge_rx_intr: interface not started or intialized")); 1798 return (DDI_INTR_CLAIMED); 1799 } 1800 1801 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1802 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1803 nxgep, ldvp)); 1804 1805 /* 1806 * This interrupt handler is for a specific 1807 * receive dma channel. 1808 */ 1809 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1810 /* 1811 * Get the control and status for this channel. 1812 */ 1813 channel = ldvp->channel; 1814 ldgp = ldvp->ldgp; 1815 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1816 1817 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1818 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1819 channel, 1820 cs.value, 1821 cs.bits.hdw.rcrto, 1822 cs.bits.hdw.rcrthres)); 1823 1824 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1825 serviced = DDI_INTR_CLAIMED; 1826 1827 /* error events. */ 1828 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1829 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1830 } 1831 1832 nxge_intr_exit: 1833 /* 1834 * Enable the mailbox update interrupt if we want 1835 * to use mailbox. We probably don't need to use 1836 * mailbox as it only saves us one pio read. 1837 * Also write 1 to rcrthres and rcrto to clear 1838 * these two edge triggered bits. 1839 */ 1840 1841 cs.value &= RX_DMA_CTL_STAT_WR1C; 1842 cs.bits.hdw.mex = 1; 1843 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1844 cs.value); 1845 1846 /* 1847 * Rearm this logical group if this is a single device 1848 * group. 
1849 */ 1850 if (ldgp->nldvs == 1) { 1851 ldgimgm_t mgm; 1852 mgm.value = 0; 1853 mgm.bits.ldw.arm = 1; 1854 mgm.bits.ldw.timer = ldgp->ldg_timer; 1855 if (isLDOMguest(nxgep)) { 1856 nxge_hio_ldgimgn(nxgep, ldgp); 1857 } else { 1858 NXGE_REG_WR64(handle, 1859 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1860 mgm.value); 1861 } 1862 } 1863 1864 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1865 serviced)); 1866 return (serviced); 1867 } 1868 1869 /* 1870 * Process the packets received in the specified logical device 1871 * and pass up a chain of message blocks to the upper layer. 1872 */ 1873 static void 1874 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 1875 { 1876 p_mblk_t mp; 1877 p_rx_rcr_ring_t rcrp; 1878 1879 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1880 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 1881 if (rcrp->poll_flag) { 1882 /* It is in the poll mode */ 1883 return; 1884 } 1885 1886 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 1887 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1888 "<== nxge_rx_pkts_vring: no mp")); 1889 return; 1890 } 1891 1892 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1893 mp)); 1894 1895 #ifdef NXGE_DEBUG 1896 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1897 "==> nxge_rx_pkts_vring:calling mac_rx " 1898 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1899 "mac_handle $%p", 1900 mp->b_wptr - mp->b_rptr, 1901 mp, mp->b_cont, mp->b_next, 1902 rcrp, rcrp->rcr_mac_handle)); 1903 1904 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1905 "==> nxge_rx_pkts_vring: dump packets " 1906 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1907 mp, 1908 mp->b_rptr, 1909 mp->b_wptr, 1910 nxge_dump_packet((char *)mp->b_rptr, 1911 mp->b_wptr - mp->b_rptr))); 1912 if (mp->b_cont) { 1913 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1914 "==> nxge_rx_pkts_vring: dump b_cont packets " 1915 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1916 mp->b_cont, 1917 mp->b_cont->b_rptr, 1918 mp->b_cont->b_wptr, 1919 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1920 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1921 } 1922 if (mp->b_next) { 1923 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1924 "==> nxge_rx_pkts_vring: dump next packets " 1925 "(b_rptr $%p): %s", 1926 mp->b_next->b_rptr, 1927 nxge_dump_packet((char *)mp->b_next->b_rptr, 1928 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1929 } 1930 #endif 1931 1932 if (!isLDOMguest(nxgep)) 1933 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1934 #if defined(sun4v) 1935 else { /* isLDOMguest(nxgep) */ 1936 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1937 nxgep->nxge_hw_p->hio; 1938 nx_vio_fp_t *vio = &nhd->hio.vio; 1939 1940 if (vio->cb.vio_net_rx_cb) { 1941 (*vio->cb.vio_net_rx_cb) 1942 (nxgep->hio_vr->vhp, mp); 1943 } 1944 } 1945 #endif 1946 } 1947 1948 1949 /* 1950 * This routine is the main packet receive processing function. 1951 * It gets the packet type, error code, and buffer related 1952 * information from the receive completion entry. 1953 * How many completion entries to process is based on the number of packets 1954 * queued by the hardware, a hardware maintained tail pointer 1955 * and a configurable receive packet count. 1956 * 1957 * A chain of message blocks will be created as result of processing 1958 * the completion entries. This chain of message blocks will be returned and 1959 * a hardware control status register will be updated with the number of 1960 * packets were removed from the hardware queue. 
1961 * 1962 */ 1963 static mblk_t * 1964 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 1965 int bytes_to_pickup) 1966 { 1967 npi_handle_t handle; 1968 uint8_t channel; 1969 uint32_t comp_rd_index; 1970 p_rcr_entry_t rcr_desc_rd_head_p; 1971 p_rcr_entry_t rcr_desc_rd_head_pp; 1972 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1973 uint16_t qlen, nrcr_read, npkt_read; 1974 uint32_t qlen_hw; 1975 boolean_t multi; 1976 rcrcfig_b_t rcr_cfg_b; 1977 int totallen = 0; 1978 #if defined(_BIG_ENDIAN) 1979 npi_status_t rs = NPI_SUCCESS; 1980 #endif 1981 1982 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 1983 "channel %d", rcr_p->rdc)); 1984 1985 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1986 return (NULL); 1987 } 1988 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1989 channel = rcr_p->rdc; 1990 1991 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1992 "==> nxge_rx_pkts: START: rcr channel %d " 1993 "head_p $%p head_pp $%p index %d ", 1994 channel, rcr_p->rcr_desc_rd_head_p, 1995 rcr_p->rcr_desc_rd_head_pp, 1996 rcr_p->comp_rd_index)); 1997 1998 1999 #if !defined(_BIG_ENDIAN) 2000 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2001 #else 2002 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2003 if (rs != NPI_SUCCESS) { 2004 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2005 "channel %d, get qlen failed 0x%08x", 2006 channel, rs)); 2007 return (NULL); 2008 } 2009 #endif 2010 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2011 "qlen %d", channel, qlen)); 2012 2013 2014 2015 if (!qlen) { 2016 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2017 "==> nxge_rx_pkts:rcr channel %d " 2018 "qlen %d (no pkts)", channel, qlen)); 2019 2020 return (NULL); 2021 } 2022 2023 comp_rd_index = rcr_p->comp_rd_index; 2024 2025 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2026 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2027 nrcr_read = npkt_read = 0; 2028 2029 /* 2030 * Number of packets queued 2031 * (The jumbo or multi packet will be counted as only one 2032 * packets and it may take up more than one completion entry). 2033 */ 2034 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2035 qlen : nxge_max_rx_pkts; 2036 head_mp = NULL; 2037 tail_mp = &head_mp; 2038 nmp = mp_cont = NULL; 2039 multi = B_FALSE; 2040 2041 while (qlen_hw) { 2042 2043 #ifdef NXGE_DEBUG 2044 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2045 #endif 2046 /* 2047 * Process one completion ring entry. 
2048 */ 2049 nxge_receive_packet(nxgep, 2050 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2051 2052 /* 2053 * message chaining modes 2054 */ 2055 if (nmp) { 2056 nmp->b_next = NULL; 2057 if (!multi && !mp_cont) { /* frame fits a partition */ 2058 *tail_mp = nmp; 2059 tail_mp = &nmp->b_next; 2060 totallen += MBLKL(nmp); 2061 nmp = NULL; 2062 } else if (multi && !mp_cont) { /* first segment */ 2063 *tail_mp = nmp; 2064 tail_mp = &nmp->b_cont; 2065 totallen += MBLKL(nmp); 2066 } else if (multi && mp_cont) { /* mid of multi segs */ 2067 *tail_mp = mp_cont; 2068 tail_mp = &mp_cont->b_cont; 2069 totallen += MBLKL(mp_cont); 2070 } else if (!multi && mp_cont) { /* last segment */ 2071 *tail_mp = mp_cont; 2072 tail_mp = &nmp->b_next; 2073 totallen += MBLKL(mp_cont); 2074 nmp = NULL; 2075 } 2076 } 2077 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2078 "==> nxge_rx_pkts: loop: rcr channel %d " 2079 "before updating: multi %d " 2080 "nrcr_read %d " 2081 "npk read %d " 2082 "head_pp $%p index %d ", 2083 channel, 2084 multi, 2085 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2086 comp_rd_index)); 2087 2088 if (!multi) { 2089 qlen_hw--; 2090 npkt_read++; 2091 } 2092 2093 /* 2094 * Update the next read entry. 2095 */ 2096 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2097 rcr_p->comp_wrap_mask); 2098 2099 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2100 rcr_p->rcr_desc_first_p, 2101 rcr_p->rcr_desc_last_p); 2102 2103 nrcr_read++; 2104 2105 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2106 "<== nxge_rx_pkts: (SAM, process one packet) " 2107 "nrcr_read %d", 2108 nrcr_read)); 2109 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2110 "==> nxge_rx_pkts: loop: rcr channel %d " 2111 "multi %d " 2112 "nrcr_read %d " 2113 "npk read %d " 2114 "head_pp $%p index %d ", 2115 channel, 2116 multi, 2117 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2118 comp_rd_index)); 2119 2120 if ((bytes_to_pickup != -1) && 2121 (totallen >= bytes_to_pickup)) { 2122 break; 2123 } 2124 } 2125 2126 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2127 rcr_p->comp_rd_index = comp_rd_index; 2128 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2129 2130 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2131 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2132 rcr_p->intr_timeout = nxgep->intr_timeout; 2133 rcr_p->intr_threshold = nxgep->intr_threshold; 2134 rcr_cfg_b.value = 0x0ULL; 2135 if (rcr_p->intr_timeout) 2136 rcr_cfg_b.bits.ldw.entout = 1; 2137 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2138 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2139 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2140 channel, rcr_cfg_b.value); 2141 } 2142 2143 cs.bits.ldw.pktread = npkt_read; 2144 cs.bits.ldw.ptrread = nrcr_read; 2145 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2146 channel, cs.value); 2147 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2148 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2149 "head_pp $%p index %016llx ", 2150 channel, 2151 rcr_p->rcr_desc_rd_head_pp, 2152 rcr_p->comp_rd_index)); 2153 /* 2154 * Update RCR buffer pointer read and number of packets 2155 * read. 
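 * The pktread and ptrread fields written to RX_DMA_CTL_STAT above
 * tell the hardware how many packets and completion entries software
 * has consumed in this pass.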
2156 */ 2157 2158 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2159 return (head_mp); 2160 } 2161 2162 void 2163 nxge_receive_packet(p_nxge_t nxgep, 2164 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2165 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2166 { 2167 p_mblk_t nmp = NULL; 2168 uint64_t multi; 2169 uint64_t dcf_err; 2170 uint8_t channel; 2171 2172 boolean_t first_entry = B_TRUE; 2173 boolean_t is_tcp_udp = B_FALSE; 2174 boolean_t buffer_free = B_FALSE; 2175 boolean_t error_send_up = B_FALSE; 2176 uint8_t error_type; 2177 uint16_t l2_len; 2178 uint16_t skip_len; 2179 uint8_t pktbufsz_type; 2180 uint64_t rcr_entry; 2181 uint64_t *pkt_buf_addr_pp; 2182 uint64_t *pkt_buf_addr_p; 2183 uint32_t buf_offset; 2184 uint32_t bsize; 2185 uint32_t error_disp_cnt; 2186 uint32_t msg_index; 2187 p_rx_rbr_ring_t rx_rbr_p; 2188 p_rx_msg_t *rx_msg_ring_p; 2189 p_rx_msg_t rx_msg_p; 2190 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2191 nxge_status_t status = NXGE_OK; 2192 boolean_t is_valid = B_FALSE; 2193 p_nxge_rx_ring_stats_t rdc_stats; 2194 uint32_t bytes_read; 2195 uint64_t pkt_type; 2196 uint64_t frag; 2197 boolean_t pkt_too_long_err = B_FALSE; 2198 #ifdef NXGE_DEBUG 2199 int dump_len; 2200 #endif 2201 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2202 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2203 2204 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2205 2206 multi = (rcr_entry & RCR_MULTI_MASK); 2207 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2208 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2209 2210 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2211 frag = (rcr_entry & RCR_FRAG_MASK); 2212 2213 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2214 2215 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2216 RCR_PKTBUFSZ_SHIFT); 2217 #if defined(__i386) 2218 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2219 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2220 #else 2221 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2222 RCR_PKT_BUF_ADDR_SHIFT); 2223 #endif 2224 2225 channel = rcr_p->rdc; 2226 2227 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2228 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2229 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2230 "error_type 0x%x pkt_type 0x%x " 2231 "pktbufsz_type %d ", 2232 rcr_desc_rd_head_p, 2233 rcr_entry, pkt_buf_addr_pp, l2_len, 2234 multi, 2235 error_type, 2236 pkt_type, 2237 pktbufsz_type)); 2238 2239 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2240 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2241 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2242 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2243 rcr_entry, pkt_buf_addr_pp, l2_len, 2244 multi, 2245 error_type, 2246 pkt_type)); 2247 2248 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2249 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2250 "full pkt_buf_addr_pp $%p l2_len %d", 2251 rcr_entry, pkt_buf_addr_pp, l2_len)); 2252 2253 /* get the stats ptr */ 2254 rdc_stats = rcr_p->rdc_stats; 2255 2256 if (!l2_len) { 2257 2258 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2259 "<== nxge_receive_packet: failed: l2 length is 0.")); 2260 return; 2261 } 2262 2263 /* 2264 * Sofware workaround for BMAC hardware limitation that allows 2265 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2266 * instead of 0x2400 for jumbo. 2267 */ 2268 if (l2_len > nxgep->mac.maxframesize) { 2269 pkt_too_long_err = B_TRUE; 2270 } 2271 2272 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
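 * Subtract the 4-byte FCS here so that only the Ethernet payload
 * length is carried forward in l2_len.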
*/ 2273 l2_len -= ETHERFCSL; 2274 2275 /* shift 6 bits to get the full io address */ 2276 #if defined(__i386) 2277 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2278 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2279 #else 2280 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2281 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2282 #endif 2283 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2284 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2285 "full pkt_buf_addr_pp $%p l2_len %d", 2286 rcr_entry, pkt_buf_addr_pp, l2_len)); 2287 2288 rx_rbr_p = rcr_p->rx_rbr_p; 2289 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2290 2291 if (first_entry) { 2292 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2293 RXDMA_HDR_SIZE_DEFAULT); 2294 2295 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2296 "==> nxge_receive_packet: first entry 0x%016llx " 2297 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2298 rcr_entry, pkt_buf_addr_pp, l2_len, 2299 hdr_size)); 2300 } 2301 2302 MUTEX_ENTER(&rcr_p->lock); 2303 MUTEX_ENTER(&rx_rbr_p->lock); 2304 2305 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2306 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2307 "full pkt_buf_addr_pp $%p l2_len %d", 2308 rcr_entry, pkt_buf_addr_pp, l2_len)); 2309 2310 /* 2311 * Packet buffer address in the completion entry points 2312 * to the starting buffer address (offset 0). 2313 * Use the starting buffer address to locate the corresponding 2314 * kernel address. 2315 */ 2316 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2317 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2318 &buf_offset, 2319 &msg_index); 2320 2321 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2322 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2323 "full pkt_buf_addr_pp $%p l2_len %d", 2324 rcr_entry, pkt_buf_addr_pp, l2_len)); 2325 2326 if (status != NXGE_OK) { 2327 MUTEX_EXIT(&rx_rbr_p->lock); 2328 MUTEX_EXIT(&rcr_p->lock); 2329 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2330 "<== nxge_receive_packet: found vaddr failed %d", 2331 status)); 2332 return; 2333 } 2334 2335 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2336 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2337 "full pkt_buf_addr_pp $%p l2_len %d", 2338 rcr_entry, pkt_buf_addr_pp, l2_len)); 2339 2340 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2341 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2342 "full pkt_buf_addr_pp $%p l2_len %d", 2343 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2344 2345 rx_msg_p = rx_msg_ring_p[msg_index]; 2346 2347 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2348 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2349 "full pkt_buf_addr_pp $%p l2_len %d", 2350 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2351 2352 switch (pktbufsz_type) { 2353 case RCR_PKTBUFSZ_0: 2354 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2355 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2356 "==> nxge_receive_packet: 0 buf %d", bsize)); 2357 break; 2358 case RCR_PKTBUFSZ_1: 2359 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2360 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2361 "==> nxge_receive_packet: 1 buf %d", bsize)); 2362 break; 2363 case RCR_PKTBUFSZ_2: 2364 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2365 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2366 "==> nxge_receive_packet: 2 buf %d", bsize)); 2367 break; 2368 case RCR_SINGLE_BLOCK: 2369 bsize = rx_msg_p->block_size; 2370 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2371 "==> nxge_receive_packet: single %d", bsize)); 2372 2373 break; 2374 default: 2375 MUTEX_EXIT(&rx_rbr_p->lock); 2376 MUTEX_EXIT(&rcr_p->lock); 2377 return; 2378 } 2379 2380 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2381 (buf_offset + sw_offset_bytes), 2382 (hdr_size + l2_len), 2383 DDI_DMA_SYNC_FORCPU); 2384 2385 
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2386 "==> nxge_receive_packet: after first dump:usage count")); 2387 2388 if (rx_msg_p->cur_usage_cnt == 0) { 2389 if (rx_rbr_p->rbr_use_bcopy) { 2390 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2391 if (rx_rbr_p->rbr_consumed < 2392 rx_rbr_p->rbr_threshold_hi) { 2393 if (rx_rbr_p->rbr_threshold_lo == 0 || 2394 ((rx_rbr_p->rbr_consumed >= 2395 rx_rbr_p->rbr_threshold_lo) && 2396 (rx_rbr_p->rbr_bufsize_type >= 2397 pktbufsz_type))) { 2398 rx_msg_p->rx_use_bcopy = B_TRUE; 2399 } 2400 } else { 2401 rx_msg_p->rx_use_bcopy = B_TRUE; 2402 } 2403 } 2404 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2405 "==> nxge_receive_packet: buf %d (new block) ", 2406 bsize)); 2407 2408 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2409 rx_msg_p->pkt_buf_size = bsize; 2410 rx_msg_p->cur_usage_cnt = 1; 2411 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2412 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2413 "==> nxge_receive_packet: buf %d " 2414 "(single block) ", 2415 bsize)); 2416 /* 2417 * Buffer can be reused once the free function 2418 * is called. 2419 */ 2420 rx_msg_p->max_usage_cnt = 1; 2421 buffer_free = B_TRUE; 2422 } else { 2423 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2424 if (rx_msg_p->max_usage_cnt == 1) { 2425 buffer_free = B_TRUE; 2426 } 2427 } 2428 } else { 2429 rx_msg_p->cur_usage_cnt++; 2430 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2431 buffer_free = B_TRUE; 2432 } 2433 } 2434 2435 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2436 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2437 msg_index, l2_len, 2438 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2439 2440 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2441 rdc_stats->ierrors++; 2442 if (dcf_err) { 2443 rdc_stats->dcf_err++; 2444 #ifdef NXGE_DEBUG 2445 if (!rdc_stats->dcf_err) { 2446 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2447 "nxge_receive_packet: channel %d dcf_err rcr" 2448 " 0x%llx", channel, rcr_entry)); 2449 } 2450 #endif 2451 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2452 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2453 } else if (pkt_too_long_err) { 2454 rdc_stats->pkt_too_long_err++; 2455 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2456 " channel %d packet length [%d] > " 2457 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2458 nxgep->mac.maxframesize)); 2459 } else { 2460 /* Update error stats */ 2461 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2462 rdc_stats->errlog.compl_err_type = error_type; 2463 2464 switch (error_type) { 2465 /* 2466 * Do not send FMA ereport for RCR_L2_ERROR and 2467 * RCR_L4_CSUM_ERROR because most likely they indicate 2468 * back pressure rather than HW failures. 2469 */ 2470 case RCR_L2_ERROR: 2471 rdc_stats->l2_err++; 2472 if (rdc_stats->l2_err < 2473 error_disp_cnt) { 2474 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2475 " nxge_receive_packet:" 2476 " channel %d RCR L2_ERROR", 2477 channel)); 2478 } 2479 break; 2480 case RCR_L4_CSUM_ERROR: 2481 error_send_up = B_TRUE; 2482 rdc_stats->l4_cksum_err++; 2483 if (rdc_stats->l4_cksum_err < 2484 error_disp_cnt) { 2485 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2486 " nxge_receive_packet:" 2487 " channel %d" 2488 " RCR L4_CSUM_ERROR", channel)); 2489 } 2490 break; 2491 /* 2492 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2493 * RCR_ZCP_SOFT_ERROR because they reflect the same 2494 * FFLP and ZCP errors that have been reported by 2495 * nxge_fflp.c and nxge_zcp.c. 
2496 */ 2497 case RCR_FFLP_SOFT_ERROR: 2498 error_send_up = B_TRUE; 2499 rdc_stats->fflp_soft_err++; 2500 if (rdc_stats->fflp_soft_err < 2501 error_disp_cnt) { 2502 NXGE_ERROR_MSG((nxgep, 2503 NXGE_ERR_CTL, 2504 " nxge_receive_packet:" 2505 " channel %d" 2506 " RCR FFLP_SOFT_ERROR", channel)); 2507 } 2508 break; 2509 case RCR_ZCP_SOFT_ERROR: 2510 error_send_up = B_TRUE; 2511 rdc_stats->zcp_soft_err++; 2512 if (rdc_stats->zcp_soft_err < 2513 error_disp_cnt) 2514 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2515 " nxge_receive_packet: Channel %d" 2516 " RCR ZCP_SOFT_ERROR", channel)); 2517 break; 2518 default: 2519 rdc_stats->rcr_unknown_err++; 2520 if (rdc_stats->rcr_unknown_err 2521 < error_disp_cnt) { 2522 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2523 " nxge_receive_packet: Channel %d" 2524 " RCR entry 0x%llx error 0x%x", 2525 channel, rcr_entry, error_type)); 2526 } 2527 break; 2528 } 2529 } 2530 2531 /* 2532 * Update and repost buffer block if max usage 2533 * count is reached. 2534 */ 2535 if (error_send_up == B_FALSE) { 2536 atomic_inc_32(&rx_msg_p->ref_cnt); 2537 if (buffer_free == B_TRUE) { 2538 rx_msg_p->free = B_TRUE; 2539 } 2540 2541 MUTEX_EXIT(&rx_rbr_p->lock); 2542 MUTEX_EXIT(&rcr_p->lock); 2543 nxge_freeb(rx_msg_p); 2544 return; 2545 } 2546 } 2547 2548 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2549 "==> nxge_receive_packet: DMA sync second ")); 2550 2551 bytes_read = rcr_p->rcvd_pkt_bytes; 2552 skip_len = sw_offset_bytes + hdr_size; 2553 if (!rx_msg_p->rx_use_bcopy) { 2554 /* 2555 * For loaned-up buffers, the driver reference count is 2556 * incremented first and then the free state is set. 2557 */ 2558 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2559 if (first_entry) { 2560 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2561 if (l2_len < bsize - skip_len) { 2562 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2563 } else { 2564 nmp->b_wptr = &nmp->b_rptr[bsize 2565 - skip_len]; 2566 } 2567 } else { 2568 if (l2_len - bytes_read < bsize) { 2569 nmp->b_wptr = 2570 &nmp->b_rptr[l2_len - bytes_read]; 2571 } else { 2572 nmp->b_wptr = &nmp->b_rptr[bsize]; 2573 } 2574 } 2575 } 2576 } else { 2577 if (first_entry) { 2578 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2579 l2_len < bsize - skip_len ? 2580 l2_len : bsize - skip_len); 2581 } else { 2582 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2583 l2_len - bytes_read < bsize ? 2584 l2_len - bytes_read : bsize); 2585 } 2586 } 2587 if (nmp != NULL) { 2588 if (first_entry) { 2589 /* 2590 * Jumbo packets may be received with more than one 2591 * buffer; increment ipackets for the first entry only. 2592 */ 2593 rdc_stats->ipackets++; 2594 2595 /* Update ibytes for kstat. */ 2596 rdc_stats->ibytes += skip_len 2597 + l2_len < bsize ? l2_len : bsize; 2598 /* 2599 * Update the number of bytes read so far for the 2600 * current frame. 2601 */ 2602 bytes_read = nmp->b_wptr - nmp->b_rptr; 2603 } else { 2604 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
l2_len - bytes_read : bsize; 2606 bytes_read += nmp->b_wptr - nmp->b_rptr; 2607 } 2608 2609 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2610 "==> nxge_receive_packet after dupb: " 2611 "rbr consumed %d " 2612 "pktbufsz_type %d " 2613 "nmp $%p rptr $%p wptr $%p " 2614 "buf_offset %d bsize %d l2_len %d skip_len %d", 2615 rx_rbr_p->rbr_consumed, 2616 pktbufsz_type, 2617 nmp, nmp->b_rptr, nmp->b_wptr, 2618 buf_offset, bsize, l2_len, skip_len)); 2619 } else { 2620 cmn_err(CE_WARN, "!nxge_receive_packet: " 2621 "update stats (error)"); 2622 atomic_inc_32(&rx_msg_p->ref_cnt); 2623 if (buffer_free == B_TRUE) { 2624 rx_msg_p->free = B_TRUE; 2625 } 2626 MUTEX_EXIT(&rx_rbr_p->lock); 2627 MUTEX_EXIT(&rcr_p->lock); 2628 nxge_freeb(rx_msg_p); 2629 return; 2630 } 2631 2632 if (buffer_free == B_TRUE) { 2633 rx_msg_p->free = B_TRUE; 2634 } 2635 2636 is_valid = (nmp != NULL); 2637 2638 rcr_p->rcvd_pkt_bytes = bytes_read; 2639 2640 MUTEX_EXIT(&rx_rbr_p->lock); 2641 MUTEX_EXIT(&rcr_p->lock); 2642 2643 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2644 atomic_inc_32(&rx_msg_p->ref_cnt); 2645 nxge_freeb(rx_msg_p); 2646 } 2647 2648 if (is_valid) { 2649 nmp->b_cont = NULL; 2650 if (first_entry) { 2651 *mp = nmp; 2652 *mp_cont = NULL; 2653 } else { 2654 *mp_cont = nmp; 2655 } 2656 } 2657 2658 /* 2659 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2660 * If a packet is not fragmented and no error bit is set, then 2661 * the L4 checksum is OK. 2662 */ 2663 2664 if (is_valid && !multi) { 2665 /* 2666 * Update hardware checksumming. 2667 * 2668 * If the checksum flag nxge_cksum_offload 2669 * is 1, TCP and UDP packets can be sent 2670 * up with a good checksum. If the checksum flag 2671 * is set to 0, checksum reporting will apply to 2672 * TCP packets only (workaround for a hardware bug). 2673 * If the checksum flag nxge_cksum_offload is 2674 * greater than 1, hardware checksum results are 2675 * not reported for either TCP or UDP packets. 2676 */ 2677 if (nxge_cksum_offload == 1) { 2678 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2679 pkt_type == RCR_PKT_IS_UDP) ? 2680 B_TRUE: B_FALSE); 2681 } else if (!nxge_cksum_offload) { 2682 /* TCP checksum only. */ 2683 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2684 B_TRUE: B_FALSE); 2685 } 2686 2687 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2688 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2689 is_valid, multi, is_tcp_udp, frag, error_type)); 2690 2691 if (is_tcp_udp && !frag && !error_type) { 2692 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2693 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2694 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2695 "==> nxge_receive_packet: Full tcp/udp cksum " 2696 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2697 "error %d", 2698 is_valid, multi, is_tcp_udp, frag, error_type)); 2699 } 2700 } 2701 2702 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2703 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2704 2705 *multi_p = (multi == RCR_MULTI_MASK); 2706 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2707 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2708 *multi_p, nmp, *mp, *mp_cont)); 2709 } 2710 2711 /*ARGSUSED*/ 2712 static nxge_status_t 2713 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2714 { 2715 p_nxge_rx_ring_stats_t rdc_stats; 2716 npi_handle_t handle; 2717 npi_status_t rs; 2718 boolean_t rxchan_fatal = B_FALSE; 2719 boolean_t rxport_fatal = B_FALSE; 2720 uint8_t portn; 2721 nxge_status_t status = NXGE_OK; 2722 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2723 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2724 2725 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2726 portn = nxgep->mac.portnum; 2727 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2728 2729 if (cs.bits.hdw.rbr_tmout) { 2730 rdc_stats->rx_rbr_tmout++; 2731 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2732 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2733 rxchan_fatal = B_TRUE; 2734 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2735 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2736 } 2737 if (cs.bits.hdw.rsp_cnt_err) { 2738 rdc_stats->rsp_cnt_err++; 2739 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2740 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2741 rxchan_fatal = B_TRUE; 2742 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2743 "==> nxge_rx_err_evnts(channel %d): " 2744 "rsp_cnt_err", channel)); 2745 } 2746 if (cs.bits.hdw.byte_en_bus) { 2747 rdc_stats->byte_en_bus++; 2748 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2749 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2750 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2751 "==> nxge_rx_err_evnts(channel %d): " 2752 "fatal error: byte_en_bus", channel)); 2753 rxchan_fatal = B_TRUE; 2754 } 2755 if (cs.bits.hdw.rsp_dat_err) { 2756 rdc_stats->rsp_dat_err++; 2757 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2758 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2759 rxchan_fatal = B_TRUE; 2760 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2761 "==> nxge_rx_err_evnts(channel %d): " 2762 "fatal error: rsp_dat_err", channel)); 2763 } 2764 if (cs.bits.hdw.rcr_ack_err) { 2765 rdc_stats->rcr_ack_err++; 2766 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2767 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2768 rxchan_fatal = B_TRUE; 2769 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2770 "==> nxge_rx_err_evnts(channel %d): " 2771 "fatal error: rcr_ack_err", channel)); 2772 } 2773 if (cs.bits.hdw.dc_fifo_err) { 2774 rdc_stats->dc_fifo_err++; 2775 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2776 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2777 /* This is not a fatal error! 
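 * It is treated as a port-level condition instead: rxport_fatal is
 * set so that IPP recovery is attempted for the whole port below,
 * rather than resetting only this channel.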
*/ 2778 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2779 "==> nxge_rx_err_evnts(channel %d): " 2780 "dc_fifo_err", channel)); 2781 rxport_fatal = B_TRUE; 2782 } 2783 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2784 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2785 &rdc_stats->errlog.pre_par, 2786 &rdc_stats->errlog.sha_par)) 2787 != NPI_SUCCESS) { 2788 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2789 "==> nxge_rx_err_evnts(channel %d): " 2790 "rcr_sha_par: get perr", channel)); 2791 return (NXGE_ERROR | rs); 2792 } 2793 if (cs.bits.hdw.rcr_sha_par) { 2794 rdc_stats->rcr_sha_par++; 2795 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2796 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2797 rxchan_fatal = B_TRUE; 2798 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2799 "==> nxge_rx_err_evnts(channel %d): " 2800 "fatal error: rcr_sha_par", channel)); 2801 } 2802 if (cs.bits.hdw.rbr_pre_par) { 2803 rdc_stats->rbr_pre_par++; 2804 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2805 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2806 rxchan_fatal = B_TRUE; 2807 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2808 "==> nxge_rx_err_evnts(channel %d): " 2809 "fatal error: rbr_pre_par", channel)); 2810 } 2811 } 2812 /* 2813 * The Following 4 status bits are for information, the system 2814 * is running fine. There is no need to send FMA ereports or 2815 * log messages. 2816 */ 2817 if (cs.bits.hdw.port_drop_pkt) { 2818 rdc_stats->port_drop_pkt++; 2819 } 2820 if (cs.bits.hdw.wred_drop) { 2821 rdc_stats->wred_drop++; 2822 } 2823 if (cs.bits.hdw.rbr_pre_empty) { 2824 rdc_stats->rbr_pre_empty++; 2825 } 2826 if (cs.bits.hdw.rcr_shadow_full) { 2827 rdc_stats->rcr_shadow_full++; 2828 } 2829 if (cs.bits.hdw.config_err) { 2830 rdc_stats->config_err++; 2831 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2832 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2833 rxchan_fatal = B_TRUE; 2834 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2835 "==> nxge_rx_err_evnts(channel %d): " 2836 "config error", channel)); 2837 } 2838 if (cs.bits.hdw.rcrincon) { 2839 rdc_stats->rcrincon++; 2840 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2841 NXGE_FM_EREPORT_RDMC_RCRINCON); 2842 rxchan_fatal = B_TRUE; 2843 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2844 "==> nxge_rx_err_evnts(channel %d): " 2845 "fatal error: rcrincon error", channel)); 2846 } 2847 if (cs.bits.hdw.rcrfull) { 2848 rdc_stats->rcrfull++; 2849 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2850 NXGE_FM_EREPORT_RDMC_RCRFULL); 2851 rxchan_fatal = B_TRUE; 2852 if (rdc_stats->rcrfull < error_disp_cnt) 2853 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2854 "==> nxge_rx_err_evnts(channel %d): " 2855 "fatal error: rcrfull error", channel)); 2856 } 2857 if (cs.bits.hdw.rbr_empty) { 2858 /* 2859 * This bit is for information, there is no need 2860 * send FMA ereport or log a message. 
2861 */ 2862 rdc_stats->rbr_empty++; 2863 } 2864 if (cs.bits.hdw.rbrfull) { 2865 rdc_stats->rbrfull++; 2866 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2867 NXGE_FM_EREPORT_RDMC_RBRFULL); 2868 rxchan_fatal = B_TRUE; 2869 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2870 "==> nxge_rx_err_evnts(channel %d): " 2871 "fatal error: rbr_full error", channel)); 2872 } 2873 if (cs.bits.hdw.rbrlogpage) { 2874 rdc_stats->rbrlogpage++; 2875 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2876 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2877 rxchan_fatal = B_TRUE; 2878 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2879 "==> nxge_rx_err_evnts(channel %d): " 2880 "fatal error: rbr logical page error", channel)); 2881 } 2882 if (cs.bits.hdw.cfiglogpage) { 2883 rdc_stats->cfiglogpage++; 2884 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2885 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2886 rxchan_fatal = B_TRUE; 2887 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2888 "==> nxge_rx_err_evnts(channel %d): " 2889 "fatal error: cfig logical page error", channel)); 2890 } 2891 2892 if (rxport_fatal) { 2893 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2894 " nxge_rx_err_evnts: fatal error on Port #%d\n", 2895 portn)); 2896 if (isLDOMguest(nxgep)) { 2897 status = NXGE_ERROR; 2898 } else { 2899 status = nxge_ipp_fatal_err_recover(nxgep); 2900 if (status == NXGE_OK) { 2901 FM_SERVICE_RESTORED(nxgep); 2902 } 2903 } 2904 } 2905 2906 if (rxchan_fatal) { 2907 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2908 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 2909 channel)); 2910 if (isLDOMguest(nxgep)) { 2911 status = NXGE_ERROR; 2912 } else { 2913 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2914 if (status == NXGE_OK) { 2915 FM_SERVICE_RESTORED(nxgep); 2916 } 2917 } 2918 } 2919 2920 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2921 2922 return (status); 2923 } 2924 2925 /* 2926 * nxge_rdc_hvio_setup 2927 * 2928 * This code appears to setup some Hypervisor variables. 2929 * 2930 * Arguments: 2931 * nxgep 2932 * channel 2933 * 2934 * Notes: 2935 * What does NIU_LP_WORKAROUND mean? 
2936 * 2937 * NPI/NXGE function calls: 2938 * na 2939 * 2940 * Context: 2941 * Any domain 2942 */ 2943 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2944 static void 2945 nxge_rdc_hvio_setup( 2946 nxge_t *nxgep, int channel) 2947 { 2948 nxge_dma_common_t *dma_common; 2949 nxge_dma_common_t *dma_control; 2950 rx_rbr_ring_t *ring; 2951 2952 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 2953 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2954 2955 ring->hv_set = B_FALSE; 2956 2957 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 2958 dma_common->orig_ioaddr_pp; 2959 ring->hv_rx_buf_ioaddr_size = (uint64_t) 2960 dma_common->orig_alength; 2961 2962 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2963 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 2964 channel, ring->hv_rx_buf_base_ioaddr_pp, 2965 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 2966 dma_common->orig_alength, dma_common->orig_alength)); 2967 2968 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2969 2970 ring->hv_rx_cntl_base_ioaddr_pp = 2971 (uint64_t)dma_control->orig_ioaddr_pp; 2972 ring->hv_rx_cntl_ioaddr_size = 2973 (uint64_t)dma_control->orig_alength; 2974 2975 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2976 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 2977 channel, ring->hv_rx_cntl_base_ioaddr_pp, 2978 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 2979 dma_control->orig_alength, dma_control->orig_alength)); 2980 } 2981 #endif 2982 2983 /* 2984 * nxge_map_rxdma 2985 * 2986 * Map an RDC into our kernel space. 2987 * 2988 * Arguments: 2989 * nxgep 2990 * channel The channel to map. 2991 * 2992 * Notes: 2993 * 1. Allocate & initialise a memory pool, if necessary. 2994 * 2. Allocate however many receive buffers are required. 2995 * 3. Setup buffers, descriptors, and mailbox. 2996 * 2997 * NPI/NXGE function calls: 2998 * nxge_alloc_rx_mem_pool() 2999 * nxge_alloc_rbb() 3000 * nxge_map_rxdma_channel() 3001 * 3002 * Registers accessed: 3003 * 3004 * Context: 3005 * Any domain 3006 */ 3007 static nxge_status_t 3008 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3009 { 3010 nxge_dma_common_t **data; 3011 nxge_dma_common_t **control; 3012 rx_rbr_ring_t **rbr_ring; 3013 rx_rcr_ring_t **rcr_ring; 3014 rx_mbox_t **mailbox; 3015 uint32_t chunks; 3016 3017 nxge_status_t status; 3018 3019 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3020 3021 if (!nxgep->rx_buf_pool_p) { 3022 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3023 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3024 "<== nxge_map_rxdma: buf not allocated")); 3025 return (NXGE_ERROR); 3026 } 3027 } 3028 3029 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3030 return (NXGE_ERROR); 3031 3032 /* 3033 * Timeout should be set based on the system clock divider. 3034 * The following timeout value of 1 assumes that the 3035 * granularity (1000) is 3 microseconds running at 300MHz. 3036 */ 3037 3038 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 3039 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 3040 3041 /* 3042 * Map descriptors from the buffer polls for each dma channel. 3043 */ 3044 3045 /* 3046 * Set up and prepare buffer blocks, descriptors 3047 * and mailbox. 
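 * The per-channel state gathered below is the data buffer chunk list,
 * the receive block ring (RBR), the completion ring (RCR) and the
 * mailbox area; nxge_map_rxdma_channel() ties them all together.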
3048 */ 3049 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3050 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3051 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3052 3053 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3054 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3055 3056 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3057 3058 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3059 chunks, control, rcr_ring, mailbox); 3060 if (status != NXGE_OK) { 3061 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3062 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3063 "returned 0x%x", 3064 channel, status)); 3065 return (status); 3066 } 3067 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3068 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3069 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3070 &nxgep->statsp->rdc_stats[channel]; 3071 3072 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3073 if (!isLDOMguest(nxgep)) 3074 nxge_rdc_hvio_setup(nxgep, channel); 3075 #endif 3076 3077 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3078 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3079 3080 return (status); 3081 } 3082 3083 static void 3084 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3085 { 3086 rx_rbr_ring_t *rbr_ring; 3087 rx_rcr_ring_t *rcr_ring; 3088 rx_mbox_t *mailbox; 3089 3090 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3091 3092 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3093 !nxgep->rx_mbox_areas_p) 3094 return; 3095 3096 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3097 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3098 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3099 3100 if (!rbr_ring || !rcr_ring || !mailbox) 3101 return; 3102 3103 (void) nxge_unmap_rxdma_channel( 3104 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3105 3106 nxge_free_rxb(nxgep, channel); 3107 3108 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3109 } 3110 3111 nxge_status_t 3112 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3113 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3114 uint32_t num_chunks, 3115 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3116 p_rx_mbox_t *rx_mbox_p) 3117 { 3118 int status = NXGE_OK; 3119 3120 /* 3121 * Set up and prepare buffer blocks, descriptors 3122 * and mailbox. 3123 */ 3124 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3125 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3126 /* 3127 * Receive buffer blocks 3128 */ 3129 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3130 dma_buf_p, rbr_p, num_chunks); 3131 if (status != NXGE_OK) { 3132 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3133 "==> nxge_map_rxdma_channel (channel %d): " 3134 "map buffer failed 0x%x", channel, status)); 3135 goto nxge_map_rxdma_channel_exit; 3136 } 3137 3138 /* 3139 * Receive block ring, completion ring and mailbox. 
3140 */ 3141 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3142 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3143 if (status != NXGE_OK) { 3144 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3145 "==> nxge_map_rxdma_channel (channel %d): " 3146 "map config failed 0x%x", channel, status)); 3147 goto nxge_map_rxdma_channel_fail2; 3148 } 3149 3150 goto nxge_map_rxdma_channel_exit; 3151 3152 nxge_map_rxdma_channel_fail3: 3153 /* Free rbr, rcr */ 3154 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3155 "==> nxge_map_rxdma_channel: free rbr/rcr " 3156 "(status 0x%x channel %d)", 3157 status, channel)); 3158 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3159 *rcr_p, *rx_mbox_p); 3160 3161 nxge_map_rxdma_channel_fail2: 3162 /* Free buffer blocks */ 3163 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3164 "==> nxge_map_rxdma_channel: free rx buffers" 3165 "(nxgep 0x%x status 0x%x channel %d)", 3166 nxgep, status, channel)); 3167 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3168 3169 status = NXGE_ERROR; 3170 3171 nxge_map_rxdma_channel_exit: 3172 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3173 "<== nxge_map_rxdma_channel: " 3174 "(nxgep 0x%x status 0x%x channel %d)", 3175 nxgep, status, channel)); 3176 3177 return (status); 3178 } 3179 3180 /*ARGSUSED*/ 3181 static void 3182 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3183 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3184 { 3185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3186 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3187 3188 /* 3189 * unmap receive block ring, completion ring and mailbox. 3190 */ 3191 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3192 rcr_p, rx_mbox_p); 3193 3194 /* unmap buffer blocks */ 3195 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3196 3197 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3198 } 3199 3200 /*ARGSUSED*/ 3201 static nxge_status_t 3202 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3203 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3204 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3205 { 3206 p_rx_rbr_ring_t rbrp; 3207 p_rx_rcr_ring_t rcrp; 3208 p_rx_mbox_t mboxp; 3209 p_nxge_dma_common_t cntl_dmap; 3210 p_nxge_dma_common_t dmap; 3211 p_rx_msg_t *rx_msg_ring; 3212 p_rx_msg_t rx_msg_p; 3213 p_rbr_cfig_a_t rcfga_p; 3214 p_rbr_cfig_b_t rcfgb_p; 3215 p_rcrcfig_a_t cfga_p; 3216 p_rcrcfig_b_t cfgb_p; 3217 p_rxdma_cfig1_t cfig1_p; 3218 p_rxdma_cfig2_t cfig2_p; 3219 p_rbr_kick_t kick_p; 3220 uint32_t dmaaddrp; 3221 uint32_t *rbr_vaddrp; 3222 uint32_t bkaddr; 3223 nxge_status_t status = NXGE_OK; 3224 int i; 3225 uint32_t nxge_port_rcr_size; 3226 3227 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3228 "==> nxge_map_rxdma_channel_cfg_ring")); 3229 3230 cntl_dmap = *dma_cntl_p; 3231 3232 /* Map in the receive block ring */ 3233 rbrp = *rbr_p; 3234 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3235 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3236 /* 3237 * Zero out buffer block ring descriptors. 
3238 */ 3239 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3240 3241 rcfga_p = &(rbrp->rbr_cfga); 3242 rcfgb_p = &(rbrp->rbr_cfgb); 3243 kick_p = &(rbrp->rbr_kick); 3244 rcfga_p->value = 0; 3245 rcfgb_p->value = 0; 3246 kick_p->value = 0; 3247 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3248 rcfga_p->value = (rbrp->rbr_addr & 3249 (RBR_CFIG_A_STDADDR_MASK | 3250 RBR_CFIG_A_STDADDR_BASE_MASK)); 3251 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3252 3253 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3254 rcfgb_p->bits.ldw.vld0 = 1; 3255 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3256 rcfgb_p->bits.ldw.vld1 = 1; 3257 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3258 rcfgb_p->bits.ldw.vld2 = 1; 3259 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3260 3261 /* 3262 * For each buffer block, enter receive block address to the ring. 3263 */ 3264 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3265 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3266 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3267 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3268 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3269 3270 rx_msg_ring = rbrp->rx_msg_ring; 3271 for (i = 0; i < rbrp->tnblocks; i++) { 3272 rx_msg_p = rx_msg_ring[i]; 3273 rx_msg_p->nxgep = nxgep; 3274 rx_msg_p->rx_rbr_p = rbrp; 3275 bkaddr = (uint32_t) 3276 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3277 >> RBR_BKADDR_SHIFT)); 3278 rx_msg_p->free = B_FALSE; 3279 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3280 3281 *rbr_vaddrp++ = bkaddr; 3282 } 3283 3284 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3285 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3286 3287 rbrp->rbr_rd_index = 0; 3288 3289 rbrp->rbr_consumed = 0; 3290 rbrp->rbr_use_bcopy = B_TRUE; 3291 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3292 /* 3293 * Do bcopy on packets greater than bcopy size once 3294 * the lo threshold is reached. 3295 * This lo threshold should be less than the hi threshold. 3296 * 3297 * Do bcopy on every packet once the hi threshold is reached. 
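 * The thresholds below are expressed as a fraction of rbb_max:
 * NXGE_RX_COPY_1 through NXGE_RX_COPY_7 translate to
 * rbb_max * N / NXGE_RX_BCOPY_SCALE, NXGE_RX_COPY_NONE disables bcopy
 * altogether, and NXGE_RX_COPY_ALL forces bcopy for every packet.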
3298 */ 3299 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3300 /* default it to use hi */ 3301 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3302 } 3303 3304 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3305 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3306 } 3307 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3308 3309 switch (nxge_rx_threshold_hi) { 3310 default: 3311 case NXGE_RX_COPY_NONE: 3312 /* Do not do bcopy at all */ 3313 rbrp->rbr_use_bcopy = B_FALSE; 3314 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3315 break; 3316 3317 case NXGE_RX_COPY_1: 3318 case NXGE_RX_COPY_2: 3319 case NXGE_RX_COPY_3: 3320 case NXGE_RX_COPY_4: 3321 case NXGE_RX_COPY_5: 3322 case NXGE_RX_COPY_6: 3323 case NXGE_RX_COPY_7: 3324 rbrp->rbr_threshold_hi = 3325 rbrp->rbb_max * 3326 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3327 break; 3328 3329 case NXGE_RX_COPY_ALL: 3330 rbrp->rbr_threshold_hi = 0; 3331 break; 3332 } 3333 3334 switch (nxge_rx_threshold_lo) { 3335 default: 3336 case NXGE_RX_COPY_NONE: 3337 /* Do not do bcopy at all */ 3338 if (rbrp->rbr_use_bcopy) { 3339 rbrp->rbr_use_bcopy = B_FALSE; 3340 } 3341 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3342 break; 3343 3344 case NXGE_RX_COPY_1: 3345 case NXGE_RX_COPY_2: 3346 case NXGE_RX_COPY_3: 3347 case NXGE_RX_COPY_4: 3348 case NXGE_RX_COPY_5: 3349 case NXGE_RX_COPY_6: 3350 case NXGE_RX_COPY_7: 3351 rbrp->rbr_threshold_lo = 3352 rbrp->rbb_max * 3353 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3354 break; 3355 3356 case NXGE_RX_COPY_ALL: 3357 rbrp->rbr_threshold_lo = 0; 3358 break; 3359 } 3360 3361 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3362 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3363 "rbb_max %d " 3364 "rbrp->rbr_bufsize_type %d " 3365 "rbb_threshold_hi %d " 3366 "rbb_threshold_lo %d", 3367 dma_channel, 3368 rbrp->rbb_max, 3369 rbrp->rbr_bufsize_type, 3370 rbrp->rbr_threshold_hi, 3371 rbrp->rbr_threshold_lo)); 3372 3373 rbrp->page_valid.value = 0; 3374 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3375 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3376 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3377 rbrp->page_hdl.value = 0; 3378 3379 rbrp->page_valid.bits.ldw.page0 = 1; 3380 rbrp->page_valid.bits.ldw.page1 = 1; 3381 3382 /* Map in the receive completion ring */ 3383 rcrp = (p_rx_rcr_ring_t) 3384 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3385 rcrp->rdc = dma_channel; 3386 3387 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3388 rcrp->comp_size = nxge_port_rcr_size; 3389 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3390 3391 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3392 3393 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3394 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3395 sizeof (rcr_entry_t)); 3396 rcrp->comp_rd_index = 0; 3397 rcrp->comp_wt_index = 0; 3398 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3399 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3400 #if defined(__i386) 3401 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3402 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3403 #else 3404 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3405 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3406 #endif 3407 3408 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3409 (nxge_port_rcr_size - 1); 3410 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3411 (nxge_port_rcr_size - 1); 3412 3413 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3414 "==> nxge_map_rxdma_channel_cfg_ring: " 3415 "channel %d " 3416 "rbr_vaddrp $%p " 3417 "rcr_desc_rd_head_p $%p " 3418 "rcr_desc_rd_head_pp 
$%p " 3419 "rcr_desc_rd_last_p $%p " 3420 "rcr_desc_rd_last_pp $%p ", 3421 dma_channel, 3422 rbr_vaddrp, 3423 rcrp->rcr_desc_rd_head_p, 3424 rcrp->rcr_desc_rd_head_pp, 3425 rcrp->rcr_desc_last_p, 3426 rcrp->rcr_desc_last_pp)); 3427 3428 /* 3429 * Zero out receive completion ring descriptors. 3430 */ 3431 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3432 rcrp->intr_timeout = nxgep->intr_timeout; 3433 rcrp->intr_threshold = nxgep->intr_threshold; 3434 rcrp->full_hdr_flag = B_FALSE; 3435 rcrp->sw_priv_hdr_len = 0; 3436 3437 cfga_p = &(rcrp->rcr_cfga); 3438 cfgb_p = &(rcrp->rcr_cfgb); 3439 cfga_p->value = 0; 3440 cfgb_p->value = 0; 3441 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3442 cfga_p->value = (rcrp->rcr_addr & 3443 (RCRCFIG_A_STADDR_MASK | 3444 RCRCFIG_A_STADDR_BASE_MASK)); 3445 3446 cfga_p->value |= ((uint64_t)rcrp->comp_size << 3447 RCRCFIG_A_LEN_SHIF); 3448 3449 /* 3450 * Timeout should be set based on the system clock divider. 3451 * The following timeout value of 1 assumes that the 3452 * granularity (1000) is 3 microseconds running at 300MHz. 3453 */ 3454 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3455 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3456 cfgb_p->bits.ldw.entout = 1; 3457 3458 /* Map in the mailbox */ 3459 mboxp = (p_rx_mbox_t) 3460 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3461 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3462 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3463 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3464 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3465 cfig1_p->value = cfig2_p->value = 0; 3466 3467 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3468 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3469 "==> nxge_map_rxdma_channel_cfg_ring: " 3470 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3471 dma_channel, cfig1_p->value, cfig2_p->value, 3472 mboxp->mbox_addr)); 3473 3474 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3475 & 0xfff); 3476 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3477 3478 3479 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3480 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3481 RXDMA_CFIG2_MBADDR_L_MASK); 3482 3483 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3484 3485 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3486 "==> nxge_map_rxdma_channel_cfg_ring: " 3487 "channel %d dmaaddrp $%p " 3488 "cfg1 0x%016llx cfig2 0x%016llx", 3489 dma_channel, dmaaddrp, 3490 cfig1_p->value, cfig2_p->value)); 3491 3492 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3493 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3494 3495 rbrp->rx_rcr_p = rcrp; 3496 rcrp->rx_rbr_p = rbrp; 3497 *rcr_p = rcrp; 3498 *rx_mbox_p = mboxp; 3499 3500 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3501 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3502 3503 return (status); 3504 } 3505 3506 /*ARGSUSED*/ 3507 static void 3508 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3509 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3510 { 3511 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3512 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3513 rcr_p->rdc)); 3514 3515 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3516 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3517 3518 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3519 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3520 } 3521 3522 static nxge_status_t 3523 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3524 p_nxge_dma_common_t *dma_buf_p, 3525 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3526 { 3527 p_rx_rbr_ring_t rbrp; 3528
p_nxge_dma_common_t dma_bufp, tmp_bufp; 3529 p_rx_msg_t *rx_msg_ring; 3530 p_rx_msg_t rx_msg_p; 3531 p_mblk_t mblk_p; 3532 3533 rxring_info_t *ring_info; 3534 nxge_status_t status = NXGE_OK; 3535 int i, j, index; 3536 uint32_t size, bsize, nblocks, nmsgs; 3537 3538 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3539 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3540 channel)); 3541 3542 dma_bufp = tmp_bufp = *dma_buf_p; 3543 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3544 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3545 "chunks bufp 0x%016llx", 3546 channel, num_chunks, dma_bufp)); 3547 3548 nmsgs = 0; 3549 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3550 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3551 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3552 "bufp 0x%016llx nblocks %d nmsgs %d", 3553 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3554 nmsgs += tmp_bufp->nblocks; 3555 } 3556 if (!nmsgs) { 3557 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3558 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3559 "no msg blocks", 3560 channel)); 3561 status = NXGE_ERROR; 3562 goto nxge_map_rxdma_channel_buf_ring_exit; 3563 } 3564 3565 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3566 3567 size = nmsgs * sizeof (p_rx_msg_t); 3568 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3569 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3570 KM_SLEEP); 3571 3572 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3573 (void *)nxgep->interrupt_cookie); 3574 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3575 (void *)nxgep->interrupt_cookie); 3576 rbrp->rdc = channel; 3577 rbrp->num_blocks = num_chunks; 3578 rbrp->tnblocks = nmsgs; 3579 rbrp->rbb_max = nmsgs; 3580 rbrp->rbr_max_size = nmsgs; 3581 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3582 3583 /* 3584 * Buffer sizes suggested by NIU architect. 3585 * 256, 512 and 2K. 3586 */ 3587 3588 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3589 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3590 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3591 3592 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3593 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3594 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3595 3596 rbrp->block_size = nxgep->rx_default_block_size; 3597 3598 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3599 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3600 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3601 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3602 } else { 3603 if (rbrp->block_size >= 0x2000) { 3604 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3605 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3606 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3607 } else { 3608 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3609 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3610 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3611 } 3612 } 3613 3614 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3615 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3616 "actual rbr max %d rbb_max %d nmsgs %d " 3617 "rbrp->block_size %d default_block_size %d " 3618 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3619 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3620 rbrp->block_size, nxgep->rx_default_block_size, 3621 nxge_rbr_size, nxge_rbr_spare_size)); 3622 3623 /* Map in buffers from the buffer pool. 
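 * For each DMA chunk, record its kernel and DVMA addresses in
 * ring_info, then carve the chunk into fixed-size blocks: every block
 * gets an rx_msg_t whose DMA address, shifted by RBR_BKADDR_SHIFT, is
 * what is later posted to the RBR.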
*/ 3624 index = 0; 3625 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3626 bsize = dma_bufp->block_size; 3627 nblocks = dma_bufp->nblocks; 3628 #if defined(__i386) 3629 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3630 #else 3631 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3632 #endif 3633 ring_info->buffer[i].buf_index = i; 3634 ring_info->buffer[i].buf_size = dma_bufp->alength; 3635 ring_info->buffer[i].start_index = index; 3636 #if defined(__i386) 3637 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3638 #else 3639 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3640 #endif 3641 3642 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3643 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3644 "chunk %d" 3645 " nblocks %d chunk_size %x block_size 0x%x " 3646 "dma_bufp $%p", channel, i, 3647 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3648 dma_bufp)); 3649 3650 for (j = 0; j < nblocks; j++) { 3651 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3652 dma_bufp)) == NULL) { 3653 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3654 "allocb failed (index %d i %d j %d)", 3655 index, i, j)); 3656 goto nxge_map_rxdma_channel_buf_ring_fail1; 3657 } 3658 rx_msg_ring[index] = rx_msg_p; 3659 rx_msg_p->block_index = index; 3660 rx_msg_p->shifted_addr = (uint32_t) 3661 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3662 RBR_BKADDR_SHIFT)); 3663 3664 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3665 "index %d j %d rx_msg_p $%p mblk %p", 3666 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3667 3668 mblk_p = rx_msg_p->rx_mblk_p; 3669 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3670 3671 rbrp->rbr_ref_cnt++; 3672 index++; 3673 rx_msg_p->buf_dma.dma_channel = channel; 3674 } 3675 3676 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3677 if (dma_bufp->contig_alloc_type) { 3678 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3679 } 3680 3681 if (dma_bufp->kmem_alloc_type) { 3682 rbrp->rbr_alloc_type = KMEM_ALLOC; 3683 } 3684 3685 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3686 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3687 "chunk %d" 3688 " nblocks %d chunk_size %x block_size 0x%x " 3689 "dma_bufp $%p", 3690 channel, i, 3691 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3692 dma_bufp)); 3693 } 3694 if (i < rbrp->num_blocks) { 3695 goto nxge_map_rxdma_channel_buf_ring_fail1; 3696 } 3697 3698 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3699 "nxge_map_rxdma_channel_buf_ring: done buf init " 3700 "channel %d msg block entries %d", 3701 channel, index)); 3702 ring_info->block_size_mask = bsize - 1; 3703 rbrp->rx_msg_ring = rx_msg_ring; 3704 rbrp->dma_bufp = dma_buf_p; 3705 rbrp->ring_info = ring_info; 3706 3707 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3708 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3709 " nxge_map_rxdma_channel_buf_ring: " 3710 "channel %d done buf info init", channel)); 3711 3712 /* 3713 * Finally, permit nxge_freeb() to call nxge_post_page(). 
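 * While rbr_state is RBR_POSTING, buffers released through
 * nxge_freeb() may be reposted to the ring; the unmap path later
 * moves the state to RBR_UNMAPPING/RBR_UNMAPPED to stop that.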
3714 */ 3715 rbrp->rbr_state = RBR_POSTING; 3716 3717 *rbr_p = rbrp; 3718 goto nxge_map_rxdma_channel_buf_ring_exit; 3719 3720 nxge_map_rxdma_channel_buf_ring_fail1: 3721 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3722 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3723 channel, status)); 3724 3725 index--; 3726 for (; index >= 0; index--) { 3727 rx_msg_p = rx_msg_ring[index]; 3728 if (rx_msg_p != NULL) { 3729 freeb(rx_msg_p->rx_mblk_p); 3730 rx_msg_ring[index] = NULL; 3731 } 3732 } 3733 nxge_map_rxdma_channel_buf_ring_fail: 3734 MUTEX_DESTROY(&rbrp->post_lock); 3735 MUTEX_DESTROY(&rbrp->lock); 3736 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3737 KMEM_FREE(rx_msg_ring, size); 3738 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3739 3740 status = NXGE_ERROR; 3741 3742 nxge_map_rxdma_channel_buf_ring_exit: 3743 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3744 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3745 3746 return (status); 3747 } 3748 3749 /*ARGSUSED*/ 3750 static void 3751 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3752 p_rx_rbr_ring_t rbr_p) 3753 { 3754 p_rx_msg_t *rx_msg_ring; 3755 p_rx_msg_t rx_msg_p; 3756 rxring_info_t *ring_info; 3757 int i; 3758 uint32_t size; 3759 #ifdef NXGE_DEBUG 3760 int num_chunks; 3761 #endif 3762 3763 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3764 "==> nxge_unmap_rxdma_channel_buf_ring")); 3765 if (rbr_p == NULL) { 3766 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3767 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3768 return; 3769 } 3770 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3771 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3772 rbr_p->rdc)); 3773 3774 rx_msg_ring = rbr_p->rx_msg_ring; 3775 ring_info = rbr_p->ring_info; 3776 3777 if (rx_msg_ring == NULL || ring_info == NULL) { 3778 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3779 "<== nxge_unmap_rxdma_channel_buf_ring: " 3780 "rx_msg_ring $%p ring_info $%p", 3781 rx_msg_p, ring_info)); 3782 return; 3783 } 3784 3785 #ifdef NXGE_DEBUG 3786 num_chunks = rbr_p->num_blocks; 3787 #endif 3788 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3789 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3790 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3791 "tnblocks %d (max %d) size ptrs %d ", 3792 rbr_p->rdc, num_chunks, 3793 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3794 3795 for (i = 0; i < rbr_p->tnblocks; i++) { 3796 rx_msg_p = rx_msg_ring[i]; 3797 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3798 " nxge_unmap_rxdma_channel_buf_ring: " 3799 "rx_msg_p $%p", 3800 rx_msg_p)); 3801 if (rx_msg_p != NULL) { 3802 freeb(rx_msg_p->rx_mblk_p); 3803 rx_msg_ring[i] = NULL; 3804 } 3805 } 3806 3807 /* 3808 * We no longer may use the mutex <post_lock>. By setting 3809 * <rbr_state> to anything but POSTING, we prevent 3810 * nxge_post_page() from accessing a dead mutex. 3811 */ 3812 rbr_p->rbr_state = RBR_UNMAPPING; 3813 MUTEX_DESTROY(&rbr_p->post_lock); 3814 3815 MUTEX_DESTROY(&rbr_p->lock); 3816 3817 if (rbr_p->rbr_ref_cnt == 0) { 3818 /* 3819 * This is the normal state of affairs. 3820 * Need to free the following buffers: 3821 * - data buffers 3822 * - rx_msg ring 3823 * - ring_info 3824 * - rbr ring 3825 */ 3826 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3827 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 3828 nxge_rxdma_databuf_free(rbr_p); 3829 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3830 KMEM_FREE(rx_msg_ring, size); 3831 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3832 } else { 3833 /* 3834 * Some of our buffers are still being used. 3835 * Therefore, tell nxge_freeb() this ring is 3836 * unmapped, so it may free <rbr_p> for us. 
3837 */ 3838 rbr_p->rbr_state = RBR_UNMAPPED; 3839 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3840 "unmap_rxdma_buf_ring: %d %s outstanding.", 3841 rbr_p->rbr_ref_cnt, 3842 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 3843 } 3844 3845 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3846 "<== nxge_unmap_rxdma_channel_buf_ring")); 3847 } 3848 3849 /* 3850 * nxge_rxdma_hw_start_common 3851 * 3852 * Arguments: 3853 * nxgep 3854 * 3855 * Notes: 3856 * 3857 * NPI/NXGE function calls: 3858 * nxge_init_fzc_rx_common(); 3859 * nxge_init_fzc_rxdma_port(); 3860 * 3861 * Registers accessed: 3862 * 3863 * Context: 3864 * Service domain 3865 */ 3866 static nxge_status_t 3867 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3868 { 3869 nxge_status_t status = NXGE_OK; 3870 3871 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3872 3873 /* 3874 * Load the sharable parameters by writing to the 3875 * function zero control registers. These FZC registers 3876 * should be initialized only once for the entire chip. 3877 */ 3878 (void) nxge_init_fzc_rx_common(nxgep); 3879 3880 /* 3881 * Initialize the RXDMA port specific FZC control configurations. 3882 * These FZC registers are pertaining to each port. 3883 */ 3884 (void) nxge_init_fzc_rxdma_port(nxgep); 3885 3886 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3887 3888 return (status); 3889 } 3890 3891 static nxge_status_t 3892 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 3893 { 3894 int i, ndmas; 3895 p_rx_rbr_rings_t rx_rbr_rings; 3896 p_rx_rbr_ring_t *rbr_rings; 3897 p_rx_rcr_rings_t rx_rcr_rings; 3898 p_rx_rcr_ring_t *rcr_rings; 3899 p_rx_mbox_areas_t rx_mbox_areas_p; 3900 p_rx_mbox_t *rx_mbox_p; 3901 nxge_status_t status = NXGE_OK; 3902 3903 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3904 3905 rx_rbr_rings = nxgep->rx_rbr_rings; 3906 rx_rcr_rings = nxgep->rx_rcr_rings; 3907 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3908 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3909 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3910 return (NXGE_ERROR); 3911 } 3912 ndmas = rx_rbr_rings->ndmas; 3913 if (ndmas == 0) { 3914 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3915 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3916 return (NXGE_ERROR); 3917 } 3918 3919 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3920 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3921 3922 rbr_rings = rx_rbr_rings->rbr_rings; 3923 rcr_rings = rx_rcr_rings->rcr_rings; 3924 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3925 if (rx_mbox_areas_p) { 3926 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3927 } 3928 3929 i = channel; 3930 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3931 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3932 ndmas, channel)); 3933 status = nxge_rxdma_start_channel(nxgep, channel, 3934 (p_rx_rbr_ring_t)rbr_rings[i], 3935 (p_rx_rcr_ring_t)rcr_rings[i], 3936 (p_rx_mbox_t)rx_mbox_p[i]); 3937 if (status != NXGE_OK) { 3938 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3939 "==> nxge_rxdma_hw_start: disable " 3940 "(status 0x%x channel %d)", status, channel)); 3941 return (status); 3942 } 3943 3944 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3945 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3946 rx_rbr_rings, rx_rcr_rings)); 3947 3948 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3949 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3950 3951 return (status); 3952 } 3953 3954 static void 3955 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 3956 { 3957 p_rx_rbr_rings_t rx_rbr_rings; 3958 p_rx_rcr_rings_t rx_rcr_rings; 3959 3960 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
nxge_rxdma_hw_stop")); 3961 3962 rx_rbr_rings = nxgep->rx_rbr_rings; 3963 rx_rcr_rings = nxgep->rx_rcr_rings; 3964 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3965 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3966 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 3967 return; 3968 } 3969 3970 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3971 "==> nxge_rxdma_hw_stop(channel %d)", 3972 channel)); 3973 (void) nxge_rxdma_stop_channel(nxgep, channel); 3974 3975 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3976 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3977 rx_rbr_rings, rx_rcr_rings)); 3978 3979 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 3980 } 3981 3982 3983 static nxge_status_t 3984 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 3985 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3986 3987 { 3988 npi_handle_t handle; 3989 npi_status_t rs = NPI_SUCCESS; 3990 rx_dma_ctl_stat_t cs; 3991 rx_dma_ent_msk_t ent_mask; 3992 nxge_status_t status = NXGE_OK; 3993 3994 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 3995 3996 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3997 3998 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 3999 "npi handle addr $%p acc $%p", 4000 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4001 4002 /* Reset RXDMA channel, but not if you're a guest. */ 4003 if (!isLDOMguest(nxgep)) { 4004 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4005 if (rs != NPI_SUCCESS) { 4006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4007 "==> nxge_init_fzc_rdc: " 4008 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4009 channel, rs)); 4010 return (NXGE_ERROR | rs); 4011 } 4012 4013 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4014 "==> nxge_rxdma_start_channel: reset done: channel %d", 4015 channel)); 4016 } 4017 4018 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4019 if (isLDOMguest(nxgep)) 4020 (void) nxge_rdc_lp_conf(nxgep, channel); 4021 #endif 4022 4023 /* 4024 * Initialize the RXDMA channel specific FZC control 4025 * configurations. These FZC registers are pertaining 4026 * to each RX channel (logical pages). 4027 */ 4028 if (!isLDOMguest(nxgep)) { 4029 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4030 if (status != NXGE_OK) { 4031 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4032 "==> nxge_rxdma_start_channel: " 4033 "init fzc rxdma failed (0x%08x channel %d)", 4034 status, channel)); 4035 return (status); 4036 } 4037 4038 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4039 "==> nxge_rxdma_start_channel: fzc done")); 4040 } 4041 4042 /* Set up the interrupt event masks. 
*/ 4043 ent_mask.value = 0;
4044 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
4045 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4046 &ent_mask);
4047 if (rs != NPI_SUCCESS) {
4048 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4049 "==> nxge_rxdma_start_channel: "
4050 "init rxdma event masks failed "
4051 "(0x%08x channel %d)",
4052 rs, channel));
4053 return (NXGE_ERROR | rs);
4054 }
4055 
4056 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4057 "==> nxge_rxdma_start_channel: "
4058 "event done: channel %d (mask 0x%016llx)",
4059 channel, ent_mask.value));
4060 
4061 /* Initialize the receive DMA control and status register */
4062 cs.value = 0;
4063 cs.bits.hdw.mex = 1;
4064 cs.bits.hdw.rcrthres = 1;
4065 cs.bits.hdw.rcrto = 1;
4066 cs.bits.hdw.rbr_empty = 1;
4067 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4068 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4069 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
4070 if (status != NXGE_OK) {
4071 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4072 "==> nxge_rxdma_start_channel: "
4073 "init rxdma control register failed (0x%08x channel %d)",
4074 status, channel));
4075 return (status);
4076 }
4077 
4078 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4079 "control done - channel %d cs 0x%016llx", channel, cs.value));
4080 
4081 /*
4082 * Load RXDMA descriptors, buffers, mailbox,
4083 * initialize the receive DMA channel and
4084 * enable the DMA channel.
4085 */
4086 status = nxge_enable_rxdma_channel(nxgep,
4087 channel, rbr_p, rcr_p, mbox_p);
4088 
4089 if (status != NXGE_OK) {
4090 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4091 " nxge_rxdma_start_channel: "
4092 " enable rxdma failed (0x%08x channel %d)",
4093 status, channel));
4094 return (status);
4095 }
4096 
4097 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4098 "==> nxge_rxdma_start_channel: enabled channel %d", channel));
4099 
4100 if (isLDOMguest(nxgep)) {
4101 /* Add interrupt handler for this channel.
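 *
 * In a guest domain the interrupt for this receive channel is hooked
 * up through the hybrid I/O framework (nxge_hio_intr_add() with a
 * VP_BOUND_RX binding).  A failure here is only logged; it does not
 * abort the channel start.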
*/ 4102 if ((status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel))
4103 != NXGE_OK) {
4104 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4105 " nxge_rxdma_start_channel: "
4106 " nxge_hio_intr_add failed (0x%08x channel %d)",
4107 status, channel));
4108 }
4109 }
4110 
4111 ent_mask.value = 0;
4112 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4113 RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
4114 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4115 &ent_mask);
4116 if (rs != NPI_SUCCESS) {
4117 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4118 "==> nxge_rxdma_start_channel: "
4119 "init rxdma event masks failed (0x%08x channel %d)",
4120 rs, channel));
4121 return (NXGE_ERROR | rs);
4122 }
4123 
4124 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4125 "final event mask set - channel %d (mask 0x%016llx)", channel, ent_mask.value));
4126 
4127 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
4128 
4129 return (NXGE_OK);
4130 }
4131 
4132 static nxge_status_t
4133 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
4134 {
4135 npi_handle_t handle;
4136 npi_status_t rs = NPI_SUCCESS;
4137 rx_dma_ctl_stat_t cs;
4138 rx_dma_ent_msk_t ent_mask;
4139 nxge_status_t status = NXGE_OK;
4140 
4141 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
4142 
4143 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4144 
4145 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
4146 "npi handle addr $%p acc $%p",
4147 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4148 
4149 /* Reset RXDMA channel */
4150 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4151 if (rs != NPI_SUCCESS) {
4152 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4153 " nxge_rxdma_stop_channel: "
4154 " reset rxdma failed (0x%08x channel %d)",
4155 rs, channel));
4156 return (NXGE_ERROR | rs);
4157 }
4158 
4159 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4160 "==> nxge_rxdma_stop_channel: reset done"));
4161 
4162 /* Set up the interrupt event masks.
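 *
 * RX_DMA_ENT_MSK_ALL sets every mask bit, which suppresses all events
 * from this channel (i.e. disables its interrupts) before the control
 * and status register is cleared and the DMA is disabled below.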
*/ 4163 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4164 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4165 &ent_mask); 4166 if (rs != NPI_SUCCESS) { 4167 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4168 "==> nxge_rxdma_stop_channel: " 4169 "set rxdma event masks failed (0x%08x channel %d)", 4170 rs, channel)); 4171 return (NXGE_ERROR | rs); 4172 } 4173 4174 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4175 "==> nxge_rxdma_stop_channel: event done")); 4176 4177 /* Initialize the receive DMA control and status register */ 4178 cs.value = 0; 4179 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, 4180 &cs); 4181 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4182 " to default (all 0s) 0x%08x", cs.value)); 4183 if (status != NXGE_OK) { 4184 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4185 " nxge_rxdma_stop_channel: init rxdma" 4186 " control register failed (0x%08x channel %d", 4187 status, channel)); 4188 return (status); 4189 } 4190 4191 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4192 "==> nxge_rxdma_stop_channel: control done")); 4193 4194 /* disable dma channel */ 4195 status = nxge_disable_rxdma_channel(nxgep, channel); 4196 4197 if (status != NXGE_OK) { 4198 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4199 " nxge_rxdma_stop_channel: " 4200 " init enable rxdma failed (0x%08x channel %d)", 4201 status, channel)); 4202 return (status); 4203 } 4204 4205 NXGE_DEBUG_MSG((nxgep, 4206 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4207 4208 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4209 4210 return (NXGE_OK); 4211 } 4212 4213 nxge_status_t 4214 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4215 { 4216 npi_handle_t handle; 4217 p_nxge_rdc_sys_stats_t statsp; 4218 rx_ctl_dat_fifo_stat_t stat; 4219 uint32_t zcp_err_status; 4220 uint32_t ipp_err_status; 4221 nxge_status_t status = NXGE_OK; 4222 npi_status_t rs = NPI_SUCCESS; 4223 boolean_t my_err = B_FALSE; 4224 4225 handle = nxgep->npi_handle; 4226 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4227 4228 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4229 4230 if (rs != NPI_SUCCESS) 4231 return (NXGE_ERROR | rs); 4232 4233 if (stat.bits.ldw.id_mismatch) { 4234 statsp->id_mismatch++; 4235 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4236 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4237 /* Global fatal error encountered */ 4238 } 4239 4240 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4241 switch (nxgep->mac.portnum) { 4242 case 0: 4243 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4244 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4245 my_err = B_TRUE; 4246 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4247 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4248 } 4249 break; 4250 case 1: 4251 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4252 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4253 my_err = B_TRUE; 4254 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4255 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4256 } 4257 break; 4258 case 2: 4259 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4260 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4261 my_err = B_TRUE; 4262 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4263 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4264 } 4265 break; 4266 case 3: 4267 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4268 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4269 my_err = B_TRUE; 4270 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4271 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4272 } 4273 break; 4274 default: 4275 return (NXGE_ERROR); 4276 
} 4277 } 4278 4279 if (my_err) { 4280 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4281 zcp_err_status); 4282 if (status != NXGE_OK) 4283 return (status); 4284 } 4285 4286 return (NXGE_OK); 4287 } 4288 4289 static nxge_status_t 4290 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4291 uint32_t zcp_status) 4292 { 4293 boolean_t rxport_fatal = B_FALSE; 4294 p_nxge_rdc_sys_stats_t statsp; 4295 nxge_status_t status = NXGE_OK; 4296 uint8_t portn; 4297 4298 portn = nxgep->mac.portnum; 4299 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4300 4301 if (ipp_status & (0x1 << portn)) { 4302 statsp->ipp_eop_err++; 4303 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4304 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4305 rxport_fatal = B_TRUE; 4306 } 4307 4308 if (zcp_status & (0x1 << portn)) { 4309 statsp->zcp_eop_err++; 4310 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4311 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4312 rxport_fatal = B_TRUE; 4313 } 4314 4315 if (rxport_fatal) { 4316 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4317 " nxge_rxdma_handle_port_error: " 4318 " fatal error on Port #%d\n", 4319 portn)); 4320 status = nxge_rx_port_fatal_err_recover(nxgep); 4321 if (status == NXGE_OK) { 4322 FM_SERVICE_RESTORED(nxgep); 4323 } 4324 } 4325 4326 return (status); 4327 } 4328 4329 static nxge_status_t 4330 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4331 { 4332 npi_handle_t handle; 4333 npi_status_t rs = NPI_SUCCESS; 4334 nxge_status_t status = NXGE_OK; 4335 p_rx_rbr_ring_t rbrp; 4336 p_rx_rcr_ring_t rcrp; 4337 p_rx_mbox_t mboxp; 4338 rx_dma_ent_msk_t ent_mask; 4339 p_nxge_dma_common_t dmap; 4340 int ring_idx; 4341 uint32_t ref_cnt; 4342 p_rx_msg_t rx_msg_p; 4343 int i; 4344 uint32_t nxge_port_rcr_size; 4345 4346 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4347 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4348 "Recovering from RxDMAChannel#%d error...", channel)); 4349 4350 /* 4351 * Stop the dma channel waits for the stop done. 4352 * If the stop done bit is not set, then create 4353 * an error. 
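 *
 * The recovery sequence that follows is, in order (all of it done
 * while holding the RCR, RBR and post locks):
 *
 *	1. disable the RDC (npi_rxdma_cfg_rdc_disable),
 *	2. mask all of its events (RX_DMA_ENT_MSK_ALL),
 *	3. reset the RDC (npi_rxdma_cfg_rdc_reset),
 *	4. rewind the RBR/RCR indices and zero the RCR descriptors,
 *	5. mark fully-consumed loaned buffers as re-postable, and
 *	6. restart the channel via nxge_rxdma_start_channel().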
4354 */ 4355 4356 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4357 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4358 4359 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4360 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4361 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4362 4363 MUTEX_ENTER(&rcrp->lock); 4364 MUTEX_ENTER(&rbrp->lock); 4365 MUTEX_ENTER(&rbrp->post_lock); 4366 4367 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4368 4369 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4370 if (rs != NPI_SUCCESS) { 4371 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4372 "nxge_disable_rxdma_channel:failed")); 4373 goto fail; 4374 } 4375 4376 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4377 4378 /* Disable interrupt */ 4379 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4380 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4381 if (rs != NPI_SUCCESS) { 4382 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4383 "nxge_rxdma_stop_channel: " 4384 "set rxdma event masks failed (channel %d)", 4385 channel)); 4386 } 4387 4388 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4389 4390 /* Reset RXDMA channel */ 4391 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4392 if (rs != NPI_SUCCESS) { 4393 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4394 "nxge_rxdma_fatal_err_recover: " 4395 " reset rxdma failed (channel %d)", channel)); 4396 goto fail; 4397 } 4398 4399 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4400 4401 mboxp = 4402 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4403 4404 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4405 rbrp->rbr_rd_index = 0; 4406 4407 rcrp->comp_rd_index = 0; 4408 rcrp->comp_wt_index = 0; 4409 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4410 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4411 #if defined(__i386) 4412 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4413 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4414 #else 4415 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4416 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4417 #endif 4418 4419 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4420 (nxge_port_rcr_size - 1); 4421 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4422 (nxge_port_rcr_size - 1); 4423 4424 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4425 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4426 4427 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4428 4429 for (i = 0; i < rbrp->rbr_max_size; i++) { 4430 rx_msg_p = rbrp->rx_msg_ring[i]; 4431 ref_cnt = rx_msg_p->ref_cnt; 4432 if (ref_cnt != 1) { 4433 if (rx_msg_p->cur_usage_cnt != 4434 rx_msg_p->max_usage_cnt) { 4435 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4436 "buf[%d]: cur_usage_cnt = %d " 4437 "max_usage_cnt = %d\n", i, 4438 rx_msg_p->cur_usage_cnt, 4439 rx_msg_p->max_usage_cnt)); 4440 } else { 4441 /* Buffer can be re-posted */ 4442 rx_msg_p->free = B_TRUE; 4443 rx_msg_p->cur_usage_cnt = 0; 4444 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4445 rx_msg_p->pkt_buf_size = 0; 4446 } 4447 } 4448 } 4449 4450 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4451 4452 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4453 if (status != NXGE_OK) { 4454 goto fail; 4455 } 4456 4457 MUTEX_EXIT(&rbrp->post_lock); 4458 MUTEX_EXIT(&rbrp->lock); 4459 MUTEX_EXIT(&rcrp->lock); 4460 4461 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4462 "Recovery Successful, RxDMAChannel#%d Restored", 4463 channel)); 4464 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> 
nxge_rxdma_fatal_err_recover")); 4465 4466 return (NXGE_OK); 4467 fail: 4468 MUTEX_EXIT(&rbrp->post_lock); 4469 MUTEX_EXIT(&rbrp->lock); 4470 MUTEX_EXIT(&rcrp->lock); 4471 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4472 4473 return (NXGE_ERROR | rs); 4474 } 4475 4476 nxge_status_t 4477 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4478 { 4479 nxge_grp_set_t *set = &nxgep->rx_set; 4480 nxge_status_t status = NXGE_OK; 4481 int rdc; 4482 4483 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4484 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4485 "Recovering from RxPort error...")); 4486 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4487 4488 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4489 goto fail; 4490 4491 NXGE_DELAY(1000); 4492 4493 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4494 4495 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4496 if ((1 << rdc) & set->owned.map) { 4497 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4498 != NXGE_OK) { 4499 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4500 "Could not recover channel %d", rdc)); 4501 } 4502 } 4503 } 4504 4505 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4506 4507 /* Reset IPP */ 4508 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4509 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4510 "nxge_rx_port_fatal_err_recover: " 4511 "Failed to reset IPP")); 4512 goto fail; 4513 } 4514 4515 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4516 4517 /* Reset RxMAC */ 4518 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4519 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4520 "nxge_rx_port_fatal_err_recover: " 4521 "Failed to reset RxMAC")); 4522 goto fail; 4523 } 4524 4525 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4526 4527 /* Re-Initialize IPP */ 4528 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4529 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4530 "nxge_rx_port_fatal_err_recover: " 4531 "Failed to init IPP")); 4532 goto fail; 4533 } 4534 4535 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4536 4537 /* Re-Initialize RxMAC */ 4538 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4539 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4540 "nxge_rx_port_fatal_err_recover: " 4541 "Failed to reset RxMAC")); 4542 goto fail; 4543 } 4544 4545 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4546 4547 /* Re-enable RxMAC */ 4548 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4549 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4550 "nxge_rx_port_fatal_err_recover: " 4551 "Failed to enable RxMAC")); 4552 goto fail; 4553 } 4554 4555 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4556 "Recovery Successful, RxPort Restored")); 4557 4558 return (NXGE_OK); 4559 fail: 4560 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4561 return (status); 4562 } 4563 4564 void 4565 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4566 { 4567 rx_dma_ctl_stat_t cs; 4568 rx_ctl_dat_fifo_stat_t cdfs; 4569 4570 switch (err_id) { 4571 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4572 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4573 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4574 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4575 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4576 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4577 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4578 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4579 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4580 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4581 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4582 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4583 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4584 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4585 
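		/*
		 * Each of the per-channel RDMC errors above is injected the
		 * same way: read the channel's RX_DMA_CTL_STAT debug
		 * register, set the status bit that corresponds to err_id,
		 * and write the value back so the error is reported exactly
		 * as if the hardware had raised it.
		 */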
RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4586 chan, &cs.value); 4587 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4588 cs.bits.hdw.rcr_ack_err = 1; 4589 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4590 cs.bits.hdw.dc_fifo_err = 1; 4591 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4592 cs.bits.hdw.rcr_sha_par = 1; 4593 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4594 cs.bits.hdw.rbr_pre_par = 1; 4595 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4596 cs.bits.hdw.rbr_tmout = 1; 4597 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4598 cs.bits.hdw.rsp_cnt_err = 1; 4599 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4600 cs.bits.hdw.byte_en_bus = 1; 4601 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4602 cs.bits.hdw.rsp_dat_err = 1; 4603 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4604 cs.bits.hdw.config_err = 1; 4605 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4606 cs.bits.hdw.rcrincon = 1; 4607 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4608 cs.bits.hdw.rcrfull = 1; 4609 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4610 cs.bits.hdw.rbrfull = 1; 4611 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4612 cs.bits.hdw.rbrlogpage = 1; 4613 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4614 cs.bits.hdw.cfiglogpage = 1; 4615 #if defined(__i386) 4616 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4617 cs.value); 4618 #else 4619 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4620 cs.value); 4621 #endif 4622 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4623 chan, cs.value); 4624 break; 4625 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4626 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4627 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4628 cdfs.value = 0; 4629 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4630 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4631 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4632 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4633 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4634 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4635 #if defined(__i386) 4636 cmn_err(CE_NOTE, 4637 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4638 cdfs.value); 4639 #else 4640 cmn_err(CE_NOTE, 4641 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4642 cdfs.value); 4643 #endif 4644 NXGE_REG_WR64(nxgep->npi_handle, 4645 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4646 break; 4647 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4648 break; 4649 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4650 break; 4651 } 4652 } 4653 4654 static void 4655 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4656 { 4657 rxring_info_t *ring_info; 4658 int index; 4659 uint32_t chunk_size; 4660 uint64_t kaddr; 4661 uint_t num_blocks; 4662 4663 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4664 4665 if (rbr_p == NULL) { 4666 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4667 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4668 return; 4669 } 4670 4671 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4672 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4673 "==> nxge_rxdma_databuf_free: DDI")); 4674 return; 4675 } 4676 4677 ring_info = rbr_p->ring_info; 4678 if (ring_info == NULL) { 4679 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4680 "==> nxge_rxdma_databuf_free: NULL ring info")); 4681 return; 4682 } 4683 num_blocks = rbr_p->num_blocks; 4684 for (index = 0; index < num_blocks; index++) { 4685 kaddr = ring_info->buffer[index].kaddr; 4686 chunk_size = 
ring_info->buffer[index].buf_size; 4687 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4688 "==> nxge_rxdma_databuf_free: free chunk %d " 4689 "kaddrp $%p chunk size %d", 4690 index, kaddr, chunk_size)); 4691 if (kaddr == NULL) continue; 4692 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4693 ring_info->buffer[index].kaddr = NULL; 4694 } 4695 4696 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4697 } 4698 4699 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4700 extern void contig_mem_free(void *, size_t); 4701 #endif 4702 4703 void 4704 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4705 { 4706 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4707 4708 if (kaddr == NULL || !buf_size) { 4709 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4710 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4711 kaddr, buf_size)); 4712 return; 4713 } 4714 4715 switch (alloc_type) { 4716 case KMEM_ALLOC: 4717 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4718 "==> nxge_free_buf: freeing kmem $%p size %d", 4719 kaddr, buf_size)); 4720 #if defined(__i386) 4721 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4722 #else 4723 KMEM_FREE((void *)kaddr, buf_size); 4724 #endif 4725 break; 4726 4727 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4728 case CONTIG_MEM_ALLOC: 4729 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4730 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4731 kaddr, buf_size)); 4732 contig_mem_free((void *)kaddr, buf_size); 4733 break; 4734 #endif 4735 4736 default: 4737 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4738 "<== nxge_free_buf: unsupported alloc type %d", 4739 alloc_type)); 4740 return; 4741 } 4742 4743 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 4744 } 4745
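/*
 * For illustration, nxge_rxdma_databuf_free() above releases each chunk
 * recorded in ring_info with a call of the form (a sketch; the index
 * variable is hypothetical):
 *
 *	nxge_free_buf(rbr_p->rbr_alloc_type,
 *	    ring_info->buffer[i].kaddr, ring_info->buffer[i].buf_size);
 *
 * so CONTIG_MEM_ALLOC chunks are returned via contig_mem_free() and
 * KMEM_ALLOC chunks via KMEM_FREE().
 */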