/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
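 *
 * These thresholds are normally left at their build-time defaults.  As a
 * rough, illustrative sketch only (the integer encodings for
 * nxge_rxbuf_threshold_t and nxge_rxbuf_type_t live in the nxge headers,
 * and the values below are examples rather than recommendations), they
 * could be overridden from /etc/system with lines of the form:
 *
 *	set nxge:nxge_rx_threshold_hi = 2
 *	set nxge:nxge_rx_buf_size_type = 1
 *	set nxge:nxge_rx_threshold_lo = 4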
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

extern uint32_t nxge_cksum_offload;

static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
static void nxge_unmap_rxdma(p_nxge_t, int);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
static void nxge_rxdma_hw_stop(p_nxge_t, int);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

static mblk_t *
nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);

static void nxge_receive_packet(p_nxge_t,
    p_rx_rcr_ring_t,
    p_rcr_entry_t,
    boolean_t *,
    mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);

nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int i, count;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

    if (!isLDOMguest(nxgep)) {
        if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
            cmn_err(CE_NOTE, "hw_start_common");
            return (NXGE_ERROR);
        }
    }

    /*
     * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
     * We only have 8 hardware RDC tables, but we may have
     * up to 16 logical (software-defined) groups of RDCs,
     * if we make use of layer 3 & 4 hardware classification.
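     *
     * Each set bit in set->lg.map below selects an active logical group,
     * and each set bit in that group's own map selects an RDC it owns.
     * For example (illustrative only), lg.map == 0x3 with group 0 owning
     * RDCs 0-1 and group 1 owning RDC 2 would add channels 0, 1 and 2.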
156 */ 157 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 158 if ((1 << i) & set->lg.map) { 159 int channel; 160 nxge_grp_t *group = set->group[i]; 161 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 162 if ((1 << channel) & group->map) { 163 if ((nxge_grp_dc_add(nxgep, 164 group, VP_BOUND_RX, channel))) 165 return (NXGE_ERROR); 166 } 167 } 168 } 169 if (++count == set->lg.count) 170 break; 171 } 172 173 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 174 175 return (NXGE_OK); 176 } 177 178 nxge_status_t 179 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 180 { 181 nxge_status_t status; 182 183 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 184 185 status = nxge_map_rxdma(nxge, channel); 186 if (status != NXGE_OK) { 187 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 188 "<== nxge_init_rxdma: status 0x%x", status)); 189 return (status); 190 } 191 192 status = nxge_rxdma_hw_start(nxge, channel); 193 if (status != NXGE_OK) { 194 nxge_unmap_rxdma(nxge, channel); 195 } 196 197 if (!nxge->statsp->rdc_ksp[channel]) 198 nxge_setup_rdc_kstats(nxge, channel); 199 200 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 201 "<== nxge_init_rxdma_channel: status 0x%x", status)); 202 203 return (status); 204 } 205 206 void 207 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 208 { 209 nxge_grp_set_t *set = &nxgep->rx_set; 210 int rdc; 211 212 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 213 214 if (set->owned.map == 0) { 215 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 216 "nxge_uninit_rxdma_channels: no channels")); 217 return; 218 } 219 220 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 221 if ((1 << rdc) & set->owned.map) { 222 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 223 } 224 } 225 226 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 227 } 228 229 void 230 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 231 { 232 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 233 234 if (nxgep->statsp->rdc_ksp[channel]) { 235 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 236 nxgep->statsp->rdc_ksp[channel] = 0; 237 } 238 239 nxge_rxdma_hw_stop(nxgep, channel); 240 nxge_unmap_rxdma(nxgep, channel); 241 242 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 243 } 244 245 nxge_status_t 246 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 247 { 248 npi_handle_t handle; 249 npi_status_t rs = NPI_SUCCESS; 250 nxge_status_t status = NXGE_OK; 251 252 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 253 254 handle = NXGE_DEV_NPI_HANDLE(nxgep); 255 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 256 257 if (rs != NPI_SUCCESS) { 258 status = NXGE_ERROR | rs; 259 } 260 261 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 262 263 return (status); 264 } 265 266 void 267 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 268 { 269 nxge_grp_set_t *set = &nxgep->rx_set; 270 int rdc; 271 272 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 273 274 if (!isLDOMguest(nxgep)) { 275 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 276 (void) npi_rxdma_dump_fzc_regs(handle); 277 } 278 279 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 280 NXGE_DEBUG_MSG((nxgep, TX_CTL, 281 "nxge_rxdma_regs_dump_channels: " 282 "NULL ring pointer(s)")); 283 return; 284 } 285 286 if (set->owned.map == 0) { 287 NXGE_DEBUG_MSG((nxgep, RX_CTL, 288 "nxge_rxdma_regs_dump_channels: no channels")); 289 return; 290 } 291 292 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 293 if ((1 << 
rdc) & set->owned.map) { 294 rx_rbr_ring_t *ring = 295 nxgep->rx_rbr_rings->rbr_rings[rdc]; 296 if (ring) { 297 (void) nxge_dump_rxdma_channel(nxgep, rdc); 298 } 299 } 300 } 301 302 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 303 } 304 305 nxge_status_t 306 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 307 { 308 npi_handle_t handle; 309 npi_status_t rs = NPI_SUCCESS; 310 nxge_status_t status = NXGE_OK; 311 312 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 313 314 handle = NXGE_DEV_NPI_HANDLE(nxgep); 315 rs = npi_rxdma_dump_rdc_regs(handle, channel); 316 317 if (rs != NPI_SUCCESS) { 318 status = NXGE_ERROR | rs; 319 } 320 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 321 return (status); 322 } 323 324 nxge_status_t 325 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 326 p_rx_dma_ent_msk_t mask_p) 327 { 328 npi_handle_t handle; 329 npi_status_t rs = NPI_SUCCESS; 330 nxge_status_t status = NXGE_OK; 331 332 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 333 "<== nxge_init_rxdma_channel_event_mask")); 334 335 handle = NXGE_DEV_NPI_HANDLE(nxgep); 336 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 337 if (rs != NPI_SUCCESS) { 338 status = NXGE_ERROR | rs; 339 } 340 341 return (status); 342 } 343 344 nxge_status_t 345 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 346 p_rx_dma_ctl_stat_t cs_p) 347 { 348 npi_handle_t handle; 349 npi_status_t rs = NPI_SUCCESS; 350 nxge_status_t status = NXGE_OK; 351 352 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 353 "<== nxge_init_rxdma_channel_cntl_stat")); 354 355 handle = NXGE_DEV_NPI_HANDLE(nxgep); 356 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 357 358 if (rs != NPI_SUCCESS) { 359 status = NXGE_ERROR | rs; 360 } 361 362 return (status); 363 } 364 365 /* 366 * nxge_rxdma_cfg_rdcgrp_default_rdc 367 * 368 * Set the default RDC for an RDC Group (Table) 369 * 370 * Arguments: 371 * nxgep 372 * rdcgrp The group to modify 373 * rdc The new default RDC. 374 * 375 * Notes: 376 * 377 * NPI/NXGE function calls: 378 * npi_rxdma_cfg_rdc_table_default_rdc() 379 * 380 * Registers accessed: 381 * RDC_TBL_REG: FZC_ZCP + 0x10000 382 * 383 * Context: 384 * Service domain 385 */ 386 nxge_status_t 387 nxge_rxdma_cfg_rdcgrp_default_rdc( 388 p_nxge_t nxgep, 389 uint8_t rdcgrp, 390 uint8_t rdc) 391 { 392 npi_handle_t handle; 393 npi_status_t rs = NPI_SUCCESS; 394 p_nxge_dma_pt_cfg_t p_dma_cfgp; 395 p_nxge_rdc_grp_t rdc_grp_p; 396 uint8_t actual_rdcgrp, actual_rdc; 397 398 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 399 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 400 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 401 402 handle = NXGE_DEV_NPI_HANDLE(nxgep); 403 404 /* 405 * This has to be rewritten. Do we even allow this anymore? 
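 *
 * For reference, the NXGE_ACTUAL_RDCGRP()/NXGE_ACTUAL_RDC() macros used
 * below simply bias the caller's logical ids by this partition's base
 * values (def_mac_rxdma_grpid and start_rdc); e.g. logical RDC 0 on a
 * partition whose start_rdc is 4 programs hardware RDC 4.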
406 */ 407 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 408 RDC_MAP_IN(rdc_grp_p->map, rdc); 409 rdc_grp_p->def_rdc = rdc; 410 411 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 412 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 413 414 rs = npi_rxdma_cfg_rdc_table_default_rdc( 415 handle, actual_rdcgrp, actual_rdc); 416 417 if (rs != NPI_SUCCESS) { 418 return (NXGE_ERROR | rs); 419 } 420 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 421 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 422 return (NXGE_OK); 423 } 424 425 nxge_status_t 426 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 427 { 428 npi_handle_t handle; 429 430 uint8_t actual_rdc; 431 npi_status_t rs = NPI_SUCCESS; 432 433 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 434 " ==> nxge_rxdma_cfg_port_default_rdc")); 435 436 handle = NXGE_DEV_NPI_HANDLE(nxgep); 437 actual_rdc = rdc; /* XXX Hack! */ 438 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 439 440 441 if (rs != NPI_SUCCESS) { 442 return (NXGE_ERROR | rs); 443 } 444 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 445 " <== nxge_rxdma_cfg_port_default_rdc")); 446 447 return (NXGE_OK); 448 } 449 450 nxge_status_t 451 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 452 uint16_t pkts) 453 { 454 npi_status_t rs = NPI_SUCCESS; 455 npi_handle_t handle; 456 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 457 " ==> nxge_rxdma_cfg_rcr_threshold")); 458 handle = NXGE_DEV_NPI_HANDLE(nxgep); 459 460 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 461 462 if (rs != NPI_SUCCESS) { 463 return (NXGE_ERROR | rs); 464 } 465 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 466 return (NXGE_OK); 467 } 468 469 nxge_status_t 470 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 471 uint16_t tout, uint8_t enable) 472 { 473 npi_status_t rs = NPI_SUCCESS; 474 npi_handle_t handle; 475 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 476 handle = NXGE_DEV_NPI_HANDLE(nxgep); 477 if (enable == 0) { 478 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 479 } else { 480 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 481 tout); 482 } 483 484 if (rs != NPI_SUCCESS) { 485 return (NXGE_ERROR | rs); 486 } 487 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 488 return (NXGE_OK); 489 } 490 491 nxge_status_t 492 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 493 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 494 { 495 npi_handle_t handle; 496 rdc_desc_cfg_t rdc_desc; 497 p_rcrcfig_b_t cfgb_p; 498 npi_status_t rs = NPI_SUCCESS; 499 500 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 501 handle = NXGE_DEV_NPI_HANDLE(nxgep); 502 /* 503 * Use configuration data composed at init time. 504 * Write to hardware the receive ring configurations. 
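 *
 * The sequence that follows is: fill in rdc_desc (mailbox, RBR address
 * and length, the three buffer block sizes, RCR address/length and its
 * interrupt threshold/timeout), push it with npi_rxdma_cfg_rdc_ring(),
 * re-program the RCR threshold and timeout, enable the DMA, then kick
 * the RBR with the number of buffers posted and clear the RBR empty bit.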
505 */ 506 rdc_desc.mbox_enable = 1; 507 rdc_desc.mbox_addr = mbox_p->mbox_addr; 508 NXGE_DEBUG_MSG((nxgep, RX_CTL, 509 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 510 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 511 512 rdc_desc.rbr_len = rbr_p->rbb_max; 513 rdc_desc.rbr_addr = rbr_p->rbr_addr; 514 515 switch (nxgep->rx_bksize_code) { 516 case RBR_BKSIZE_4K: 517 rdc_desc.page_size = SIZE_4KB; 518 break; 519 case RBR_BKSIZE_8K: 520 rdc_desc.page_size = SIZE_8KB; 521 break; 522 case RBR_BKSIZE_16K: 523 rdc_desc.page_size = SIZE_16KB; 524 break; 525 case RBR_BKSIZE_32K: 526 rdc_desc.page_size = SIZE_32KB; 527 break; 528 } 529 530 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 531 rdc_desc.valid0 = 1; 532 533 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 534 rdc_desc.valid1 = 1; 535 536 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 537 rdc_desc.valid2 = 1; 538 539 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 540 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 541 542 rdc_desc.rcr_len = rcr_p->comp_size; 543 rdc_desc.rcr_addr = rcr_p->rcr_addr; 544 545 cfgb_p = &(rcr_p->rcr_cfgb); 546 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 547 /* For now, disable this timeout in a guest domain. */ 548 if (isLDOMguest(nxgep)) { 549 rdc_desc.rcr_timeout = 0; 550 rdc_desc.rcr_timeout_enable = 0; 551 } else { 552 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 553 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 554 } 555 556 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 557 "rbr_len qlen %d pagesize code %d rcr_len %d", 558 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 559 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 560 "size 0 %d size 1 %d size 2 %d", 561 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 562 rbr_p->npi_pkt_buf_size2)); 563 564 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 565 if (rs != NPI_SUCCESS) { 566 return (NXGE_ERROR | rs); 567 } 568 569 /* 570 * Enable the timeout and threshold. 571 */ 572 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 573 rdc_desc.rcr_threshold); 574 if (rs != NPI_SUCCESS) { 575 return (NXGE_ERROR | rs); 576 } 577 578 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 579 rdc_desc.rcr_timeout); 580 if (rs != NPI_SUCCESS) { 581 return (NXGE_ERROR | rs); 582 } 583 584 /* Enable the DMA */ 585 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 586 if (rs != NPI_SUCCESS) { 587 return (NXGE_ERROR | rs); 588 } 589 590 /* Kick the DMA engine. 
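 * Roughly speaking, the kick hands ownership of the rbb_max posted
 * buffer blocks to the hardware; until it is written the ring still
 * looks empty to the DMA, which is why the empty bit is cleared next.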
*/ 591 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 592 /* Clear the rbr empty bit */ 593 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 594 595 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 596 597 return (NXGE_OK); 598 } 599 600 nxge_status_t 601 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 602 { 603 npi_handle_t handle; 604 npi_status_t rs = NPI_SUCCESS; 605 606 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 607 handle = NXGE_DEV_NPI_HANDLE(nxgep); 608 609 /* disable the DMA */ 610 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 611 if (rs != NPI_SUCCESS) { 612 NXGE_DEBUG_MSG((nxgep, RX_CTL, 613 "<== nxge_disable_rxdma_channel:failed (0x%x)", 614 rs)); 615 return (NXGE_ERROR | rs); 616 } 617 618 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 619 return (NXGE_OK); 620 } 621 622 nxge_status_t 623 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 624 { 625 npi_handle_t handle; 626 nxge_status_t status = NXGE_OK; 627 628 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 629 "<== nxge_init_rxdma_channel_rcrflush")); 630 631 handle = NXGE_DEV_NPI_HANDLE(nxgep); 632 npi_rxdma_rdc_rcr_flush(handle, channel); 633 634 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 635 "<== nxge_init_rxdma_channel_rcrflsh")); 636 return (status); 637 638 } 639 640 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 641 642 #define TO_LEFT -1 643 #define TO_RIGHT 1 644 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 645 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 646 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 647 #define NO_HINT 0xffffffff 648 649 /*ARGSUSED*/ 650 nxge_status_t 651 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 652 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 653 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 654 { 655 int bufsize; 656 uint64_t pktbuf_pp; 657 uint64_t dvma_addr; 658 rxring_info_t *ring_info; 659 int base_side, end_side; 660 int r_index, l_index, anchor_index; 661 int found, search_done; 662 uint32_t offset, chunk_size, block_size, page_size_mask; 663 uint32_t chunk_index, block_index, total_index; 664 int max_iterations, iteration; 665 rxbuf_index_info_t *bufinfo; 666 667 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 668 669 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 670 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 671 pkt_buf_addr_pp, 672 pktbufsz_type)); 673 #if defined(__i386) 674 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 675 #else 676 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 677 #endif 678 679 switch (pktbufsz_type) { 680 case 0: 681 bufsize = rbr_p->pkt_buf_size0; 682 break; 683 case 1: 684 bufsize = rbr_p->pkt_buf_size1; 685 break; 686 case 2: 687 bufsize = rbr_p->pkt_buf_size2; 688 break; 689 case RCR_SINGLE_BLOCK: 690 bufsize = 0; 691 anchor_index = 0; 692 break; 693 default: 694 return (NXGE_ERROR); 695 } 696 697 if (rbr_p->num_blocks == 1) { 698 anchor_index = 0; 699 ring_info = rbr_p->ring_info; 700 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 701 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 702 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 703 "buf_pp $%p btype %d anchor_index %d " 704 "bufinfo $%p", 705 pkt_buf_addr_pp, 706 pktbufsz_type, 707 anchor_index, 708 bufinfo)); 709 710 goto found_index; 711 } 712 713 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 714 "==> nxge_rxbuf_pp_to_vp: " 715 "buf_pp $%p btype %d anchor_index %d", 716 pkt_buf_addr_pp, 717 pktbufsz_type, 718 anchor_index)); 719 720 ring_info = rbr_p->ring_info; 721 found = B_FALSE; 722 bufinfo = (rxbuf_index_info_t 
    *)ring_info->buffer;
    iteration = 0;
    max_iterations = ring_info->max_iterations;

    /*
     * First check if this block has been seen recently.
     * This is indicated by a hint which is initialized when the
     * first buffer of the block is seen.  The hint is reset when
     * the last buffer of the block has been processed.
     * As three block sizes are supported, three hints are kept.
     * The idea behind the hints is that once the hardware uses a
     * block for a buffer of that size, it will use it exclusively
     * for that size and will use it until it is exhausted.  It is
     * assumed that there would be a single block in use for a given
     * buffer size at any given time.
     */
    if (ring_info->hint[pktbufsz_type] != NO_HINT) {
        anchor_index = ring_info->hint[pktbufsz_type];
        dvma_addr = bufinfo[anchor_index].dvma_addr;
        chunk_size = bufinfo[anchor_index].buf_size;
        if ((pktbuf_pp >= dvma_addr) &&
            (pktbuf_pp < (dvma_addr + chunk_size))) {
            found = B_TRUE;
            /*
             * Check if this is the last buffer in the block.
             * If so, then reset the hint for this size.
             */
            if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
                ring_info->hint[pktbufsz_type] = NO_HINT;
        }
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (!found)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));

        /*
         * This is the first buffer of the block of this size.
         * Need to search the whole information array.
         * The search is a binary search over the array, which
         * assumes that the information is already sorted in
         * increasing order:
         * info[0] < info[1] < info[2] ....
< info[n-1] 772 * where n is the size of the information array 773 */ 774 r_index = rbr_p->num_blocks - 1; 775 l_index = 0; 776 search_done = B_FALSE; 777 anchor_index = MID_INDEX(r_index, l_index); 778 while (search_done == B_FALSE) { 779 if ((r_index == l_index) || 780 (iteration >= max_iterations)) 781 search_done = B_TRUE; 782 end_side = TO_RIGHT; /* to the right */ 783 base_side = TO_LEFT; /* to the left */ 784 /* read the DVMA address information and sort it */ 785 dvma_addr = bufinfo[anchor_index].dvma_addr; 786 chunk_size = bufinfo[anchor_index].buf_size; 787 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 788 "==> nxge_rxbuf_pp_to_vp: (searching)" 789 "buf_pp $%p btype %d " 790 "anchor_index %d chunk_size %d dvmaaddr $%p", 791 pkt_buf_addr_pp, 792 pktbufsz_type, 793 anchor_index, 794 chunk_size, 795 dvma_addr)); 796 797 if (pktbuf_pp >= dvma_addr) 798 base_side = TO_RIGHT; /* to the right */ 799 if (pktbuf_pp < (dvma_addr + chunk_size)) 800 end_side = TO_LEFT; /* to the left */ 801 802 switch (base_side + end_side) { 803 case IN_MIDDLE: 804 /* found */ 805 found = B_TRUE; 806 search_done = B_TRUE; 807 if ((pktbuf_pp + bufsize) < 808 (dvma_addr + chunk_size)) 809 ring_info->hint[pktbufsz_type] = 810 bufinfo[anchor_index].buf_index; 811 break; 812 case BOTH_RIGHT: 813 /* not found: go to the right */ 814 l_index = anchor_index + 1; 815 anchor_index = MID_INDEX(r_index, l_index); 816 break; 817 818 case BOTH_LEFT: 819 /* not found: go to the left */ 820 r_index = anchor_index - 1; 821 anchor_index = MID_INDEX(r_index, l_index); 822 break; 823 default: /* should not come here */ 824 return (NXGE_ERROR); 825 } 826 iteration++; 827 } 828 829 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 830 "==> nxge_rxbuf_pp_to_vp: (search done)" 831 "buf_pp $%p btype %d anchor_index %d", 832 pkt_buf_addr_pp, 833 pktbufsz_type, 834 anchor_index)); 835 } 836 837 if (found == B_FALSE) { 838 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 839 "==> nxge_rxbuf_pp_to_vp: (search failed)" 840 "buf_pp $%p btype %d anchor_index %d", 841 pkt_buf_addr_pp, 842 pktbufsz_type, 843 anchor_index)); 844 return (NXGE_ERROR); 845 } 846 847 found_index: 848 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 849 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 850 "buf_pp $%p btype %d bufsize %d anchor_index %d", 851 pkt_buf_addr_pp, 852 pktbufsz_type, 853 bufsize, 854 anchor_index)); 855 856 /* index of the first block in this chunk */ 857 chunk_index = bufinfo[anchor_index].start_index; 858 dvma_addr = bufinfo[anchor_index].dvma_addr; 859 page_size_mask = ring_info->block_size_mask; 860 861 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 862 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 863 "buf_pp $%p btype %d bufsize %d " 864 "anchor_index %d chunk_index %d dvma $%p", 865 pkt_buf_addr_pp, 866 pktbufsz_type, 867 bufsize, 868 anchor_index, 869 chunk_index, 870 dvma_addr)); 871 872 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 873 block_size = rbr_p->block_size; /* System block(page) size */ 874 875 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 876 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 877 "buf_pp $%p btype %d bufsize %d " 878 "anchor_index %d chunk_index %d dvma $%p " 879 "offset %d block_size %d", 880 pkt_buf_addr_pp, 881 pktbufsz_type, 882 bufsize, 883 anchor_index, 884 chunk_index, 885 dvma_addr, 886 offset, 887 block_size)); 888 889 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 890 891 block_index = (offset / block_size); /* index within chunk */ 892 total_index = chunk_index + block_index; 893 894 895 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 896 "==> nxge_rxbuf_pp_to_vp: " 897 
"total_index %d dvma_addr $%p " 898 "offset %d block_size %d " 899 "block_index %d ", 900 total_index, dvma_addr, 901 offset, block_size, 902 block_index)); 903 #if defined(__i386) 904 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 905 (uint32_t)offset); 906 #else 907 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 908 (uint64_t)offset); 909 #endif 910 911 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 912 "==> nxge_rxbuf_pp_to_vp: " 913 "total_index %d dvma_addr $%p " 914 "offset %d block_size %d " 915 "block_index %d " 916 "*pkt_buf_addr_p $%p", 917 total_index, dvma_addr, 918 offset, block_size, 919 block_index, 920 *pkt_buf_addr_p)); 921 922 923 *msg_index = total_index; 924 *bufoffset = (offset & page_size_mask); 925 926 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 927 "==> nxge_rxbuf_pp_to_vp: get msg index: " 928 "msg_index %d bufoffset_index %d", 929 *msg_index, 930 *bufoffset)); 931 932 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 933 934 return (NXGE_OK); 935 } 936 937 /* 938 * used by quick sort (qsort) function 939 * to perform comparison 940 */ 941 static int 942 nxge_sort_compare(const void *p1, const void *p2) 943 { 944 945 rxbuf_index_info_t *a, *b; 946 947 a = (rxbuf_index_info_t *)p1; 948 b = (rxbuf_index_info_t *)p2; 949 950 if (a->dvma_addr > b->dvma_addr) 951 return (1); 952 if (a->dvma_addr < b->dvma_addr) 953 return (-1); 954 return (0); 955 } 956 957 958 959 /* 960 * grabbed this sort implementation from common/syscall/avl.c 961 * 962 */ 963 /* 964 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 965 * v = Ptr to array/vector of objs 966 * n = # objs in the array 967 * s = size of each obj (must be multiples of a word size) 968 * f = ptr to function to compare two objs 969 * returns (-1 = less than, 0 = equal, 1 = greater than 970 */ 971 void 972 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 973 { 974 int g, i, j, ii; 975 unsigned int *p1, *p2; 976 unsigned int tmp; 977 978 /* No work to do */ 979 if (v == NULL || n <= 1) 980 return; 981 /* Sanity check on arguments */ 982 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 983 ASSERT(s > 0); 984 985 for (g = n / 2; g > 0; g /= 2) { 986 for (i = g; i < n; i++) { 987 for (j = i - g; j >= 0 && 988 (*f)(v + j * s, v + (j + g) * s) == 1; 989 j -= g) { 990 p1 = (unsigned *)(v + j * s); 991 p2 = (unsigned *)(v + (j + g) * s); 992 for (ii = 0; ii < s / 4; ii++) { 993 tmp = *p1; 994 *p1++ = *p2; 995 *p2++ = tmp; 996 } 997 } 998 } 999 } 1000 } 1001 1002 /* 1003 * Initialize data structures required for rxdma 1004 * buffer dvma->vmem address lookup 1005 */ 1006 /*ARGSUSED*/ 1007 static nxge_status_t 1008 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1009 { 1010 1011 int index; 1012 rxring_info_t *ring_info; 1013 int max_iteration = 0, max_index = 0; 1014 1015 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1016 1017 ring_info = rbrp->ring_info; 1018 ring_info->hint[0] = NO_HINT; 1019 ring_info->hint[1] = NO_HINT; 1020 ring_info->hint[2] = NO_HINT; 1021 max_index = rbrp->num_blocks; 1022 1023 /* read the DVMA address information and sort it */ 1024 /* do init of the information array */ 1025 1026 1027 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1028 " nxge_rxbuf_index_info_init Sort ptrs")); 1029 1030 /* sort the array */ 1031 nxge_ksort((void *)ring_info->buffer, max_index, 1032 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1033 1034 1035 1036 for (index = 0; index < max_index; index++) { 1037 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1038 " 
nxge_rxbuf_index_info_init: sorted chunk %d " 1039 " ioaddr $%p kaddr $%p size %x", 1040 index, ring_info->buffer[index].dvma_addr, 1041 ring_info->buffer[index].kaddr, 1042 ring_info->buffer[index].buf_size)); 1043 } 1044 1045 max_iteration = 0; 1046 while (max_index >= (1ULL << max_iteration)) 1047 max_iteration++; 1048 ring_info->max_iterations = max_iteration + 1; 1049 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1050 " nxge_rxbuf_index_info_init Find max iter %d", 1051 ring_info->max_iterations)); 1052 1053 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1054 return (NXGE_OK); 1055 } 1056 1057 /* ARGSUSED */ 1058 void 1059 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1060 { 1061 #ifdef NXGE_DEBUG 1062 1063 uint32_t bptr; 1064 uint64_t pp; 1065 1066 bptr = entry_p->bits.hdw.pkt_buf_addr; 1067 1068 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1069 "\trcr entry $%p " 1070 "\trcr entry 0x%0llx " 1071 "\trcr entry 0x%08x " 1072 "\trcr entry 0x%08x " 1073 "\tvalue 0x%0llx\n" 1074 "\tmulti = %d\n" 1075 "\tpkt_type = 0x%x\n" 1076 "\tzero_copy = %d\n" 1077 "\tnoport = %d\n" 1078 "\tpromis = %d\n" 1079 "\terror = 0x%04x\n" 1080 "\tdcf_err = 0x%01x\n" 1081 "\tl2_len = %d\n" 1082 "\tpktbufsize = %d\n" 1083 "\tpkt_buf_addr = $%p\n" 1084 "\tpkt_buf_addr (<< 6) = $%p\n", 1085 entry_p, 1086 *(int64_t *)entry_p, 1087 *(int32_t *)entry_p, 1088 *(int32_t *)((char *)entry_p + 32), 1089 entry_p->value, 1090 entry_p->bits.hdw.multi, 1091 entry_p->bits.hdw.pkt_type, 1092 entry_p->bits.hdw.zero_copy, 1093 entry_p->bits.hdw.noport, 1094 entry_p->bits.hdw.promis, 1095 entry_p->bits.hdw.error, 1096 entry_p->bits.hdw.dcf_err, 1097 entry_p->bits.hdw.l2_len, 1098 entry_p->bits.hdw.pktbufsz, 1099 bptr, 1100 entry_p->bits.ldw.pkt_buf_addr)); 1101 1102 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1103 RCR_PKT_BUF_ADDR_SHIFT; 1104 1105 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1106 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1107 #endif 1108 } 1109 1110 void 1111 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1112 { 1113 npi_handle_t handle; 1114 rbr_stat_t rbr_stat; 1115 addr44_t hd_addr; 1116 addr44_t tail_addr; 1117 uint16_t qlen; 1118 1119 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1120 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1121 1122 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1123 1124 /* RBR head */ 1125 hd_addr.addr = 0; 1126 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1127 #if defined(__i386) 1128 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1129 (void *)(uint32_t)hd_addr.addr); 1130 #else 1131 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1132 (void *)hd_addr.addr); 1133 #endif 1134 1135 /* RBR stats */ 1136 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1137 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1138 1139 /* RCR tail */ 1140 tail_addr.addr = 0; 1141 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1142 #if defined(__i386) 1143 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1144 (void *)(uint32_t)tail_addr.addr); 1145 #else 1146 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1147 (void *)tail_addr.addr); 1148 #endif 1149 1150 /* RCR qlen */ 1151 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1152 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1153 1154 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1155 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1156 } 1157 1158 void 1159 nxge_rxdma_stop(p_nxge_t nxgep) 1160 { 1161 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1162 1163 (void) 
nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    (void) nxge_rx_mac_disable(nxgep);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
}

void
nxge_rxdma_stop_reinit(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));

    (void) nxge_rxdma_stop(nxgep);
    (void) nxge_uninit_rxdma_channels(nxgep);
    (void) nxge_init_rxdma_channels(nxgep);

#ifndef AXIS_DEBUG_LB
    (void) nxge_xcvr_init(nxgep);
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
#endif
    (void) nxge_rx_mac_enable(nxgep);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
}

nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    nxge_status_t status;
    npi_status_t rs = NPI_SUCCESS;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_hw_mode: mode %d", enable));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_mode: not initialized"));
        return (NXGE_ERROR);
    }

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_mode: "
            "NULL ring pointer(s)"));
        return (NXGE_ERROR);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_hw_mode: no channels"));
        return (NXGE_ERROR);
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
            if (ring) {
                if (enable) {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (enable)", rdc));
                    rs = npi_rxdma_cfg_rdc_enable
                        (handle, rdc);
                } else {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (disable)", rdc));
                    rs = npi_rxdma_cfg_rdc_disable
                        (handle, rdc);
                }
            }
        }
    }

    status = ((rs == NPI_SUCCESS) ?
NXGE_OK : NXGE_ERROR | rs); 1241 1242 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1243 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1244 1245 return (status); 1246 } 1247 1248 void 1249 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1250 { 1251 npi_handle_t handle; 1252 1253 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1254 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1255 1256 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1257 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1258 1259 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1260 } 1261 1262 void 1263 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1264 { 1265 npi_handle_t handle; 1266 1267 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1268 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1269 1270 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1271 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1272 1273 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1274 } 1275 1276 void 1277 nxge_hw_start_rx(p_nxge_t nxgep) 1278 { 1279 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1280 1281 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1282 (void) nxge_rx_mac_enable(nxgep); 1283 1284 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1285 } 1286 1287 /*ARGSUSED*/ 1288 void 1289 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1290 { 1291 nxge_grp_set_t *set = &nxgep->rx_set; 1292 int rdc; 1293 1294 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1295 1296 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1297 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1298 "<== nxge_tx_port_fatal_err_recover: " 1299 "NULL ring pointer(s)")); 1300 return; 1301 } 1302 1303 if (set->owned.map == 0) { 1304 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1305 "nxge_rxdma_regs_dump_channels: no channels")); 1306 return; 1307 } 1308 1309 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1310 if ((1 << rdc) & set->owned.map) { 1311 rx_rbr_ring_t *ring = 1312 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1313 if (ring) { 1314 nxge_rxdma_hw_stop(nxgep, rdc); 1315 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1316 "==> nxge_fixup_rxdma_rings: " 1317 "channel %d ring $%px", 1318 rdc, ring)); 1319 (void) nxge_rxdma_fixup_channel 1320 (nxgep, rdc, rdc); 1321 } 1322 } 1323 } 1324 1325 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1326 } 1327 1328 void 1329 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1330 { 1331 int i; 1332 1333 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1334 i = nxge_rxdma_get_ring_index(nxgep, channel); 1335 if (i < 0) { 1336 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1337 "<== nxge_rxdma_fix_channel: no entry found")); 1338 return; 1339 } 1340 1341 nxge_rxdma_fixup_channel(nxgep, channel, i); 1342 1343 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 1344 } 1345 1346 void 1347 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1348 { 1349 int ndmas; 1350 p_rx_rbr_rings_t rx_rbr_rings; 1351 p_rx_rbr_ring_t *rbr_rings; 1352 p_rx_rcr_rings_t rx_rcr_rings; 1353 p_rx_rcr_ring_t *rcr_rings; 1354 p_rx_mbox_areas_t rx_mbox_areas_p; 1355 p_rx_mbox_t *rx_mbox_p; 1356 p_nxge_dma_pool_t dma_buf_poolp; 1357 p_nxge_dma_pool_t dma_cntl_poolp; 1358 p_rx_rbr_ring_t rbrp; 1359 p_rx_rcr_ring_t rcrp; 1360 p_rx_mbox_t mboxp; 1361 p_nxge_dma_common_t dmap; 1362 nxge_status_t status = NXGE_OK; 1363 1364 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 1365 1366 (void) nxge_rxdma_stop_channel(nxgep, channel); 1367 1368 dma_buf_poolp = nxgep->rx_buf_pool_p; 1369 
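    /*
     * The remainder of this routine re-validates the RX DMA pools,
     * resets the RBR/RCR software indices, zeroes the RCR descriptor
     * area and then restarts the channel with
     * nxge_rxdma_start_channel(); the channel itself was stopped
     * just above.
     */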
dma_cntl_poolp = nxgep->rx_cntl_pool_p;

    if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_rxdma_fixup_channel: buf not allocated"));
        return;
    }

    ndmas = dma_buf_poolp->ndmas;
    if (!ndmas) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_rxdma_fixup_channel: no dma allocated"));
        return;
    }

    rx_rbr_rings = nxgep->rx_rbr_rings;
    rx_rcr_rings = nxgep->rx_rcr_rings;
    rbr_rings = rx_rbr_rings->rbr_rings;
    rcr_rings = rx_rcr_rings->rcr_rings;
    rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
    rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

    /* Reinitialize the receive block and completion rings */
    rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
    rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
    mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

    rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
    rbrp->rbr_rd_index = 0;
    rcrp->comp_rd_index = 0;
    rcrp->comp_wt_index = 0;

    dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
    bzero((caddr_t)dmap->kaddrp, dmap->alength);

    status = nxge_rxdma_start_channel(nxgep, channel,
        rbrp, rcrp, mboxp);
    if (status != NXGE_OK) {
        goto nxge_rxdma_fixup_channel_fail;
    }

nxge_rxdma_fixup_channel_fail:
    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

/* ARGSUSED */
int
nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
{
    return (channel);
}

p_rx_rbr_ring_t
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    nxge_channel_t rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_get_rbr_ring: channel %d", channel));

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_rxdma_get_rbr_ring: "
            "NULL ring pointer(s)"));
        return (NULL);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_get_rbr_ring: no channels"));
        return (NULL);
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            if (ring) {
                if (channel == ring->rdc) {
                    NXGE_DEBUG_MSG((nxgep, RX_CTL,
                        "==> nxge_rxdma_get_rbr_ring: "
                        "channel %d ring $%p", rdc, ring));
                    return (ring);
                }
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_get_rbr_ring: not found"));

    return (NULL);
}

p_rx_rcr_ring_t
nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    nxge_channel_t rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_get_rcr_ring: channel %d", channel));

    if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_rxdma_get_rcr_ring: "
            "NULL ring pointer(s)"));
        return (NULL);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_get_rcr_ring: no channels"));
        return (NULL);
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 <<
rdc) & set->owned.map) { 1495 rx_rcr_ring_t *ring = 1496 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1497 if (ring) { 1498 if (channel == ring->rdc) { 1499 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1500 "==> nxge_rxdma_get_rcr_ring: " 1501 "channel %d ring $%p", rdc, ring)); 1502 return (ring); 1503 } 1504 } 1505 } 1506 } 1507 1508 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1509 "<== nxge_rxdma_get_rcr_ring: not found")); 1510 1511 return (NULL); 1512 } 1513 1514 /* 1515 * Static functions start here. 1516 */ 1517 static p_rx_msg_t 1518 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1519 { 1520 p_rx_msg_t nxge_mp = NULL; 1521 p_nxge_dma_common_t dmamsg_p; 1522 uchar_t *buffer; 1523 1524 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1525 if (nxge_mp == NULL) { 1526 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1527 "Allocation of a rx msg failed.")); 1528 goto nxge_allocb_exit; 1529 } 1530 1531 nxge_mp->use_buf_pool = B_FALSE; 1532 if (dmabuf_p) { 1533 nxge_mp->use_buf_pool = B_TRUE; 1534 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1535 *dmamsg_p = *dmabuf_p; 1536 dmamsg_p->nblocks = 1; 1537 dmamsg_p->block_size = size; 1538 dmamsg_p->alength = size; 1539 buffer = (uchar_t *)dmabuf_p->kaddrp; 1540 1541 dmabuf_p->kaddrp = (void *) 1542 ((char *)dmabuf_p->kaddrp + size); 1543 dmabuf_p->ioaddr_pp = (void *) 1544 ((char *)dmabuf_p->ioaddr_pp + size); 1545 dmabuf_p->alength -= size; 1546 dmabuf_p->offset += size; 1547 dmabuf_p->dma_cookie.dmac_laddress += size; 1548 dmabuf_p->dma_cookie.dmac_size -= size; 1549 1550 } else { 1551 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1552 if (buffer == NULL) { 1553 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1554 "Allocation of a receive page failed.")); 1555 goto nxge_allocb_fail1; 1556 } 1557 } 1558 1559 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1560 if (nxge_mp->rx_mblk_p == NULL) { 1561 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1562 goto nxge_allocb_fail2; 1563 } 1564 1565 nxge_mp->buffer = buffer; 1566 nxge_mp->block_size = size; 1567 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1568 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1569 nxge_mp->ref_cnt = 1; 1570 nxge_mp->free = B_TRUE; 1571 nxge_mp->rx_use_bcopy = B_FALSE; 1572 1573 atomic_inc_32(&nxge_mblks_pending); 1574 1575 goto nxge_allocb_exit; 1576 1577 nxge_allocb_fail2: 1578 if (!nxge_mp->use_buf_pool) { 1579 KMEM_FREE(buffer, size); 1580 } 1581 1582 nxge_allocb_fail1: 1583 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1584 nxge_mp = NULL; 1585 1586 nxge_allocb_exit: 1587 return (nxge_mp); 1588 } 1589 1590 p_mblk_t 1591 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1592 { 1593 p_mblk_t mp; 1594 1595 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1596 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1597 "offset = 0x%08X " 1598 "size = 0x%08X", 1599 nxge_mp, offset, size)); 1600 1601 mp = desballoc(&nxge_mp->buffer[offset], size, 1602 0, &nxge_mp->freeb); 1603 if (mp == NULL) { 1604 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1605 goto nxge_dupb_exit; 1606 } 1607 atomic_inc_32(&nxge_mp->ref_cnt); 1608 1609 1610 nxge_dupb_exit: 1611 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1612 nxge_mp)); 1613 return (mp); 1614 } 1615 1616 p_mblk_t 1617 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1618 { 1619 p_mblk_t mp; 1620 uchar_t *dp; 1621 1622 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1623 if (mp == NULL) { 1624 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1625 goto nxge_dupb_bcopy_exit; 1626 } 1627 dp = mp->b_rptr = 
mp->b_rptr + NXGE_RXBUF_EXTRA; 1628 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1629 mp->b_wptr = dp + size; 1630 1631 nxge_dupb_bcopy_exit: 1632 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1633 nxge_mp)); 1634 return (mp); 1635 } 1636 1637 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1638 p_rx_msg_t rx_msg_p); 1639 1640 void 1641 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1642 { 1643 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1644 1645 /* Reuse this buffer */ 1646 rx_msg_p->free = B_FALSE; 1647 rx_msg_p->cur_usage_cnt = 0; 1648 rx_msg_p->max_usage_cnt = 0; 1649 rx_msg_p->pkt_buf_size = 0; 1650 1651 if (rx_rbr_p->rbr_use_bcopy) { 1652 rx_msg_p->rx_use_bcopy = B_FALSE; 1653 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1654 } 1655 1656 /* 1657 * Get the rbr header pointer and its offset index. 1658 */ 1659 MUTEX_ENTER(&rx_rbr_p->post_lock); 1660 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1661 rx_rbr_p->rbr_wrap_mask); 1662 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1663 MUTEX_EXIT(&rx_rbr_p->post_lock); 1664 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1665 rx_rbr_p->rdc, 1); 1666 1667 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1668 "<== nxge_post_page (channel %d post_next_index %d)", 1669 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1670 1671 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1672 } 1673 1674 void 1675 nxge_freeb(p_rx_msg_t rx_msg_p) 1676 { 1677 size_t size; 1678 uchar_t *buffer = NULL; 1679 int ref_cnt; 1680 boolean_t free_state = B_FALSE; 1681 1682 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1683 1684 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1685 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1686 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1687 rx_msg_p, nxge_mblks_pending)); 1688 1689 /* 1690 * First we need to get the free state, then 1691 * atomic decrement the reference count to prevent 1692 * the race condition with the interrupt thread that 1693 * is processing a loaned up buffer block. 1694 */ 1695 free_state = rx_msg_p->free; 1696 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1697 if (!ref_cnt) { 1698 atomic_dec_32(&nxge_mblks_pending); 1699 buffer = rx_msg_p->buffer; 1700 size = rx_msg_p->block_size; 1701 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1702 "will free: rx_msg_p = $%p (block pending %d)", 1703 rx_msg_p, nxge_mblks_pending)); 1704 1705 if (!rx_msg_p->use_buf_pool) { 1706 KMEM_FREE(buffer, size); 1707 } 1708 1709 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1710 1711 if (ring) { 1712 /* 1713 * Decrement the receive buffer ring's reference 1714 * count, too. 1715 */ 1716 atomic_dec_32(&ring->rbr_ref_cnt); 1717 1718 /* 1719 * Free the receive buffer ring, if 1720 * 1. all the receive buffers have been freed 1721 * 2. and we are in the proper state (that is, 1722 * we are not UNMAPPING). 1723 */ 1724 if (ring->rbr_ref_cnt == 0 && 1725 ring->rbr_state == RBR_UNMAPPED) { 1726 /* 1727 * Free receive data buffers, 1728 * buffer index information 1729 * (rxring_info) and 1730 * the message block ring. 
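             * (Each loaned-up mblk holds a reference on its
             * rx_msg_t, and every rx_msg_t holds one on the ring,
             * so this teardown only runs once the last outstanding
             * buffer comes back after the ring has been unmapped.)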
1731 */ 1732 NXGE_DEBUG_MSG((NULL, RX_CTL, 1733 "nxge_freeb:rx_msg_p = $%p " 1734 "(block pending %d) free buffers", 1735 rx_msg_p, nxge_mblks_pending)); 1736 nxge_rxdma_databuf_free(ring); 1737 if (ring->ring_info) { 1738 KMEM_FREE(ring->ring_info, 1739 sizeof (rxring_info_t)); 1740 } 1741 1742 if (ring->rx_msg_ring) { 1743 KMEM_FREE(ring->rx_msg_ring, 1744 ring->tnblocks * 1745 sizeof (p_rx_msg_t)); 1746 } 1747 KMEM_FREE(ring, sizeof (*ring)); 1748 } 1749 } 1750 return; 1751 } 1752 1753 /* 1754 * Repost buffer. 1755 */ 1756 if (free_state && (ref_cnt == 1) && ring) { 1757 NXGE_DEBUG_MSG((NULL, RX_CTL, 1758 "nxge_freeb: post page $%p:", rx_msg_p)); 1759 if (ring->rbr_state == RBR_POSTING) 1760 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1761 } 1762 1763 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1764 } 1765 1766 uint_t 1767 nxge_rx_intr(void *arg1, void *arg2) 1768 { 1769 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1770 p_nxge_t nxgep = (p_nxge_t)arg2; 1771 p_nxge_ldg_t ldgp; 1772 uint8_t channel; 1773 npi_handle_t handle; 1774 rx_dma_ctl_stat_t cs; 1775 1776 #ifdef NXGE_DEBUG 1777 rxdma_cfig1_t cfg; 1778 #endif 1779 uint_t serviced = DDI_INTR_UNCLAIMED; 1780 1781 if (ldvp == NULL) { 1782 NXGE_DEBUG_MSG((NULL, INT_CTL, 1783 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1784 nxgep, ldvp)); 1785 1786 return (DDI_INTR_CLAIMED); 1787 } 1788 1789 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1790 nxgep = ldvp->nxgep; 1791 } 1792 1793 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1794 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1795 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1796 "<== nxge_rx_intr: interface not started or intialized")); 1797 return (DDI_INTR_CLAIMED); 1798 } 1799 1800 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1801 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1802 nxgep, ldvp)); 1803 1804 /* 1805 * This interrupt handler is for a specific 1806 * receive dma channel. 1807 */ 1808 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1809 /* 1810 * Get the control and status for this channel. 1811 */ 1812 channel = ldvp->channel; 1813 ldgp = ldvp->ldgp; 1814 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1815 1816 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1817 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1818 channel, 1819 cs.value, 1820 cs.bits.hdw.rcrto, 1821 cs.bits.hdw.rcrthres)); 1822 1823 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1824 serviced = DDI_INTR_CLAIMED; 1825 1826 /* error events. */ 1827 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1828 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1829 } 1830 1831 nxge_intr_exit: 1832 /* 1833 * Enable the mailbox update interrupt if we want 1834 * to use mailbox. We probably don't need to use 1835 * mailbox as it only saves us one pio read. 1836 * Also write 1 to rcrthres and rcrto to clear 1837 * these two edge triggered bits. 1838 */ 1839 1840 cs.value &= RX_DMA_CTL_STAT_WR1C; 1841 cs.bits.hdw.mex = 1; 1842 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1843 cs.value); 1844 1845 /* 1846 * Rearm this logical group if this is a single device 1847 * group. 
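     * (Writing LDGIMGN with arm set and the group's timer value is what
     * re-enables interrupts for this logical device group; a guest
     * domain cannot touch that register directly, which is why the
     * isLDOMguest() case goes through nxge_hio_ldgimgn() instead.)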
1848 */ 1849 if (ldgp->nldvs == 1) { 1850 ldgimgm_t mgm; 1851 mgm.value = 0; 1852 mgm.bits.ldw.arm = 1; 1853 mgm.bits.ldw.timer = ldgp->ldg_timer; 1854 if (isLDOMguest(nxgep)) { 1855 nxge_hio_ldgimgn(nxgep, ldgp); 1856 } else { 1857 NXGE_REG_WR64(handle, 1858 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1859 mgm.value); 1860 } 1861 } 1862 1863 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1864 serviced)); 1865 return (serviced); 1866 } 1867 1868 /* 1869 * Process the packets received in the specified logical device 1870 * and pass up a chain of message blocks to the upper layer. 1871 */ 1872 static void 1873 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 1874 { 1875 p_mblk_t mp; 1876 p_rx_rcr_ring_t rcrp; 1877 1878 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1879 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 1880 if (rcrp->poll_flag) { 1881 /* It is in the poll mode */ 1882 return; 1883 } 1884 1885 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 1886 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1887 "<== nxge_rx_pkts_vring: no mp")); 1888 return; 1889 } 1890 1891 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1892 mp)); 1893 1894 #ifdef NXGE_DEBUG 1895 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1896 "==> nxge_rx_pkts_vring:calling mac_rx " 1897 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1898 "mac_handle $%p", 1899 mp->b_wptr - mp->b_rptr, 1900 mp, mp->b_cont, mp->b_next, 1901 rcrp, rcrp->rcr_mac_handle)); 1902 1903 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1904 "==> nxge_rx_pkts_vring: dump packets " 1905 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1906 mp, 1907 mp->b_rptr, 1908 mp->b_wptr, 1909 nxge_dump_packet((char *)mp->b_rptr, 1910 mp->b_wptr - mp->b_rptr))); 1911 if (mp->b_cont) { 1912 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1913 "==> nxge_rx_pkts_vring: dump b_cont packets " 1914 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1915 mp->b_cont, 1916 mp->b_cont->b_rptr, 1917 mp->b_cont->b_wptr, 1918 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1919 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1920 } 1921 if (mp->b_next) { 1922 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1923 "==> nxge_rx_pkts_vring: dump next packets " 1924 "(b_rptr $%p): %s", 1925 mp->b_next->b_rptr, 1926 nxge_dump_packet((char *)mp->b_next->b_rptr, 1927 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1928 } 1929 #endif 1930 1931 if (!isLDOMguest(nxgep)) 1932 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1933 #if defined(sun4v) 1934 else { /* isLDOMguest(nxgep) */ 1935 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1936 nxgep->nxge_hw_p->hio; 1937 nx_vio_fp_t *vio = &nhd->hio.vio; 1938 1939 if (vio->cb.vio_net_rx_cb) { 1940 (*vio->cb.vio_net_rx_cb) 1941 (nxgep->hio_vr->vhp, mp); 1942 } 1943 } 1944 #endif 1945 } 1946 1947 1948 /* 1949 * This routine is the main packet receive processing function. 1950 * It gets the packet type, error code, and buffer related 1951 * information from the receive completion entry. 1952 * How many completion entries to process is based on the number of packets 1953 * queued by the hardware, a hardware maintained tail pointer 1954 * and a configurable receive packet count. 1955 * 1956 * A chain of message blocks will be created as result of processing 1957 * the completion entries. This chain of message blocks will be returned and 1958 * a hardware control status register will be updated with the number of 1959 * packets were removed from the hardware queue. 
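 *
 * Frame assembly in the processing loop is keyed off the "multi" flag
 * and the mp_cont pointer returned by nxge_receive_packet():
 *	!multi && !mp_cont	frame fits one buffer, linked via b_next;
 *	multi && !mp_cont	first buffer of a multi-buffer frame;
 *	multi && mp_cont	middle buffer, chained via b_cont;
 *	!multi && mp_cont	last buffer, completing the frame.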
1960 * 1961 */ 1962 static mblk_t * 1963 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 1964 int bytes_to_pickup) 1965 { 1966 npi_handle_t handle; 1967 uint8_t channel; 1968 uint32_t comp_rd_index; 1969 p_rcr_entry_t rcr_desc_rd_head_p; 1970 p_rcr_entry_t rcr_desc_rd_head_pp; 1971 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1972 uint16_t qlen, nrcr_read, npkt_read; 1973 uint32_t qlen_hw; 1974 boolean_t multi; 1975 rcrcfig_b_t rcr_cfg_b; 1976 int totallen = 0; 1977 #if defined(_BIG_ENDIAN) 1978 npi_status_t rs = NPI_SUCCESS; 1979 #endif 1980 1981 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 1982 "channel %d", rcr_p->rdc)); 1983 1984 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1985 return (NULL); 1986 } 1987 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1988 channel = rcr_p->rdc; 1989 1990 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1991 "==> nxge_rx_pkts: START: rcr channel %d " 1992 "head_p $%p head_pp $%p index %d ", 1993 channel, rcr_p->rcr_desc_rd_head_p, 1994 rcr_p->rcr_desc_rd_head_pp, 1995 rcr_p->comp_rd_index)); 1996 1997 1998 #if !defined(_BIG_ENDIAN) 1999 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2000 #else 2001 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2002 if (rs != NPI_SUCCESS) { 2003 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2004 "channel %d, get qlen failed 0x%08x", 2005 channel, rs)); 2006 return (NULL); 2007 } 2008 #endif 2009 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2010 "qlen %d", channel, qlen)); 2011 2012 2013 2014 if (!qlen) { 2015 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2016 "==> nxge_rx_pkts:rcr channel %d " 2017 "qlen %d (no pkts)", channel, qlen)); 2018 2019 return (NULL); 2020 } 2021 2022 comp_rd_index = rcr_p->comp_rd_index; 2023 2024 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2025 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2026 nrcr_read = npkt_read = 0; 2027 2028 /* 2029 * Number of packets queued 2030 * (The jumbo or multi packet will be counted as only one 2031 * packets and it may take up more than one completion entry). 2032 */ 2033 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2034 qlen : nxge_max_rx_pkts; 2035 head_mp = NULL; 2036 tail_mp = &head_mp; 2037 nmp = mp_cont = NULL; 2038 multi = B_FALSE; 2039 2040 while (qlen_hw) { 2041 2042 #ifdef NXGE_DEBUG 2043 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2044 #endif 2045 /* 2046 * Process one completion ring entry. 
2047 */ 2048 nxge_receive_packet(nxgep, 2049 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2050 2051 /* 2052 * message chaining modes 2053 */ 2054 if (nmp) { 2055 nmp->b_next = NULL; 2056 if (!multi && !mp_cont) { /* frame fits a partition */ 2057 *tail_mp = nmp; 2058 tail_mp = &nmp->b_next; 2059 totallen += MBLKL(nmp); 2060 nmp = NULL; 2061 } else if (multi && !mp_cont) { /* first segment */ 2062 *tail_mp = nmp; 2063 tail_mp = &nmp->b_cont; 2064 totallen += MBLKL(nmp); 2065 } else if (multi && mp_cont) { /* mid of multi segs */ 2066 *tail_mp = mp_cont; 2067 tail_mp = &mp_cont->b_cont; 2068 totallen += MBLKL(mp_cont); 2069 } else if (!multi && mp_cont) { /* last segment */ 2070 *tail_mp = mp_cont; 2071 tail_mp = &nmp->b_next; 2072 totallen += MBLKL(mp_cont); 2073 nmp = NULL; 2074 } 2075 } 2076 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2077 "==> nxge_rx_pkts: loop: rcr channel %d " 2078 "before updating: multi %d " 2079 "nrcr_read %d " 2080 "npk read %d " 2081 "head_pp $%p index %d ", 2082 channel, 2083 multi, 2084 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2085 comp_rd_index)); 2086 2087 if (!multi) { 2088 qlen_hw--; 2089 npkt_read++; 2090 } 2091 2092 /* 2093 * Update the next read entry. 2094 */ 2095 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2096 rcr_p->comp_wrap_mask); 2097 2098 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2099 rcr_p->rcr_desc_first_p, 2100 rcr_p->rcr_desc_last_p); 2101 2102 nrcr_read++; 2103 2104 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2105 "<== nxge_rx_pkts: (SAM, process one packet) " 2106 "nrcr_read %d", 2107 nrcr_read)); 2108 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2109 "==> nxge_rx_pkts: loop: rcr channel %d " 2110 "multi %d " 2111 "nrcr_read %d " 2112 "npk read %d " 2113 "head_pp $%p index %d ", 2114 channel, 2115 multi, 2116 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2117 comp_rd_index)); 2118 2119 if ((bytes_to_pickup != -1) && 2120 (totallen >= bytes_to_pickup)) { 2121 break; 2122 } 2123 } 2124 2125 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2126 rcr_p->comp_rd_index = comp_rd_index; 2127 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2128 2129 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2130 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2131 rcr_p->intr_timeout = nxgep->intr_timeout; 2132 rcr_p->intr_threshold = nxgep->intr_threshold; 2133 rcr_cfg_b.value = 0x0ULL; 2134 if (rcr_p->intr_timeout) 2135 rcr_cfg_b.bits.ldw.entout = 1; 2136 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2137 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2138 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2139 channel, rcr_cfg_b.value); 2140 } 2141 2142 cs.bits.ldw.pktread = npkt_read; 2143 cs.bits.ldw.ptrread = nrcr_read; 2144 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2145 channel, cs.value); 2146 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2147 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2148 "head_pp $%p index %016llx ", 2149 channel, 2150 rcr_p->rcr_desc_rd_head_pp, 2151 rcr_p->comp_rd_index)); 2152 /* 2153 * Update RCR buffer pointer read and number of packets 2154 * read. 
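 * (pktread and ptrread were written back to RX_DMA_CTL_STAT just
 * above, letting the hardware release the completion entries
 * processed in this pass.)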
2155 */ 2156 2157 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2158 return (head_mp); 2159 } 2160 2161 void 2162 nxge_receive_packet(p_nxge_t nxgep, 2163 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2164 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2165 { 2166 p_mblk_t nmp = NULL; 2167 uint64_t multi; 2168 uint64_t dcf_err; 2169 uint8_t channel; 2170 2171 boolean_t first_entry = B_TRUE; 2172 boolean_t is_tcp_udp = B_FALSE; 2173 boolean_t buffer_free = B_FALSE; 2174 boolean_t error_send_up = B_FALSE; 2175 uint8_t error_type; 2176 uint16_t l2_len; 2177 uint16_t skip_len; 2178 uint8_t pktbufsz_type; 2179 uint64_t rcr_entry; 2180 uint64_t *pkt_buf_addr_pp; 2181 uint64_t *pkt_buf_addr_p; 2182 uint32_t buf_offset; 2183 uint32_t bsize; 2184 uint32_t error_disp_cnt; 2185 uint32_t msg_index; 2186 p_rx_rbr_ring_t rx_rbr_p; 2187 p_rx_msg_t *rx_msg_ring_p; 2188 p_rx_msg_t rx_msg_p; 2189 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2190 nxge_status_t status = NXGE_OK; 2191 boolean_t is_valid = B_FALSE; 2192 p_nxge_rx_ring_stats_t rdc_stats; 2193 uint32_t bytes_read; 2194 uint64_t pkt_type; 2195 uint64_t frag; 2196 boolean_t pkt_too_long_err = B_FALSE; 2197 #ifdef NXGE_DEBUG 2198 int dump_len; 2199 #endif 2200 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2201 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2202 2203 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2204 2205 multi = (rcr_entry & RCR_MULTI_MASK); 2206 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2207 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2208 2209 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2210 frag = (rcr_entry & RCR_FRAG_MASK); 2211 2212 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2213 2214 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2215 RCR_PKTBUFSZ_SHIFT); 2216 #if defined(__i386) 2217 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2218 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2219 #else 2220 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2221 RCR_PKT_BUF_ADDR_SHIFT); 2222 #endif 2223 2224 channel = rcr_p->rdc; 2225 2226 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2227 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2228 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2229 "error_type 0x%x pkt_type 0x%x " 2230 "pktbufsz_type %d ", 2231 rcr_desc_rd_head_p, 2232 rcr_entry, pkt_buf_addr_pp, l2_len, 2233 multi, 2234 error_type, 2235 pkt_type, 2236 pktbufsz_type)); 2237 2238 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2239 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2240 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2241 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2242 rcr_entry, pkt_buf_addr_pp, l2_len, 2243 multi, 2244 error_type, 2245 pkt_type)); 2246 2247 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2248 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2249 "full pkt_buf_addr_pp $%p l2_len %d", 2250 rcr_entry, pkt_buf_addr_pp, l2_len)); 2251 2252 /* get the stats ptr */ 2253 rdc_stats = rcr_p->rdc_stats; 2254 2255 if (!l2_len) { 2256 2257 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2258 "<== nxge_receive_packet: failed: l2 length is 0.")); 2259 return; 2260 } 2261 2262 /* 2263 * Sofware workaround for BMAC hardware limitation that allows 2264 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2265 * instead of 0x2400 for jumbo. 2266 */ 2267 if (l2_len > nxgep->mac.maxframesize) { 2268 pkt_too_long_err = B_TRUE; 2269 } 2270 2271 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
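 * Subtract ETHERFCSL so l2_len covers only the payload that is
 * passed up the stack.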
*/ 2272 l2_len -= ETHERFCSL; 2273 2274 /* shift 6 bits to get the full io address */ 2275 #if defined(__i386) 2276 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2277 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2278 #else 2279 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2280 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2281 #endif 2282 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2283 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2284 "full pkt_buf_addr_pp $%p l2_len %d", 2285 rcr_entry, pkt_buf_addr_pp, l2_len)); 2286 2287 rx_rbr_p = rcr_p->rx_rbr_p; 2288 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2289 2290 if (first_entry) { 2291 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2292 RXDMA_HDR_SIZE_DEFAULT); 2293 2294 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2295 "==> nxge_receive_packet: first entry 0x%016llx " 2296 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2297 rcr_entry, pkt_buf_addr_pp, l2_len, 2298 hdr_size)); 2299 } 2300 2301 MUTEX_ENTER(&rcr_p->lock); 2302 MUTEX_ENTER(&rx_rbr_p->lock); 2303 2304 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2305 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2306 "full pkt_buf_addr_pp $%p l2_len %d", 2307 rcr_entry, pkt_buf_addr_pp, l2_len)); 2308 2309 /* 2310 * Packet buffer address in the completion entry points 2311 * to the starting buffer address (offset 0). 2312 * Use the starting buffer address to locate the corresponding 2313 * kernel address. 2314 */ 2315 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2316 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2317 &buf_offset, 2318 &msg_index); 2319 2320 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2321 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2322 "full pkt_buf_addr_pp $%p l2_len %d", 2323 rcr_entry, pkt_buf_addr_pp, l2_len)); 2324 2325 if (status != NXGE_OK) { 2326 MUTEX_EXIT(&rx_rbr_p->lock); 2327 MUTEX_EXIT(&rcr_p->lock); 2328 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2329 "<== nxge_receive_packet: found vaddr failed %d", 2330 status)); 2331 return; 2332 } 2333 2334 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2335 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2336 "full pkt_buf_addr_pp $%p l2_len %d", 2337 rcr_entry, pkt_buf_addr_pp, l2_len)); 2338 2339 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2340 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2341 "full pkt_buf_addr_pp $%p l2_len %d", 2342 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2343 2344 rx_msg_p = rx_msg_ring_p[msg_index]; 2345 2346 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2347 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2348 "full pkt_buf_addr_pp $%p l2_len %d", 2349 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2350 2351 switch (pktbufsz_type) { 2352 case RCR_PKTBUFSZ_0: 2353 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2354 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2355 "==> nxge_receive_packet: 0 buf %d", bsize)); 2356 break; 2357 case RCR_PKTBUFSZ_1: 2358 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2359 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2360 "==> nxge_receive_packet: 1 buf %d", bsize)); 2361 break; 2362 case RCR_PKTBUFSZ_2: 2363 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2364 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2365 "==> nxge_receive_packet: 2 buf %d", bsize)); 2366 break; 2367 case RCR_SINGLE_BLOCK: 2368 bsize = rx_msg_p->block_size; 2369 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2370 "==> nxge_receive_packet: single %d", bsize)); 2371 2372 break; 2373 default: 2374 MUTEX_EXIT(&rx_rbr_p->lock); 2375 MUTEX_EXIT(&rcr_p->lock); 2376 return; 2377 } 2378 2379 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2380 (buf_offset + sw_offset_bytes), 2381 (hdr_size + l2_len), 2382 DDI_DMA_SYNC_FORCPU); 2383 2384 
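	/*
	 * The usage count logic below decides whether this buffer block is
	 * loaned up to the stack (nxge_dupb) or copied (nxge_dupb_bcopy):
	 * on the first use of a block, rx_use_bcopy may be set when RBR
	 * consumption crosses the configured thresholds, and buffer_free is
	 * set once the last sub-block of the buffer has been handed out.
	 */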
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2385 "==> nxge_receive_packet: after first dump:usage count")); 2386 2387 if (rx_msg_p->cur_usage_cnt == 0) { 2388 if (rx_rbr_p->rbr_use_bcopy) { 2389 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2390 if (rx_rbr_p->rbr_consumed < 2391 rx_rbr_p->rbr_threshold_hi) { 2392 if (rx_rbr_p->rbr_threshold_lo == 0 || 2393 ((rx_rbr_p->rbr_consumed >= 2394 rx_rbr_p->rbr_threshold_lo) && 2395 (rx_rbr_p->rbr_bufsize_type >= 2396 pktbufsz_type))) { 2397 rx_msg_p->rx_use_bcopy = B_TRUE; 2398 } 2399 } else { 2400 rx_msg_p->rx_use_bcopy = B_TRUE; 2401 } 2402 } 2403 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2404 "==> nxge_receive_packet: buf %d (new block) ", 2405 bsize)); 2406 2407 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2408 rx_msg_p->pkt_buf_size = bsize; 2409 rx_msg_p->cur_usage_cnt = 1; 2410 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2411 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2412 "==> nxge_receive_packet: buf %d " 2413 "(single block) ", 2414 bsize)); 2415 /* 2416 * Buffer can be reused once the free function 2417 * is called. 2418 */ 2419 rx_msg_p->max_usage_cnt = 1; 2420 buffer_free = B_TRUE; 2421 } else { 2422 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2423 if (rx_msg_p->max_usage_cnt == 1) { 2424 buffer_free = B_TRUE; 2425 } 2426 } 2427 } else { 2428 rx_msg_p->cur_usage_cnt++; 2429 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2430 buffer_free = B_TRUE; 2431 } 2432 } 2433 2434 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2435 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2436 msg_index, l2_len, 2437 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2438 2439 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2440 rdc_stats->ierrors++; 2441 if (dcf_err) { 2442 rdc_stats->dcf_err++; 2443 #ifdef NXGE_DEBUG 2444 if (!rdc_stats->dcf_err) { 2445 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2446 "nxge_receive_packet: channel %d dcf_err rcr" 2447 " 0x%llx", channel, rcr_entry)); 2448 } 2449 #endif 2450 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2451 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2452 } else if (pkt_too_long_err) { 2453 rdc_stats->pkt_too_long_err++; 2454 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2455 " channel %d packet length [%d] > " 2456 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2457 nxgep->mac.maxframesize)); 2458 } else { 2459 /* Update error stats */ 2460 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2461 rdc_stats->errlog.compl_err_type = error_type; 2462 2463 switch (error_type) { 2464 /* 2465 * Do not send FMA ereport for RCR_L2_ERROR and 2466 * RCR_L4_CSUM_ERROR because most likely they indicate 2467 * back pressure rather than HW failures. 2468 */ 2469 case RCR_L2_ERROR: 2470 rdc_stats->l2_err++; 2471 if (rdc_stats->l2_err < 2472 error_disp_cnt) { 2473 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2474 " nxge_receive_packet:" 2475 " channel %d RCR L2_ERROR", 2476 channel)); 2477 } 2478 break; 2479 case RCR_L4_CSUM_ERROR: 2480 error_send_up = B_TRUE; 2481 rdc_stats->l4_cksum_err++; 2482 if (rdc_stats->l4_cksum_err < 2483 error_disp_cnt) { 2484 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2485 " nxge_receive_packet:" 2486 " channel %d" 2487 " RCR L4_CSUM_ERROR", channel)); 2488 } 2489 break; 2490 /* 2491 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2492 * RCR_ZCP_SOFT_ERROR because they reflect the same 2493 * FFLP and ZCP errors that have been reported by 2494 * nxge_fflp.c and nxge_zcp.c. 
2495 */ 2496 case RCR_FFLP_SOFT_ERROR: 2497 error_send_up = B_TRUE; 2498 rdc_stats->fflp_soft_err++; 2499 if (rdc_stats->fflp_soft_err < 2500 error_disp_cnt) { 2501 NXGE_ERROR_MSG((nxgep, 2502 NXGE_ERR_CTL, 2503 " nxge_receive_packet:" 2504 " channel %d" 2505 " RCR FFLP_SOFT_ERROR", channel)); 2506 } 2507 break; 2508 case RCR_ZCP_SOFT_ERROR: 2509 error_send_up = B_TRUE; 2510 rdc_stats->fflp_soft_err++; 2511 if (rdc_stats->zcp_soft_err < 2512 error_disp_cnt) 2513 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2514 " nxge_receive_packet: Channel %d" 2515 " RCR ZCP_SOFT_ERROR", channel)); 2516 break; 2517 default: 2518 rdc_stats->rcr_unknown_err++; 2519 if (rdc_stats->rcr_unknown_err 2520 < error_disp_cnt) { 2521 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2522 " nxge_receive_packet: Channel %d" 2523 " RCR entry 0x%llx error 0x%x", 2524 rcr_entry, channel, error_type)); 2525 } 2526 break; 2527 } 2528 } 2529 2530 /* 2531 * Update and repost buffer block if max usage 2532 * count is reached. 2533 */ 2534 if (error_send_up == B_FALSE) { 2535 atomic_inc_32(&rx_msg_p->ref_cnt); 2536 if (buffer_free == B_TRUE) { 2537 rx_msg_p->free = B_TRUE; 2538 } 2539 2540 MUTEX_EXIT(&rx_rbr_p->lock); 2541 MUTEX_EXIT(&rcr_p->lock); 2542 nxge_freeb(rx_msg_p); 2543 return; 2544 } 2545 } 2546 2547 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2548 "==> nxge_receive_packet: DMA sync second ")); 2549 2550 bytes_read = rcr_p->rcvd_pkt_bytes; 2551 skip_len = sw_offset_bytes + hdr_size; 2552 if (!rx_msg_p->rx_use_bcopy) { 2553 /* 2554 * For loaned up buffers, the driver reference count 2555 * will be incremented first and then the free state. 2556 */ 2557 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2558 if (first_entry) { 2559 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2560 if (l2_len < bsize - skip_len) { 2561 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2562 } else { 2563 nmp->b_wptr = &nmp->b_rptr[bsize 2564 - skip_len]; 2565 } 2566 } else { 2567 if (l2_len - bytes_read < bsize) { 2568 nmp->b_wptr = 2569 &nmp->b_rptr[l2_len - bytes_read]; 2570 } else { 2571 nmp->b_wptr = &nmp->b_rptr[bsize]; 2572 } 2573 } 2574 } 2575 } else { 2576 if (first_entry) { 2577 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2578 l2_len < bsize - skip_len ? 2579 l2_len : bsize - skip_len); 2580 } else { 2581 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2582 l2_len - bytes_read < bsize ? 2583 l2_len - bytes_read : bsize); 2584 } 2585 } 2586 if (nmp != NULL) { 2587 if (first_entry) { 2588 /* 2589 * Jumbo packets may be received with more than one 2590 * buffer, increment ipackets for the first entry only. 2591 */ 2592 rdc_stats->ipackets++; 2593 2594 /* Update ibytes for kstat. */ 2595 rdc_stats->ibytes += skip_len 2596 + l2_len < bsize ? l2_len : bsize; 2597 /* 2598 * Update the number of bytes read so far for the 2599 * current frame. 2600 */ 2601 bytes_read = nmp->b_wptr - nmp->b_rptr; 2602 } else { 2603 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 
2604 l2_len - bytes_read : bsize; 2605 bytes_read += nmp->b_wptr - nmp->b_rptr; 2606 } 2607 2608 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2609 "==> nxge_receive_packet after dupb: " 2610 "rbr consumed %d " 2611 "pktbufsz_type %d " 2612 "nmp $%p rptr $%p wptr $%p " 2613 "buf_offset %d bsize %d l2_len %d skip_len %d", 2614 rx_rbr_p->rbr_consumed, 2615 pktbufsz_type, 2616 nmp, nmp->b_rptr, nmp->b_wptr, 2617 buf_offset, bsize, l2_len, skip_len)); 2618 } else { 2619 cmn_err(CE_WARN, "!nxge_receive_packet: " 2620 "update stats (error)"); 2621 atomic_inc_32(&rx_msg_p->ref_cnt); 2622 if (buffer_free == B_TRUE) { 2623 rx_msg_p->free = B_TRUE; 2624 } 2625 MUTEX_EXIT(&rx_rbr_p->lock); 2626 MUTEX_EXIT(&rcr_p->lock); 2627 nxge_freeb(rx_msg_p); 2628 return; 2629 } 2630 2631 if (buffer_free == B_TRUE) { 2632 rx_msg_p->free = B_TRUE; 2633 } 2634 2635 is_valid = (nmp != NULL); 2636 2637 rcr_p->rcvd_pkt_bytes = bytes_read; 2638 2639 MUTEX_EXIT(&rx_rbr_p->lock); 2640 MUTEX_EXIT(&rcr_p->lock); 2641 2642 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2643 atomic_inc_32(&rx_msg_p->ref_cnt); 2644 nxge_freeb(rx_msg_p); 2645 } 2646 2647 if (is_valid) { 2648 nmp->b_cont = NULL; 2649 if (first_entry) { 2650 *mp = nmp; 2651 *mp_cont = NULL; 2652 } else { 2653 *mp_cont = nmp; 2654 } 2655 } 2656 2657 /* 2658 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2659 * If a packet is not fragmented and no error bit is set, then 2660 * L4 checksum is OK. 2661 */ 2662 2663 if (is_valid && !multi) { 2664 /* 2665 * Update hardware checksumming. 2666 * 2667 * If the checksum flag nxge_cksum_offload 2668 * is 1, TCP and UDP packets can be sent 2669 * up with a good checksum. If the checksum flag 2670 * is set to 0, checksum reporting will apply to 2671 * TCP packets only (workaround for a hardware bug). 2672 * If the checksum flag nxge_cksum_offload is 2673 * greater than 1, hardware checksum results are not 2674 * reported for either TCP or UDP packets. 2675 */ 2676 if (nxge_cksum_offload == 1) { 2677 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2678 pkt_type == RCR_PKT_IS_UDP) ? 2679 B_TRUE: B_FALSE); 2680 } else if (!nxge_cksum_offload) { 2681 /* TCP checksum only. */ 2682 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2683 B_TRUE: B_FALSE); 2684 } 2685 2686 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2687 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2688 is_valid, multi, is_tcp_udp, frag, error_type)); 2689 2690 if (is_tcp_udp && !frag && !error_type) { 2691 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2692 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2693 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2694 "==> nxge_receive_packet: Full tcp/udp cksum " 2695 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2696 "error %d", 2697 is_valid, multi, is_tcp_udp, frag, error_type)); 2698 } 2699 } 2700 2701 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2702 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2703 2704 *multi_p = (multi == RCR_MULTI_MASK); 2705 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2706 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2707 *multi_p, nmp, *mp, *mp_cont)); 2708 } 2709 2710 /*ARGSUSED*/ 2711 static nxge_status_t 2712 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2713 { 2714 p_nxge_rx_ring_stats_t rdc_stats; 2715 npi_handle_t handle; 2716 npi_status_t rs; 2717 boolean_t rxchan_fatal = B_FALSE; 2718 boolean_t rxport_fatal = B_FALSE; 2719 uint8_t portn; 2720 nxge_status_t status = NXGE_OK; 2721 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2722 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2723 2724 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2725 portn = nxgep->mac.portnum; 2726 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2727 2728 if (cs.bits.hdw.rbr_tmout) { 2729 rdc_stats->rx_rbr_tmout++; 2730 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2731 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2732 rxchan_fatal = B_TRUE; 2733 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2734 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2735 } 2736 if (cs.bits.hdw.rsp_cnt_err) { 2737 rdc_stats->rsp_cnt_err++; 2738 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2739 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2740 rxchan_fatal = B_TRUE; 2741 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2742 "==> nxge_rx_err_evnts(channel %d): " 2743 "rsp_cnt_err", channel)); 2744 } 2745 if (cs.bits.hdw.byte_en_bus) { 2746 rdc_stats->byte_en_bus++; 2747 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2748 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2749 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2750 "==> nxge_rx_err_evnts(channel %d): " 2751 "fatal error: byte_en_bus", channel)); 2752 rxchan_fatal = B_TRUE; 2753 } 2754 if (cs.bits.hdw.rsp_dat_err) { 2755 rdc_stats->rsp_dat_err++; 2756 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2757 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2758 rxchan_fatal = B_TRUE; 2759 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2760 "==> nxge_rx_err_evnts(channel %d): " 2761 "fatal error: rsp_dat_err", channel)); 2762 } 2763 if (cs.bits.hdw.rcr_ack_err) { 2764 rdc_stats->rcr_ack_err++; 2765 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2766 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2767 rxchan_fatal = B_TRUE; 2768 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2769 "==> nxge_rx_err_evnts(channel %d): " 2770 "fatal error: rcr_ack_err", channel)); 2771 } 2772 if (cs.bits.hdw.dc_fifo_err) { 2773 rdc_stats->dc_fifo_err++; 2774 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2775 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2776 /* This is not a fatal error! 
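 * It is treated as a port-level condition: rxport_fatal is set so
 * the IPP recovery path at the end of this routine can restore the
 * port.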
*/ 2777 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2778 "==> nxge_rx_err_evnts(channel %d): " 2779 "dc_fifo_err", channel)); 2780 rxport_fatal = B_TRUE; 2781 } 2782 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2783 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2784 &rdc_stats->errlog.pre_par, 2785 &rdc_stats->errlog.sha_par)) 2786 != NPI_SUCCESS) { 2787 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2788 "==> nxge_rx_err_evnts(channel %d): " 2789 "rcr_sha_par: get perr", channel)); 2790 return (NXGE_ERROR | rs); 2791 } 2792 if (cs.bits.hdw.rcr_sha_par) { 2793 rdc_stats->rcr_sha_par++; 2794 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2795 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2796 rxchan_fatal = B_TRUE; 2797 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2798 "==> nxge_rx_err_evnts(channel %d): " 2799 "fatal error: rcr_sha_par", channel)); 2800 } 2801 if (cs.bits.hdw.rbr_pre_par) { 2802 rdc_stats->rbr_pre_par++; 2803 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2804 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2805 rxchan_fatal = B_TRUE; 2806 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2807 "==> nxge_rx_err_evnts(channel %d): " 2808 "fatal error: rbr_pre_par", channel)); 2809 } 2810 } 2811 /* 2812 * The Following 4 status bits are for information, the system 2813 * is running fine. There is no need to send FMA ereports or 2814 * log messages. 2815 */ 2816 if (cs.bits.hdw.port_drop_pkt) { 2817 rdc_stats->port_drop_pkt++; 2818 } 2819 if (cs.bits.hdw.wred_drop) { 2820 rdc_stats->wred_drop++; 2821 } 2822 if (cs.bits.hdw.rbr_pre_empty) { 2823 rdc_stats->rbr_pre_empty++; 2824 } 2825 if (cs.bits.hdw.rcr_shadow_full) { 2826 rdc_stats->rcr_shadow_full++; 2827 } 2828 if (cs.bits.hdw.config_err) { 2829 rdc_stats->config_err++; 2830 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2831 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2832 rxchan_fatal = B_TRUE; 2833 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2834 "==> nxge_rx_err_evnts(channel %d): " 2835 "config error", channel)); 2836 } 2837 if (cs.bits.hdw.rcrincon) { 2838 rdc_stats->rcrincon++; 2839 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2840 NXGE_FM_EREPORT_RDMC_RCRINCON); 2841 rxchan_fatal = B_TRUE; 2842 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2843 "==> nxge_rx_err_evnts(channel %d): " 2844 "fatal error: rcrincon error", channel)); 2845 } 2846 if (cs.bits.hdw.rcrfull) { 2847 rdc_stats->rcrfull++; 2848 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2849 NXGE_FM_EREPORT_RDMC_RCRFULL); 2850 rxchan_fatal = B_TRUE; 2851 if (rdc_stats->rcrfull < error_disp_cnt) 2852 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2853 "==> nxge_rx_err_evnts(channel %d): " 2854 "fatal error: rcrfull error", channel)); 2855 } 2856 if (cs.bits.hdw.rbr_empty) { 2857 /* 2858 * This bit is for information, there is no need 2859 * send FMA ereport or log a message. 
2860 */ 2861 rdc_stats->rbr_empty++; 2862 } 2863 if (cs.bits.hdw.rbrfull) { 2864 rdc_stats->rbrfull++; 2865 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2866 NXGE_FM_EREPORT_RDMC_RBRFULL); 2867 rxchan_fatal = B_TRUE; 2868 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2869 "==> nxge_rx_err_evnts(channel %d): " 2870 "fatal error: rbr_full error", channel)); 2871 } 2872 if (cs.bits.hdw.rbrlogpage) { 2873 rdc_stats->rbrlogpage++; 2874 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2875 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2876 rxchan_fatal = B_TRUE; 2877 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2878 "==> nxge_rx_err_evnts(channel %d): " 2879 "fatal error: rbr logical page error", channel)); 2880 } 2881 if (cs.bits.hdw.cfiglogpage) { 2882 rdc_stats->cfiglogpage++; 2883 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2884 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2885 rxchan_fatal = B_TRUE; 2886 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2887 "==> nxge_rx_err_evnts(channel %d): " 2888 "fatal error: cfig logical page error", channel)); 2889 } 2890 2891 if (rxport_fatal) { 2892 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2893 " nxge_rx_err_evnts: fatal error on Port #%d\n", 2894 portn)); 2895 if (isLDOMguest(nxgep)) { 2896 status = NXGE_ERROR; 2897 } else { 2898 status = nxge_ipp_fatal_err_recover(nxgep); 2899 if (status == NXGE_OK) { 2900 FM_SERVICE_RESTORED(nxgep); 2901 } 2902 } 2903 } 2904 2905 if (rxchan_fatal) { 2906 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2907 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 2908 channel)); 2909 if (isLDOMguest(nxgep)) { 2910 status = NXGE_ERROR; 2911 } else { 2912 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2913 if (status == NXGE_OK) { 2914 FM_SERVICE_RESTORED(nxgep); 2915 } 2916 } 2917 } 2918 2919 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2920 2921 return (status); 2922 } 2923 2924 /* 2925 * nxge_rdc_hvio_setup 2926 * 2927 * This code appears to setup some Hypervisor variables. 2928 * 2929 * Arguments: 2930 * nxgep 2931 * channel 2932 * 2933 * Notes: 2934 * What does NIU_LP_WORKAROUND mean? 
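 * (Presumably the sun4v logical-page workaround: the base I/O
 * addresses and sizes saved here are the values the hypervisor
 * needs when the RDC's data and control logical pages are
 * configured later.)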
2935 * 2936 * NPI/NXGE function calls: 2937 * na 2938 * 2939 * Context: 2940 * Any domain 2941 */ 2942 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2943 static void 2944 nxge_rdc_hvio_setup( 2945 nxge_t *nxgep, int channel) 2946 { 2947 nxge_dma_common_t *dma_common; 2948 nxge_dma_common_t *dma_control; 2949 rx_rbr_ring_t *ring; 2950 2951 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 2952 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2953 2954 ring->hv_set = B_FALSE; 2955 2956 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 2957 dma_common->orig_ioaddr_pp; 2958 ring->hv_rx_buf_ioaddr_size = (uint64_t) 2959 dma_common->orig_alength; 2960 2961 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2962 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 2963 channel, ring->hv_rx_buf_base_ioaddr_pp, 2964 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 2965 dma_common->orig_alength, dma_common->orig_alength)); 2966 2967 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2968 2969 ring->hv_rx_cntl_base_ioaddr_pp = 2970 (uint64_t)dma_control->orig_ioaddr_pp; 2971 ring->hv_rx_cntl_ioaddr_size = 2972 (uint64_t)dma_control->orig_alength; 2973 2974 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2975 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 2976 channel, ring->hv_rx_cntl_base_ioaddr_pp, 2977 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 2978 dma_control->orig_alength, dma_control->orig_alength)); 2979 } 2980 #endif 2981 2982 /* 2983 * nxge_map_rxdma 2984 * 2985 * Map an RDC into our kernel space. 2986 * 2987 * Arguments: 2988 * nxgep 2989 * channel The channel to map. 2990 * 2991 * Notes: 2992 * 1. Allocate & initialise a memory pool, if necessary. 2993 * 2. Allocate however many receive buffers are required. 2994 * 3. Setup buffers, descriptors, and mailbox. 2995 * 2996 * NPI/NXGE function calls: 2997 * nxge_alloc_rx_mem_pool() 2998 * nxge_alloc_rbb() 2999 * nxge_map_rxdma_channel() 3000 * 3001 * Registers accessed: 3002 * 3003 * Context: 3004 * Any domain 3005 */ 3006 static nxge_status_t 3007 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3008 { 3009 nxge_dma_common_t **data; 3010 nxge_dma_common_t **control; 3011 rx_rbr_ring_t **rbr_ring; 3012 rx_rcr_ring_t **rcr_ring; 3013 rx_mbox_t **mailbox; 3014 uint32_t chunks; 3015 3016 nxge_status_t status; 3017 3018 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3019 3020 if (!nxgep->rx_buf_pool_p) { 3021 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3022 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3023 "<== nxge_map_rxdma: buf not allocated")); 3024 return (NXGE_ERROR); 3025 } 3026 } 3027 3028 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3029 return (NXGE_ERROR); 3030 3031 /* 3032 * Timeout should be set based on the system clock divider. 3033 * The following timeout value of 1 assumes that the 3034 * granularity (1000) is 3 microseconds running at 300MHz. 3035 */ 3036 3037 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 3038 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 3039 3040 /* 3041 * Map descriptors from the buffer polls for each dma channel. 3042 */ 3043 3044 /* 3045 * Set up and prepare buffer blocks, descriptors 3046 * and mailbox. 
3047 */ 3048 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3049 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3050 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3051 3052 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3053 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3054 3055 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3056 3057 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3058 chunks, control, rcr_ring, mailbox); 3059 if (status != NXGE_OK) { 3060 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3061 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3062 "returned 0x%x", 3063 channel, status)); 3064 return (status); 3065 } 3066 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3067 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3068 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3069 &nxgep->statsp->rdc_stats[channel]; 3070 3071 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3072 if (!isLDOMguest(nxgep)) 3073 nxge_rdc_hvio_setup(nxgep, channel); 3074 #endif 3075 3076 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3077 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3078 3079 return (status); 3080 } 3081 3082 static void 3083 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3084 { 3085 rx_rbr_ring_t *rbr_ring; 3086 rx_rcr_ring_t *rcr_ring; 3087 rx_mbox_t *mailbox; 3088 3089 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3090 3091 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3092 !nxgep->rx_mbox_areas_p) 3093 return; 3094 3095 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3096 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3097 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3098 3099 if (!rbr_ring || !rcr_ring || !mailbox) 3100 return; 3101 3102 (void) nxge_unmap_rxdma_channel( 3103 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3104 3105 nxge_free_rxb(nxgep, channel); 3106 3107 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3108 } 3109 3110 nxge_status_t 3111 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3112 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3113 uint32_t num_chunks, 3114 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3115 p_rx_mbox_t *rx_mbox_p) 3116 { 3117 int status = NXGE_OK; 3118 3119 /* 3120 * Set up and prepare buffer blocks, descriptors 3121 * and mailbox. 3122 */ 3123 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3124 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3125 /* 3126 * Receive buffer blocks 3127 */ 3128 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3129 dma_buf_p, rbr_p, num_chunks); 3130 if (status != NXGE_OK) { 3131 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3132 "==> nxge_map_rxdma_channel (channel %d): " 3133 "map buffer failed 0x%x", channel, status)); 3134 goto nxge_map_rxdma_channel_exit; 3135 } 3136 3137 /* 3138 * Receive block ring, completion ring and mailbox. 
3139 */ 3140 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3141 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3142 if (status != NXGE_OK) { 3143 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3144 "==> nxge_map_rxdma_channel (channel %d): " 3145 "map config failed 0x%x", channel, status)); 3146 goto nxge_map_rxdma_channel_fail2; 3147 } 3148 3149 goto nxge_map_rxdma_channel_exit; 3150 3151 nxge_map_rxdma_channel_fail3: 3152 /* Free rbr, rcr */ 3153 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3154 "==> nxge_map_rxdma_channel: free rbr/rcr " 3155 "(status 0x%x channel %d)", 3156 status, channel)); 3157 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3158 *rcr_p, *rx_mbox_p); 3159 3160 nxge_map_rxdma_channel_fail2: 3161 /* Free buffer blocks */ 3162 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3163 "==> nxge_map_rxdma_channel: free rx buffers" 3164 "(nxgep 0x%x status 0x%x channel %d)", 3165 nxgep, status, channel)); 3166 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3167 3168 status = NXGE_ERROR; 3169 3170 nxge_map_rxdma_channel_exit: 3171 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3172 "<== nxge_map_rxdma_channel: " 3173 "(nxgep 0x%x status 0x%x channel %d)", 3174 nxgep, status, channel)); 3175 3176 return (status); 3177 } 3178 3179 /*ARGSUSED*/ 3180 static void 3181 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3182 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3183 { 3184 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3185 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3186 3187 /* 3188 * unmap receive block ring, completion ring and mailbox. 3189 */ 3190 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3191 rcr_p, rx_mbox_p); 3192 3193 /* unmap buffer blocks */ 3194 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3195 3196 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3197 } 3198 3199 /*ARGSUSED*/ 3200 static nxge_status_t 3201 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3202 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3203 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3204 { 3205 p_rx_rbr_ring_t rbrp; 3206 p_rx_rcr_ring_t rcrp; 3207 p_rx_mbox_t mboxp; 3208 p_nxge_dma_common_t cntl_dmap; 3209 p_nxge_dma_common_t dmap; 3210 p_rx_msg_t *rx_msg_ring; 3211 p_rx_msg_t rx_msg_p; 3212 p_rbr_cfig_a_t rcfga_p; 3213 p_rbr_cfig_b_t rcfgb_p; 3214 p_rcrcfig_a_t cfga_p; 3215 p_rcrcfig_b_t cfgb_p; 3216 p_rxdma_cfig1_t cfig1_p; 3217 p_rxdma_cfig2_t cfig2_p; 3218 p_rbr_kick_t kick_p; 3219 uint32_t dmaaddrp; 3220 uint32_t *rbr_vaddrp; 3221 uint32_t bkaddr; 3222 nxge_status_t status = NXGE_OK; 3223 int i; 3224 uint32_t nxge_port_rcr_size; 3225 3226 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3227 "==> nxge_map_rxdma_channel_cfg_ring")); 3228 3229 cntl_dmap = *dma_cntl_p; 3230 3231 /* Map in the receive block ring */ 3232 rbrp = *rbr_p; 3233 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3234 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3235 /* 3236 * Zero out buffer block ring descriptors. 
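 * They are then filled in below: the RBR_CFIG_A image is loaded
 * with the ring's DMA base address and length (rbb_max), RBR_CFIG_B
 * with the three packet buffer sizes and the block size code, and
 * each ring slot with a buffer block address shifted by
 * RBR_BKADDR_SHIFT.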
3237 */ 3238 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3239 3240 rcfga_p = &(rbrp->rbr_cfga); 3241 rcfgb_p = &(rbrp->rbr_cfgb); 3242 kick_p = &(rbrp->rbr_kick); 3243 rcfga_p->value = 0; 3244 rcfgb_p->value = 0; 3245 kick_p->value = 0; 3246 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3247 rcfga_p->value = (rbrp->rbr_addr & 3248 (RBR_CFIG_A_STDADDR_MASK | 3249 RBR_CFIG_A_STDADDR_BASE_MASK)); 3250 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3251 3252 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3253 rcfgb_p->bits.ldw.vld0 = 1; 3254 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3255 rcfgb_p->bits.ldw.vld1 = 1; 3256 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3257 rcfgb_p->bits.ldw.vld2 = 1; 3258 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3259 3260 /* 3261 * For each buffer block, enter receive block address to the ring. 3262 */ 3263 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3264 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3265 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3266 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3267 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3268 3269 rx_msg_ring = rbrp->rx_msg_ring; 3270 for (i = 0; i < rbrp->tnblocks; i++) { 3271 rx_msg_p = rx_msg_ring[i]; 3272 rx_msg_p->nxgep = nxgep; 3273 rx_msg_p->rx_rbr_p = rbrp; 3274 bkaddr = (uint32_t) 3275 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3276 >> RBR_BKADDR_SHIFT)); 3277 rx_msg_p->free = B_FALSE; 3278 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3279 3280 *rbr_vaddrp++ = bkaddr; 3281 } 3282 3283 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3284 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3285 3286 rbrp->rbr_rd_index = 0; 3287 3288 rbrp->rbr_consumed = 0; 3289 rbrp->rbr_use_bcopy = B_TRUE; 3290 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3291 /* 3292 * Do bcopy on packets greater than bcopy size once 3293 * the lo threshold is reached. 3294 * This lo threshold should be less than the hi threshold. 3295 * 3296 * Do bcopy on every packet once the hi threshold is reached. 
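 *
 * For example, assuming NXGE_RX_BCOPY_SCALE is 8 and NXGE_RX_COPY_6
 * has the numeric value 6, a ring with rbb_max of 4096 gets a
 * threshold of 4096 * 6 / 8 = 3072 consumed buffers.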
3297 */ 3298 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3299 /* default it to use hi */ 3300 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3301 } 3302 3303 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3304 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3305 } 3306 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3307 3308 switch (nxge_rx_threshold_hi) { 3309 default: 3310 case NXGE_RX_COPY_NONE: 3311 /* Do not do bcopy at all */ 3312 rbrp->rbr_use_bcopy = B_FALSE; 3313 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3314 break; 3315 3316 case NXGE_RX_COPY_1: 3317 case NXGE_RX_COPY_2: 3318 case NXGE_RX_COPY_3: 3319 case NXGE_RX_COPY_4: 3320 case NXGE_RX_COPY_5: 3321 case NXGE_RX_COPY_6: 3322 case NXGE_RX_COPY_7: 3323 rbrp->rbr_threshold_hi = 3324 rbrp->rbb_max * 3325 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3326 break; 3327 3328 case NXGE_RX_COPY_ALL: 3329 rbrp->rbr_threshold_hi = 0; 3330 break; 3331 } 3332 3333 switch (nxge_rx_threshold_lo) { 3334 default: 3335 case NXGE_RX_COPY_NONE: 3336 /* Do not do bcopy at all */ 3337 if (rbrp->rbr_use_bcopy) { 3338 rbrp->rbr_use_bcopy = B_FALSE; 3339 } 3340 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3341 break; 3342 3343 case NXGE_RX_COPY_1: 3344 case NXGE_RX_COPY_2: 3345 case NXGE_RX_COPY_3: 3346 case NXGE_RX_COPY_4: 3347 case NXGE_RX_COPY_5: 3348 case NXGE_RX_COPY_6: 3349 case NXGE_RX_COPY_7: 3350 rbrp->rbr_threshold_lo = 3351 rbrp->rbb_max * 3352 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3353 break; 3354 3355 case NXGE_RX_COPY_ALL: 3356 rbrp->rbr_threshold_lo = 0; 3357 break; 3358 } 3359 3360 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3361 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3362 "rbb_max %d " 3363 "rbrp->rbr_bufsize_type %d " 3364 "rbb_threshold_hi %d " 3365 "rbb_threshold_lo %d", 3366 dma_channel, 3367 rbrp->rbb_max, 3368 rbrp->rbr_bufsize_type, 3369 rbrp->rbr_threshold_hi, 3370 rbrp->rbr_threshold_lo)); 3371 3372 rbrp->page_valid.value = 0; 3373 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3374 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3375 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3376 rbrp->page_hdl.value = 0; 3377 3378 rbrp->page_valid.bits.ldw.page0 = 1; 3379 rbrp->page_valid.bits.ldw.page1 = 1; 3380 3381 /* Map in the receive completion ring */ 3382 rcrp = (p_rx_rcr_ring_t) 3383 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3384 rcrp->rdc = dma_channel; 3385 3386 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3387 rcrp->comp_size = nxge_port_rcr_size; 3388 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3389 3390 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3391 3392 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3393 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3394 sizeof (rcr_entry_t)); 3395 rcrp->comp_rd_index = 0; 3396 rcrp->comp_wt_index = 0; 3397 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3398 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3399 #if defined(__i386) 3400 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3401 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3402 #else 3403 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3404 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3405 #endif 3406 3407 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3408 (nxge_port_rcr_size - 1); 3409 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3410 (nxge_port_rcr_size - 1); 3411 3412 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3413 "==> nxge_map_rxdma_channel_cfg_ring: " 3414 "channel %d " 3415 "rbr_vaddrp $%p " 3416 "rcr_desc_rd_head_p $%p " 3417 "rcr_desc_rd_head_pp 
$%p " 3418 "rcr_desc_rd_last_p $%p " 3419 "rcr_desc_rd_last_pp $%p ", 3420 dma_channel, 3421 rbr_vaddrp, 3422 rcrp->rcr_desc_rd_head_p, 3423 rcrp->rcr_desc_rd_head_pp, 3424 rcrp->rcr_desc_last_p, 3425 rcrp->rcr_desc_last_pp)); 3426 3427 /* 3428 * Zero out buffer block ring descriptors. 3429 */ 3430 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3431 rcrp->intr_timeout = nxgep->intr_timeout; 3432 rcrp->intr_threshold = nxgep->intr_threshold; 3433 rcrp->full_hdr_flag = B_FALSE; 3434 rcrp->sw_priv_hdr_len = 0; 3435 3436 cfga_p = &(rcrp->rcr_cfga); 3437 cfgb_p = &(rcrp->rcr_cfgb); 3438 cfga_p->value = 0; 3439 cfgb_p->value = 0; 3440 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3441 cfga_p->value = (rcrp->rcr_addr & 3442 (RCRCFIG_A_STADDR_MASK | 3443 RCRCFIG_A_STADDR_BASE_MASK)); 3444 3445 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3446 RCRCFIG_A_LEN_SHIF); 3447 3448 /* 3449 * Timeout should be set based on the system clock divider. 3450 * The following timeout value of 1 assumes that the 3451 * granularity (1000) is 3 microseconds running at 300MHz. 3452 */ 3453 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3454 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3455 cfgb_p->bits.ldw.entout = 1; 3456 3457 /* Map in the mailbox */ 3458 mboxp = (p_rx_mbox_t) 3459 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3460 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3461 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3462 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3463 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3464 cfig1_p->value = cfig2_p->value = 0; 3465 3466 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3467 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3468 "==> nxge_map_rxdma_channel_cfg_ring: " 3469 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3470 dma_channel, cfig1_p->value, cfig2_p->value, 3471 mboxp->mbox_addr)); 3472 3473 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3474 & 0xfff); 3475 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3476 3477 3478 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3479 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3480 RXDMA_CFIG2_MBADDR_L_MASK); 3481 3482 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3483 3484 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3485 "==> nxge_map_rxdma_channel_cfg_ring: " 3486 "channel %d damaddrp $%p " 3487 "cfg1 0x%016llx cfig2 0x%016llx", 3488 dma_channel, dmaaddrp, 3489 cfig1_p->value, cfig2_p->value)); 3490 3491 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3492 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3493 3494 rbrp->rx_rcr_p = rcrp; 3495 rcrp->rx_rbr_p = rbrp; 3496 *rcr_p = rcrp; 3497 *rx_mbox_p = mboxp; 3498 3499 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3500 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3501 3502 return (status); 3503 } 3504 3505 /*ARGSUSED*/ 3506 static void 3507 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3508 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3509 { 3510 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3511 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3512 rcr_p->rdc)); 3513 3514 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3515 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3516 3517 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3518 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3519 } 3520 3521 static nxge_status_t 3522 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3523 p_nxge_dma_common_t *dma_buf_p, 3524 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3525 { 3526 p_rx_rbr_ring_t rbrp; 3527 
p_nxge_dma_common_t dma_bufp, tmp_bufp; 3528 p_rx_msg_t *rx_msg_ring; 3529 p_rx_msg_t rx_msg_p; 3530 p_mblk_t mblk_p; 3531 3532 rxring_info_t *ring_info; 3533 nxge_status_t status = NXGE_OK; 3534 int i, j, index; 3535 uint32_t size, bsize, nblocks, nmsgs; 3536 3537 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3538 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3539 channel)); 3540 3541 dma_bufp = tmp_bufp = *dma_buf_p; 3542 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3543 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3544 "chunks bufp 0x%016llx", 3545 channel, num_chunks, dma_bufp)); 3546 3547 nmsgs = 0; 3548 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3549 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3550 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3551 "bufp 0x%016llx nblocks %d nmsgs %d", 3552 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3553 nmsgs += tmp_bufp->nblocks; 3554 } 3555 if (!nmsgs) { 3556 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3557 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3558 "no msg blocks", 3559 channel)); 3560 status = NXGE_ERROR; 3561 goto nxge_map_rxdma_channel_buf_ring_exit; 3562 } 3563 3564 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3565 3566 size = nmsgs * sizeof (p_rx_msg_t); 3567 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3568 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3569 KM_SLEEP); 3570 3571 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3572 (void *)nxgep->interrupt_cookie); 3573 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3574 (void *)nxgep->interrupt_cookie); 3575 rbrp->rdc = channel; 3576 rbrp->num_blocks = num_chunks; 3577 rbrp->tnblocks = nmsgs; 3578 rbrp->rbb_max = nmsgs; 3579 rbrp->rbr_max_size = nmsgs; 3580 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3581 3582 /* 3583 * Buffer sizes suggested by NIU architect. 3584 * 256, 512 and 2K. 3585 */ 3586 3587 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3588 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3589 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3590 3591 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3592 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3593 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3594 3595 rbrp->block_size = nxgep->rx_default_block_size; 3596 3597 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3598 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3599 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3600 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3601 } else { 3602 if (rbrp->block_size >= 0x2000) { 3603 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3604 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3605 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3606 } else { 3607 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3608 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3609 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3610 } 3611 } 3612 3613 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3614 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3615 "actual rbr max %d rbb_max %d nmsgs %d " 3616 "rbrp->block_size %d default_block_size %d " 3617 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3618 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3619 rbrp->block_size, nxgep->rx_default_block_size, 3620 nxge_rbr_size, nxge_rbr_spare_size)); 3621 3622 /* Map in buffers from the buffer pool. 
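 * Each chunk contributes one ring_info->buffer[] entry (its DVMA
 * and kernel addresses plus its starting block index), and every
 * block within a chunk gets an rx_msg_t allocated via nxge_allocb().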
*/ 3623 index = 0; 3624 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3625 bsize = dma_bufp->block_size; 3626 nblocks = dma_bufp->nblocks; 3627 #if defined(__i386) 3628 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3629 #else 3630 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3631 #endif 3632 ring_info->buffer[i].buf_index = i; 3633 ring_info->buffer[i].buf_size = dma_bufp->alength; 3634 ring_info->buffer[i].start_index = index; 3635 #if defined(__i386) 3636 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3637 #else 3638 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3639 #endif 3640 3641 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3642 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3643 "chunk %d" 3644 " nblocks %d chunk_size %x block_size 0x%x " 3645 "dma_bufp $%p", channel, i, 3646 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3647 dma_bufp)); 3648 3649 for (j = 0; j < nblocks; j++) { 3650 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3651 dma_bufp)) == NULL) { 3652 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3653 "allocb failed (index %d i %d j %d)", 3654 index, i, j)); 3655 goto nxge_map_rxdma_channel_buf_ring_fail1; 3656 } 3657 rx_msg_ring[index] = rx_msg_p; 3658 rx_msg_p->block_index = index; 3659 rx_msg_p->shifted_addr = (uint32_t) 3660 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3661 RBR_BKADDR_SHIFT)); 3662 3663 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3664 "index %d j %d rx_msg_p $%p mblk %p", 3665 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3666 3667 mblk_p = rx_msg_p->rx_mblk_p; 3668 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3669 3670 rbrp->rbr_ref_cnt++; 3671 index++; 3672 rx_msg_p->buf_dma.dma_channel = channel; 3673 } 3674 3675 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3676 if (dma_bufp->contig_alloc_type) { 3677 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3678 } 3679 3680 if (dma_bufp->kmem_alloc_type) { 3681 rbrp->rbr_alloc_type = KMEM_ALLOC; 3682 } 3683 3684 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3685 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3686 "chunk %d" 3687 " nblocks %d chunk_size %x block_size 0x%x " 3688 "dma_bufp $%p", 3689 channel, i, 3690 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3691 dma_bufp)); 3692 } 3693 if (i < rbrp->num_blocks) { 3694 goto nxge_map_rxdma_channel_buf_ring_fail1; 3695 } 3696 3697 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3698 "nxge_map_rxdma_channel_buf_ring: done buf init " 3699 "channel %d msg block entries %d", 3700 channel, index)); 3701 ring_info->block_size_mask = bsize - 1; 3702 rbrp->rx_msg_ring = rx_msg_ring; 3703 rbrp->dma_bufp = dma_buf_p; 3704 rbrp->ring_info = ring_info; 3705 3706 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3707 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3708 " nxge_map_rxdma_channel_buf_ring: " 3709 "channel %d done buf info init", channel)); 3710 3711 /* 3712 * Finally, permit nxge_freeb() to call nxge_post_page(). 
3713 */ 3714 rbrp->rbr_state = RBR_POSTING; 3715 3716 *rbr_p = rbrp; 3717 goto nxge_map_rxdma_channel_buf_ring_exit; 3718 3719 nxge_map_rxdma_channel_buf_ring_fail1: 3720 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3721 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3722 channel, status)); 3723 3724 index--; 3725 for (; index >= 0; index--) { 3726 rx_msg_p = rx_msg_ring[index]; 3727 if (rx_msg_p != NULL) { 3728 freeb(rx_msg_p->rx_mblk_p); 3729 rx_msg_ring[index] = NULL; 3730 } 3731 } 3732 nxge_map_rxdma_channel_buf_ring_fail: 3733 MUTEX_DESTROY(&rbrp->post_lock); 3734 MUTEX_DESTROY(&rbrp->lock); 3735 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3736 KMEM_FREE(rx_msg_ring, size); 3737 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3738 3739 status = NXGE_ERROR; 3740 3741 nxge_map_rxdma_channel_buf_ring_exit: 3742 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3743 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3744 3745 return (status); 3746 } 3747 3748 /*ARGSUSED*/ 3749 static void 3750 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3751 p_rx_rbr_ring_t rbr_p) 3752 { 3753 p_rx_msg_t *rx_msg_ring; 3754 p_rx_msg_t rx_msg_p; 3755 rxring_info_t *ring_info; 3756 int i; 3757 uint32_t size; 3758 #ifdef NXGE_DEBUG 3759 int num_chunks; 3760 #endif 3761 3762 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3763 "==> nxge_unmap_rxdma_channel_buf_ring")); 3764 if (rbr_p == NULL) { 3765 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3766 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3767 return; 3768 } 3769 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3770 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3771 rbr_p->rdc)); 3772 3773 rx_msg_ring = rbr_p->rx_msg_ring; 3774 ring_info = rbr_p->ring_info; 3775 3776 if (rx_msg_ring == NULL || ring_info == NULL) { 3777 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3778 "<== nxge_unmap_rxdma_channel_buf_ring: " 3779 "rx_msg_ring $%p ring_info $%p", 3780 rx_msg_p, ring_info)); 3781 return; 3782 } 3783 3784 #ifdef NXGE_DEBUG 3785 num_chunks = rbr_p->num_blocks; 3786 #endif 3787 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3788 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3789 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3790 "tnblocks %d (max %d) size ptrs %d ", 3791 rbr_p->rdc, num_chunks, 3792 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3793 3794 for (i = 0; i < rbr_p->tnblocks; i++) { 3795 rx_msg_p = rx_msg_ring[i]; 3796 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3797 " nxge_unmap_rxdma_channel_buf_ring: " 3798 "rx_msg_p $%p", 3799 rx_msg_p)); 3800 if (rx_msg_p != NULL) { 3801 freeb(rx_msg_p->rx_mblk_p); 3802 rx_msg_ring[i] = NULL; 3803 } 3804 } 3805 3806 /* 3807 * We no longer may use the mutex <post_lock>. By setting 3808 * <rbr_state> to anything but POSTING, we prevent 3809 * nxge_post_page() from accessing a dead mutex. 3810 */ 3811 rbr_p->rbr_state = RBR_UNMAPPING; 3812 MUTEX_DESTROY(&rbr_p->post_lock); 3813 3814 MUTEX_DESTROY(&rbr_p->lock); 3815 3816 if (rbr_p->rbr_ref_cnt == 0) { 3817 /* 3818 * This is the normal state of affairs. 3819 * Need to free the following buffers: 3820 * - data buffers 3821 * - rx_msg ring 3822 * - ring_info 3823 * - rbr ring 3824 */ 3825 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3826 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 3827 nxge_rxdma_databuf_free(rbr_p); 3828 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3829 KMEM_FREE(rx_msg_ring, size); 3830 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3831 } else { 3832 /* 3833 * Some of our buffers are still being used. 3834 * Therefore, tell nxge_freeb() this ring is 3835 * unmapped, so it may free <rbr_p> for us. 

/*
 * nxge_rxdma_hw_start_common
 *
 * Arguments:
 *	nxgep
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_init_fzc_rx_common();
 *	nxge_init_fzc_rxdma_port();
 *
 * Registers accessed:
 *
 * Context:
 *	Service domain
 */
static nxge_status_t
nxge_rxdma_hw_start_common(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));

	/*
	 * Load the sharable parameters by writing to the
	 * function zero control registers. These FZC registers
	 * should be initialized only once for the entire chip.
	 */
	(void) nxge_init_fzc_rx_common(nxgep);

	/*
	 * Initialize the RXDMA port specific FZC control configurations.
	 * These FZC registers pertain to each port.
	 */
	(void) nxge_init_fzc_rxdma_port(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));

	return (status);
}

static nxge_status_t
nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
{
	int			i, ndmas;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p = NULL;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_start: NULL ring pointers"));
		return (NXGE_ERROR);
	}
	ndmas = rx_rbr_rings->ndmas;
	if (ndmas == 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_start: no dma channel allocated"));
		return (NXGE_ERROR);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	if (rx_mbox_areas_p) {
		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
	}

	i = channel;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
	    ndmas, channel));
	status = nxge_rxdma_start_channel(nxgep, channel,
	    (p_rx_rbr_ring_t)rbr_rings[i],
	    (p_rx_rcr_ring_t)rcr_rings[i],
	    (p_rx_mbox_t)rx_mbox_p[i]);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_hw_start: start channel failed "
		    "(status 0x%x channel %d)", status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
	    rx_rbr_rings, rx_rcr_rings));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_rxdma_hw_start: (status 0x%x)", status));

	return (status);
}

static void
nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
{
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_stop: NULL ring pointers"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_stop(channel %d)",
	    channel));
	(void) nxge_rxdma_stop_channel(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
	    rx_rbr_rings, rx_rcr_rings));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
}

static nxge_status_t
nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	/* Reset RXDMA channel, but not if you're a guest. */
	if (!isLDOMguest(nxgep)) {
		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
		if (rs != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rxdma_start_channel: "
			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
			    channel, rs));
			return (NXGE_ERROR | rs);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: reset done: channel %d",
		    channel));
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (isLDOMguest(nxgep))
		(void) nxge_rdc_lp_conf(nxgep, channel);
#endif

	/*
	 * Initialize the RXDMA channel specific FZC control
	 * configurations. These FZC registers pertain to each
	 * RX channel (logical pages).
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rxdma_start_channel: "
			    "init fzc rxdma failed (0x%08x channel %d)",
			    status, channel));
			return (status);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: fzc done"));
	}

	/* Set up the interrupt event masks. */
	ent_mask.value = 0;
	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed "
		    "(0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: "
	    "event done: channel %d (mask 0x%016llx)",
	    channel, ent_mask.value));

	/* Initialize the receive DMA control and status register */
	cs.value = 0;
	cs.bits.hdw.mex = 1;
	cs.bits.hdw.rcrthres = 1;
	cs.bits.hdw.rcrto = 1;
	cs.bits.hdw.rbr_empty = 1;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma control register failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	/*
	 * Load RXDMA descriptors, buffers, mailbox,
	 * initialize the receive DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_rxdma_channel(nxgep,
	    channel, rbr_p, rcr_p, mbox_p);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_start_channel: "
		    " enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));

	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel. */
		status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_rxdma_start_channel: "
			    " nxge_hio_intr_add failed (0x%08x channel %d)",
			    status, channel));
		}
	}

	ent_mask.value = 0;
	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));

	return (NXGE_OK);
}
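
/*
 * Illustrative sketch (not built): the event-mask idiom used by
 * nxge_rxdma_start_channel() above.  During bring-up only the RBR-empty
 * event is masked; once the channel is enabled, the WRED and port-drop
 * events are masked as well, via the same npi_rxdma_event_mask(OP_SET)
 * call.  The helper name below is hypothetical and exists only to show
 * the call pattern.
 */
#if 0
static nxge_status_t
nxge_rxdma_apply_run_mask_example(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rx_dma_ent_msk_t	ent_mask;
	npi_status_t		rs;

	/* Mask (i.e. suppress) the events we do not want to service. */
	ent_mask.value = RX_DMA_ENT_MSK_WRED_DROP_MASK |
	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	return (NXGE_OK);
}
#endif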

static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	if (!isLDOMguest(nxgep)) {
		/*
		 * Stop RxMAC = A.9.2.6
		 */
		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: "
			    "Failed to disable RxMAC"));
		}

		/*
		 * Drain IPP Port = A.9.3.6
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/*
	 * Initialize the receive DMA control and status register
	 */
	cs.value = 0;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));

	/*
	 * Make sure channel is disabled.
	 */
	status = nxge_disable_rxdma_channel(nxgep, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " disable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Enable RxMAC = A.9.2.10
		 */
		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
		}
	}

	NXGE_DEBUG_MSG((nxgep,
	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

	return (NXGE_OK);
}
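
/*
 * Illustrative sketch (not built): nxge_rxdma_hw_stop() and
 * nxge_rxdma_hw_start() above are the per-channel quiesce/restart
 * primitives.  A caller that needs to bounce a single RDC would pair
 * them as shown; the wrapper name is hypothetical.
 */
#if 0
static nxge_status_t
nxge_rxdma_bounce_channel_example(p_nxge_t nxgep, int channel)
{
	/* Quiesce: mask events, clear control/status, disable the RDC. */
	nxge_rxdma_hw_stop(nxgep, channel);

	/* Restart: reset, program FZC, re-enable, restore event masks. */
	return (nxge_rxdma_hw_start(nxgep, channel));
}
#endif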

nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_rdc_sys_stats_t	statsp;
	rx_ctl_dat_fifo_stat_t	stat;
	uint32_t		zcp_err_status;
	uint32_t		ipp_err_status;
	nxge_status_t		status = NXGE_OK;
	npi_status_t		rs = NPI_SUCCESS;
	boolean_t		my_err = B_FALSE;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);

	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (stat.bits.ldw.id_mismatch) {
		statsp->id_mismatch++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
		    NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
		/* Global fatal error encountered */
	}

	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
		switch (nxgep->mac.portnum) {
		case 0:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 1:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 2:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 3:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		default:
			return (NXGE_ERROR);
		}
	}

	if (my_err) {
		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
		    zcp_err_status);
		if (status != NXGE_OK)
			return (status);
	}

	return (NXGE_OK);
}

static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
    uint32_t zcp_status)
{
	boolean_t		rxport_fatal = B_FALSE;
	p_nxge_rdc_sys_stats_t	statsp;
	nxge_status_t		status = NXGE_OK;
	uint8_t			portn;

	portn = nxgep->mac.portnum;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	if (ipp_status & (0x1 << portn)) {
		statsp->ipp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (zcp_status & (0x1 << portn)) {
		statsp->zcp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_handle_port_error: "
		    " fatal error on Port #%d\n",
		    portn));
		status = nxge_rx_port_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	return (status);
}
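
/*
 * Illustrative sketch (not built): the per-port switch in
 * nxge_rxdma_handle_sys_errors() can be read as a single bit test,
 * assuming FIFO_EOP_PORTn is simply (1 << n) -- the same encoding
 * nxge_rxdma_handle_port_errors() uses with (0x1 << portn).  That
 * encoding is an assumption here, not something this excerpt proves.
 */
#if 0
	{
		uint32_t port_bit = 1U << nxgep->mac.portnum;

		if ((stat.bits.ldw.zcp_eop_err & port_bit) ||
		    (stat.bits.ldw.ipp_eop_err & port_bit)) {
			my_err = B_TRUE;
			zcp_err_status = stat.bits.ldw.zcp_eop_err;
			ipp_err_status = stat.bits.ldw.ipp_eop_err;
		}
	}
#endif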

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	int			ring_idx;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the DMA channel and wait for the stop-done indication.
	 * If the stop-done bit is not set, report an error.
	 */

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];

	MUTEX_ENTER(&rcrp->lock);
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp =
	    (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* Buffer can be re-posted */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored",
	    channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));

	return (NXGE_OK);
fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));

	return (NXGE_ERROR | rs);
}
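
/*
 * Illustrative sketch (not built): nxge_rxdma_fatal_err_recover() takes
 * the channel locks in the order rcr->lock, rbr->lock, rbr->post_lock,
 * and releases them in the reverse order on both the success and the
 * failure paths.  New code that touches both rings should follow the
 * same order to avoid deadlock; the helper names are hypothetical.
 */
#if 0
static void
nxge_rxdma_recover_lock_example(p_rx_rcr_ring_t rcrp, p_rx_rbr_ring_t rbrp)
{
	MUTEX_ENTER(&rcrp->lock);
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);
}

static void
nxge_rxdma_recover_unlock_example(p_rx_rcr_ring_t rcrp, p_rx_rbr_ring_t rbrp)
{
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);
}
#endif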

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_status_t status = NXGE_OK;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));

	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DELAY(1000);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Recovering all RxDMA channels..."));

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			if (nxge_rxdma_fatal_err_recover(nxgep, rdc)
			    != NXGE_OK) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "Could not recover channel %d", rdc));
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));

	/* Reset IPP */
	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

	/* Reset RxMAC */
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

	/* Re-Initialize IPP */
	if (nxge_ipp_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));

	/* Re-enable RxMAC */
	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to enable RxMAC"));
		goto fail;
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status);
}

void
nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
{
	rx_dma_ctl_stat_t	cs;
	rx_ctl_dat_fifo_stat_t	cdfs;

	switch (err_id) {
	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
	case NXGE_FM_EREPORT_RDMC_RCRINCON:
	case NXGE_FM_EREPORT_RDMC_RCRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, &cs.value);
		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
			cs.bits.hdw.rcr_ack_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
			cs.bits.hdw.dc_fifo_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
			cs.bits.hdw.rcr_sha_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
			cs.bits.hdw.rbr_pre_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
			cs.bits.hdw.rbr_tmout = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
			cs.bits.hdw.rsp_cnt_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
			cs.bits.hdw.byte_en_bus = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
			cs.bits.hdw.rsp_dat_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
			cs.bits.hdw.config_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
			cs.bits.hdw.rcrincon = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
			cs.bits.hdw.rcrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
			cs.bits.hdw.rbrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
			cs.bits.hdw.rbrlogpage = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
			cs.bits.hdw.cfiglogpage = 1;
#if defined(__i386)
		cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
#else
		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
#endif
		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, cs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
		cdfs.value = 0;
		if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
#if defined(__i386)
		cmn_err(CE_NOTE,
		    "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
#else
		cmn_err(CE_NOTE,
		    "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
#endif
		NXGE_REG_WR64(nxgep->npi_handle,
		    RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
		break;
	case NXGE_FM_EREPORT_RDMC_RCR_ERR:
		break;
	}
}
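
/*
 * Illustrative sketch (not built): nxge_rxdma_inject_err() is meant to
 * be driven from a debug/test context.  Passing one of the
 * NXGE_FM_EREPORT_RDMC_* identifiers sets the corresponding bit in the
 * RX_DMA_CTL_STAT or RX_CTL_DAT_FIFO_STAT debug register so the normal
 * error-handling path can be exercised.  The wrapper below is
 * hypothetical and simply shows a call on channel 0.
 */
#if 0
static void
nxge_rxdma_inject_err_example(p_nxge_t nxgep)
{
	/* Pretend the RCR for channel 0 overflowed. */
	nxge_rxdma_inject_err(nxgep, NXGE_FM_EREPORT_RDMC_RCRFULL, 0);
}
#endif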

static void
nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
{
	rxring_info_t	*ring_info;
	int		index;
	uint32_t	chunk_size;
	uint64_t	kaddr;
	uint_t		num_blocks;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));

	if (rbr_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
		return;
	}

	if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: DDI"));
		return;
	}

	ring_info = rbr_p->ring_info;
	if (ring_info == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: NULL ring info"));
		return;
	}
	num_blocks = rbr_p->num_blocks;
	for (index = 0; index < num_blocks; index++) {
		kaddr = ring_info->buffer[index].kaddr;
		chunk_size = ring_info->buffer[index].buf_size;
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_rxdma_databuf_free: free chunk %d "
		    "kaddrp $%p chunk size %d",
		    index, kaddr, chunk_size));
		if (kaddr == 0)
			continue;
		nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
		ring_info->buffer[index].kaddr = 0;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
extern void contig_mem_free(void *, size_t);
#endif

void
nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
{
	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));

	if (kaddr == 0 || buf_size == 0) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_free_buf: invalid kaddr $%p size to free %d",
		    kaddr, buf_size));
		return;
	}

	switch (alloc_type) {
	case KMEM_ALLOC:
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_free_buf: freeing kmem $%p size %d",
		    kaddr, buf_size));
#if defined(__i386)
		KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
#else
		KMEM_FREE((void *)kaddr, buf_size);
#endif
		break;

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case CONTIG_MEM_ALLOC:
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
		    kaddr, buf_size));
		contig_mem_free((void *)kaddr, buf_size);
		break;
#endif

	default:
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_free_buf: unsupported alloc type %d",
		    alloc_type));
		return;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
}
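
/*
 * Illustrative sketch (not built): nxge_free_buf() must be called with
 * the same buf_alloc_type_t that was recorded when the chunk was
 * allocated, along with the kernel address and size kept in
 * ring_info->buffer[].  nxge_rxdma_databuf_free() above is the real
 * caller; this fragment only restates the pairing for one chunk.
 */
#if 0
	{
		uint64_t	kaddr = ring_info->buffer[0].kaddr;
		uint32_t	size = ring_info->buffer[0].buf_size;

		if (kaddr != 0) {
			nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, size);
			ring_info->buffer[0].kaddr = 0;
		}
	}
#endif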