1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/nxge/nxge_impl.h> 29 #include <sys/nxge/nxge_rxdma.h> 30 #include <sys/nxge/nxge_hio.h> 31 32 #if !defined(_BIG_ENDIAN) 33 #include <npi_rx_rd32.h> 34 #endif 35 #include <npi_rx_rd64.h> 36 #include <npi_rx_wr64.h> 37 38 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 39 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 40 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 41 (rdc + nxgep->pt_config.hw_config.start_rdc) 42 43 /* 44 * Globals: tunable parameters (/etc/system or adb) 45 * 46 */ 47 extern uint32_t nxge_rbr_size; 48 extern uint32_t nxge_rcr_size; 49 extern uint32_t nxge_rbr_spare_size; 50 51 extern uint32_t nxge_mblks_pending; 52 53 /* 54 * Tunable to reduce the amount of time spent in the 55 * ISR doing Rx Processing. 56 */ 57 extern uint32_t nxge_max_rx_pkts; 58 boolean_t nxge_jumbo_enable; 59 60 /* 61 * Tunables to manage the receive buffer blocks. 62 * 63 * nxge_rx_threshold_hi: copy all buffers. 64 * nxge_rx_bcopy_size_type: receive buffer block size type. 65 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
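 *
 * These tunables can be overridden at boot time from /etc/system in the
 * usual module:variable form, for example (illustrative only, values are
 * not validated here):
 *	set nxge:nxge_rx_threshold_hi = <value>
 *	set nxge:nxge_max_rx_pkts = <value>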
66 */ 67 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 68 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 69 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 70 71 extern uint32_t nxge_cksum_enable; 72 73 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 74 static void nxge_unmap_rxdma(p_nxge_t, int); 75 76 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 77 78 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 79 static void nxge_rxdma_hw_stop(p_nxge_t, int); 80 81 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 82 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 83 uint32_t, 84 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 85 p_rx_mbox_t *); 86 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 87 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 88 89 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 90 uint16_t, 91 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 92 p_rx_rcr_ring_t *, p_rx_mbox_t *); 93 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 94 p_rx_rcr_ring_t, p_rx_mbox_t); 95 96 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 97 uint16_t, 98 p_nxge_dma_common_t *, 99 p_rx_rbr_ring_t *, uint32_t); 100 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 101 p_rx_rbr_ring_t); 102 103 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 104 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 105 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 106 107 static mblk_t * 108 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 109 110 static void nxge_receive_packet(p_nxge_t, 111 p_rx_rcr_ring_t, 112 p_rcr_entry_t, 113 boolean_t *, 114 mblk_t **, mblk_t **); 115 116 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 117 118 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 119 static void nxge_freeb(p_rx_msg_t); 120 static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 121 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 122 123 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 124 uint32_t, uint32_t); 125 126 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 127 p_rx_rbr_ring_t); 128 129 130 static nxge_status_t 131 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 132 133 nxge_status_t 134 nxge_rx_port_fatal_err_recover(p_nxge_t); 135 136 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 137 138 nxge_status_t 139 nxge_init_rxdma_channels(p_nxge_t nxgep) 140 { 141 nxge_grp_set_t *set = &nxgep->rx_set; 142 int i, count; 143 144 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 145 146 if (!isLDOMguest(nxgep)) { 147 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 148 cmn_err(CE_NOTE, "hw_start_common"); 149 return (NXGE_ERROR); 150 } 151 } 152 153 /* 154 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 155 * We only have 8 hardware RDC tables, but we may have 156 * up to 16 logical (software-defined) groups of RDCS, 157 * if we make use of layer 3 & 4 hardware classification. 
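 *
 * The loop below walks every logical group present in the LG map and,
 * for each RX channel set in that group's own map, registers the channel
 * with the group via nxge_grp_dc_add(); it stops early once all
 * lg.count groups have been visited.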
158 */ 159 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 160 if ((1 << i) & set->lg.map) { 161 int channel; 162 nxge_grp_t *group = set->group[i]; 163 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 164 if ((1 << channel) & group->map) { 165 if ((nxge_grp_dc_add(nxgep, 166 (vr_handle_t)group, 167 VP_BOUND_RX, channel))) 168 return (NXGE_ERROR); 169 } 170 } 171 } 172 if (++count == set->lg.count) 173 break; 174 } 175 176 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 177 178 return (NXGE_OK); 179 } 180 181 nxge_status_t 182 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 183 { 184 nxge_status_t status; 185 186 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 187 188 status = nxge_map_rxdma(nxge, channel); 189 if (status != NXGE_OK) { 190 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 191 "<== nxge_init_rxdma: status 0x%x", status)); 192 return (status); 193 } 194 195 status = nxge_rxdma_hw_start(nxge, channel); 196 if (status != NXGE_OK) { 197 nxge_unmap_rxdma(nxge, channel); 198 } 199 200 if (!nxge->statsp->rdc_ksp[channel]) 201 nxge_setup_rdc_kstats(nxge, channel); 202 203 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 204 "<== nxge_init_rxdma_channel: status 0x%x", status)); 205 206 return (status); 207 } 208 209 void 210 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 211 { 212 nxge_grp_set_t *set = &nxgep->rx_set; 213 int rdc; 214 215 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 216 217 if (set->owned.map == 0) { 218 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 219 "nxge_uninit_rxdma_channels: no channels")); 220 return; 221 } 222 223 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 224 if ((1 << rdc) & set->owned.map) { 225 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 226 } 227 } 228 229 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 230 } 231 232 void 233 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 234 { 235 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 236 237 if (nxgep->statsp->rdc_ksp[channel]) { 238 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 239 nxgep->statsp->rdc_ksp[channel] = 0; 240 } 241 242 nxge_rxdma_hw_stop(nxgep, channel); 243 nxge_unmap_rxdma(nxgep, channel); 244 245 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 246 } 247 248 nxge_status_t 249 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 250 { 251 npi_handle_t handle; 252 npi_status_t rs = NPI_SUCCESS; 253 nxge_status_t status = NXGE_OK; 254 255 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 256 257 handle = NXGE_DEV_NPI_HANDLE(nxgep); 258 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 259 260 if (rs != NPI_SUCCESS) { 261 status = NXGE_ERROR | rs; 262 } 263 264 return (status); 265 } 266 267 void 268 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 269 { 270 nxge_grp_set_t *set = &nxgep->rx_set; 271 int rdc; 272 273 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 274 275 if (!isLDOMguest(nxgep)) { 276 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 277 (void) npi_rxdma_dump_fzc_regs(handle); 278 } 279 280 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 281 NXGE_DEBUG_MSG((nxgep, TX_CTL, 282 "nxge_rxdma_regs_dump_channels: " 283 "NULL ring pointer(s)")); 284 return; 285 } 286 287 if (set->owned.map == 0) { 288 NXGE_DEBUG_MSG((nxgep, RX_CTL, 289 "nxge_rxdma_regs_dump_channels: no channels")); 290 return; 291 } 292 293 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 294 if ((1 << rdc) & set->owned.map) { 295 rx_rbr_ring_t *ring = 296 
nxgep->rx_rbr_rings->rbr_rings[rdc]; 297 if (ring) { 298 (void) nxge_dump_rxdma_channel(nxgep, rdc); 299 } 300 } 301 } 302 303 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 304 } 305 306 nxge_status_t 307 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 308 { 309 npi_handle_t handle; 310 npi_status_t rs = NPI_SUCCESS; 311 nxge_status_t status = NXGE_OK; 312 313 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 314 315 handle = NXGE_DEV_NPI_HANDLE(nxgep); 316 rs = npi_rxdma_dump_rdc_regs(handle, channel); 317 318 if (rs != NPI_SUCCESS) { 319 status = NXGE_ERROR | rs; 320 } 321 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 322 return (status); 323 } 324 325 nxge_status_t 326 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 327 p_rx_dma_ent_msk_t mask_p) 328 { 329 npi_handle_t handle; 330 npi_status_t rs = NPI_SUCCESS; 331 nxge_status_t status = NXGE_OK; 332 333 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 334 "<== nxge_init_rxdma_channel_event_mask")); 335 336 handle = NXGE_DEV_NPI_HANDLE(nxgep); 337 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 338 if (rs != NPI_SUCCESS) { 339 status = NXGE_ERROR | rs; 340 } 341 342 return (status); 343 } 344 345 nxge_status_t 346 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 347 p_rx_dma_ctl_stat_t cs_p) 348 { 349 npi_handle_t handle; 350 npi_status_t rs = NPI_SUCCESS; 351 nxge_status_t status = NXGE_OK; 352 353 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 354 "<== nxge_init_rxdma_channel_cntl_stat")); 355 356 handle = NXGE_DEV_NPI_HANDLE(nxgep); 357 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 358 359 if (rs != NPI_SUCCESS) { 360 status = NXGE_ERROR | rs; 361 } 362 363 return (status); 364 } 365 366 /* 367 * nxge_rxdma_cfg_rdcgrp_default_rdc 368 * 369 * Set the default RDC for an RDC Group (Table) 370 * 371 * Arguments: 372 * nxgep 373 * rdcgrp The group to modify 374 * rdc The new default RDC. 375 * 376 * Notes: 377 * 378 * NPI/NXGE function calls: 379 * npi_rxdma_cfg_rdc_table_default_rdc() 380 * 381 * Registers accessed: 382 * RDC_TBL_REG: FZC_ZCP + 0x10000 383 * 384 * Context: 385 * Service domain 386 */ 387 nxge_status_t 388 nxge_rxdma_cfg_rdcgrp_default_rdc( 389 p_nxge_t nxgep, 390 uint8_t rdcgrp, 391 uint8_t rdc) 392 { 393 npi_handle_t handle; 394 npi_status_t rs = NPI_SUCCESS; 395 p_nxge_dma_pt_cfg_t p_dma_cfgp; 396 p_nxge_rdc_grp_t rdc_grp_p; 397 uint8_t actual_rdcgrp, actual_rdc; 398 399 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 400 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 401 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 402 403 handle = NXGE_DEV_NPI_HANDLE(nxgep); 404 405 /* 406 * This has to be rewritten. Do we even allow this anymore? 
407 */ 408 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 409 RDC_MAP_IN(rdc_grp_p->map, rdc); 410 rdc_grp_p->def_rdc = rdc; 411 412 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 413 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 414 415 rs = npi_rxdma_cfg_rdc_table_default_rdc( 416 handle, actual_rdcgrp, actual_rdc); 417 418 if (rs != NPI_SUCCESS) { 419 return (NXGE_ERROR | rs); 420 } 421 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 422 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 423 return (NXGE_OK); 424 } 425 426 nxge_status_t 427 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 428 { 429 npi_handle_t handle; 430 431 uint8_t actual_rdc; 432 npi_status_t rs = NPI_SUCCESS; 433 434 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 435 " ==> nxge_rxdma_cfg_port_default_rdc")); 436 437 handle = NXGE_DEV_NPI_HANDLE(nxgep); 438 actual_rdc = rdc; /* XXX Hack! */ 439 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 440 441 442 if (rs != NPI_SUCCESS) { 443 return (NXGE_ERROR | rs); 444 } 445 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 446 " <== nxge_rxdma_cfg_port_default_rdc")); 447 448 return (NXGE_OK); 449 } 450 451 nxge_status_t 452 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 453 uint16_t pkts) 454 { 455 npi_status_t rs = NPI_SUCCESS; 456 npi_handle_t handle; 457 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 458 " ==> nxge_rxdma_cfg_rcr_threshold")); 459 handle = NXGE_DEV_NPI_HANDLE(nxgep); 460 461 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 462 463 if (rs != NPI_SUCCESS) { 464 return (NXGE_ERROR | rs); 465 } 466 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 467 return (NXGE_OK); 468 } 469 470 nxge_status_t 471 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 472 uint16_t tout, uint8_t enable) 473 { 474 npi_status_t rs = NPI_SUCCESS; 475 npi_handle_t handle; 476 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 477 handle = NXGE_DEV_NPI_HANDLE(nxgep); 478 if (enable == 0) { 479 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 480 } else { 481 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 482 tout); 483 } 484 485 if (rs != NPI_SUCCESS) { 486 return (NXGE_ERROR | rs); 487 } 488 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 489 return (NXGE_OK); 490 } 491 492 nxge_status_t 493 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 494 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 495 { 496 npi_handle_t handle; 497 rdc_desc_cfg_t rdc_desc; 498 p_rcrcfig_b_t cfgb_p; 499 npi_status_t rs = NPI_SUCCESS; 500 501 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 502 handle = NXGE_DEV_NPI_HANDLE(nxgep); 503 /* 504 * Use configuration data composed at init time. 505 * Write to hardware the receive ring configurations. 
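 *
 * The rdc_desc filled in below carries the mailbox address, the RBR
 * address/length and page size code, the three packet buffer sizes, and
 * the RCR address/length together with the interrupt threshold and
 * timeout; it is handed to npi_rxdma_cfg_rdc_ring() before the channel
 * is enabled and the RBR is kicked.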
506 */ 507 rdc_desc.mbox_enable = 1; 508 rdc_desc.mbox_addr = mbox_p->mbox_addr; 509 NXGE_DEBUG_MSG((nxgep, RX_CTL, 510 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 511 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 512 513 rdc_desc.rbr_len = rbr_p->rbb_max; 514 rdc_desc.rbr_addr = rbr_p->rbr_addr; 515 516 switch (nxgep->rx_bksize_code) { 517 case RBR_BKSIZE_4K: 518 rdc_desc.page_size = SIZE_4KB; 519 break; 520 case RBR_BKSIZE_8K: 521 rdc_desc.page_size = SIZE_8KB; 522 break; 523 case RBR_BKSIZE_16K: 524 rdc_desc.page_size = SIZE_16KB; 525 break; 526 case RBR_BKSIZE_32K: 527 rdc_desc.page_size = SIZE_32KB; 528 break; 529 } 530 531 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 532 rdc_desc.valid0 = 1; 533 534 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 535 rdc_desc.valid1 = 1; 536 537 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 538 rdc_desc.valid2 = 1; 539 540 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 541 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 542 543 rdc_desc.rcr_len = rcr_p->comp_size; 544 rdc_desc.rcr_addr = rcr_p->rcr_addr; 545 546 cfgb_p = &(rcr_p->rcr_cfgb); 547 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 548 /* For now, disable this timeout in a guest domain. */ 549 if (isLDOMguest(nxgep)) { 550 rdc_desc.rcr_timeout = 0; 551 rdc_desc.rcr_timeout_enable = 0; 552 } else { 553 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 554 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 555 } 556 557 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 558 "rbr_len qlen %d pagesize code %d rcr_len %d", 559 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 560 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 561 "size 0 %d size 1 %d size 2 %d", 562 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 563 rbr_p->npi_pkt_buf_size2)); 564 565 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 566 if (rs != NPI_SUCCESS) { 567 return (NXGE_ERROR | rs); 568 } 569 570 /* 571 * Enable the timeout and threshold. 572 */ 573 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 574 rdc_desc.rcr_threshold); 575 if (rs != NPI_SUCCESS) { 576 return (NXGE_ERROR | rs); 577 } 578 579 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 580 rdc_desc.rcr_timeout); 581 if (rs != NPI_SUCCESS) { 582 return (NXGE_ERROR | rs); 583 } 584 585 /* Enable the DMA */ 586 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 587 if (rs != NPI_SUCCESS) { 588 return (NXGE_ERROR | rs); 589 } 590 591 /* Kick the DMA engine. 
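The kick posts the number of ready RBR descriptors (rbb_max here), telling the hardware how many receive buffers it may fetch.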
*/ 592 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 593 /* Clear the rbr empty bit */ 594 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 595 596 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 597 598 return (NXGE_OK); 599 } 600 601 nxge_status_t 602 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 603 { 604 npi_handle_t handle; 605 npi_status_t rs = NPI_SUCCESS; 606 607 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 608 handle = NXGE_DEV_NPI_HANDLE(nxgep); 609 610 /* disable the DMA */ 611 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 612 if (rs != NPI_SUCCESS) { 613 NXGE_DEBUG_MSG((nxgep, RX_CTL, 614 "<== nxge_disable_rxdma_channel:failed (0x%x)", 615 rs)); 616 return (NXGE_ERROR | rs); 617 } 618 619 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 620 return (NXGE_OK); 621 } 622 623 nxge_status_t 624 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 625 { 626 npi_handle_t handle; 627 nxge_status_t status = NXGE_OK; 628 629 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 630 "<== nxge_init_rxdma_channel_rcrflush")); 631 632 handle = NXGE_DEV_NPI_HANDLE(nxgep); 633 npi_rxdma_rdc_rcr_flush(handle, channel); 634 635 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 636 "<== nxge_init_rxdma_channel_rcrflsh")); 637 return (status); 638 639 } 640 641 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 642 643 #define TO_LEFT -1 644 #define TO_RIGHT 1 645 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 646 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 647 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 648 #define NO_HINT 0xffffffff 649 650 /*ARGSUSED*/ 651 nxge_status_t 652 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 653 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 654 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 655 { 656 int bufsize; 657 uint64_t pktbuf_pp; 658 uint64_t dvma_addr; 659 rxring_info_t *ring_info; 660 int base_side, end_side; 661 int r_index, l_index, anchor_index; 662 int found, search_done; 663 uint32_t offset, chunk_size, block_size, page_size_mask; 664 uint32_t chunk_index, block_index, total_index; 665 int max_iterations, iteration; 666 rxbuf_index_info_t *bufinfo; 667 668 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 669 670 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 671 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 672 pkt_buf_addr_pp, 673 pktbufsz_type)); 674 #if defined(__i386) 675 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 676 #else 677 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 678 #endif 679 680 switch (pktbufsz_type) { 681 case 0: 682 bufsize = rbr_p->pkt_buf_size0; 683 break; 684 case 1: 685 bufsize = rbr_p->pkt_buf_size1; 686 break; 687 case 2: 688 bufsize = rbr_p->pkt_buf_size2; 689 break; 690 case RCR_SINGLE_BLOCK: 691 bufsize = 0; 692 anchor_index = 0; 693 break; 694 default: 695 return (NXGE_ERROR); 696 } 697 698 if (rbr_p->num_blocks == 1) { 699 anchor_index = 0; 700 ring_info = rbr_p->ring_info; 701 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 702 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 703 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 704 "buf_pp $%p btype %d anchor_index %d " 705 "bufinfo $%p", 706 pkt_buf_addr_pp, 707 pktbufsz_type, 708 anchor_index, 709 bufinfo)); 710 711 goto found_index; 712 } 713 714 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 715 "==> nxge_rxbuf_pp_to_vp: " 716 "buf_pp $%p btype %d anchor_index %d", 717 pkt_buf_addr_pp, 718 pktbufsz_type, 719 anchor_index)); 720 721 ring_info = rbr_p->ring_info; 722 found = B_FALSE; 723 bufinfo = (rxbuf_index_info_t 
*)ring_info->buffer; 724 iteration = 0; 725 max_iterations = ring_info->max_iterations; 726 /* 727 * First check if this block has been seen 728 * recently. This is indicated by a hint which 729 * is initialized when the first buffer of the block 730 * is seen. The hint is reset when the last buffer of 731 * the block has been processed. 732 * As three block sizes are supported, three hints 733 * are kept. The idea behind the hints is that once 734 * the hardware uses a block for a buffer of that 735 * size, it will use it exclusively for that size 736 * and will use it until it is exhausted. It is assumed 737 * that there would be a single block being used for the same 738 * buffer size at any given time. 739 */ 740 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 741 anchor_index = ring_info->hint[pktbufsz_type]; 742 dvma_addr = bufinfo[anchor_index].dvma_addr; 743 chunk_size = bufinfo[anchor_index].buf_size; 744 if ((pktbuf_pp >= dvma_addr) && 745 (pktbuf_pp < (dvma_addr + chunk_size))) { 746 found = B_TRUE; 747 /* 748 * Check if this is the last buffer in the block. 749 * If so, reset the hint for this size. 750 */ 751 752 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 753 ring_info->hint[pktbufsz_type] = NO_HINT; 754 } 755 } 756 757 if (found == B_FALSE) { 758 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 759 "==> nxge_rxbuf_pp_to_vp: (!found)" 760 "buf_pp $%p btype %d anchor_index %d", 761 pkt_buf_addr_pp, 762 pktbufsz_type, 763 anchor_index)); 764 765 /* 766 * This is the first buffer of the block of this 767 * size. Need to search the whole information 768 * array. 769 * The search uses a binary search over the array, which 770 * assumes that the information is 771 * already sorted in increasing order: 772 info[0] < info[1] < info[2] ....
< info[n-1] 773 * where n is the size of the information array 774 */ 775 r_index = rbr_p->num_blocks - 1; 776 l_index = 0; 777 search_done = B_FALSE; 778 anchor_index = MID_INDEX(r_index, l_index); 779 while (search_done == B_FALSE) { 780 if ((r_index == l_index) || 781 (iteration >= max_iterations)) 782 search_done = B_TRUE; 783 end_side = TO_RIGHT; /* to the right */ 784 base_side = TO_LEFT; /* to the left */ 785 /* read the DVMA address information and sort it */ 786 dvma_addr = bufinfo[anchor_index].dvma_addr; 787 chunk_size = bufinfo[anchor_index].buf_size; 788 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 789 "==> nxge_rxbuf_pp_to_vp: (searching)" 790 "buf_pp $%p btype %d " 791 "anchor_index %d chunk_size %d dvmaaddr $%p", 792 pkt_buf_addr_pp, 793 pktbufsz_type, 794 anchor_index, 795 chunk_size, 796 dvma_addr)); 797 798 if (pktbuf_pp >= dvma_addr) 799 base_side = TO_RIGHT; /* to the right */ 800 if (pktbuf_pp < (dvma_addr + chunk_size)) 801 end_side = TO_LEFT; /* to the left */ 802 803 switch (base_side + end_side) { 804 case IN_MIDDLE: 805 /* found */ 806 found = B_TRUE; 807 search_done = B_TRUE; 808 if ((pktbuf_pp + bufsize) < 809 (dvma_addr + chunk_size)) 810 ring_info->hint[pktbufsz_type] = 811 bufinfo[anchor_index].buf_index; 812 break; 813 case BOTH_RIGHT: 814 /* not found: go to the right */ 815 l_index = anchor_index + 1; 816 anchor_index = 817 MID_INDEX(r_index, l_index); 818 break; 819 820 case BOTH_LEFT: 821 /* not found: go to the left */ 822 r_index = anchor_index - 1; 823 anchor_index = MID_INDEX(r_index, 824 l_index); 825 break; 826 default: /* should not come here */ 827 return (NXGE_ERROR); 828 } 829 iteration++; 830 } 831 832 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 833 "==> nxge_rxbuf_pp_to_vp: (search done)" 834 "buf_pp $%p btype %d anchor_index %d", 835 pkt_buf_addr_pp, 836 pktbufsz_type, 837 anchor_index)); 838 } 839 840 if (found == B_FALSE) { 841 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 842 "==> nxge_rxbuf_pp_to_vp: (search failed)" 843 "buf_pp $%p btype %d anchor_index %d", 844 pkt_buf_addr_pp, 845 pktbufsz_type, 846 anchor_index)); 847 return (NXGE_ERROR); 848 } 849 850 found_index: 851 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 852 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 853 "buf_pp $%p btype %d bufsize %d anchor_index %d", 854 pkt_buf_addr_pp, 855 pktbufsz_type, 856 bufsize, 857 anchor_index)); 858 859 /* index of the first block in this chunk */ 860 chunk_index = bufinfo[anchor_index].start_index; 861 dvma_addr = bufinfo[anchor_index].dvma_addr; 862 page_size_mask = ring_info->block_size_mask; 863 864 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 865 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 866 "buf_pp $%p btype %d bufsize %d " 867 "anchor_index %d chunk_index %d dvma $%p", 868 pkt_buf_addr_pp, 869 pktbufsz_type, 870 bufsize, 871 anchor_index, 872 chunk_index, 873 dvma_addr)); 874 875 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 876 block_size = rbr_p->block_size; /* System block(page) size */ 877 878 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 879 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 880 "buf_pp $%p btype %d bufsize %d " 881 "anchor_index %d chunk_index %d dvma $%p " 882 "offset %d block_size %d", 883 pkt_buf_addr_pp, 884 pktbufsz_type, 885 bufsize, 886 anchor_index, 887 chunk_index, 888 dvma_addr, 889 offset, 890 block_size)); 891 892 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 893 894 block_index = (offset / block_size); /* index within chunk */ 895 total_index = chunk_index + block_index; 896 897 898 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 899 "==> nxge_rxbuf_pp_to_vp: " 900 
"total_index %d dvma_addr $%p " 901 "offset %d block_size %d " 902 "block_index %d ", 903 total_index, dvma_addr, 904 offset, block_size, 905 block_index)); 906 #if defined(__i386) 907 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 908 (uint32_t)offset); 909 #else 910 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 911 (uint64_t)offset); 912 #endif 913 914 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 915 "==> nxge_rxbuf_pp_to_vp: " 916 "total_index %d dvma_addr $%p " 917 "offset %d block_size %d " 918 "block_index %d " 919 "*pkt_buf_addr_p $%p", 920 total_index, dvma_addr, 921 offset, block_size, 922 block_index, 923 *pkt_buf_addr_p)); 924 925 926 *msg_index = total_index; 927 *bufoffset = (offset & page_size_mask); 928 929 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 930 "==> nxge_rxbuf_pp_to_vp: get msg index: " 931 "msg_index %d bufoffset_index %d", 932 *msg_index, 933 *bufoffset)); 934 935 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 936 937 return (NXGE_OK); 938 } 939 940 /* 941 * used by quick sort (qsort) function 942 * to perform comparison 943 */ 944 static int 945 nxge_sort_compare(const void *p1, const void *p2) 946 { 947 948 rxbuf_index_info_t *a, *b; 949 950 a = (rxbuf_index_info_t *)p1; 951 b = (rxbuf_index_info_t *)p2; 952 953 if (a->dvma_addr > b->dvma_addr) 954 return (1); 955 if (a->dvma_addr < b->dvma_addr) 956 return (-1); 957 return (0); 958 } 959 960 961 962 /* 963 * grabbed this sort implementation from common/syscall/avl.c 964 * 965 */ 966 /* 967 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 968 * v = Ptr to array/vector of objs 969 * n = # objs in the array 970 * s = size of each obj (must be multiples of a word size) 971 * f = ptr to function to compare two objs 972 * returns (-1 = less than, 0 = equal, 1 = greater than 973 */ 974 void 975 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 976 { 977 int g, i, j, ii; 978 unsigned int *p1, *p2; 979 unsigned int tmp; 980 981 /* No work to do */ 982 if (v == NULL || n <= 1) 983 return; 984 /* Sanity check on arguments */ 985 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 986 ASSERT(s > 0); 987 988 for (g = n / 2; g > 0; g /= 2) { 989 for (i = g; i < n; i++) { 990 for (j = i - g; j >= 0 && 991 (*f)(v + j * s, v + (j + g) * s) == 1; 992 j -= g) { 993 p1 = (unsigned *)(v + j * s); 994 p2 = (unsigned *)(v + (j + g) * s); 995 for (ii = 0; ii < s / 4; ii++) { 996 tmp = *p1; 997 *p1++ = *p2; 998 *p2++ = tmp; 999 } 1000 } 1001 } 1002 } 1003 } 1004 1005 /* 1006 * Initialize data structures required for rxdma 1007 * buffer dvma->vmem address lookup 1008 */ 1009 /*ARGSUSED*/ 1010 static nxge_status_t 1011 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1012 { 1013 1014 int index; 1015 rxring_info_t *ring_info; 1016 int max_iteration = 0, max_index = 0; 1017 1018 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1019 1020 ring_info = rbrp->ring_info; 1021 ring_info->hint[0] = NO_HINT; 1022 ring_info->hint[1] = NO_HINT; 1023 ring_info->hint[2] = NO_HINT; 1024 max_index = rbrp->num_blocks; 1025 1026 /* read the DVMA address information and sort it */ 1027 /* do init of the information array */ 1028 1029 1030 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1031 " nxge_rxbuf_index_info_init Sort ptrs")); 1032 1033 /* sort the array */ 1034 nxge_ksort((void *)ring_info->buffer, max_index, 1035 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1036 1037 1038 1039 for (index = 0; index < max_index; index++) { 1040 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1041 " 
nxge_rxbuf_index_info_init: sorted chunk %d " 1042 " ioaddr $%p kaddr $%p size %x", 1043 index, ring_info->buffer[index].dvma_addr, 1044 ring_info->buffer[index].kaddr, 1045 ring_info->buffer[index].buf_size)); 1046 } 1047 1048 max_iteration = 0; 1049 while (max_index >= (1ULL << max_iteration)) 1050 max_iteration++; 1051 ring_info->max_iterations = max_iteration + 1; 1052 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1053 " nxge_rxbuf_index_info_init Find max iter %d", 1054 ring_info->max_iterations)); 1055 1056 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1057 return (NXGE_OK); 1058 } 1059 1060 /* ARGSUSED */ 1061 void 1062 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1063 { 1064 #ifdef NXGE_DEBUG 1065 1066 uint32_t bptr; 1067 uint64_t pp; 1068 1069 bptr = entry_p->bits.hdw.pkt_buf_addr; 1070 1071 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1072 "\trcr entry $%p " 1073 "\trcr entry 0x%0llx " 1074 "\trcr entry 0x%08x " 1075 "\trcr entry 0x%08x " 1076 "\tvalue 0x%0llx\n" 1077 "\tmulti = %d\n" 1078 "\tpkt_type = 0x%x\n" 1079 "\tzero_copy = %d\n" 1080 "\tnoport = %d\n" 1081 "\tpromis = %d\n" 1082 "\terror = 0x%04x\n" 1083 "\tdcf_err = 0x%01x\n" 1084 "\tl2_len = %d\n" 1085 "\tpktbufsize = %d\n" 1086 "\tpkt_buf_addr = $%p\n" 1087 "\tpkt_buf_addr (<< 6) = $%p\n", 1088 entry_p, 1089 *(int64_t *)entry_p, 1090 *(int32_t *)entry_p, 1091 *(int32_t *)((char *)entry_p + 32), 1092 entry_p->value, 1093 entry_p->bits.hdw.multi, 1094 entry_p->bits.hdw.pkt_type, 1095 entry_p->bits.hdw.zero_copy, 1096 entry_p->bits.hdw.noport, 1097 entry_p->bits.hdw.promis, 1098 entry_p->bits.hdw.error, 1099 entry_p->bits.hdw.dcf_err, 1100 entry_p->bits.hdw.l2_len, 1101 entry_p->bits.hdw.pktbufsz, 1102 bptr, 1103 entry_p->bits.ldw.pkt_buf_addr)); 1104 1105 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1106 RCR_PKT_BUF_ADDR_SHIFT; 1107 1108 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1109 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1110 #endif 1111 } 1112 1113 void 1114 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1115 { 1116 npi_handle_t handle; 1117 rbr_stat_t rbr_stat; 1118 addr44_t hd_addr; 1119 addr44_t tail_addr; 1120 uint16_t qlen; 1121 1122 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1123 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1124 1125 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1126 1127 /* RBR head */ 1128 hd_addr.addr = 0; 1129 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1130 #if defined(__i386) 1131 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1132 (void *)(uint32_t)hd_addr.addr); 1133 #else 1134 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1135 (void *)hd_addr.addr); 1136 #endif 1137 1138 /* RBR stats */ 1139 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1140 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1141 1142 /* RCR tail */ 1143 tail_addr.addr = 0; 1144 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1145 #if defined(__i386) 1146 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1147 (void *)(uint32_t)tail_addr.addr); 1148 #else 1149 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1150 (void *)tail_addr.addr); 1151 #endif 1152 1153 /* RCR qlen */ 1154 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1155 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1156 1157 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1158 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1159 } 1160 1161 void 1162 nxge_rxdma_stop(p_nxge_t nxgep) 1163 { 1164 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1165 1166 (void) 
nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1167 (void) nxge_rx_mac_disable(nxgep); 1168 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1169 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 1170 } 1171 1172 void 1173 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 1174 { 1175 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 1176 1177 (void) nxge_rxdma_stop(nxgep); 1178 (void) nxge_uninit_rxdma_channels(nxgep); 1179 (void) nxge_init_rxdma_channels(nxgep); 1180 1181 #ifndef AXIS_DEBUG_LB 1182 (void) nxge_xcvr_init(nxgep); 1183 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1184 #endif 1185 (void) nxge_rx_mac_enable(nxgep); 1186 1187 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 1188 } 1189 1190 nxge_status_t 1191 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1192 { 1193 nxge_grp_set_t *set = &nxgep->rx_set; 1194 nxge_status_t status; 1195 npi_status_t rs = NPI_SUCCESS; 1196 int rdc; 1197 1198 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1199 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1200 1201 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1202 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1203 "<== nxge_rxdma_hw_mode: not initialized")); 1204 return (NXGE_ERROR); 1205 } 1206 1207 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1208 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1209 "<== nxge_rxdma_hw_mode: " 1210 "NULL ring pointer(s)")); 1211 return (NXGE_ERROR); 1212 } 1213 1214 if (set->owned.map == 0) { 1215 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1216 "nxge_rxdma_hw_mode: no channels")); 1217 return (NXGE_OK); 1218 } 1219 1220 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1221 if ((1 << rdc) & set->owned.map) { 1222 rx_rbr_ring_t *ring = 1223 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1224 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1225 if (ring) { 1226 if (enable) { 1227 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1228 "==> nxge_rxdma_hw_mode: " 1229 "channel %d (enable)", rdc)); 1230 rs = npi_rxdma_cfg_rdc_enable 1231 (handle, rdc); 1232 } else { 1233 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1234 "==> nxge_rxdma_hw_mode: " 1235 "channel %d (disable)", rdc)); 1236 rs = npi_rxdma_cfg_rdc_disable 1237 (handle, rdc); 1238 } 1239 } 1240 } 1241 } 1242 1243 status = ((rs == NPI_SUCCESS) ?
NXGE_OK : NXGE_ERROR | rs); 1244 1245 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1246 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1247 1248 return (status); 1249 } 1250 1251 void 1252 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1253 { 1254 npi_handle_t handle; 1255 1256 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1257 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1258 1259 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1260 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1261 1262 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1263 } 1264 1265 void 1266 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1267 { 1268 npi_handle_t handle; 1269 1270 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1271 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1272 1273 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1274 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1275 1276 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1277 } 1278 1279 void 1280 nxge_hw_start_rx(p_nxge_t nxgep) 1281 { 1282 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1283 1284 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1285 (void) nxge_rx_mac_enable(nxgep); 1286 1287 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1288 } 1289 1290 /*ARGSUSED*/ 1291 void 1292 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1293 { 1294 nxge_grp_set_t *set = &nxgep->rx_set; 1295 int rdc; 1296 1297 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1298 1299 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1300 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1301 "<== nxge_tx_port_fatal_err_recover: " 1302 "NULL ring pointer(s)")); 1303 return; 1304 } 1305 1306 if (set->owned.map == 0) { 1307 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1308 "nxge_rxdma_regs_dump_channels: no channels")); 1309 return; 1310 } 1311 1312 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1313 if ((1 << rdc) & set->owned.map) { 1314 rx_rbr_ring_t *ring = 1315 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1316 if (ring) { 1317 nxge_rxdma_hw_stop(nxgep, rdc); 1318 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1319 "==> nxge_fixup_rxdma_rings: " 1320 "channel %d ring $%px", 1321 rdc, ring)); 1322 (void) nxge_rxdma_fixup_channel 1323 (nxgep, rdc, rdc); 1324 } 1325 } 1326 } 1327 1328 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1329 } 1330 1331 void 1332 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1333 { 1334 int i; 1335 1336 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1337 i = nxge_rxdma_get_ring_index(nxgep, channel); 1338 if (i < 0) { 1339 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1340 "<== nxge_rxdma_fix_channel: no entry found")); 1341 return; 1342 } 1343 1344 nxge_rxdma_fixup_channel(nxgep, channel, i); 1345 1346 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 1347 } 1348 1349 void 1350 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1351 { 1352 int ndmas; 1353 p_rx_rbr_rings_t rx_rbr_rings; 1354 p_rx_rbr_ring_t *rbr_rings; 1355 p_rx_rcr_rings_t rx_rcr_rings; 1356 p_rx_rcr_ring_t *rcr_rings; 1357 p_rx_mbox_areas_t rx_mbox_areas_p; 1358 p_rx_mbox_t *rx_mbox_p; 1359 p_nxge_dma_pool_t dma_buf_poolp; 1360 p_nxge_dma_pool_t dma_cntl_poolp; 1361 p_rx_rbr_ring_t rbrp; 1362 p_rx_rcr_ring_t rcrp; 1363 p_rx_mbox_t mboxp; 1364 p_nxge_dma_common_t dmap; 1365 nxge_status_t status = NXGE_OK; 1366 1367 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 1368 1369 (void) nxge_rxdma_stop_channel(nxgep, channel); 1370 1371 dma_buf_poolp = nxgep->rx_buf_pool_p; 1372 
dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1373 1374 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1375 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1376 "<== nxge_rxdma_fixup_channel: buf not allocated")); 1377 return; 1378 } 1379 1380 ndmas = dma_buf_poolp->ndmas; 1381 if (!ndmas) { 1382 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1383 "<== nxge_rxdma_fixup_channel: no dma allocated")); 1384 return; 1385 } 1386 1387 rx_rbr_rings = nxgep->rx_rbr_rings; 1388 rx_rcr_rings = nxgep->rx_rcr_rings; 1389 rbr_rings = rx_rbr_rings->rbr_rings; 1390 rcr_rings = rx_rcr_rings->rcr_rings; 1391 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1392 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1393 1394 /* Reinitialize the receive block and completion rings */ 1395 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry]; 1396 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry]; 1397 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 1398 1399 1400 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1401 rbrp->rbr_rd_index = 0; 1402 rcrp->comp_rd_index = 0; 1403 rcrp->comp_wt_index = 0; 1404 1405 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1406 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1407 1408 status = nxge_rxdma_start_channel(nxgep, channel, 1409 rbrp, rcrp, mboxp); 1410 if (status != NXGE_OK) { 1411 goto nxge_rxdma_fixup_channel_fail; 1412 } 1416 1417 nxge_rxdma_fixup_channel_fail: 1418 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1419 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 1420 1421 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 1422 } 1423 1424 /* ARGSUSED */ 1425 int 1426 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1427 { 1428 return (channel); 1429 } 1430 1431 p_rx_rbr_ring_t 1432 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1433 { 1434 nxge_grp_set_t *set = &nxgep->rx_set; 1435 nxge_channel_t rdc; 1436 1437 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1438 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1439 1440 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1441 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1442 "<== nxge_rxdma_get_rbr_ring: " 1443 "NULL ring pointer(s)")); 1444 return (NULL); 1445 } 1446 1447 if (set->owned.map == 0) { 1448 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1449 "<== nxge_rxdma_get_rbr_ring: no channels")); 1450 return (NULL); 1451 } 1452 1453 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1454 if ((1 << rdc) & set->owned.map) { 1455 rx_rbr_ring_t *ring = 1456 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1457 if (ring) { 1458 if (channel == ring->rdc) { 1459 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1460 "==> nxge_rxdma_get_rbr_ring: " 1461 "channel %d ring $%p", rdc, ring)); 1462 return (ring); 1463 } 1464 } 1465 } 1466 } 1467 1468 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1469 "<== nxge_rxdma_get_rbr_ring: not found")); 1470 1471 return (NULL); 1472 } 1473 1474 p_rx_rcr_ring_t 1475 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1476 { 1477 nxge_grp_set_t *set = &nxgep->rx_set; 1478 nxge_channel_t rdc; 1479 1480 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1481 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1482 1483 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1484 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1485 "<== nxge_rxdma_get_rcr_ring: " 1486 "NULL ring pointer(s)")); 1487 return (NULL); 1488 } 1489 1490 if (set->owned.map == 0) { 1491 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1492 "<== nxge_rxdma_get_rcr_ring: no channels")); 1493 return (NULL); 1494 } 1495 1496 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1497 if ((1 <<
rdc) & set->owned.map) { 1498 rx_rcr_ring_t *ring = 1499 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1500 if (ring) { 1501 if (channel == ring->rdc) { 1502 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1503 "==> nxge_rxdma_get_rcr_ring: " 1504 "channel %d ring $%p", rdc, ring)); 1505 return (ring); 1506 } 1507 } 1508 } 1509 } 1510 1511 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1512 "<== nxge_rxdma_get_rcr_ring: not found")); 1513 1514 return (NULL); 1515 } 1516 1517 /* 1518 * Static functions start here. 1519 */ 1520 static p_rx_msg_t 1521 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1522 { 1523 p_rx_msg_t nxge_mp = NULL; 1524 p_nxge_dma_common_t dmamsg_p; 1525 uchar_t *buffer; 1526 1527 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1528 if (nxge_mp == NULL) { 1529 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1530 "Allocation of a rx msg failed.")); 1531 goto nxge_allocb_exit; 1532 } 1533 1534 nxge_mp->use_buf_pool = B_FALSE; 1535 if (dmabuf_p) { 1536 nxge_mp->use_buf_pool = B_TRUE; 1537 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1538 *dmamsg_p = *dmabuf_p; 1539 dmamsg_p->nblocks = 1; 1540 dmamsg_p->block_size = size; 1541 dmamsg_p->alength = size; 1542 buffer = (uchar_t *)dmabuf_p->kaddrp; 1543 1544 dmabuf_p->kaddrp = (void *) 1545 ((char *)dmabuf_p->kaddrp + size); 1546 dmabuf_p->ioaddr_pp = (void *) 1547 ((char *)dmabuf_p->ioaddr_pp + size); 1548 dmabuf_p->alength -= size; 1549 dmabuf_p->offset += size; 1550 dmabuf_p->dma_cookie.dmac_laddress += size; 1551 dmabuf_p->dma_cookie.dmac_size -= size; 1552 1553 } else { 1554 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1555 if (buffer == NULL) { 1556 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1557 "Allocation of a receive page failed.")); 1558 goto nxge_allocb_fail1; 1559 } 1560 } 1561 1562 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1563 if (nxge_mp->rx_mblk_p == NULL) { 1564 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1565 goto nxge_allocb_fail2; 1566 } 1567 1568 nxge_mp->buffer = buffer; 1569 nxge_mp->block_size = size; 1570 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1571 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1572 nxge_mp->ref_cnt = 1; 1573 nxge_mp->free = B_TRUE; 1574 nxge_mp->rx_use_bcopy = B_FALSE; 1575 1576 atomic_inc_32(&nxge_mblks_pending); 1577 1578 goto nxge_allocb_exit; 1579 1580 nxge_allocb_fail2: 1581 if (!nxge_mp->use_buf_pool) { 1582 KMEM_FREE(buffer, size); 1583 } 1584 1585 nxge_allocb_fail1: 1586 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1587 nxge_mp = NULL; 1588 1589 nxge_allocb_exit: 1590 return (nxge_mp); 1591 } 1592 1593 p_mblk_t 1594 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1595 { 1596 p_mblk_t mp; 1597 1598 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1599 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1600 "offset = 0x%08X " 1601 "size = 0x%08X", 1602 nxge_mp, offset, size)); 1603 1604 mp = desballoc(&nxge_mp->buffer[offset], size, 1605 0, &nxge_mp->freeb); 1606 if (mp == NULL) { 1607 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1608 goto nxge_dupb_exit; 1609 } 1610 atomic_inc_32(&nxge_mp->ref_cnt); 1611 1612 1613 nxge_dupb_exit: 1614 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1615 nxge_mp)); 1616 return (mp); 1617 } 1618 1619 p_mblk_t 1620 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1621 { 1622 p_mblk_t mp; 1623 uchar_t *dp; 1624 1625 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1626 if (mp == NULL) { 1627 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1628 goto nxge_dupb_bcopy_exit; 1629 } 1630 dp = mp->b_rptr = 
mp->b_rptr + NXGE_RXBUF_EXTRA; 1631 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1632 mp->b_wptr = dp + size; 1633 1634 nxge_dupb_bcopy_exit: 1635 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1636 nxge_mp)); 1637 return (mp); 1638 } 1639 1640 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1641 p_rx_msg_t rx_msg_p); 1642 1643 void 1644 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1645 { 1646 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1647 1648 /* Reuse this buffer */ 1649 rx_msg_p->free = B_FALSE; 1650 rx_msg_p->cur_usage_cnt = 0; 1651 rx_msg_p->max_usage_cnt = 0; 1652 rx_msg_p->pkt_buf_size = 0; 1653 1654 if (rx_rbr_p->rbr_use_bcopy) { 1655 rx_msg_p->rx_use_bcopy = B_FALSE; 1656 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1657 } 1658 1659 /* 1660 * Get the rbr header pointer and its offset index. 1661 */ 1662 MUTEX_ENTER(&rx_rbr_p->post_lock); 1663 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1664 rx_rbr_p->rbr_wrap_mask); 1665 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1666 MUTEX_EXIT(&rx_rbr_p->post_lock); 1667 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1668 rx_rbr_p->rdc, 1); 1669 1670 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1671 "<== nxge_post_page (channel %d post_next_index %d)", 1672 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1673 1674 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1675 } 1676 1677 void 1678 nxge_freeb(p_rx_msg_t rx_msg_p) 1679 { 1680 size_t size; 1681 uchar_t *buffer = NULL; 1682 int ref_cnt; 1683 boolean_t free_state = B_FALSE; 1684 1685 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1686 1687 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1688 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1689 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1690 rx_msg_p, nxge_mblks_pending)); 1691 1692 /* 1693 * First we need to get the free state, then 1694 * atomic decrement the reference count to prevent 1695 * the race condition with the interrupt thread that 1696 * is processing a loaned up buffer block. 1697 */ 1698 free_state = rx_msg_p->free; 1699 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1700 if (!ref_cnt) { 1701 atomic_dec_32(&nxge_mblks_pending); 1702 buffer = rx_msg_p->buffer; 1703 size = rx_msg_p->block_size; 1704 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1705 "will free: rx_msg_p = $%p (block pending %d)", 1706 rx_msg_p, nxge_mblks_pending)); 1707 1708 if (!rx_msg_p->use_buf_pool) { 1709 KMEM_FREE(buffer, size); 1710 } 1711 1712 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1713 1714 if (ring) { 1715 /* 1716 * Decrement the receive buffer ring's reference 1717 * count, too. 1718 */ 1719 atomic_dec_32(&ring->rbr_ref_cnt); 1720 1721 /* 1722 * Free the receive buffer ring, if 1723 * 1. all the receive buffers have been freed 1724 * 2. and we are in the proper state (that is, 1725 * we are not UNMAPPING). 1726 */ 1727 if (ring->rbr_ref_cnt == 0 && 1728 ring->rbr_state == RBR_UNMAPPED) { 1729 /* 1730 * Free receive data buffers, 1731 * buffer index information 1732 * (rxring_info) and 1733 * the message block ring. 
1734 */ 1735 NXGE_DEBUG_MSG((NULL, RX_CTL, 1736 "nxge_freeb:rx_msg_p = $%p " 1737 "(block pending %d) free buffers", 1738 rx_msg_p, nxge_mblks_pending)); 1739 nxge_rxdma_databuf_free(ring); 1740 if (ring->ring_info) { 1741 KMEM_FREE(ring->ring_info, 1742 sizeof (rxring_info_t)); 1743 } 1744 1745 if (ring->rx_msg_ring) { 1746 KMEM_FREE(ring->rx_msg_ring, 1747 ring->tnblocks * 1748 sizeof (p_rx_msg_t)); 1749 } 1750 KMEM_FREE(ring, sizeof (*ring)); 1751 } 1752 } 1753 return; 1754 } 1755 1756 /* 1757 * Repost buffer. 1758 */ 1759 if (free_state && (ref_cnt == 1) && ring) { 1760 NXGE_DEBUG_MSG((NULL, RX_CTL, 1761 "nxge_freeb: post page $%p:", rx_msg_p)); 1762 if (ring->rbr_state == RBR_POSTING) 1763 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1764 } 1765 1766 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1767 } 1768 1769 uint_t 1770 nxge_rx_intr(void *arg1, void *arg2) 1771 { 1772 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1773 p_nxge_t nxgep = (p_nxge_t)arg2; 1774 p_nxge_ldg_t ldgp; 1775 uint8_t channel; 1776 npi_handle_t handle; 1777 rx_dma_ctl_stat_t cs; 1778 1779 #ifdef NXGE_DEBUG 1780 rxdma_cfig1_t cfg; 1781 #endif 1782 uint_t serviced = DDI_INTR_UNCLAIMED; 1783 1784 if (ldvp == NULL) { 1785 NXGE_DEBUG_MSG((NULL, INT_CTL, 1786 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1787 nxgep, ldvp)); 1788 1789 return (DDI_INTR_CLAIMED); 1790 } 1791 1792 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1793 nxgep = ldvp->nxgep; 1794 } 1795 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1796 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1797 nxgep, ldvp)); 1798 1799 /* 1800 * This interrupt handler is for a specific 1801 * receive dma channel. 1802 */ 1803 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1804 /* 1805 * Get the control and status for this channel. 1806 */ 1807 channel = ldvp->channel; 1808 ldgp = ldvp->ldgp; 1809 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1810 1811 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1812 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1813 channel, 1814 cs.value, 1815 cs.bits.hdw.rcrto, 1816 cs.bits.hdw.rcrthres)); 1817 1818 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1819 serviced = DDI_INTR_CLAIMED; 1820 1821 /* error events. */ 1822 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1823 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1824 } 1825 1826 nxge_intr_exit: 1827 /* 1828 * Enable the mailbox update interrupt if we want 1829 * to use mailbox. We probably don't need to use 1830 * mailbox as it only saves us one pio read. 1831 * Also write 1 to rcrthres and rcrto to clear 1832 * these two edge triggered bits. 1833 */ 1834 1835 cs.value &= RX_DMA_CTL_STAT_WR1C; 1836 cs.bits.hdw.mex = 1; 1837 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1838 cs.value); 1839 1840 /* 1841 * Rearm this logical group if this is a single device 1842 * group. 1843 */ 1844 if (ldgp->nldvs == 1) { 1845 ldgimgm_t mgm; 1846 mgm.value = 0; 1847 mgm.bits.ldw.arm = 1; 1848 mgm.bits.ldw.timer = ldgp->ldg_timer; 1849 if (isLDOMguest(nxgep)) { 1850 nxge_hio_ldgimgn(nxgep, ldgp); 1851 } else { 1852 NXGE_REG_WR64(handle, 1853 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1854 mgm.value); 1855 } 1856 } 1857 1858 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1859 serviced)); 1860 return (serviced); 1861 } 1862 1863 /* 1864 * Process the packets received in the specified logical device 1865 * and pass up a chain of message blocks to the upper layer. 
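 * If the ring is in polling mode (poll_flag set), the interrupt path
 * returns without touching the completions and leaves them to the poll
 * handler; otherwise the chain built by nxge_rx_pkts() is handed to
 * mac_rx(), or to the vio net callback when running as an LDoms guest.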
1866 */ 1867 static void 1868 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 1869 { 1870 p_mblk_t mp; 1871 p_rx_rcr_ring_t rcrp; 1872 1873 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1874 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 1875 if (rcrp->poll_flag) { 1876 /* It is in the poll mode */ 1877 return; 1878 } 1879 1880 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 1881 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1882 "<== nxge_rx_pkts_vring: no mp")); 1883 return; 1884 } 1885 1886 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1887 mp)); 1888 1889 #ifdef NXGE_DEBUG 1890 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1891 "==> nxge_rx_pkts_vring:calling mac_rx " 1892 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1893 "mac_handle $%p", 1894 mp->b_wptr - mp->b_rptr, 1895 mp, mp->b_cont, mp->b_next, 1896 rcrp, rcrp->rcr_mac_handle)); 1897 1898 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1899 "==> nxge_rx_pkts_vring: dump packets " 1900 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1901 mp, 1902 mp->b_rptr, 1903 mp->b_wptr, 1904 nxge_dump_packet((char *)mp->b_rptr, 1905 mp->b_wptr - mp->b_rptr))); 1906 if (mp->b_cont) { 1907 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1908 "==> nxge_rx_pkts_vring: dump b_cont packets " 1909 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1910 mp->b_cont, 1911 mp->b_cont->b_rptr, 1912 mp->b_cont->b_wptr, 1913 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1914 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1915 } 1916 if (mp->b_next) { 1917 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1918 "==> nxge_rx_pkts_vring: dump next packets " 1919 "(b_rptr $%p): %s", 1920 mp->b_next->b_rptr, 1921 nxge_dump_packet((char *)mp->b_next->b_rptr, 1922 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1923 } 1924 #endif 1925 1926 if (!isLDOMguest(nxgep)) 1927 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1928 #if defined(sun4v) 1929 else { /* isLDOMguest(nxgep) */ 1930 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1931 nxgep->nxge_hw_p->hio; 1932 nx_vio_fp_t *vio = &nhd->hio.vio; 1933 1934 if (vio->cb.vio_net_rx_cb) { 1935 (*vio->cb.vio_net_rx_cb) 1936 (nxgep->hio_vr->vhp, mp); 1937 } 1938 } 1939 #endif 1940 } 1941 1942 1943 /* 1944 * This routine is the main packet receive processing function. 1945 * It gets the packet type, error code, and buffer related 1946 * information from the receive completion entry. 1947 * How many completion entries to process is based on the number of packets 1948 * queued by the hardware, a hardware maintained tail pointer 1949 * and a configurable receive packet count. 1950 * 1951 * A chain of message blocks will be created as result of processing 1952 * the completion entries. This chain of message blocks will be returned and 1953 * a hardware control status register will be updated with the number of 1954 * packets were removed from the hardware queue. 
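 * The bytes_to_pickup argument bounds how much data one call chains up:
 * -1 means no byte limit, while a positive value ends the loop once at
 * least that many bytes have been gathered.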
1955 * 1956 */ 1957 static mblk_t * 1958 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 1959 int bytes_to_pickup) 1960 { 1961 npi_handle_t handle; 1962 uint8_t channel; 1963 uint32_t comp_rd_index; 1964 p_rcr_entry_t rcr_desc_rd_head_p; 1965 p_rcr_entry_t rcr_desc_rd_head_pp; 1966 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1967 uint16_t qlen, nrcr_read, npkt_read; 1968 uint32_t qlen_hw; 1969 boolean_t multi; 1970 rcrcfig_b_t rcr_cfg_b; 1971 int totallen = 0; 1972 #if defined(_BIG_ENDIAN) 1973 npi_status_t rs = NPI_SUCCESS; 1974 #endif 1975 1976 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 1977 "channel %d", rcr_p->rdc)); 1978 1979 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1980 return (NULL); 1981 } 1982 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1983 channel = rcr_p->rdc; 1984 1985 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1986 "==> nxge_rx_pkts: START: rcr channel %d " 1987 "head_p $%p head_pp $%p index %d ", 1988 channel, rcr_p->rcr_desc_rd_head_p, 1989 rcr_p->rcr_desc_rd_head_pp, 1990 rcr_p->comp_rd_index)); 1991 1992 1993 #if !defined(_BIG_ENDIAN) 1994 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 1995 #else 1996 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1997 if (rs != NPI_SUCCESS) { 1998 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 1999 "channel %d, get qlen failed 0x%08x", 2000 channel, rs)); 2001 return (NULL); 2002 } 2003 #endif 2004 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2005 "qlen %d", channel, qlen)); 2006 2007 2008 2009 if (!qlen) { 2010 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2011 "==> nxge_rx_pkts:rcr channel %d " 2012 "qlen %d (no pkts)", channel, qlen)); 2013 2014 return (NULL); 2015 } 2016 2017 comp_rd_index = rcr_p->comp_rd_index; 2018 2019 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2020 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2021 nrcr_read = npkt_read = 0; 2022 2023 /* 2024 * Number of packets queued 2025 * (The jumbo or multi packet will be counted as only one 2026 * packets and it may take up more than one completion entry). 2027 */ 2028 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2029 qlen : nxge_max_rx_pkts; 2030 head_mp = NULL; 2031 tail_mp = &head_mp; 2032 nmp = mp_cont = NULL; 2033 multi = B_FALSE; 2034 2035 while (qlen_hw) { 2036 2037 #ifdef NXGE_DEBUG 2038 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2039 #endif 2040 /* 2041 * Process one completion ring entry. 
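 * nxge_receive_packet() sets multi, nmp and mp_cont; the chaining cases
 * below use them to decide whether this entry starts, continues, or
 * completes a packet.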
2042 */ 2043 nxge_receive_packet(nxgep, 2044 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2045 2046 /* 2047 * message chaining modes 2048 */ 2049 if (nmp) { 2050 nmp->b_next = NULL; 2051 if (!multi && !mp_cont) { /* frame fits a partition */ 2052 *tail_mp = nmp; 2053 tail_mp = &nmp->b_next; 2054 totallen += MBLKL(nmp); 2055 nmp = NULL; 2056 } else if (multi && !mp_cont) { /* first segment */ 2057 *tail_mp = nmp; 2058 tail_mp = &nmp->b_cont; 2059 totallen += MBLKL(nmp); 2060 } else if (multi && mp_cont) { /* mid of multi segs */ 2061 *tail_mp = mp_cont; 2062 tail_mp = &mp_cont->b_cont; 2063 totallen += MBLKL(mp_cont); 2064 } else if (!multi && mp_cont) { /* last segment */ 2065 *tail_mp = mp_cont; 2066 tail_mp = &nmp->b_next; 2067 totallen += MBLKL(mp_cont); 2068 nmp = NULL; 2069 } 2070 } 2071 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2072 "==> nxge_rx_pkts: loop: rcr channel %d " 2073 "before updating: multi %d " 2074 "nrcr_read %d " 2075 "npk read %d " 2076 "head_pp $%p index %d ", 2077 channel, 2078 multi, 2079 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2080 comp_rd_index)); 2081 2082 if (!multi) { 2083 qlen_hw--; 2084 npkt_read++; 2085 } 2086 2087 /* 2088 * Update the next read entry. 2089 */ 2090 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2091 rcr_p->comp_wrap_mask); 2092 2093 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2094 rcr_p->rcr_desc_first_p, 2095 rcr_p->rcr_desc_last_p); 2096 2097 nrcr_read++; 2098 2099 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2100 "<== nxge_rx_pkts: (SAM, process one packet) " 2101 "nrcr_read %d", 2102 nrcr_read)); 2103 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2104 "==> nxge_rx_pkts: loop: rcr channel %d " 2105 "multi %d " 2106 "nrcr_read %d " 2107 "npk read %d " 2108 "head_pp $%p index %d ", 2109 channel, 2110 multi, 2111 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2112 comp_rd_index)); 2113 2114 if ((bytes_to_pickup != -1) && 2115 (totallen >= bytes_to_pickup)) { 2116 break; 2117 } 2118 } 2119 2120 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2121 rcr_p->comp_rd_index = comp_rd_index; 2122 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2123 2124 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2125 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2126 rcr_p->intr_timeout = nxgep->intr_timeout; 2127 rcr_p->intr_threshold = nxgep->intr_threshold; 2128 rcr_cfg_b.value = 0x0ULL; 2129 if (rcr_p->intr_timeout) 2130 rcr_cfg_b.bits.ldw.entout = 1; 2131 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2132 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2133 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2134 channel, rcr_cfg_b.value); 2135 } 2136 2137 cs.bits.ldw.pktread = npkt_read; 2138 cs.bits.ldw.ptrread = nrcr_read; 2139 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2140 channel, cs.value); 2141 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2142 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2143 "head_pp $%p index %016llx ", 2144 channel, 2145 rcr_p->rcr_desc_rd_head_pp, 2146 rcr_p->comp_rd_index)); 2147 /* 2148 * Update RCR buffer pointer read and number of packets 2149 * read. 
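 *
 * (The update itself happens just above this comment: pktread and
 * ptrread written to the RX_DMA_CTL_STAT register report how many
 * packets and how many completion entries, respectively, were
 * consumed in this pass.)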
2150 */ 2151 2152 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2153 return (head_mp); 2154 } 2155 2156 void 2157 nxge_receive_packet(p_nxge_t nxgep, 2158 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2159 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2160 { 2161 p_mblk_t nmp = NULL; 2162 uint64_t multi; 2163 uint64_t dcf_err; 2164 uint8_t channel; 2165 2166 boolean_t first_entry = B_TRUE; 2167 boolean_t is_tcp_udp = B_FALSE; 2168 boolean_t buffer_free = B_FALSE; 2169 boolean_t error_send_up = B_FALSE; 2170 uint8_t error_type; 2171 uint16_t l2_len; 2172 uint16_t skip_len; 2173 uint8_t pktbufsz_type; 2174 uint64_t rcr_entry; 2175 uint64_t *pkt_buf_addr_pp; 2176 uint64_t *pkt_buf_addr_p; 2177 uint32_t buf_offset; 2178 uint32_t bsize; 2179 uint32_t error_disp_cnt; 2180 uint32_t msg_index; 2181 p_rx_rbr_ring_t rx_rbr_p; 2182 p_rx_msg_t *rx_msg_ring_p; 2183 p_rx_msg_t rx_msg_p; 2184 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2185 nxge_status_t status = NXGE_OK; 2186 boolean_t is_valid = B_FALSE; 2187 p_nxge_rx_ring_stats_t rdc_stats; 2188 uint32_t bytes_read; 2189 uint64_t pkt_type; 2190 uint64_t frag; 2191 boolean_t pkt_too_long_err = B_FALSE; 2192 #ifdef NXGE_DEBUG 2193 int dump_len; 2194 #endif 2195 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2196 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2197 2198 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2199 2200 multi = (rcr_entry & RCR_MULTI_MASK); 2201 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2202 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2203 2204 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2205 frag = (rcr_entry & RCR_FRAG_MASK); 2206 2207 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2208 2209 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2210 RCR_PKTBUFSZ_SHIFT); 2211 #if defined(__i386) 2212 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2213 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2214 #else 2215 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2216 RCR_PKT_BUF_ADDR_SHIFT); 2217 #endif 2218 2219 channel = rcr_p->rdc; 2220 2221 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2222 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2223 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2224 "error_type 0x%x pkt_type 0x%x " 2225 "pktbufsz_type %d ", 2226 rcr_desc_rd_head_p, 2227 rcr_entry, pkt_buf_addr_pp, l2_len, 2228 multi, 2229 error_type, 2230 pkt_type, 2231 pktbufsz_type)); 2232 2233 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2234 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2235 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2236 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2237 rcr_entry, pkt_buf_addr_pp, l2_len, 2238 multi, 2239 error_type, 2240 pkt_type)); 2241 2242 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2243 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2244 "full pkt_buf_addr_pp $%p l2_len %d", 2245 rcr_entry, pkt_buf_addr_pp, l2_len)); 2246 2247 /* get the stats ptr */ 2248 rdc_stats = rcr_p->rdc_stats; 2249 2250 if (!l2_len) { 2251 2252 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2253 "<== nxge_receive_packet: failed: l2 length is 0.")); 2254 return; 2255 } 2256 2257 /* 2258 * Software workaround for a BMAC hardware limitation that allows a 2259 * maxframe size of 1526 instead of 1522 for non-jumbo, and 0x2406 2260 * instead of 0x2400 for jumbo. 2261 */ 2262 if (l2_len > nxgep->mac.maxframesize) { 2263 pkt_too_long_err = B_TRUE; 2264 } 2265 2266 /* Hardware sends us 4 bytes of CRC as no stripping is done.
*/ 2267 l2_len -= ETHERFCSL; 2268 2269 /* shift 6 bits to get the full io address */ 2270 #if defined(__i386) 2271 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2272 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2273 #else 2274 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2275 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2276 #endif 2277 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2278 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2279 "full pkt_buf_addr_pp $%p l2_len %d", 2280 rcr_entry, pkt_buf_addr_pp, l2_len)); 2281 2282 rx_rbr_p = rcr_p->rx_rbr_p; 2283 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2284 2285 if (first_entry) { 2286 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2287 RXDMA_HDR_SIZE_DEFAULT); 2288 2289 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2290 "==> nxge_receive_packet: first entry 0x%016llx " 2291 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2292 rcr_entry, pkt_buf_addr_pp, l2_len, 2293 hdr_size)); 2294 } 2295 2296 MUTEX_ENTER(&rcr_p->lock); 2297 MUTEX_ENTER(&rx_rbr_p->lock); 2298 2299 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2300 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2301 "full pkt_buf_addr_pp $%p l2_len %d", 2302 rcr_entry, pkt_buf_addr_pp, l2_len)); 2303 2304 /* 2305 * Packet buffer address in the completion entry points 2306 * to the starting buffer address (offset 0). 2307 * Use the starting buffer address to locate the corresponding 2308 * kernel address. 2309 */ 2310 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2311 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2312 &buf_offset, 2313 &msg_index); 2314 2315 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2316 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2317 "full pkt_buf_addr_pp $%p l2_len %d", 2318 rcr_entry, pkt_buf_addr_pp, l2_len)); 2319 2320 if (status != NXGE_OK) { 2321 MUTEX_EXIT(&rx_rbr_p->lock); 2322 MUTEX_EXIT(&rcr_p->lock); 2323 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2324 "<== nxge_receive_packet: found vaddr failed %d", 2325 status)); 2326 return; 2327 } 2328 2329 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2330 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2331 "full pkt_buf_addr_pp $%p l2_len %d", 2332 rcr_entry, pkt_buf_addr_pp, l2_len)); 2333 2334 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2335 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2336 "full pkt_buf_addr_pp $%p l2_len %d", 2337 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2338 2339 rx_msg_p = rx_msg_ring_p[msg_index]; 2340 2341 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2342 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2343 "full pkt_buf_addr_pp $%p l2_len %d", 2344 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2345 2346 switch (pktbufsz_type) { 2347 case RCR_PKTBUFSZ_0: 2348 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2349 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2350 "==> nxge_receive_packet: 0 buf %d", bsize)); 2351 break; 2352 case RCR_PKTBUFSZ_1: 2353 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2354 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2355 "==> nxge_receive_packet: 1 buf %d", bsize)); 2356 break; 2357 case RCR_PKTBUFSZ_2: 2358 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2359 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2360 "==> nxge_receive_packet: 2 buf %d", bsize)); 2361 break; 2362 case RCR_SINGLE_BLOCK: 2363 bsize = rx_msg_p->block_size; 2364 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2365 "==> nxge_receive_packet: single %d", bsize)); 2366 2367 break; 2368 default: 2369 MUTEX_EXIT(&rx_rbr_p->lock); 2370 MUTEX_EXIT(&rcr_p->lock); 2371 return; 2372 } 2373 2374 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2375 (buf_offset + sw_offset_bytes), 2376 (hdr_size + l2_len), 2377 DDI_DMA_SYNC_FORCPU); 2378 2379 
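	/*
	 * A worked example of the usage counting that follows (the numbers
	 * are illustrative only): with an 8KB buffer block and a 2KB packet
	 * buffer size, max_usage_cnt = block_size / bsize = 4, so the block
	 * is handed out four times before buffer_free is set and the block
	 * becomes eligible for reposting through nxge_freeb().
	 */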
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2380 "==> nxge_receive_packet: after first dump:usage count")); 2381 2382 if (rx_msg_p->cur_usage_cnt == 0) { 2383 if (rx_rbr_p->rbr_use_bcopy) { 2384 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2385 if (rx_rbr_p->rbr_consumed < 2386 rx_rbr_p->rbr_threshold_hi) { 2387 if (rx_rbr_p->rbr_threshold_lo == 0 || 2388 ((rx_rbr_p->rbr_consumed >= 2389 rx_rbr_p->rbr_threshold_lo) && 2390 (rx_rbr_p->rbr_bufsize_type >= 2391 pktbufsz_type))) { 2392 rx_msg_p->rx_use_bcopy = B_TRUE; 2393 } 2394 } else { 2395 rx_msg_p->rx_use_bcopy = B_TRUE; 2396 } 2397 } 2398 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2399 "==> nxge_receive_packet: buf %d (new block) ", 2400 bsize)); 2401 2402 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2403 rx_msg_p->pkt_buf_size = bsize; 2404 rx_msg_p->cur_usage_cnt = 1; 2405 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2406 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2407 "==> nxge_receive_packet: buf %d " 2408 "(single block) ", 2409 bsize)); 2410 /* 2411 * Buffer can be reused once the free function 2412 * is called. 2413 */ 2414 rx_msg_p->max_usage_cnt = 1; 2415 buffer_free = B_TRUE; 2416 } else { 2417 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2418 if (rx_msg_p->max_usage_cnt == 1) { 2419 buffer_free = B_TRUE; 2420 } 2421 } 2422 } else { 2423 rx_msg_p->cur_usage_cnt++; 2424 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2425 buffer_free = B_TRUE; 2426 } 2427 } 2428 2429 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2430 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2431 msg_index, l2_len, 2432 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2433 2434 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2435 rdc_stats->ierrors++; 2436 if (dcf_err) { 2437 rdc_stats->dcf_err++; 2438 #ifdef NXGE_DEBUG 2439 if (!rdc_stats->dcf_err) { 2440 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2441 "nxge_receive_packet: channel %d dcf_err rcr" 2442 " 0x%llx", channel, rcr_entry)); 2443 } 2444 #endif 2445 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2446 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2447 } else if (pkt_too_long_err) { 2448 rdc_stats->pkt_too_long_err++; 2449 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2450 " channel %d packet length [%d] > " 2451 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2452 nxgep->mac.maxframesize)); 2453 } else { 2454 /* Update error stats */ 2455 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2456 rdc_stats->errlog.compl_err_type = error_type; 2457 2458 switch (error_type) { 2459 /* 2460 * Do not send FMA ereport for RCR_L2_ERROR and 2461 * RCR_L4_CSUM_ERROR because most likely they indicate 2462 * back pressure rather than HW failures. 2463 */ 2464 case RCR_L2_ERROR: 2465 rdc_stats->l2_err++; 2466 if (rdc_stats->l2_err < 2467 error_disp_cnt) { 2468 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2469 " nxge_receive_packet:" 2470 " channel %d RCR L2_ERROR", 2471 channel)); 2472 } 2473 break; 2474 case RCR_L4_CSUM_ERROR: 2475 error_send_up = B_TRUE; 2476 rdc_stats->l4_cksum_err++; 2477 if (rdc_stats->l4_cksum_err < 2478 error_disp_cnt) { 2479 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2480 " nxge_receive_packet:" 2481 " channel %d" 2482 " RCR L4_CSUM_ERROR", channel)); 2483 } 2484 break; 2485 /* 2486 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2487 * RCR_ZCP_SOFT_ERROR because they reflect the same 2488 * FFLP and ZCP errors that have been reported by 2489 * nxge_fflp.c and nxge_zcp.c. 
2490 */ 2491 case RCR_FFLP_SOFT_ERROR: 2492 error_send_up = B_TRUE; 2493 rdc_stats->fflp_soft_err++; 2494 if (rdc_stats->fflp_soft_err < 2495 error_disp_cnt) { 2496 NXGE_ERROR_MSG((nxgep, 2497 NXGE_ERR_CTL, 2498 " nxge_receive_packet:" 2499 " channel %d" 2500 " RCR FFLP_SOFT_ERROR", channel)); 2501 } 2502 break; 2503 case RCR_ZCP_SOFT_ERROR: 2504 error_send_up = B_TRUE; 2505 rdc_stats->zcp_soft_err++; 2506 if (rdc_stats->zcp_soft_err < 2507 error_disp_cnt) 2508 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2509 " nxge_receive_packet: Channel %d" 2510 " RCR ZCP_SOFT_ERROR", channel)); 2511 break; 2512 default: 2513 rdc_stats->rcr_unknown_err++; 2514 if (rdc_stats->rcr_unknown_err 2515 < error_disp_cnt) { 2516 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2517 " nxge_receive_packet: Channel %d" 2518 " RCR entry 0x%llx error 0x%x", 2519 channel, rcr_entry, error_type)); 2520 } 2521 break; 2522 } 2523 } 2524 2525 /* 2526 * Update and repost buffer block if max usage 2527 * count is reached. 2528 */ 2529 if (error_send_up == B_FALSE) { 2530 atomic_inc_32(&rx_msg_p->ref_cnt); 2531 if (buffer_free == B_TRUE) { 2532 rx_msg_p->free = B_TRUE; 2533 } 2534 2535 MUTEX_EXIT(&rx_rbr_p->lock); 2536 MUTEX_EXIT(&rcr_p->lock); 2537 nxge_freeb(rx_msg_p); 2538 return; 2539 } 2540 } 2541 2542 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2543 "==> nxge_receive_packet: DMA sync second ")); 2544 2545 bytes_read = rcr_p->rcvd_pkt_bytes; 2546 skip_len = sw_offset_bytes + hdr_size; 2547 if (!rx_msg_p->rx_use_bcopy) { 2548 /* 2549 * For loaned-up buffers, the driver reference count 2550 * will be incremented first and then the free state will be set. 2551 */ 2552 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2553 if (first_entry) { 2554 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2555 if (l2_len < bsize - skip_len) { 2556 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2557 } else { 2558 nmp->b_wptr = &nmp->b_rptr[bsize 2559 - skip_len]; 2560 } 2561 } else { 2562 if (l2_len - bytes_read < bsize) { 2563 nmp->b_wptr = 2564 &nmp->b_rptr[l2_len - bytes_read]; 2565 } else { 2566 nmp->b_wptr = &nmp->b_rptr[bsize]; 2567 } 2568 } 2569 } 2570 } else { 2571 if (first_entry) { 2572 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2573 l2_len < bsize - skip_len ? 2574 l2_len : bsize - skip_len); 2575 } else { 2576 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2577 l2_len - bytes_read < bsize ? 2578 l2_len - bytes_read : bsize); 2579 } 2580 } 2581 if (nmp != NULL) { 2582 if (first_entry) 2583 bytes_read = nmp->b_wptr - nmp->b_rptr; 2584 else 2585 bytes_read += nmp->b_wptr - nmp->b_rptr; 2586 2587 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2588 "==> nxge_receive_packet after dupb: " 2589 "rbr consumed %d " 2590 "pktbufsz_type %d " 2591 "nmp $%p rptr $%p wptr $%p " 2592 "buf_offset %d bsize %d l2_len %d skip_len %d", 2593 rx_rbr_p->rbr_consumed, 2594 pktbufsz_type, 2595 nmp, nmp->b_rptr, nmp->b_wptr, 2596 buf_offset, bsize, l2_len, skip_len)); 2597 } else { 2598 cmn_err(CE_WARN, "!nxge_receive_packet: " 2599 "update stats (error)"); 2600 atomic_inc_32(&rx_msg_p->ref_cnt); 2601 if (buffer_free == B_TRUE) { 2602 rx_msg_p->free = B_TRUE; 2603 } 2604 MUTEX_EXIT(&rx_rbr_p->lock); 2605 MUTEX_EXIT(&rcr_p->lock); 2606 nxge_freeb(rx_msg_p); 2607 return; 2608 } 2609 2610 if (buffer_free == B_TRUE) { 2611 rx_msg_p->free = B_TRUE; 2612 } 2613 /* 2614 * ERROR, FRAG and PKT_TYPE are only reported 2615 * in the first entry. 2616 * If a packet is not fragmented and no error bit is set, then 2617 * L4 checksum is OK.
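 * That check is applied further below: hcksum_assoc() marks the
 * message with HCK_FULLCKSUM | HCK_FULLCKSUM_OK only when the packet
 * is TCP or UDP, is not fragmented, and has no error bits set.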
2618 */ 2619 is_valid = (nmp != NULL); 2620 if (first_entry) { 2621 rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 2622 rdc_stats->ibytes += skip_len + l2_len < bsize ? 2623 l2_len : bsize; 2624 } else { 2625 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 2626 l2_len - bytes_read : bsize; 2627 } 2628 2629 rcr_p->rcvd_pkt_bytes = bytes_read; 2630 2631 MUTEX_EXIT(&rx_rbr_p->lock); 2632 MUTEX_EXIT(&rcr_p->lock); 2633 2634 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2635 atomic_inc_32(&rx_msg_p->ref_cnt); 2636 nxge_freeb(rx_msg_p); 2637 } 2638 2639 if (is_valid) { 2640 nmp->b_cont = NULL; 2641 if (first_entry) { 2642 *mp = nmp; 2643 *mp_cont = NULL; 2644 } else { 2645 *mp_cont = nmp; 2646 } 2647 } 2648 2649 /* 2650 * Update stats and hardware checksuming. 2651 */ 2652 if (is_valid && !multi) { 2653 /* 2654 * If the checksum flag nxge_chksum_enable 2655 * is enabled, TCP and UDP packets can be sent 2656 * up with good checksum. If the checksum flag 2657 * is not set, checksum reporting will apply to 2658 * TCP packets only (workaround for a hardware bug). 2659 */ 2660 if (nxge_cksum_enable) { 2661 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2662 pkt_type == RCR_PKT_IS_UDP) ? 2663 B_TRUE: B_FALSE); 2664 } else { 2665 /* TCP checksum only. */ 2666 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2667 B_TRUE: B_FALSE); 2668 } 2669 2670 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2671 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2672 is_valid, multi, is_tcp_udp, frag, error_type)); 2673 2674 if (is_tcp_udp && !frag && !error_type) { 2675 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2676 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2677 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2678 "==> nxge_receive_packet: Full tcp/udp cksum " 2679 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2680 "error %d", 2681 is_valid, multi, is_tcp_udp, frag, error_type)); 2682 } 2683 } 2684 2685 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2686 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2687 2688 *multi_p = (multi == RCR_MULTI_MASK); 2689 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2690 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2691 *multi_p, nmp, *mp, *mp_cont)); 2692 } 2693 2694 /*ARGSUSED*/ 2695 static nxge_status_t 2696 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2697 { 2698 p_nxge_rx_ring_stats_t rdc_stats; 2699 npi_handle_t handle; 2700 npi_status_t rs; 2701 boolean_t rxchan_fatal = B_FALSE; 2702 boolean_t rxport_fatal = B_FALSE; 2703 uint8_t portn; 2704 nxge_status_t status = NXGE_OK; 2705 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2706 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2707 2708 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2709 portn = nxgep->mac.portnum; 2710 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2711 2712 if (cs.bits.hdw.rbr_tmout) { 2713 rdc_stats->rx_rbr_tmout++; 2714 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2715 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2716 rxchan_fatal = B_TRUE; 2717 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2718 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2719 } 2720 if (cs.bits.hdw.rsp_cnt_err) { 2721 rdc_stats->rsp_cnt_err++; 2722 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2723 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2724 rxchan_fatal = B_TRUE; 2725 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2726 "==> nxge_rx_err_evnts(channel %d): " 2727 "rsp_cnt_err", channel)); 2728 } 2729 if (cs.bits.hdw.byte_en_bus) { 2730 rdc_stats->byte_en_bus++; 2731 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2732 
NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2733 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2734 "==> nxge_rx_err_evnts(channel %d): " 2735 "fatal error: byte_en_bus", channel)); 2736 rxchan_fatal = B_TRUE; 2737 } 2738 if (cs.bits.hdw.rsp_dat_err) { 2739 rdc_stats->rsp_dat_err++; 2740 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2741 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2742 rxchan_fatal = B_TRUE; 2743 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2744 "==> nxge_rx_err_evnts(channel %d): " 2745 "fatal error: rsp_dat_err", channel)); 2746 } 2747 if (cs.bits.hdw.rcr_ack_err) { 2748 rdc_stats->rcr_ack_err++; 2749 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2750 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2751 rxchan_fatal = B_TRUE; 2752 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2753 "==> nxge_rx_err_evnts(channel %d): " 2754 "fatal error: rcr_ack_err", channel)); 2755 } 2756 if (cs.bits.hdw.dc_fifo_err) { 2757 rdc_stats->dc_fifo_err++; 2758 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2759 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2760 /* This is not a fatal error! */ 2761 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2762 "==> nxge_rx_err_evnts(channel %d): " 2763 "dc_fifo_err", channel)); 2764 rxport_fatal = B_TRUE; 2765 } 2766 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2767 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2768 &rdc_stats->errlog.pre_par, 2769 &rdc_stats->errlog.sha_par)) 2770 != NPI_SUCCESS) { 2771 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2772 "==> nxge_rx_err_evnts(channel %d): " 2773 "rcr_sha_par: get perr", channel)); 2774 return (NXGE_ERROR | rs); 2775 } 2776 if (cs.bits.hdw.rcr_sha_par) { 2777 rdc_stats->rcr_sha_par++; 2778 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2779 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2780 rxchan_fatal = B_TRUE; 2781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2782 "==> nxge_rx_err_evnts(channel %d): " 2783 "fatal error: rcr_sha_par", channel)); 2784 } 2785 if (cs.bits.hdw.rbr_pre_par) { 2786 rdc_stats->rbr_pre_par++; 2787 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2788 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2789 rxchan_fatal = B_TRUE; 2790 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2791 "==> nxge_rx_err_evnts(channel %d): " 2792 "fatal error: rbr_pre_par", channel)); 2793 } 2794 } 2795 /* 2796 * The Following 4 status bits are for information, the system 2797 * is running fine. There is no need to send FMA ereports or 2798 * log messages. 
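 * (The four bits are port_drop_pkt, wred_drop, rbr_pre_empty and
 * rcr_shadow_full; only their per-channel rdc_stats counters are
 * incremented below.)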
2799 */ 2800 if (cs.bits.hdw.port_drop_pkt) { 2801 rdc_stats->port_drop_pkt++; 2802 } 2803 if (cs.bits.hdw.wred_drop) { 2804 rdc_stats->wred_drop++; 2805 } 2806 if (cs.bits.hdw.rbr_pre_empty) { 2807 rdc_stats->rbr_pre_empty++; 2808 } 2809 if (cs.bits.hdw.rcr_shadow_full) { 2810 rdc_stats->rcr_shadow_full++; 2811 } 2812 if (cs.bits.hdw.config_err) { 2813 rdc_stats->config_err++; 2814 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2815 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2816 rxchan_fatal = B_TRUE; 2817 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2818 "==> nxge_rx_err_evnts(channel %d): " 2819 "config error", channel)); 2820 } 2821 if (cs.bits.hdw.rcrincon) { 2822 rdc_stats->rcrincon++; 2823 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2824 NXGE_FM_EREPORT_RDMC_RCRINCON); 2825 rxchan_fatal = B_TRUE; 2826 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2827 "==> nxge_rx_err_evnts(channel %d): " 2828 "fatal error: rcrincon error", channel)); 2829 } 2830 if (cs.bits.hdw.rcrfull) { 2831 rdc_stats->rcrfull++; 2832 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2833 NXGE_FM_EREPORT_RDMC_RCRFULL); 2834 rxchan_fatal = B_TRUE; 2835 if (rdc_stats->rcrfull < error_disp_cnt) 2836 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2837 "==> nxge_rx_err_evnts(channel %d): " 2838 "fatal error: rcrfull error", channel)); 2839 } 2840 if (cs.bits.hdw.rbr_empty) { 2841 /* 2842 * This bit is for information, there is no need 2843 * send FMA ereport or log a message. 2844 */ 2845 rdc_stats->rbr_empty++; 2846 } 2847 if (cs.bits.hdw.rbrfull) { 2848 rdc_stats->rbrfull++; 2849 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2850 NXGE_FM_EREPORT_RDMC_RBRFULL); 2851 rxchan_fatal = B_TRUE; 2852 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2853 "==> nxge_rx_err_evnts(channel %d): " 2854 "fatal error: rbr_full error", channel)); 2855 } 2856 if (cs.bits.hdw.rbrlogpage) { 2857 rdc_stats->rbrlogpage++; 2858 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2859 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2860 rxchan_fatal = B_TRUE; 2861 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2862 "==> nxge_rx_err_evnts(channel %d): " 2863 "fatal error: rbr logical page error", channel)); 2864 } 2865 if (cs.bits.hdw.cfiglogpage) { 2866 rdc_stats->cfiglogpage++; 2867 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2868 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2869 rxchan_fatal = B_TRUE; 2870 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2871 "==> nxge_rx_err_evnts(channel %d): " 2872 "fatal error: cfig logical page error", channel)); 2873 } 2874 2875 if (rxport_fatal) { 2876 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2877 " nxge_rx_err_evnts: fatal error on Port #%d\n", 2878 portn)); 2879 if (isLDOMguest(nxgep)) { 2880 status = NXGE_ERROR; 2881 } else { 2882 status = nxge_ipp_fatal_err_recover(nxgep); 2883 if (status == NXGE_OK) { 2884 FM_SERVICE_RESTORED(nxgep); 2885 } 2886 } 2887 } 2888 2889 if (rxchan_fatal) { 2890 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2891 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 2892 channel)); 2893 if (isLDOMguest(nxgep)) { 2894 status = NXGE_ERROR; 2895 } else { 2896 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2897 if (status == NXGE_OK) { 2898 FM_SERVICE_RESTORED(nxgep); 2899 } 2900 } 2901 } 2902 2903 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2904 2905 return (status); 2906 } 2907 2908 /* 2909 * nxge_rdc_hvio_setup 2910 * 2911 * This code appears to setup some Hypervisor variables. 2912 * 2913 * Arguments: 2914 * nxgep 2915 * channel 2916 * 2917 * Notes: 2918 * What does NIU_LP_WORKAROUND mean? 
* 2920 * NPI/NXGE function calls: 2921 * na 2922 * 2923 * Context: 2924 * Any domain 2925 */ 2926 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2927 static void 2928 nxge_rdc_hvio_setup( 2929 nxge_t *nxgep, int channel) 2930 { 2931 nxge_dma_common_t *dma_common; 2932 nxge_dma_common_t *dma_control; 2933 rx_rbr_ring_t *ring; 2934 2935 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 2936 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2937 2938 ring->hv_set = B_FALSE; 2939 2940 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 2941 dma_common->orig_ioaddr_pp; 2942 ring->hv_rx_buf_ioaddr_size = (uint64_t) 2943 dma_common->orig_alength; 2944 2945 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2946 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 2947 channel, ring->hv_rx_buf_base_ioaddr_pp, 2948 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 2949 dma_common->orig_alength, dma_common->orig_alength)); 2950 2951 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2952 2953 ring->hv_rx_cntl_base_ioaddr_pp = 2954 (uint64_t)dma_control->orig_ioaddr_pp; 2955 ring->hv_rx_cntl_ioaddr_size = 2956 (uint64_t)dma_control->orig_alength; 2957 2958 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2959 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 2960 channel, ring->hv_rx_cntl_base_ioaddr_pp, 2961 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 2962 dma_control->orig_alength, dma_control->orig_alength)); 2963 } 2964 #endif 2965 2966 /* 2967 * nxge_map_rxdma 2968 * 2969 * Map an RDC into our kernel space. 2970 * 2971 * Arguments: 2972 * nxgep 2973 * channel The channel to map. 2974 * 2975 * Notes: 2976 * 1. Allocate & initialise a memory pool, if necessary. 2977 * 2. Allocate however many receive buffers are required. 2978 * 3. Set up buffers, descriptors, and mailbox. 2979 * 2980 * NPI/NXGE function calls: 2981 * nxge_alloc_rx_mem_pool() 2982 * nxge_alloc_rbb() 2983 * nxge_map_rxdma_channel() 2984 * 2985 * Registers accessed: 2986 * 2987 * Context: 2988 * Any domain 2989 */ 2990 static nxge_status_t 2991 nxge_map_rxdma(p_nxge_t nxgep, int channel) 2992 { 2993 nxge_dma_common_t **data; 2994 nxge_dma_common_t **control; 2995 rx_rbr_ring_t **rbr_ring; 2996 rx_rcr_ring_t **rcr_ring; 2997 rx_mbox_t **mailbox; 2998 uint32_t chunks; 2999 3000 nxge_status_t status; 3001 3002 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3003 3004 if (!nxgep->rx_buf_pool_p) { 3005 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3007 "<== nxge_map_rxdma: buf not allocated")); 3008 return (NXGE_ERROR); 3009 } 3010 } 3011 3012 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3013 return (NXGE_ERROR); 3014 3015 /* 3016 * Timeout should be set based on the system clock divider. 3017 * The following timeout value of 1 assumes that the 3018 * granularity (1000) is 3 microseconds running at 300MHz. 3019 */ 3020 3021 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 3022 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 3023 3024 /* 3025 * Map descriptors from the buffer pools for each dma channel. 3026 */ 3027 3028 /* 3029 * Set up and prepare buffer blocks, descriptors 3030 * and mailbox.
3031 */ 3032 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3033 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3034 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3035 3036 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3037 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3038 3039 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3040 3041 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3042 chunks, control, rcr_ring, mailbox); 3043 if (status != NXGE_OK) { 3044 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3045 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3046 "returned 0x%x", 3047 channel, status)); 3048 return (status); 3049 } 3050 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3051 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3052 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3053 &nxgep->statsp->rdc_stats[channel]; 3054 3055 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3056 if (!isLDOMguest(nxgep)) 3057 nxge_rdc_hvio_setup(nxgep, channel); 3058 #endif 3059 3060 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3061 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3062 3063 return (status); 3064 } 3065 3066 static void 3067 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3068 { 3069 rx_rbr_ring_t *rbr_ring; 3070 rx_rcr_ring_t *rcr_ring; 3071 rx_mbox_t *mailbox; 3072 3073 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3074 3075 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3076 !nxgep->rx_mbox_areas_p) 3077 return; 3078 3079 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3080 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3081 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3082 3083 if (!rbr_ring || !rcr_ring || !mailbox) 3084 return; 3085 3086 (void) nxge_unmap_rxdma_channel( 3087 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3088 3089 nxge_free_rxb(nxgep, channel); 3090 3091 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3092 } 3093 3094 nxge_status_t 3095 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3096 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3097 uint32_t num_chunks, 3098 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3099 p_rx_mbox_t *rx_mbox_p) 3100 { 3101 int status = NXGE_OK; 3102 3103 /* 3104 * Set up and prepare buffer blocks, descriptors 3105 * and mailbox. 3106 */ 3107 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3108 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3109 /* 3110 * Receive buffer blocks 3111 */ 3112 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3113 dma_buf_p, rbr_p, num_chunks); 3114 if (status != NXGE_OK) { 3115 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3116 "==> nxge_map_rxdma_channel (channel %d): " 3117 "map buffer failed 0x%x", channel, status)); 3118 goto nxge_map_rxdma_channel_exit; 3119 } 3120 3121 /* 3122 * Receive block ring, completion ring and mailbox. 
3123 */ 3124 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3125 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3126 if (status != NXGE_OK) { 3127 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3128 "==> nxge_map_rxdma_channel (channel %d): " 3129 "map config failed 0x%x", channel, status)); 3130 goto nxge_map_rxdma_channel_fail2; 3131 } 3132 3133 goto nxge_map_rxdma_channel_exit; 3134 3135 nxge_map_rxdma_channel_fail3: 3136 /* Free rbr, rcr */ 3137 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3138 "==> nxge_map_rxdma_channel: free rbr/rcr " 3139 "(status 0x%x channel %d)", 3140 status, channel)); 3141 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3142 *rcr_p, *rx_mbox_p); 3143 3144 nxge_map_rxdma_channel_fail2: 3145 /* Free buffer blocks */ 3146 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3147 "==> nxge_map_rxdma_channel: free rx buffers" 3148 "(nxgep 0x%x status 0x%x channel %d)", 3149 nxgep, status, channel)); 3150 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3151 3152 status = NXGE_ERROR; 3153 3154 nxge_map_rxdma_channel_exit: 3155 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3156 "<== nxge_map_rxdma_channel: " 3157 "(nxgep 0x%x status 0x%x channel %d)", 3158 nxgep, status, channel)); 3159 3160 return (status); 3161 } 3162 3163 /*ARGSUSED*/ 3164 static void 3165 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3166 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3167 { 3168 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3169 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3170 3171 /* 3172 * unmap receive block ring, completion ring and mailbox. 3173 */ 3174 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3175 rcr_p, rx_mbox_p); 3176 3177 /* unmap buffer blocks */ 3178 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3179 3180 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3181 } 3182 3183 /*ARGSUSED*/ 3184 static nxge_status_t 3185 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3186 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3187 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3188 { 3189 p_rx_rbr_ring_t rbrp; 3190 p_rx_rcr_ring_t rcrp; 3191 p_rx_mbox_t mboxp; 3192 p_nxge_dma_common_t cntl_dmap; 3193 p_nxge_dma_common_t dmap; 3194 p_rx_msg_t *rx_msg_ring; 3195 p_rx_msg_t rx_msg_p; 3196 p_rbr_cfig_a_t rcfga_p; 3197 p_rbr_cfig_b_t rcfgb_p; 3198 p_rcrcfig_a_t cfga_p; 3199 p_rcrcfig_b_t cfgb_p; 3200 p_rxdma_cfig1_t cfig1_p; 3201 p_rxdma_cfig2_t cfig2_p; 3202 p_rbr_kick_t kick_p; 3203 uint32_t dmaaddrp; 3204 uint32_t *rbr_vaddrp; 3205 uint32_t bkaddr; 3206 nxge_status_t status = NXGE_OK; 3207 int i; 3208 uint32_t nxge_port_rcr_size; 3209 3210 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3211 "==> nxge_map_rxdma_channel_cfg_ring")); 3212 3213 cntl_dmap = *dma_cntl_p; 3214 3215 /* Map in the receive block ring */ 3216 rbrp = *rbr_p; 3217 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3218 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3219 /* 3220 * Zero out buffer block ring descriptors. 
3221 */ 3222 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3223 3224 rcfga_p = &(rbrp->rbr_cfga); 3225 rcfgb_p = &(rbrp->rbr_cfgb); 3226 kick_p = &(rbrp->rbr_kick); 3227 rcfga_p->value = 0; 3228 rcfgb_p->value = 0; 3229 kick_p->value = 0; 3230 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3231 rcfga_p->value = (rbrp->rbr_addr & 3232 (RBR_CFIG_A_STDADDR_MASK | 3233 RBR_CFIG_A_STDADDR_BASE_MASK)); 3234 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3235 3236 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3237 rcfgb_p->bits.ldw.vld0 = 1; 3238 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3239 rcfgb_p->bits.ldw.vld1 = 1; 3240 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3241 rcfgb_p->bits.ldw.vld2 = 1; 3242 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3243 3244 /* 3245 * For each buffer block, enter receive block address to the ring. 3246 */ 3247 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3248 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3249 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3250 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3251 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3252 3253 rx_msg_ring = rbrp->rx_msg_ring; 3254 for (i = 0; i < rbrp->tnblocks; i++) { 3255 rx_msg_p = rx_msg_ring[i]; 3256 rx_msg_p->nxgep = nxgep; 3257 rx_msg_p->rx_rbr_p = rbrp; 3258 bkaddr = (uint32_t) 3259 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3260 >> RBR_BKADDR_SHIFT)); 3261 rx_msg_p->free = B_FALSE; 3262 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3263 3264 *rbr_vaddrp++ = bkaddr; 3265 } 3266 3267 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3268 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3269 3270 rbrp->rbr_rd_index = 0; 3271 3272 rbrp->rbr_consumed = 0; 3273 rbrp->rbr_use_bcopy = B_TRUE; 3274 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3275 /* 3276 * Do bcopy on packets greater than bcopy size once 3277 * the lo threshold is reached. 3278 * This lo threshold should be less than the hi threshold. 3279 * 3280 * Do bcopy on every packet once the hi threshold is reached. 
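 *
 * A worked example of the threshold arithmetic below (assuming a ring
 * of rbb_max = 4096 blocks and an NXGE_RX_BCOPY_SCALE of 8): a setting
 * of NXGE_RX_COPY_4 yields a threshold of 4096 * 4 / 8 = 2048, so
 * bcopy kicks in once 2048 receive buffer blocks are outstanding.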
3281 */ 3282 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3283 /* default it to use hi */ 3284 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3285 } 3286 3287 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3288 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3289 } 3290 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3291 3292 switch (nxge_rx_threshold_hi) { 3293 default: 3294 case NXGE_RX_COPY_NONE: 3295 /* Do not do bcopy at all */ 3296 rbrp->rbr_use_bcopy = B_FALSE; 3297 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3298 break; 3299 3300 case NXGE_RX_COPY_1: 3301 case NXGE_RX_COPY_2: 3302 case NXGE_RX_COPY_3: 3303 case NXGE_RX_COPY_4: 3304 case NXGE_RX_COPY_5: 3305 case NXGE_RX_COPY_6: 3306 case NXGE_RX_COPY_7: 3307 rbrp->rbr_threshold_hi = 3308 rbrp->rbb_max * 3309 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3310 break; 3311 3312 case NXGE_RX_COPY_ALL: 3313 rbrp->rbr_threshold_hi = 0; 3314 break; 3315 } 3316 3317 switch (nxge_rx_threshold_lo) { 3318 default: 3319 case NXGE_RX_COPY_NONE: 3320 /* Do not do bcopy at all */ 3321 if (rbrp->rbr_use_bcopy) { 3322 rbrp->rbr_use_bcopy = B_FALSE; 3323 } 3324 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3325 break; 3326 3327 case NXGE_RX_COPY_1: 3328 case NXGE_RX_COPY_2: 3329 case NXGE_RX_COPY_3: 3330 case NXGE_RX_COPY_4: 3331 case NXGE_RX_COPY_5: 3332 case NXGE_RX_COPY_6: 3333 case NXGE_RX_COPY_7: 3334 rbrp->rbr_threshold_lo = 3335 rbrp->rbb_max * 3336 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3337 break; 3338 3339 case NXGE_RX_COPY_ALL: 3340 rbrp->rbr_threshold_lo = 0; 3341 break; 3342 } 3343 3344 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3345 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3346 "rbb_max %d " 3347 "rbrp->rbr_bufsize_type %d " 3348 "rbb_threshold_hi %d " 3349 "rbb_threshold_lo %d", 3350 dma_channel, 3351 rbrp->rbb_max, 3352 rbrp->rbr_bufsize_type, 3353 rbrp->rbr_threshold_hi, 3354 rbrp->rbr_threshold_lo)); 3355 3356 rbrp->page_valid.value = 0; 3357 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3358 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3359 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3360 rbrp->page_hdl.value = 0; 3361 3362 rbrp->page_valid.bits.ldw.page0 = 1; 3363 rbrp->page_valid.bits.ldw.page1 = 1; 3364 3365 /* Map in the receive completion ring */ 3366 rcrp = (p_rx_rcr_ring_t) 3367 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3368 rcrp->rdc = dma_channel; 3369 3370 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3371 rcrp->comp_size = nxge_port_rcr_size; 3372 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3373 3374 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3375 3376 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3377 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3378 sizeof (rcr_entry_t)); 3379 rcrp->comp_rd_index = 0; 3380 rcrp->comp_wt_index = 0; 3381 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3382 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3383 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3384 #if defined(__i386) 3385 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3386 #else 3387 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3388 #endif 3389 3390 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3391 (nxge_port_rcr_size - 1); 3392 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3393 (nxge_port_rcr_size - 1); 3394 3395 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3396 "==> nxge_map_rxdma_channel_cfg_ring: " 3397 "channel %d " 3398 "rbr_vaddrp $%p " 3399 "rcr_desc_rd_head_p $%p " 3400 "rcr_desc_rd_head_pp $%p " 3401 "rcr_desc_rd_last_p $%p " 3402 
"rcr_desc_rd_last_pp $%p ", 3403 dma_channel, 3404 rbr_vaddrp, 3405 rcrp->rcr_desc_rd_head_p, 3406 rcrp->rcr_desc_rd_head_pp, 3407 rcrp->rcr_desc_last_p, 3408 rcrp->rcr_desc_last_pp)); 3409 3410 /* 3411 * Zero out buffer block ring descriptors. 3412 */ 3413 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3414 rcrp->intr_timeout = nxgep->intr_timeout; 3415 rcrp->intr_threshold = nxgep->intr_threshold; 3416 rcrp->full_hdr_flag = B_FALSE; 3417 rcrp->sw_priv_hdr_len = 0; 3418 3419 cfga_p = &(rcrp->rcr_cfga); 3420 cfgb_p = &(rcrp->rcr_cfgb); 3421 cfga_p->value = 0; 3422 cfgb_p->value = 0; 3423 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3424 cfga_p->value = (rcrp->rcr_addr & 3425 (RCRCFIG_A_STADDR_MASK | 3426 RCRCFIG_A_STADDR_BASE_MASK)); 3427 3428 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3429 RCRCFIG_A_LEN_SHIF); 3430 3431 /* 3432 * Timeout should be set based on the system clock divider. 3433 * The following timeout value of 1 assumes that the 3434 * granularity (1000) is 3 microseconds running at 300MHz. 3435 */ 3436 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3437 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3438 cfgb_p->bits.ldw.entout = 1; 3439 3440 /* Map in the mailbox */ 3441 mboxp = (p_rx_mbox_t) 3442 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3443 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3444 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3445 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3446 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3447 cfig1_p->value = cfig2_p->value = 0; 3448 3449 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3450 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3451 "==> nxge_map_rxdma_channel_cfg_ring: " 3452 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3453 dma_channel, cfig1_p->value, cfig2_p->value, 3454 mboxp->mbox_addr)); 3455 3456 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3457 & 0xfff); 3458 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3459 3460 3461 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3462 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3463 RXDMA_CFIG2_MBADDR_L_MASK); 3464 3465 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3466 3467 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3468 "==> nxge_map_rxdma_channel_cfg_ring: " 3469 "channel %d damaddrp $%p " 3470 "cfg1 0x%016llx cfig2 0x%016llx", 3471 dma_channel, dmaaddrp, 3472 cfig1_p->value, cfig2_p->value)); 3473 3474 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3475 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3476 3477 rbrp->rx_rcr_p = rcrp; 3478 rcrp->rx_rbr_p = rbrp; 3479 *rcr_p = rcrp; 3480 *rx_mbox_p = mboxp; 3481 3482 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3483 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3484 3485 return (status); 3486 } 3487 3488 /*ARGSUSED*/ 3489 static void 3490 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3491 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3492 { 3493 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3494 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3495 rcr_p->rdc)); 3496 3497 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3498 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3499 3500 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3501 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3502 } 3503 3504 static nxge_status_t 3505 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3506 p_nxge_dma_common_t *dma_buf_p, 3507 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3508 { 3509 p_rx_rbr_ring_t rbrp; 3510 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3511 
p_rx_msg_t *rx_msg_ring; 3512 p_rx_msg_t rx_msg_p; 3513 p_mblk_t mblk_p; 3514 3515 rxring_info_t *ring_info; 3516 nxge_status_t status = NXGE_OK; 3517 int i, j, index; 3518 uint32_t size, bsize, nblocks, nmsgs; 3519 3520 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3521 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3522 channel)); 3523 3524 dma_bufp = tmp_bufp = *dma_buf_p; 3525 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3526 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3527 "chunks bufp 0x%016llx", 3528 channel, num_chunks, dma_bufp)); 3529 3530 nmsgs = 0; 3531 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3532 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3533 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3534 "bufp 0x%016llx nblocks %d nmsgs %d", 3535 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3536 nmsgs += tmp_bufp->nblocks; 3537 } 3538 if (!nmsgs) { 3539 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3540 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3541 "no msg blocks", 3542 channel)); 3543 status = NXGE_ERROR; 3544 goto nxge_map_rxdma_channel_buf_ring_exit; 3545 } 3546 3547 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3548 3549 size = nmsgs * sizeof (p_rx_msg_t); 3550 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3551 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3552 KM_SLEEP); 3553 3554 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3555 (void *)nxgep->interrupt_cookie); 3556 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3557 (void *)nxgep->interrupt_cookie); 3558 rbrp->rdc = channel; 3559 rbrp->num_blocks = num_chunks; 3560 rbrp->tnblocks = nmsgs; 3561 rbrp->rbb_max = nmsgs; 3562 rbrp->rbr_max_size = nmsgs; 3563 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3564 3565 /* 3566 * Buffer sizes suggested by NIU architect. 3567 * 256, 512 and 2K. 3568 */ 3569 3570 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3571 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3572 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3573 3574 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3575 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3576 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3577 3578 rbrp->block_size = nxgep->rx_default_block_size; 3579 3580 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3581 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3582 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3583 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3584 } else { 3585 if (rbrp->block_size >= 0x2000) { 3586 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3587 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3588 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3589 } else { 3590 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3591 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3592 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3593 } 3594 } 3595 3596 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3597 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3598 "actual rbr max %d rbb_max %d nmsgs %d " 3599 "rbrp->block_size %d default_block_size %d " 3600 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3601 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3602 rbrp->block_size, nxgep->rx_default_block_size, 3603 nxge_rbr_size, nxge_rbr_spare_size)); 3604 3605 /* Map in buffers from the buffer pool. 
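 * For each of the num_blocks DMA chunks, the loop below records the
 * chunk's DVMA and kernel addresses in ring_info->buffer[i] and then
 * allocates one rx_msg_t per hardware block in that chunk, so that
 * rx_msg_ring[] ends up with one entry per RBR block (tnblocks total).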
*/ 3606 index = 0; 3607 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3608 bsize = dma_bufp->block_size; 3609 nblocks = dma_bufp->nblocks; 3610 #if defined(__i386) 3611 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3612 #else 3613 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3614 #endif 3615 ring_info->buffer[i].buf_index = i; 3616 ring_info->buffer[i].buf_size = dma_bufp->alength; 3617 ring_info->buffer[i].start_index = index; 3618 #if defined(__i386) 3619 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3620 #else 3621 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3622 #endif 3623 3624 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3625 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3626 "chunk %d" 3627 " nblocks %d chunk_size %x block_size 0x%x " 3628 "dma_bufp $%p", channel, i, 3629 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3630 dma_bufp)); 3631 3632 for (j = 0; j < nblocks; j++) { 3633 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3634 dma_bufp)) == NULL) { 3635 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3636 "allocb failed (index %d i %d j %d)", 3637 index, i, j)); 3638 goto nxge_map_rxdma_channel_buf_ring_fail1; 3639 } 3640 rx_msg_ring[index] = rx_msg_p; 3641 rx_msg_p->block_index = index; 3642 rx_msg_p->shifted_addr = (uint32_t) 3643 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3644 RBR_BKADDR_SHIFT)); 3645 3646 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3647 "index %d j %d rx_msg_p $%p mblk %p", 3648 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3649 3650 mblk_p = rx_msg_p->rx_mblk_p; 3651 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3652 3653 rbrp->rbr_ref_cnt++; 3654 index++; 3655 rx_msg_p->buf_dma.dma_channel = channel; 3656 } 3657 3658 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3659 if (dma_bufp->contig_alloc_type) { 3660 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3661 } 3662 3663 if (dma_bufp->kmem_alloc_type) { 3664 rbrp->rbr_alloc_type = KMEM_ALLOC; 3665 } 3666 3667 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3668 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3669 "chunk %d" 3670 " nblocks %d chunk_size %x block_size 0x%x " 3671 "dma_bufp $%p", 3672 channel, i, 3673 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3674 dma_bufp)); 3675 } 3676 if (i < rbrp->num_blocks) { 3677 goto nxge_map_rxdma_channel_buf_ring_fail1; 3678 } 3679 3680 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3681 "nxge_map_rxdma_channel_buf_ring: done buf init " 3682 "channel %d msg block entries %d", 3683 channel, index)); 3684 ring_info->block_size_mask = bsize - 1; 3685 rbrp->rx_msg_ring = rx_msg_ring; 3686 rbrp->dma_bufp = dma_buf_p; 3687 rbrp->ring_info = ring_info; 3688 3689 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3690 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3691 " nxge_map_rxdma_channel_buf_ring: " 3692 "channel %d done buf info init", channel)); 3693 3694 /* 3695 * Finally, permit nxge_freeb() to call nxge_post_page(). 
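 * (rbr_state moves from RBR_POSTING here to RBR_UNMAPPING and then
 * RBR_UNMAPPED in nxge_unmap_rxdma_channel_buf_ring(); that state is
 * how nxge_freeb() knows whether posting pages is still allowed.)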
3696 */ 3697 rbrp->rbr_state = RBR_POSTING; 3698 3699 *rbr_p = rbrp; 3700 goto nxge_map_rxdma_channel_buf_ring_exit; 3701 3702 nxge_map_rxdma_channel_buf_ring_fail1: 3703 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3704 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3705 channel, status)); 3706 3707 index--; 3708 for (; index >= 0; index--) { 3709 rx_msg_p = rx_msg_ring[index]; 3710 if (rx_msg_p != NULL) { 3711 freeb(rx_msg_p->rx_mblk_p); 3712 rx_msg_ring[index] = NULL; 3713 } 3714 } 3715 nxge_map_rxdma_channel_buf_ring_fail: 3716 MUTEX_DESTROY(&rbrp->post_lock); 3717 MUTEX_DESTROY(&rbrp->lock); 3718 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3719 KMEM_FREE(rx_msg_ring, size); 3720 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3721 3722 status = NXGE_ERROR; 3723 3724 nxge_map_rxdma_channel_buf_ring_exit: 3725 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3726 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3727 3728 return (status); 3729 } 3730 3731 /*ARGSUSED*/ 3732 static void 3733 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3734 p_rx_rbr_ring_t rbr_p) 3735 { 3736 p_rx_msg_t *rx_msg_ring; 3737 p_rx_msg_t rx_msg_p; 3738 rxring_info_t *ring_info; 3739 int i; 3740 uint32_t size; 3741 #ifdef NXGE_DEBUG 3742 int num_chunks; 3743 #endif 3744 3745 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3746 "==> nxge_unmap_rxdma_channel_buf_ring")); 3747 if (rbr_p == NULL) { 3748 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3749 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3750 return; 3751 } 3752 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3753 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3754 rbr_p->rdc)); 3755 3756 rx_msg_ring = rbr_p->rx_msg_ring; 3757 ring_info = rbr_p->ring_info; 3758 3759 if (rx_msg_ring == NULL || ring_info == NULL) { 3760 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3761 "<== nxge_unmap_rxdma_channel_buf_ring: " 3762 "rx_msg_ring $%p ring_info $%p", 3763 rx_msg_p, ring_info)); 3764 return; 3765 } 3766 3767 #ifdef NXGE_DEBUG 3768 num_chunks = rbr_p->num_blocks; 3769 #endif 3770 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3771 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3772 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3773 "tnblocks %d (max %d) size ptrs %d ", 3774 rbr_p->rdc, num_chunks, 3775 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3776 3777 for (i = 0; i < rbr_p->tnblocks; i++) { 3778 rx_msg_p = rx_msg_ring[i]; 3779 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3780 " nxge_unmap_rxdma_channel_buf_ring: " 3781 "rx_msg_p $%p", 3782 rx_msg_p)); 3783 if (rx_msg_p != NULL) { 3784 freeb(rx_msg_p->rx_mblk_p); 3785 rx_msg_ring[i] = NULL; 3786 } 3787 } 3788 3789 /* 3790 * We no longer may use the mutex <post_lock>. By setting 3791 * <rbr_state> to anything but POSTING, we prevent 3792 * nxge_post_page() from accessing a dead mutex. 3793 */ 3794 rbr_p->rbr_state = RBR_UNMAPPING; 3795 MUTEX_DESTROY(&rbr_p->post_lock); 3796 3797 MUTEX_DESTROY(&rbr_p->lock); 3798 3799 if (rbr_p->rbr_ref_cnt == 0) { 3800 /* 3801 * This is the normal state of affairs. 3802 * Need to free the following buffers: 3803 * - data buffers 3804 * - rx_msg ring 3805 * - ring_info 3806 * - rbr ring 3807 */ 3808 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3809 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 3810 nxge_rxdma_databuf_free(rbr_p); 3811 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3812 KMEM_FREE(rx_msg_ring, size); 3813 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3814 } else { 3815 /* 3816 * Some of our buffers are still being used. 3817 * Therefore, tell nxge_freeb() this ring is 3818 * unmapped, so it may free <rbr_p> for us. 
3819 */ 3820 rbr_p->rbr_state = RBR_UNMAPPED; 3821 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3822 "unmap_rxdma_buf_ring: %d %s outstanding.", 3823 rbr_p->rbr_ref_cnt, 3824 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 3825 } 3826 3827 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3828 "<== nxge_unmap_rxdma_channel_buf_ring")); 3829 } 3830 3831 /* 3832 * nxge_rxdma_hw_start_common 3833 * 3834 * Arguments: 3835 * nxgep 3836 * 3837 * Notes: 3838 * 3839 * NPI/NXGE function calls: 3840 * nxge_init_fzc_rx_common(); 3841 * nxge_init_fzc_rxdma_port(); 3842 * 3843 * Registers accessed: 3844 * 3845 * Context: 3846 * Service domain 3847 */ 3848 static nxge_status_t 3849 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3850 { 3851 nxge_status_t status = NXGE_OK; 3852 3853 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3854 3855 /* 3856 * Load the sharable parameters by writing to the 3857 * function zero control registers. These FZC registers 3858 * should be initialized only once for the entire chip. 3859 */ 3860 (void) nxge_init_fzc_rx_common(nxgep); 3861 3862 /* 3863 * Initialize the RXDMA port specific FZC control configurations. 3864 * These FZC registers are pertaining to each port. 3865 */ 3866 (void) nxge_init_fzc_rxdma_port(nxgep); 3867 3868 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3869 3870 return (status); 3871 } 3872 3873 static nxge_status_t 3874 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 3875 { 3876 int i, ndmas; 3877 p_rx_rbr_rings_t rx_rbr_rings; 3878 p_rx_rbr_ring_t *rbr_rings; 3879 p_rx_rcr_rings_t rx_rcr_rings; 3880 p_rx_rcr_ring_t *rcr_rings; 3881 p_rx_mbox_areas_t rx_mbox_areas_p; 3882 p_rx_mbox_t *rx_mbox_p; 3883 nxge_status_t status = NXGE_OK; 3884 3885 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3886 3887 rx_rbr_rings = nxgep->rx_rbr_rings; 3888 rx_rcr_rings = nxgep->rx_rcr_rings; 3889 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3890 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3891 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3892 return (NXGE_ERROR); 3893 } 3894 ndmas = rx_rbr_rings->ndmas; 3895 if (ndmas == 0) { 3896 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3897 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3898 return (NXGE_ERROR); 3899 } 3900 3901 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3902 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3903 3904 rbr_rings = rx_rbr_rings->rbr_rings; 3905 rcr_rings = rx_rcr_rings->rcr_rings; 3906 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3907 if (rx_mbox_areas_p) { 3908 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3909 } 3910 3911 i = channel; 3912 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3913 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3914 ndmas, channel)); 3915 status = nxge_rxdma_start_channel(nxgep, channel, 3916 (p_rx_rbr_ring_t)rbr_rings[i], 3917 (p_rx_rcr_ring_t)rcr_rings[i], 3918 (p_rx_mbox_t)rx_mbox_p[i]); 3919 if (status != NXGE_OK) { 3920 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3921 "==> nxge_rxdma_hw_start: disable " 3922 "(status 0x%x channel %d)", status, channel)); 3923 return (status); 3924 } 3925 3926 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3927 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3928 rx_rbr_rings, rx_rcr_rings)); 3929 3930 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3931 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3932 3933 return (status); 3934 } 3935 3936 static void 3937 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 3938 { 3939 p_rx_rbr_rings_t rx_rbr_rings; 3940 p_rx_rcr_rings_t rx_rcr_rings; 3941 3942 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
nxge_rxdma_hw_stop")); 3943 3944 rx_rbr_rings = nxgep->rx_rbr_rings; 3945 rx_rcr_rings = nxgep->rx_rcr_rings; 3946 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3947 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3948 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 3949 return; 3950 } 3951 3952 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3953 "==> nxge_rxdma_hw_stop(channel %d)", 3954 channel)); 3955 (void) nxge_rxdma_stop_channel(nxgep, channel); 3956 3957 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3958 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3959 rx_rbr_rings, rx_rcr_rings)); 3960 3961 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 3962 } 3963 3964 3965 static nxge_status_t 3966 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 3967 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3968 3969 { 3970 npi_handle_t handle; 3971 npi_status_t rs = NPI_SUCCESS; 3972 rx_dma_ctl_stat_t cs; 3973 rx_dma_ent_msk_t ent_mask; 3974 nxge_status_t status = NXGE_OK; 3975 3976 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 3977 3978 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3979 3980 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 3981 "npi handle addr $%p acc $%p", 3982 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 3983 3984 /* Reset RXDMA channel, but not if you're a guest. */ 3985 if (!isLDOMguest(nxgep)) { 3986 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 3987 if (rs != NPI_SUCCESS) { 3988 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3989 "==> nxge_init_fzc_rdc: " 3990 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 3991 channel, rs)); 3992 return (NXGE_ERROR | rs); 3993 } 3994 3995 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3996 "==> nxge_rxdma_start_channel: reset done: channel %d", 3997 channel)); 3998 } 3999 4000 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4001 if (isLDOMguest(nxgep)) 4002 (void) nxge_rdc_lp_conf(nxgep, channel); 4003 #endif 4004 4005 /* 4006 * Initialize the RXDMA channel specific FZC control 4007 * configurations. These FZC registers are pertaining 4008 * to each RX channel (logical pages). 4009 */ 4010 if (!isLDOMguest(nxgep)) { 4011 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4012 if (status != NXGE_OK) { 4013 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4014 "==> nxge_rxdma_start_channel: " 4015 "init fzc rxdma failed (0x%08x channel %d)", 4016 status, channel)); 4017 return (status); 4018 } 4019 4020 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4021 "==> nxge_rxdma_start_channel: fzc done")); 4022 } 4023 4024 /* Set up the interrupt event masks. 
*/ 4025 ent_mask.value = 0; 4026 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 4027 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4028 &ent_mask); 4029 if (rs != NPI_SUCCESS) { 4030 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4031 "==> nxge_rxdma_start_channel: " 4032 "init rxdma event masks failed " 4033 "(0x%08x channel %d)", 4034 status, channel)); 4035 return (NXGE_ERROR | rs); 4036 } 4037 4038 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4039 "==> nxge_rxdma_start_channel: " 4040 "event done: channel %d (mask 0x%016llx)", 4041 channel, ent_mask.value)); 4042 4043 /* Initialize the receive DMA control and status register */ 4044 cs.value = 0; 4045 cs.bits.hdw.mex = 1; 4046 cs.bits.hdw.rcrthres = 1; 4047 cs.bits.hdw.rcrto = 1; 4048 cs.bits.hdw.rbr_empty = 1; 4049 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4050 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4051 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 4052 if (status != NXGE_OK) { 4053 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4054 "==> nxge_rxdma_start_channel: " 4055 "init rxdma control register failed (0x%08x channel %d", 4056 status, channel)); 4057 return (status); 4058 } 4059 4060 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4061 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4062 4063 /* 4064 * Load RXDMA descriptors, buffers, mailbox, 4065 * initialise the receive DMA channels and 4066 * enable each DMA channel. 4067 */ 4068 status = nxge_enable_rxdma_channel(nxgep, 4069 channel, rbr_p, rcr_p, mbox_p); 4070 4071 if (status != NXGE_OK) { 4072 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4073 " nxge_rxdma_start_channel: " 4074 " enable rxdma failed (0x%08x channel %d)", 4075 status, channel)); 4076 return (status); 4077 } 4078 4079 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4080 "==> nxge_rxdma_start_channel: enabled channel %d")); 4081 4082 if (isLDOMguest(nxgep)) { 4083 /* Add interrupt handler for this channel. 
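 *
 * In a logical-domain guest the channel belongs to the guest rather
 * than the service domain, so its interrupt is registered per channel
 * here through the hybrid-I/O layer (nxge_hio_intr_add()) instead of
 * the common attach-time interrupt setup.  A failure is only logged;
 * the channel itself has already been enabled above.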
*/ 4084 if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4085 != NXGE_OK) { 4086 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4087 " nxge_rxdma_start_channel: " 4088 " nxge_hio_intr_add failed (0x%08x channel %d)", 4089 status, channel)); 4090 } 4091 } 4092 4093 ent_mask.value = 0; 4094 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4095 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4096 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4097 &ent_mask); 4098 if (rs != NPI_SUCCESS) { 4099 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4100 "==> nxge_rxdma_start_channel: " 4101 "init rxdma event masks failed (0x%08x channel %d)", 4102 status, channel)); 4103 return (NXGE_ERROR | rs); 4104 } 4105 4106 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4107 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4108 4109 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4110 4111 return (NXGE_OK); 4112 } 4113 4114 static nxge_status_t 4115 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4116 { 4117 npi_handle_t handle; 4118 npi_status_t rs = NPI_SUCCESS; 4119 rx_dma_ctl_stat_t cs; 4120 rx_dma_ent_msk_t ent_mask; 4121 nxge_status_t status = NXGE_OK; 4122 4123 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4124 4125 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4126 4127 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4128 "npi handle addr $%p acc $%p", 4129 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4130 4131 /* Reset RXDMA channel */ 4132 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4133 if (rs != NPI_SUCCESS) { 4134 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4135 " nxge_rxdma_stop_channel: " 4136 " reset rxdma failed (0x%08x channel %d)", 4137 rs, channel)); 4138 return (NXGE_ERROR | rs); 4139 } 4140 4141 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4142 "==> nxge_rxdma_stop_channel: reset done")); 4143 4144 /* Set up the interrupt event masks. 
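 *
 * On the stop path every event is masked (RX_DMA_ENT_MSK_ALL) so that
 * no further RXDMA interrupts fire while the channel is being torn
 * down; the control/status register is then cleared and the channel
 * disabled below.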
*/ 4145 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4146 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4147 &ent_mask); 4148 if (rs != NPI_SUCCESS) { 4149 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4150 "==> nxge_rxdma_stop_channel: " 4151 "set rxdma event masks failed (0x%08x channel %d)", 4152 rs, channel)); 4153 return (NXGE_ERROR | rs); 4154 } 4155 4156 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4157 "==> nxge_rxdma_stop_channel: event done")); 4158 4159 /* Initialize the receive DMA control and status register */ 4160 cs.value = 0; 4161 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, 4162 &cs); 4163 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4164 " to default (all 0s) 0x%08x", cs.value)); 4165 if (status != NXGE_OK) { 4166 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4167 " nxge_rxdma_stop_channel: init rxdma" 4168 " control register failed (0x%08x channel %d", 4169 status, channel)); 4170 return (status); 4171 } 4172 4173 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4174 "==> nxge_rxdma_stop_channel: control done")); 4175 4176 /* disable dma channel */ 4177 status = nxge_disable_rxdma_channel(nxgep, channel); 4178 4179 if (status != NXGE_OK) { 4180 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4181 " nxge_rxdma_stop_channel: " 4182 " init enable rxdma failed (0x%08x channel %d)", 4183 status, channel)); 4184 return (status); 4185 } 4186 4187 NXGE_DEBUG_MSG((nxgep, 4188 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4189 4190 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4191 4192 return (NXGE_OK); 4193 } 4194 4195 nxge_status_t 4196 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4197 { 4198 npi_handle_t handle; 4199 p_nxge_rdc_sys_stats_t statsp; 4200 rx_ctl_dat_fifo_stat_t stat; 4201 uint32_t zcp_err_status; 4202 uint32_t ipp_err_status; 4203 nxge_status_t status = NXGE_OK; 4204 npi_status_t rs = NPI_SUCCESS; 4205 boolean_t my_err = B_FALSE; 4206 4207 handle = nxgep->npi_handle; 4208 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4209 4210 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4211 4212 if (rs != NPI_SUCCESS) 4213 return (NXGE_ERROR | rs); 4214 4215 if (stat.bits.ldw.id_mismatch) { 4216 statsp->id_mismatch++; 4217 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4218 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4219 /* Global fatal error encountered */ 4220 } 4221 4222 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4223 switch (nxgep->mac.portnum) { 4224 case 0: 4225 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4226 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4227 my_err = B_TRUE; 4228 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4229 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4230 } 4231 break; 4232 case 1: 4233 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4234 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4235 my_err = B_TRUE; 4236 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4237 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4238 } 4239 break; 4240 case 2: 4241 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4242 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4243 my_err = B_TRUE; 4244 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4245 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4246 } 4247 break; 4248 case 3: 4249 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4250 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4251 my_err = B_TRUE; 4252 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4253 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4254 } 4255 break; 4256 default: 4257 return (NXGE_ERROR); 4258 
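		/*
		 * The four cases above differ only in which per-port
		 * bit of the zcp/ipp EOP error fields they test.  A
		 * more compact equivalent (a sketch, assuming
		 * FIFO_EOP_PORTn == (1 << n)) would be:
		 *
		 *	uint32_t port_bit = 1 << nxgep->mac.portnum;
		 *
		 *	if ((stat.bits.ldw.zcp_eop_err & port_bit) ||
		 *	    (stat.bits.ldw.ipp_eop_err & port_bit)) {
		 *		my_err = B_TRUE;
		 *		zcp_err_status = stat.bits.ldw.zcp_eop_err;
		 *		ipp_err_status = stat.bits.ldw.ipp_eop_err;
		 *	}
		 */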
} 4259 } 4260 4261 if (my_err) { 4262 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4263 zcp_err_status); 4264 if (status != NXGE_OK) 4265 return (status); 4266 } 4267 4268 return (NXGE_OK); 4269 } 4270 4271 static nxge_status_t 4272 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4273 uint32_t zcp_status) 4274 { 4275 boolean_t rxport_fatal = B_FALSE; 4276 p_nxge_rdc_sys_stats_t statsp; 4277 nxge_status_t status = NXGE_OK; 4278 uint8_t portn; 4279 4280 portn = nxgep->mac.portnum; 4281 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4282 4283 if (ipp_status & (0x1 << portn)) { 4284 statsp->ipp_eop_err++; 4285 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4286 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4287 rxport_fatal = B_TRUE; 4288 } 4289 4290 if (zcp_status & (0x1 << portn)) { 4291 statsp->zcp_eop_err++; 4292 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4293 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4294 rxport_fatal = B_TRUE; 4295 } 4296 4297 if (rxport_fatal) { 4298 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4299 " nxge_rxdma_handle_port_error: " 4300 " fatal error on Port #%d\n", 4301 portn)); 4302 status = nxge_rx_port_fatal_err_recover(nxgep); 4303 if (status == NXGE_OK) { 4304 FM_SERVICE_RESTORED(nxgep); 4305 } 4306 } 4307 4308 return (status); 4309 } 4310 4311 static nxge_status_t 4312 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4313 { 4314 npi_handle_t handle; 4315 npi_status_t rs = NPI_SUCCESS; 4316 nxge_status_t status = NXGE_OK; 4317 p_rx_rbr_ring_t rbrp; 4318 p_rx_rcr_ring_t rcrp; 4319 p_rx_mbox_t mboxp; 4320 rx_dma_ent_msk_t ent_mask; 4321 p_nxge_dma_common_t dmap; 4322 int ring_idx; 4323 uint32_t ref_cnt; 4324 p_rx_msg_t rx_msg_p; 4325 int i; 4326 uint32_t nxge_port_rcr_size; 4327 4328 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4329 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4330 "Recovering from RxDMAChannel#%d error...", channel)); 4331 4332 /* 4333 * Stop the dma channel waits for the stop done. 4334 * If the stop done bit is not set, then create 4335 * an error. 
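 *
 * Recovery then proceeds in order: disable the channel, mask all RXDMA
 * events, reset the channel, rewind the RBR/RCR descriptor indices and
 * zero the completion ring, mark buffers whose usage counts show they
 * have been fully consumed as re-postable, and finally restart the
 * channel with nxge_rxdma_start_channel().  All of this is done while
 * holding the rcr, rbr and post locks acquired below.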
4336 */ 4337 4338 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4339 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4340 4341 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4342 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4343 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4344 4345 MUTEX_ENTER(&rcrp->lock); 4346 MUTEX_ENTER(&rbrp->lock); 4347 MUTEX_ENTER(&rbrp->post_lock); 4348 4349 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4350 4351 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4352 if (rs != NPI_SUCCESS) { 4353 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4354 "nxge_disable_rxdma_channel:failed")); 4355 goto fail; 4356 } 4357 4358 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4359 4360 /* Disable interrupt */ 4361 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4362 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4363 if (rs != NPI_SUCCESS) { 4364 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4365 "nxge_rxdma_stop_channel: " 4366 "set rxdma event masks failed (channel %d)", 4367 channel)); 4368 } 4369 4370 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4371 4372 /* Reset RXDMA channel */ 4373 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4374 if (rs != NPI_SUCCESS) { 4375 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4376 "nxge_rxdma_fatal_err_recover: " 4377 " reset rxdma failed (channel %d)", channel)); 4378 goto fail; 4379 } 4380 4381 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4382 4383 mboxp = 4384 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4385 4386 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4387 rbrp->rbr_rd_index = 0; 4388 4389 rcrp->comp_rd_index = 0; 4390 rcrp->comp_wt_index = 0; 4391 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4392 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4393 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4394 #if defined(__i386) 4395 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4396 #else 4397 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4398 #endif 4399 4400 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4401 (nxge_port_rcr_size - 1); 4402 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4403 (nxge_port_rcr_size - 1); 4404 4405 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4406 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4407 4408 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4409 4410 for (i = 0; i < rbrp->rbr_max_size; i++) { 4411 rx_msg_p = rbrp->rx_msg_ring[i]; 4412 ref_cnt = rx_msg_p->ref_cnt; 4413 if (ref_cnt != 1) { 4414 if (rx_msg_p->cur_usage_cnt != 4415 rx_msg_p->max_usage_cnt) { 4416 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4417 "buf[%d]: cur_usage_cnt = %d " 4418 "max_usage_cnt = %d\n", i, 4419 rx_msg_p->cur_usage_cnt, 4420 rx_msg_p->max_usage_cnt)); 4421 } else { 4422 /* Buffer can be re-posted */ 4423 rx_msg_p->free = B_TRUE; 4424 rx_msg_p->cur_usage_cnt = 0; 4425 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4426 rx_msg_p->pkt_buf_size = 0; 4427 } 4428 } 4429 } 4430 4431 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4432 4433 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4434 if (status != NXGE_OK) { 4435 goto fail; 4436 } 4437 4438 MUTEX_EXIT(&rbrp->post_lock); 4439 MUTEX_EXIT(&rbrp->lock); 4440 MUTEX_EXIT(&rcrp->lock); 4441 4442 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4443 "Recovery Successful, RxDMAChannel#%d Restored", 4444 channel)); 4445 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4446 4447 return (NXGE_OK); 4448 fail: 4449 
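	/*
	 * Failure path: drop the locks in the reverse of the order in
	 * which they were acquired (post_lock, rbr lock, rcr lock) and
	 * return the NPI status folded into the NXGE error code.
	 */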
MUTEX_EXIT(&rbrp->post_lock); 4450 MUTEX_EXIT(&rbrp->lock); 4451 MUTEX_EXIT(&rcrp->lock); 4452 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4453 4454 return (NXGE_ERROR | rs); 4455 } 4456 4457 nxge_status_t 4458 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4459 { 4460 nxge_grp_set_t *set = &nxgep->rx_set; 4461 nxge_status_t status = NXGE_OK; 4462 int rdc; 4463 4464 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4465 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4466 "Recovering from RxPort error...")); 4467 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4468 4469 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4470 goto fail; 4471 4472 NXGE_DELAY(1000); 4473 4474 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4475 4476 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4477 if ((1 << rdc) & set->owned.map) { 4478 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4479 != NXGE_OK) { 4480 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4481 "Could not recover channel %d", rdc)); 4482 } 4483 } 4484 } 4485 4486 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4487 4488 /* Reset IPP */ 4489 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4490 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4491 "nxge_rx_port_fatal_err_recover: " 4492 "Failed to reset IPP")); 4493 goto fail; 4494 } 4495 4496 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4497 4498 /* Reset RxMAC */ 4499 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4500 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4501 "nxge_rx_port_fatal_err_recover: " 4502 "Failed to reset RxMAC")); 4503 goto fail; 4504 } 4505 4506 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4507 4508 /* Re-Initialize IPP */ 4509 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4510 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4511 "nxge_rx_port_fatal_err_recover: " 4512 "Failed to init IPP")); 4513 goto fail; 4514 } 4515 4516 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4517 4518 /* Re-Initialize RxMAC */ 4519 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4521 "nxge_rx_port_fatal_err_recover: " 4522 "Failed to reset RxMAC")); 4523 goto fail; 4524 } 4525 4526 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4527 4528 /* Re-enable RxMAC */ 4529 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4530 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4531 "nxge_rx_port_fatal_err_recover: " 4532 "Failed to enable RxMAC")); 4533 goto fail; 4534 } 4535 4536 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4537 "Recovery Successful, RxPort Restored")); 4538 4539 return (NXGE_OK); 4540 fail: 4541 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4542 return (status); 4543 } 4544 4545 void 4546 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4547 { 4548 rx_dma_ctl_stat_t cs; 4549 rx_ctl_dat_fifo_stat_t cdfs; 4550 4551 switch (err_id) { 4552 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4553 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4554 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4555 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4556 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4557 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4558 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4559 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4560 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4561 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4562 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4563 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4564 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4565 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4566 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4567 chan, 
&cs.value); 4568 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4569 cs.bits.hdw.rcr_ack_err = 1; 4570 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4571 cs.bits.hdw.dc_fifo_err = 1; 4572 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4573 cs.bits.hdw.rcr_sha_par = 1; 4574 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4575 cs.bits.hdw.rbr_pre_par = 1; 4576 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4577 cs.bits.hdw.rbr_tmout = 1; 4578 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4579 cs.bits.hdw.rsp_cnt_err = 1; 4580 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4581 cs.bits.hdw.byte_en_bus = 1; 4582 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4583 cs.bits.hdw.rsp_dat_err = 1; 4584 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4585 cs.bits.hdw.config_err = 1; 4586 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4587 cs.bits.hdw.rcrincon = 1; 4588 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4589 cs.bits.hdw.rcrfull = 1; 4590 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4591 cs.bits.hdw.rbrfull = 1; 4592 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4593 cs.bits.hdw.rbrlogpage = 1; 4594 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4595 cs.bits.hdw.cfiglogpage = 1; 4596 #if defined(__i386) 4597 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4598 cs.value); 4599 #else 4600 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4601 cs.value); 4602 #endif 4603 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4604 chan, cs.value); 4605 break; 4606 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4607 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4608 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4609 cdfs.value = 0; 4610 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4611 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4612 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4613 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4614 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4615 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4616 #if defined(__i386) 4617 cmn_err(CE_NOTE, 4618 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4619 cdfs.value); 4620 #else 4621 cmn_err(CE_NOTE, 4622 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4623 cdfs.value); 4624 #endif 4625 NXGE_REG_WR64(nxgep->npi_handle, 4626 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4627 break; 4628 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4629 break; 4630 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4631 break; 4632 } 4633 } 4634 4635 static void 4636 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4637 { 4638 rxring_info_t *ring_info; 4639 int index; 4640 uint32_t chunk_size; 4641 uint64_t kaddr; 4642 uint_t num_blocks; 4643 4644 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4645 4646 if (rbr_p == NULL) { 4647 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4648 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4649 return; 4650 } 4651 4652 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4653 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4654 "==> nxge_rxdma_databuf_free: DDI")); 4655 return; 4656 } 4657 4658 ring_info = rbr_p->ring_info; 4659 if (ring_info == NULL) { 4660 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4661 "==> nxge_rxdma_databuf_free: NULL ring info")); 4662 return; 4663 } 4664 num_blocks = rbr_p->num_blocks; 4665 for (index = 0; index < num_blocks; index++) { 4666 kaddr = ring_info->buffer[index].kaddr; 4667 chunk_size = ring_info->buffer[index].buf_size; 4668 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4669 "==> 
nxge_rxdma_databuf_free: free chunk %d " 4670 "kaddrp $%p chunk size %d", 4671 index, kaddr, chunk_size)); 4672 if (kaddr == NULL) continue; 4673 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4674 ring_info->buffer[index].kaddr = NULL; 4675 } 4676 4677 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4678 } 4679 4680 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4681 extern void contig_mem_free(void *, size_t); 4682 #endif 4683 4684 void 4685 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4686 { 4687 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4688 4689 if (kaddr == NULL || !buf_size) { 4690 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4691 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4692 kaddr, buf_size)); 4693 return; 4694 } 4695 4696 switch (alloc_type) { 4697 case KMEM_ALLOC: 4698 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4699 "==> nxge_free_buf: freeing kmem $%p size %d", 4700 kaddr, buf_size)); 4701 #if defined(__i386) 4702 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4703 #else 4704 KMEM_FREE((void *)kaddr, buf_size); 4705 #endif 4706 break; 4707 4708 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4709 case CONTIG_MEM_ALLOC: 4710 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4711 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4712 kaddr, buf_size)); 4713 contig_mem_free((void *)kaddr, buf_size); 4714 break; 4715 #endif 4716 4717 default: 4718 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4719 "<== nxge_free_buf: unsupported alloc type %d", 4720 alloc_type)); 4721 return; 4722 } 4723 4724 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 4725 } 4726
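
/*
 * Illustrative only: a minimal, non-compiled sketch (hence the #if 0)
 * of how a single RBR data chunk recorded in the ring_info buffer
 * table is released through nxge_free_buf() above.  The helper name
 * example_free_rbr_chunk() is hypothetical; the real loop lives in
 * nxge_rxdma_databuf_free(), which also skips DDI_MEM_ALLOC rings,
 * presumably because their memory is released together with the DMA
 * handles elsewhere.
 */
#if 0
static void
example_free_rbr_chunk(p_rx_rbr_ring_t rbr_p, int index)
{
	rxring_info_t	*ring_info = rbr_p->ring_info;
	uint64_t	kaddr = ring_info->buffer[index].kaddr;
	uint32_t	size = ring_info->buffer[index].buf_size;

	if (kaddr == 0)
		return;		/* chunk already released */

	/* nxge_free_buf() dispatches on the ring's allocation type. */
	nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, size);
	ring_info->buffer[index].kaddr = 0;
}
#endif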