/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
    (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define NXGE_ACTUAL_RDC(nxgep, rdc) \
    (rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
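 *
 * All of the tunables declared in this file are normally adjusted from
 * /etc/system; a hypothetical example (illustrative values only, not
 * recommendations):
 *
 *     set nxge:nxge_rbr_size = 2048
 *     set nxge:nxge_rcr_size = 1024
 *     set nxge:nxge_max_rx_pkts = 256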
64 */ 65 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 66 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 67 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 68 69 extern uint32_t nxge_cksum_offload; 70 71 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 72 static void nxge_unmap_rxdma(p_nxge_t, int); 73 74 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 75 76 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 77 static void nxge_rxdma_hw_stop(p_nxge_t, int); 78 79 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 80 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 81 uint32_t, 82 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 83 p_rx_mbox_t *); 84 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 85 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 86 87 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 88 uint16_t, 89 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 90 p_rx_rcr_ring_t *, p_rx_mbox_t *); 91 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 92 p_rx_rcr_ring_t, p_rx_mbox_t); 93 94 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 95 uint16_t, 96 p_nxge_dma_common_t *, 97 p_rx_rbr_ring_t *, uint32_t); 98 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 99 p_rx_rbr_ring_t); 100 101 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 102 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 103 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 104 105 static mblk_t * 106 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 107 108 static void nxge_receive_packet(p_nxge_t, 109 p_rx_rcr_ring_t, 110 p_rcr_entry_t, 111 boolean_t *, 112 mblk_t **, mblk_t **); 113 114 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 115 116 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 117 static void nxge_freeb(p_rx_msg_t); 118 static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 120 121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 122 uint32_t, uint32_t); 123 124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 125 p_rx_rbr_ring_t); 126 127 128 static nxge_status_t 129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 130 131 nxge_status_t 132 nxge_rx_port_fatal_err_recover(p_nxge_t); 133 134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 135 136 nxge_status_t 137 nxge_init_rxdma_channels(p_nxge_t nxgep) 138 { 139 nxge_grp_set_t *set = &nxgep->rx_set; 140 int i, count, rdc, channel; 141 nxge_grp_t *group; 142 143 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 144 145 if (!isLDOMguest(nxgep)) { 146 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 147 cmn_err(CE_NOTE, "hw_start_common"); 148 return (NXGE_ERROR); 149 } 150 } 151 152 /* 153 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 154 * We only have 8 hardware RDC tables, but we may have 155 * up to 16 logical (software-defined) groups of RDCS, 156 * if we make use of layer 3 & 4 hardware classification. 
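     *
     * The loop below walks the logical group map and binds each RX
     * channel of every owned group with nxge_grp_dc_add(); on any
     * failure, the exit path unwinds the channels already added with
     * nxge_grp_dc_remove().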
157 */ 158 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 159 if ((1 << i) & set->lg.map) { 160 group = set->group[i]; 161 162 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 163 if ((1 << channel) & group->map) { 164 if ((nxge_grp_dc_add(nxgep, 165 group, VP_BOUND_RX, channel))) 166 goto init_rxdma_channels_exit; 167 } 168 } 169 } 170 if (++count == set->lg.count) 171 break; 172 } 173 174 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 175 return (NXGE_OK); 176 177 init_rxdma_channels_exit: 178 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 179 if ((1 << i) & set->lg.map) { 180 group = set->group[i]; 181 182 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 183 if ((1 << rdc) & group->map) { 184 nxge_grp_dc_remove(nxgep, 185 VP_BOUND_RX, rdc); 186 } 187 } 188 } 189 190 if (++count == set->lg.count) 191 break; 192 } 193 194 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 195 return (NXGE_ERROR); 196 } 197 198 nxge_status_t 199 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 200 { 201 nxge_status_t status; 202 203 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 204 205 status = nxge_map_rxdma(nxge, channel); 206 if (status != NXGE_OK) { 207 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 208 "<== nxge_init_rxdma: status 0x%x", status)); 209 return (status); 210 } 211 212 status = nxge_rxdma_hw_start(nxge, channel); 213 if (status != NXGE_OK) { 214 nxge_unmap_rxdma(nxge, channel); 215 } 216 217 if (!nxge->statsp->rdc_ksp[channel]) 218 nxge_setup_rdc_kstats(nxge, channel); 219 220 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 221 "<== nxge_init_rxdma_channel: status 0x%x", status)); 222 223 return (status); 224 } 225 226 void 227 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 228 { 229 nxge_grp_set_t *set = &nxgep->rx_set; 230 int rdc; 231 232 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 233 234 if (set->owned.map == 0) { 235 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 236 "nxge_uninit_rxdma_channels: no channels")); 237 return; 238 } 239 240 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 241 if ((1 << rdc) & set->owned.map) { 242 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 243 } 244 } 245 246 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 247 } 248 249 void 250 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 251 { 252 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 253 254 if (nxgep->statsp->rdc_ksp[channel]) { 255 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 256 nxgep->statsp->rdc_ksp[channel] = 0; 257 } 258 259 nxge_rxdma_hw_stop(nxgep, channel); 260 nxge_unmap_rxdma(nxgep, channel); 261 262 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 263 } 264 265 nxge_status_t 266 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 267 { 268 npi_handle_t handle; 269 npi_status_t rs = NPI_SUCCESS; 270 nxge_status_t status = NXGE_OK; 271 272 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 273 274 handle = NXGE_DEV_NPI_HANDLE(nxgep); 275 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 276 277 if (rs != NPI_SUCCESS) { 278 status = NXGE_ERROR | rs; 279 } 280 281 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 282 283 return (status); 284 } 285 286 void 287 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 288 { 289 nxge_grp_set_t *set = &nxgep->rx_set; 290 int rdc; 291 292 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 293 294 if (!isLDOMguest(nxgep)) { 295 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 296 
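        /*
         * The FZC (shared, per-port) register space is presumably only
         * accessible from the service domain, hence the isLDOMguest()
         * guard around this dump.
         */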
(void) npi_rxdma_dump_fzc_regs(handle); 297 } 298 299 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 300 NXGE_DEBUG_MSG((nxgep, TX_CTL, 301 "nxge_rxdma_regs_dump_channels: " 302 "NULL ring pointer(s)")); 303 return; 304 } 305 306 if (set->owned.map == 0) { 307 NXGE_DEBUG_MSG((nxgep, RX_CTL, 308 "nxge_rxdma_regs_dump_channels: no channels")); 309 return; 310 } 311 312 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 313 if ((1 << rdc) & set->owned.map) { 314 rx_rbr_ring_t *ring = 315 nxgep->rx_rbr_rings->rbr_rings[rdc]; 316 if (ring) { 317 (void) nxge_dump_rxdma_channel(nxgep, rdc); 318 } 319 } 320 } 321 322 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 323 } 324 325 nxge_status_t 326 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 327 { 328 npi_handle_t handle; 329 npi_status_t rs = NPI_SUCCESS; 330 nxge_status_t status = NXGE_OK; 331 332 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 333 334 handle = NXGE_DEV_NPI_HANDLE(nxgep); 335 rs = npi_rxdma_dump_rdc_regs(handle, channel); 336 337 if (rs != NPI_SUCCESS) { 338 status = NXGE_ERROR | rs; 339 } 340 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 341 return (status); 342 } 343 344 nxge_status_t 345 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 346 p_rx_dma_ent_msk_t mask_p) 347 { 348 npi_handle_t handle; 349 npi_status_t rs = NPI_SUCCESS; 350 nxge_status_t status = NXGE_OK; 351 352 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 353 "<== nxge_init_rxdma_channel_event_mask")); 354 355 handle = NXGE_DEV_NPI_HANDLE(nxgep); 356 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 357 if (rs != NPI_SUCCESS) { 358 status = NXGE_ERROR | rs; 359 } 360 361 return (status); 362 } 363 364 nxge_status_t 365 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 366 p_rx_dma_ctl_stat_t cs_p) 367 { 368 npi_handle_t handle; 369 npi_status_t rs = NPI_SUCCESS; 370 nxge_status_t status = NXGE_OK; 371 372 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 373 "<== nxge_init_rxdma_channel_cntl_stat")); 374 375 handle = NXGE_DEV_NPI_HANDLE(nxgep); 376 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 377 378 if (rs != NPI_SUCCESS) { 379 status = NXGE_ERROR | rs; 380 } 381 382 return (status); 383 } 384 385 /* 386 * nxge_rxdma_cfg_rdcgrp_default_rdc 387 * 388 * Set the default RDC for an RDC Group (Table) 389 * 390 * Arguments: 391 * nxgep 392 * rdcgrp The group to modify 393 * rdc The new default RDC. 394 * 395 * Notes: 396 * 397 * NPI/NXGE function calls: 398 * npi_rxdma_cfg_rdc_table_default_rdc() 399 * 400 * Registers accessed: 401 * RDC_TBL_REG: FZC_ZCP + 0x10000 402 * 403 * Context: 404 * Service domain 405 */ 406 nxge_status_t 407 nxge_rxdma_cfg_rdcgrp_default_rdc( 408 p_nxge_t nxgep, 409 uint8_t rdcgrp, 410 uint8_t rdc) 411 { 412 npi_handle_t handle; 413 npi_status_t rs = NPI_SUCCESS; 414 p_nxge_dma_pt_cfg_t p_dma_cfgp; 415 p_nxge_rdc_grp_t rdc_grp_p; 416 uint8_t actual_rdcgrp, actual_rdc; 417 418 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 419 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 420 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 421 422 handle = NXGE_DEV_NPI_HANDLE(nxgep); 423 424 /* 425 * This has to be rewritten. Do we even allow this anymore? 
426 */ 427 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 428 RDC_MAP_IN(rdc_grp_p->map, rdc); 429 rdc_grp_p->def_rdc = rdc; 430 431 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 432 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 433 434 rs = npi_rxdma_cfg_rdc_table_default_rdc( 435 handle, actual_rdcgrp, actual_rdc); 436 437 if (rs != NPI_SUCCESS) { 438 return (NXGE_ERROR | rs); 439 } 440 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 441 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 442 return (NXGE_OK); 443 } 444 445 nxge_status_t 446 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 447 { 448 npi_handle_t handle; 449 450 uint8_t actual_rdc; 451 npi_status_t rs = NPI_SUCCESS; 452 453 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 454 " ==> nxge_rxdma_cfg_port_default_rdc")); 455 456 handle = NXGE_DEV_NPI_HANDLE(nxgep); 457 actual_rdc = rdc; /* XXX Hack! */ 458 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 459 460 461 if (rs != NPI_SUCCESS) { 462 return (NXGE_ERROR | rs); 463 } 464 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 465 " <== nxge_rxdma_cfg_port_default_rdc")); 466 467 return (NXGE_OK); 468 } 469 470 nxge_status_t 471 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 472 uint16_t pkts) 473 { 474 npi_status_t rs = NPI_SUCCESS; 475 npi_handle_t handle; 476 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 477 " ==> nxge_rxdma_cfg_rcr_threshold")); 478 handle = NXGE_DEV_NPI_HANDLE(nxgep); 479 480 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 481 482 if (rs != NPI_SUCCESS) { 483 return (NXGE_ERROR | rs); 484 } 485 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 486 return (NXGE_OK); 487 } 488 489 nxge_status_t 490 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 491 uint16_t tout, uint8_t enable) 492 { 493 npi_status_t rs = NPI_SUCCESS; 494 npi_handle_t handle; 495 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 496 handle = NXGE_DEV_NPI_HANDLE(nxgep); 497 if (enable == 0) { 498 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 499 } else { 500 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 501 tout); 502 } 503 504 if (rs != NPI_SUCCESS) { 505 return (NXGE_ERROR | rs); 506 } 507 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 508 return (NXGE_OK); 509 } 510 511 nxge_status_t 512 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 513 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 514 { 515 npi_handle_t handle; 516 rdc_desc_cfg_t rdc_desc; 517 p_rcrcfig_b_t cfgb_p; 518 npi_status_t rs = NPI_SUCCESS; 519 520 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 521 handle = NXGE_DEV_NPI_HANDLE(nxgep); 522 /* 523 * Use configuration data composed at init time. 524 * Write to hardware the receive ring configurations. 
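     * The rdc_desc filled in below carries the mailbox address, the
     * RBR base address/length and its three packet buffer sizes, the
     * RCR base address/length, and the RCR interrupt threshold and
     * timeout, which are then handed to npi_rxdma_cfg_rdc_ring().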
525 */ 526 rdc_desc.mbox_enable = 1; 527 rdc_desc.mbox_addr = mbox_p->mbox_addr; 528 NXGE_DEBUG_MSG((nxgep, RX_CTL, 529 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 530 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 531 532 rdc_desc.rbr_len = rbr_p->rbb_max; 533 rdc_desc.rbr_addr = rbr_p->rbr_addr; 534 535 switch (nxgep->rx_bksize_code) { 536 case RBR_BKSIZE_4K: 537 rdc_desc.page_size = SIZE_4KB; 538 break; 539 case RBR_BKSIZE_8K: 540 rdc_desc.page_size = SIZE_8KB; 541 break; 542 case RBR_BKSIZE_16K: 543 rdc_desc.page_size = SIZE_16KB; 544 break; 545 case RBR_BKSIZE_32K: 546 rdc_desc.page_size = SIZE_32KB; 547 break; 548 } 549 550 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 551 rdc_desc.valid0 = 1; 552 553 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 554 rdc_desc.valid1 = 1; 555 556 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 557 rdc_desc.valid2 = 1; 558 559 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 560 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 561 562 rdc_desc.rcr_len = rcr_p->comp_size; 563 rdc_desc.rcr_addr = rcr_p->rcr_addr; 564 565 cfgb_p = &(rcr_p->rcr_cfgb); 566 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 567 /* For now, disable this timeout in a guest domain. */ 568 if (isLDOMguest(nxgep)) { 569 rdc_desc.rcr_timeout = 0; 570 rdc_desc.rcr_timeout_enable = 0; 571 } else { 572 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 573 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 574 } 575 576 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 577 "rbr_len qlen %d pagesize code %d rcr_len %d", 578 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 579 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 580 "size 0 %d size 1 %d size 2 %d", 581 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 582 rbr_p->npi_pkt_buf_size2)); 583 584 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 585 if (rs != NPI_SUCCESS) { 586 return (NXGE_ERROR | rs); 587 } 588 589 /* 590 * Enable the timeout and threshold. 591 */ 592 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 593 rdc_desc.rcr_threshold); 594 if (rs != NPI_SUCCESS) { 595 return (NXGE_ERROR | rs); 596 } 597 598 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 599 rdc_desc.rcr_timeout); 600 if (rs != NPI_SUCCESS) { 601 return (NXGE_ERROR | rs); 602 } 603 604 /* Enable the DMA */ 605 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 606 if (rs != NPI_SUCCESS) { 607 return (NXGE_ERROR | rs); 608 } 609 610 /* Kick the DMA engine. 
*/ 611 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 612 /* Clear the rbr empty bit */ 613 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 614 615 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 616 617 return (NXGE_OK); 618 } 619 620 nxge_status_t 621 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 622 { 623 npi_handle_t handle; 624 npi_status_t rs = NPI_SUCCESS; 625 626 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 627 handle = NXGE_DEV_NPI_HANDLE(nxgep); 628 629 /* disable the DMA */ 630 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 631 if (rs != NPI_SUCCESS) { 632 NXGE_DEBUG_MSG((nxgep, RX_CTL, 633 "<== nxge_disable_rxdma_channel:failed (0x%x)", 634 rs)); 635 return (NXGE_ERROR | rs); 636 } 637 638 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 639 return (NXGE_OK); 640 } 641 642 nxge_status_t 643 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 644 { 645 npi_handle_t handle; 646 nxge_status_t status = NXGE_OK; 647 648 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 649 "<== nxge_init_rxdma_channel_rcrflush")); 650 651 handle = NXGE_DEV_NPI_HANDLE(nxgep); 652 npi_rxdma_rdc_rcr_flush(handle, channel); 653 654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 655 "<== nxge_init_rxdma_channel_rcrflsh")); 656 return (status); 657 658 } 659 660 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 661 662 #define TO_LEFT -1 663 #define TO_RIGHT 1 664 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 665 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 666 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 667 #define NO_HINT 0xffffffff 668 669 /*ARGSUSED*/ 670 nxge_status_t 671 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 672 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 673 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 674 { 675 int bufsize; 676 uint64_t pktbuf_pp; 677 uint64_t dvma_addr; 678 rxring_info_t *ring_info; 679 int base_side, end_side; 680 int r_index, l_index, anchor_index; 681 int found, search_done; 682 uint32_t offset, chunk_size, block_size, page_size_mask; 683 uint32_t chunk_index, block_index, total_index; 684 int max_iterations, iteration; 685 rxbuf_index_info_t *bufinfo; 686 687 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 688 689 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 690 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 691 pkt_buf_addr_pp, 692 pktbufsz_type)); 693 #if defined(__i386) 694 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 695 #else 696 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 697 #endif 698 699 switch (pktbufsz_type) { 700 case 0: 701 bufsize = rbr_p->pkt_buf_size0; 702 break; 703 case 1: 704 bufsize = rbr_p->pkt_buf_size1; 705 break; 706 case 2: 707 bufsize = rbr_p->pkt_buf_size2; 708 break; 709 case RCR_SINGLE_BLOCK: 710 bufsize = 0; 711 anchor_index = 0; 712 break; 713 default: 714 return (NXGE_ERROR); 715 } 716 717 if (rbr_p->num_blocks == 1) { 718 anchor_index = 0; 719 ring_info = rbr_p->ring_info; 720 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 721 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 722 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 723 "buf_pp $%p btype %d anchor_index %d " 724 "bufinfo $%p", 725 pkt_buf_addr_pp, 726 pktbufsz_type, 727 anchor_index, 728 bufinfo)); 729 730 goto found_index; 731 } 732 733 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 734 "==> nxge_rxbuf_pp_to_vp: " 735 "buf_pp $%p btype %d anchor_index %d", 736 pkt_buf_addr_pp, 737 pktbufsz_type, 738 anchor_index)); 739 740 ring_info = rbr_p->ring_info; 741 found = B_FALSE; 742 bufinfo = (rxbuf_index_info_t 
    *)ring_info->buffer;
    iteration = 0;
    max_iterations = ring_info->max_iterations;
    /*
     * First check if this block has been seen
     * recently. This is indicated by a hint which
     * is initialized when the first buffer of the block
     * is seen. The hint is reset when the last buffer of
     * the block has been processed.
     * As three block sizes are supported, three hints
     * are kept. The idea behind the hints is that once
     * the hardware uses a block for a buffer of that
     * size, it will use it exclusively for that size
     * and will use it until it is exhausted. It is assumed
     * that there would be a single block being used for the same
     * buffer size at any given time.
     */
    if (ring_info->hint[pktbufsz_type] != NO_HINT) {
        anchor_index = ring_info->hint[pktbufsz_type];
        dvma_addr = bufinfo[anchor_index].dvma_addr;
        chunk_size = bufinfo[anchor_index].buf_size;
        if ((pktbuf_pp >= dvma_addr) &&
            (pktbuf_pp < (dvma_addr + chunk_size))) {
            found = B_TRUE;
            /*
             * Check if this is the last buffer in the block.
             * If so, then reset the hint for this size.
             */

            if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
                ring_info->hint[pktbufsz_type] = NO_HINT;
        }
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (!found)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));

        /*
         * This is the first buffer of the block of this
         * size. Need to search the whole information
         * array.
         * The search uses a binary search over the array,
         * which assumes that the information is
         * already sorted in increasing order:
         * info[0] < info[1] < info[2] ....
< info[n-1] 792 * where n is the size of the information array 793 */ 794 r_index = rbr_p->num_blocks - 1; 795 l_index = 0; 796 search_done = B_FALSE; 797 anchor_index = MID_INDEX(r_index, l_index); 798 while (search_done == B_FALSE) { 799 if ((r_index == l_index) || 800 (iteration >= max_iterations)) 801 search_done = B_TRUE; 802 end_side = TO_RIGHT; /* to the right */ 803 base_side = TO_LEFT; /* to the left */ 804 /* read the DVMA address information and sort it */ 805 dvma_addr = bufinfo[anchor_index].dvma_addr; 806 chunk_size = bufinfo[anchor_index].buf_size; 807 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 808 "==> nxge_rxbuf_pp_to_vp: (searching)" 809 "buf_pp $%p btype %d " 810 "anchor_index %d chunk_size %d dvmaaddr $%p", 811 pkt_buf_addr_pp, 812 pktbufsz_type, 813 anchor_index, 814 chunk_size, 815 dvma_addr)); 816 817 if (pktbuf_pp >= dvma_addr) 818 base_side = TO_RIGHT; /* to the right */ 819 if (pktbuf_pp < (dvma_addr + chunk_size)) 820 end_side = TO_LEFT; /* to the left */ 821 822 switch (base_side + end_side) { 823 case IN_MIDDLE: 824 /* found */ 825 found = B_TRUE; 826 search_done = B_TRUE; 827 if ((pktbuf_pp + bufsize) < 828 (dvma_addr + chunk_size)) 829 ring_info->hint[pktbufsz_type] = 830 bufinfo[anchor_index].buf_index; 831 break; 832 case BOTH_RIGHT: 833 /* not found: go to the right */ 834 l_index = anchor_index + 1; 835 anchor_index = MID_INDEX(r_index, l_index); 836 break; 837 838 case BOTH_LEFT: 839 /* not found: go to the left */ 840 r_index = anchor_index - 1; 841 anchor_index = MID_INDEX(r_index, l_index); 842 break; 843 default: /* should not come here */ 844 return (NXGE_ERROR); 845 } 846 iteration++; 847 } 848 849 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 850 "==> nxge_rxbuf_pp_to_vp: (search done)" 851 "buf_pp $%p btype %d anchor_index %d", 852 pkt_buf_addr_pp, 853 pktbufsz_type, 854 anchor_index)); 855 } 856 857 if (found == B_FALSE) { 858 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 859 "==> nxge_rxbuf_pp_to_vp: (search failed)" 860 "buf_pp $%p btype %d anchor_index %d", 861 pkt_buf_addr_pp, 862 pktbufsz_type, 863 anchor_index)); 864 return (NXGE_ERROR); 865 } 866 867 found_index: 868 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 869 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 870 "buf_pp $%p btype %d bufsize %d anchor_index %d", 871 pkt_buf_addr_pp, 872 pktbufsz_type, 873 bufsize, 874 anchor_index)); 875 876 /* index of the first block in this chunk */ 877 chunk_index = bufinfo[anchor_index].start_index; 878 dvma_addr = bufinfo[anchor_index].dvma_addr; 879 page_size_mask = ring_info->block_size_mask; 880 881 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 882 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 883 "buf_pp $%p btype %d bufsize %d " 884 "anchor_index %d chunk_index %d dvma $%p", 885 pkt_buf_addr_pp, 886 pktbufsz_type, 887 bufsize, 888 anchor_index, 889 chunk_index, 890 dvma_addr)); 891 892 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 893 block_size = rbr_p->block_size; /* System block(page) size */ 894 895 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 896 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 897 "buf_pp $%p btype %d bufsize %d " 898 "anchor_index %d chunk_index %d dvma $%p " 899 "offset %d block_size %d", 900 pkt_buf_addr_pp, 901 pktbufsz_type, 902 bufsize, 903 anchor_index, 904 chunk_index, 905 dvma_addr, 906 offset, 907 block_size)); 908 909 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 910 911 block_index = (offset / block_size); /* index within chunk */ 912 total_index = chunk_index + block_index; 913 914 915 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 916 "==> nxge_rxbuf_pp_to_vp: " 917 
"total_index %d dvma_addr $%p " 918 "offset %d block_size %d " 919 "block_index %d ", 920 total_index, dvma_addr, 921 offset, block_size, 922 block_index)); 923 #if defined(__i386) 924 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 925 (uint32_t)offset); 926 #else 927 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 928 (uint64_t)offset); 929 #endif 930 931 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 932 "==> nxge_rxbuf_pp_to_vp: " 933 "total_index %d dvma_addr $%p " 934 "offset %d block_size %d " 935 "block_index %d " 936 "*pkt_buf_addr_p $%p", 937 total_index, dvma_addr, 938 offset, block_size, 939 block_index, 940 *pkt_buf_addr_p)); 941 942 943 *msg_index = total_index; 944 *bufoffset = (offset & page_size_mask); 945 946 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 947 "==> nxge_rxbuf_pp_to_vp: get msg index: " 948 "msg_index %d bufoffset_index %d", 949 *msg_index, 950 *bufoffset)); 951 952 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 953 954 return (NXGE_OK); 955 } 956 957 /* 958 * used by quick sort (qsort) function 959 * to perform comparison 960 */ 961 static int 962 nxge_sort_compare(const void *p1, const void *p2) 963 { 964 965 rxbuf_index_info_t *a, *b; 966 967 a = (rxbuf_index_info_t *)p1; 968 b = (rxbuf_index_info_t *)p2; 969 970 if (a->dvma_addr > b->dvma_addr) 971 return (1); 972 if (a->dvma_addr < b->dvma_addr) 973 return (-1); 974 return (0); 975 } 976 977 978 979 /* 980 * grabbed this sort implementation from common/syscall/avl.c 981 * 982 */ 983 /* 984 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 985 * v = Ptr to array/vector of objs 986 * n = # objs in the array 987 * s = size of each obj (must be multiples of a word size) 988 * f = ptr to function to compare two objs 989 * returns (-1 = less than, 0 = equal, 1 = greater than 990 */ 991 void 992 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 993 { 994 int g, i, j, ii; 995 unsigned int *p1, *p2; 996 unsigned int tmp; 997 998 /* No work to do */ 999 if (v == NULL || n <= 1) 1000 return; 1001 /* Sanity check on arguments */ 1002 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1003 ASSERT(s > 0); 1004 1005 for (g = n / 2; g > 0; g /= 2) { 1006 for (i = g; i < n; i++) { 1007 for (j = i - g; j >= 0 && 1008 (*f)(v + j * s, v + (j + g) * s) == 1; 1009 j -= g) { 1010 p1 = (unsigned *)(v + j * s); 1011 p2 = (unsigned *)(v + (j + g) * s); 1012 for (ii = 0; ii < s / 4; ii++) { 1013 tmp = *p1; 1014 *p1++ = *p2; 1015 *p2++ = tmp; 1016 } 1017 } 1018 } 1019 } 1020 } 1021 1022 /* 1023 * Initialize data structures required for rxdma 1024 * buffer dvma->vmem address lookup 1025 */ 1026 /*ARGSUSED*/ 1027 static nxge_status_t 1028 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1029 { 1030 1031 int index; 1032 rxring_info_t *ring_info; 1033 int max_iteration = 0, max_index = 0; 1034 1035 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1036 1037 ring_info = rbrp->ring_info; 1038 ring_info->hint[0] = NO_HINT; 1039 ring_info->hint[1] = NO_HINT; 1040 ring_info->hint[2] = NO_HINT; 1041 max_index = rbrp->num_blocks; 1042 1043 /* read the DVMA address information and sort it */ 1044 /* do init of the information array */ 1045 1046 1047 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1048 " nxge_rxbuf_index_info_init Sort ptrs")); 1049 1050 /* sort the array */ 1051 nxge_ksort((void *)ring_info->buffer, max_index, 1052 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1053 1054 1055 1056 for (index = 0; index < max_index; index++) { 1057 NXGE_DEBUG_MSG((nxgep, 
DMA2_CTL, 1058 " nxge_rxbuf_index_info_init: sorted chunk %d " 1059 " ioaddr $%p kaddr $%p size %x", 1060 index, ring_info->buffer[index].dvma_addr, 1061 ring_info->buffer[index].kaddr, 1062 ring_info->buffer[index].buf_size)); 1063 } 1064 1065 max_iteration = 0; 1066 while (max_index >= (1ULL << max_iteration)) 1067 max_iteration++; 1068 ring_info->max_iterations = max_iteration + 1; 1069 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1070 " nxge_rxbuf_index_info_init Find max iter %d", 1071 ring_info->max_iterations)); 1072 1073 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1074 return (NXGE_OK); 1075 } 1076 1077 /* ARGSUSED */ 1078 void 1079 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1080 { 1081 #ifdef NXGE_DEBUG 1082 1083 uint32_t bptr; 1084 uint64_t pp; 1085 1086 bptr = entry_p->bits.hdw.pkt_buf_addr; 1087 1088 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1089 "\trcr entry $%p " 1090 "\trcr entry 0x%0llx " 1091 "\trcr entry 0x%08x " 1092 "\trcr entry 0x%08x " 1093 "\tvalue 0x%0llx\n" 1094 "\tmulti = %d\n" 1095 "\tpkt_type = 0x%x\n" 1096 "\tzero_copy = %d\n" 1097 "\tnoport = %d\n" 1098 "\tpromis = %d\n" 1099 "\terror = 0x%04x\n" 1100 "\tdcf_err = 0x%01x\n" 1101 "\tl2_len = %d\n" 1102 "\tpktbufsize = %d\n" 1103 "\tpkt_buf_addr = $%p\n" 1104 "\tpkt_buf_addr (<< 6) = $%p\n", 1105 entry_p, 1106 *(int64_t *)entry_p, 1107 *(int32_t *)entry_p, 1108 *(int32_t *)((char *)entry_p + 32), 1109 entry_p->value, 1110 entry_p->bits.hdw.multi, 1111 entry_p->bits.hdw.pkt_type, 1112 entry_p->bits.hdw.zero_copy, 1113 entry_p->bits.hdw.noport, 1114 entry_p->bits.hdw.promis, 1115 entry_p->bits.hdw.error, 1116 entry_p->bits.hdw.dcf_err, 1117 entry_p->bits.hdw.l2_len, 1118 entry_p->bits.hdw.pktbufsz, 1119 bptr, 1120 entry_p->bits.ldw.pkt_buf_addr)); 1121 1122 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1123 RCR_PKT_BUF_ADDR_SHIFT; 1124 1125 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1126 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1127 #endif 1128 } 1129 1130 void 1131 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1132 { 1133 npi_handle_t handle; 1134 rbr_stat_t rbr_stat; 1135 addr44_t hd_addr; 1136 addr44_t tail_addr; 1137 uint16_t qlen; 1138 1139 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1140 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1141 1142 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1143 1144 /* RBR head */ 1145 hd_addr.addr = 0; 1146 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1147 #if defined(__i386) 1148 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1149 (void *)(uint32_t)hd_addr.addr); 1150 #else 1151 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1152 (void *)hd_addr.addr); 1153 #endif 1154 1155 /* RBR stats */ 1156 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1157 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1158 1159 /* RCR tail */ 1160 tail_addr.addr = 0; 1161 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1162 #if defined(__i386) 1163 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1164 (void *)(uint32_t)tail_addr.addr); 1165 #else 1166 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1167 (void *)tail_addr.addr); 1168 #endif 1169 1170 /* RCR qlen */ 1171 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1172 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1173 1174 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1175 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1176 } 1177 1178 void 1179 nxge_rxdma_stop(p_nxge_t nxgep) 1180 { 1181 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1182 1183 
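    /*
     * Quiesce in order: stop the link monitor, disable the RX MAC,
     * then stop the RXDMA channels.
     */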
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    (void) nxge_rx_mac_disable(nxgep);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop"));
}

void
nxge_rxdma_stop_reinit(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit"));

    (void) nxge_rxdma_stop(nxgep);
    (void) nxge_uninit_rxdma_channels(nxgep);
    (void) nxge_init_rxdma_channels(nxgep);

#ifndef AXIS_DEBUG_LB
    (void) nxge_xcvr_init(nxgep);
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
#endif
    (void) nxge_rx_mac_enable(nxgep);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit"));
}

nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    nxge_status_t status;
    npi_status_t rs = NPI_SUCCESS;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_hw_mode: mode %d", enable));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_mode: not initialized"));
        return (NXGE_ERROR);
    }

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_mode: "
            "NULL ring pointer(s)"));
        return (NXGE_ERROR);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_hw_mode: no channels"));
        return (NXGE_OK);
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
            if (ring) {
                if (enable) {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (enable)", rdc));
                    rs = npi_rxdma_cfg_rdc_enable
                        (handle, rdc);
                } else {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (disable)", rdc));
                    rs = npi_rxdma_cfg_rdc_disable
                        (handle, rdc);
                }
            }
        }
    }

    status = ((rs == NPI_SUCCESS) ?
NXGE_OK : NXGE_ERROR | rs); 1261 1262 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1263 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1264 1265 return (status); 1266 } 1267 1268 void 1269 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1270 { 1271 npi_handle_t handle; 1272 1273 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1274 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1275 1276 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1277 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1278 1279 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1280 } 1281 1282 void 1283 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1284 { 1285 npi_handle_t handle; 1286 1287 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1288 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1289 1290 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1291 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1292 1293 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1294 } 1295 1296 void 1297 nxge_hw_start_rx(p_nxge_t nxgep) 1298 { 1299 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1300 1301 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1302 (void) nxge_rx_mac_enable(nxgep); 1303 1304 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1305 } 1306 1307 /*ARGSUSED*/ 1308 void 1309 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1310 { 1311 nxge_grp_set_t *set = &nxgep->rx_set; 1312 int rdc; 1313 1314 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1315 1316 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1317 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1318 "<== nxge_tx_port_fatal_err_recover: " 1319 "NULL ring pointer(s)")); 1320 return; 1321 } 1322 1323 if (set->owned.map == 0) { 1324 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1325 "nxge_rxdma_regs_dump_channels: no channels")); 1326 return; 1327 } 1328 1329 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1330 if ((1 << rdc) & set->owned.map) { 1331 rx_rbr_ring_t *ring = 1332 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1333 if (ring) { 1334 nxge_rxdma_hw_stop(nxgep, rdc); 1335 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1336 "==> nxge_fixup_rxdma_rings: " 1337 "channel %d ring $%px", 1338 rdc, ring)); 1339 (void) nxge_rxdma_fixup_channel 1340 (nxgep, rdc, rdc); 1341 } 1342 } 1343 } 1344 1345 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1346 } 1347 1348 void 1349 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1350 { 1351 int i; 1352 1353 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1354 i = nxge_rxdma_get_ring_index(nxgep, channel); 1355 if (i < 0) { 1356 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1357 "<== nxge_rxdma_fix_channel: no entry found")); 1358 return; 1359 } 1360 1361 nxge_rxdma_fixup_channel(nxgep, channel, i); 1362 1363 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 1364 } 1365 1366 void 1367 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1368 { 1369 int ndmas; 1370 p_rx_rbr_rings_t rx_rbr_rings; 1371 p_rx_rbr_ring_t *rbr_rings; 1372 p_rx_rcr_rings_t rx_rcr_rings; 1373 p_rx_rcr_ring_t *rcr_rings; 1374 p_rx_mbox_areas_t rx_mbox_areas_p; 1375 p_rx_mbox_t *rx_mbox_p; 1376 p_nxge_dma_pool_t dma_buf_poolp; 1377 p_nxge_dma_pool_t dma_cntl_poolp; 1378 p_rx_rbr_ring_t rbrp; 1379 p_rx_rcr_ring_t rcrp; 1380 p_rx_mbox_t mboxp; 1381 p_nxge_dma_common_t dmap; 1382 nxge_status_t status = NXGE_OK; 1383 1384 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 1385 1386 (void) nxge_rxdma_stop_channel(nxgep, channel); 1387 1388 dma_buf_poolp = nxgep->rx_buf_pool_p; 1389 
dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1390 1391 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1392 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1393 "<== nxge_rxdma_fixup_channel: buf not allocated")); 1394 return; 1395 } 1396 1397 ndmas = dma_buf_poolp->ndmas; 1398 if (!ndmas) { 1399 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1400 "<== nxge_rxdma_fixup_channel: no dma allocated")); 1401 return; 1402 } 1403 1404 rx_rbr_rings = nxgep->rx_rbr_rings; 1405 rx_rcr_rings = nxgep->rx_rcr_rings; 1406 rbr_rings = rx_rbr_rings->rbr_rings; 1407 rcr_rings = rx_rcr_rings->rcr_rings; 1408 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1409 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1410 1411 /* Reinitialize the receive block and completion rings */ 1412 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 1413 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 1414 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 1415 1416 1417 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1418 rbrp->rbr_rd_index = 0; 1419 rcrp->comp_rd_index = 0; 1420 rcrp->comp_wt_index = 0; 1421 1422 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1423 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1424 1425 status = nxge_rxdma_start_channel(nxgep, channel, 1426 rbrp, rcrp, mboxp); 1427 if (status != NXGE_OK) { 1428 goto nxge_rxdma_fixup_channel_fail; 1429 } 1430 if (status != NXGE_OK) { 1431 goto nxge_rxdma_fixup_channel_fail; 1432 } 1433 1434 nxge_rxdma_fixup_channel_fail: 1435 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1436 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 1437 1438 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 1439 } 1440 1441 /* ARGSUSED */ 1442 int 1443 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1444 { 1445 return (channel); 1446 } 1447 1448 p_rx_rbr_ring_t 1449 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1450 { 1451 nxge_grp_set_t *set = &nxgep->rx_set; 1452 nxge_channel_t rdc; 1453 1454 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1455 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1456 1457 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1458 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1459 "<== nxge_rxdma_get_rbr_ring: " 1460 "NULL ring pointer(s)")); 1461 return (NULL); 1462 } 1463 1464 if (set->owned.map == 0) { 1465 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1466 "<== nxge_rxdma_get_rbr_ring: no channels")); 1467 return (NULL); 1468 } 1469 1470 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1471 if ((1 << rdc) & set->owned.map) { 1472 rx_rbr_ring_t *ring = 1473 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1474 if (ring) { 1475 if (channel == ring->rdc) { 1476 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1477 "==> nxge_rxdma_get_rbr_ring: " 1478 "channel %d ring $%p", rdc, ring)); 1479 return (ring); 1480 } 1481 } 1482 } 1483 } 1484 1485 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1486 "<== nxge_rxdma_get_rbr_ring: not found")); 1487 1488 return (NULL); 1489 } 1490 1491 p_rx_rcr_ring_t 1492 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1493 { 1494 nxge_grp_set_t *set = &nxgep->rx_set; 1495 nxge_channel_t rdc; 1496 1497 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1498 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1499 1500 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1501 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1502 "<== nxge_rxdma_get_rcr_ring: " 1503 "NULL ring pointer(s)")); 1504 return (NULL); 1505 } 1506 1507 if (set->owned.map == 0) { 1508 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1509 "<== nxge_rxdma_get_rbr_ring: no channels")); 1510 return (NULL); 1511 } 1512 1513 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1514 if ((1 << 
rdc) & set->owned.map) { 1515 rx_rcr_ring_t *ring = 1516 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1517 if (ring) { 1518 if (channel == ring->rdc) { 1519 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1520 "==> nxge_rxdma_get_rcr_ring: " 1521 "channel %d ring $%p", rdc, ring)); 1522 return (ring); 1523 } 1524 } 1525 } 1526 } 1527 1528 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1529 "<== nxge_rxdma_get_rcr_ring: not found")); 1530 1531 return (NULL); 1532 } 1533 1534 /* 1535 * Static functions start here. 1536 */ 1537 static p_rx_msg_t 1538 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1539 { 1540 p_rx_msg_t nxge_mp = NULL; 1541 p_nxge_dma_common_t dmamsg_p; 1542 uchar_t *buffer; 1543 1544 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1545 if (nxge_mp == NULL) { 1546 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1547 "Allocation of a rx msg failed.")); 1548 goto nxge_allocb_exit; 1549 } 1550 1551 nxge_mp->use_buf_pool = B_FALSE; 1552 if (dmabuf_p) { 1553 nxge_mp->use_buf_pool = B_TRUE; 1554 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1555 *dmamsg_p = *dmabuf_p; 1556 dmamsg_p->nblocks = 1; 1557 dmamsg_p->block_size = size; 1558 dmamsg_p->alength = size; 1559 buffer = (uchar_t *)dmabuf_p->kaddrp; 1560 1561 dmabuf_p->kaddrp = (void *) 1562 ((char *)dmabuf_p->kaddrp + size); 1563 dmabuf_p->ioaddr_pp = (void *) 1564 ((char *)dmabuf_p->ioaddr_pp + size); 1565 dmabuf_p->alength -= size; 1566 dmabuf_p->offset += size; 1567 dmabuf_p->dma_cookie.dmac_laddress += size; 1568 dmabuf_p->dma_cookie.dmac_size -= size; 1569 1570 } else { 1571 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1572 if (buffer == NULL) { 1573 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1574 "Allocation of a receive page failed.")); 1575 goto nxge_allocb_fail1; 1576 } 1577 } 1578 1579 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1580 if (nxge_mp->rx_mblk_p == NULL) { 1581 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1582 goto nxge_allocb_fail2; 1583 } 1584 1585 nxge_mp->buffer = buffer; 1586 nxge_mp->block_size = size; 1587 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1588 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1589 nxge_mp->ref_cnt = 1; 1590 nxge_mp->free = B_TRUE; 1591 nxge_mp->rx_use_bcopy = B_FALSE; 1592 1593 atomic_inc_32(&nxge_mblks_pending); 1594 1595 goto nxge_allocb_exit; 1596 1597 nxge_allocb_fail2: 1598 if (!nxge_mp->use_buf_pool) { 1599 KMEM_FREE(buffer, size); 1600 } 1601 1602 nxge_allocb_fail1: 1603 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1604 nxge_mp = NULL; 1605 1606 nxge_allocb_exit: 1607 return (nxge_mp); 1608 } 1609 1610 p_mblk_t 1611 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1612 { 1613 p_mblk_t mp; 1614 1615 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1616 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1617 "offset = 0x%08X " 1618 "size = 0x%08X", 1619 nxge_mp, offset, size)); 1620 1621 mp = desballoc(&nxge_mp->buffer[offset], size, 1622 0, &nxge_mp->freeb); 1623 if (mp == NULL) { 1624 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1625 goto nxge_dupb_exit; 1626 } 1627 atomic_inc_32(&nxge_mp->ref_cnt); 1628 1629 1630 nxge_dupb_exit: 1631 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1632 nxge_mp)); 1633 return (mp); 1634 } 1635 1636 p_mblk_t 1637 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1638 { 1639 p_mblk_t mp; 1640 uchar_t *dp; 1641 1642 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1643 if (mp == NULL) { 1644 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1645 goto nxge_dupb_bcopy_exit; 1646 } 1647 dp = mp->b_rptr = 
mp->b_rptr + NXGE_RXBUF_EXTRA; 1648 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1649 mp->b_wptr = dp + size; 1650 1651 nxge_dupb_bcopy_exit: 1652 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1653 nxge_mp)); 1654 return (mp); 1655 } 1656 1657 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1658 p_rx_msg_t rx_msg_p); 1659 1660 void 1661 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1662 { 1663 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1664 1665 /* Reuse this buffer */ 1666 rx_msg_p->free = B_FALSE; 1667 rx_msg_p->cur_usage_cnt = 0; 1668 rx_msg_p->max_usage_cnt = 0; 1669 rx_msg_p->pkt_buf_size = 0; 1670 1671 if (rx_rbr_p->rbr_use_bcopy) { 1672 rx_msg_p->rx_use_bcopy = B_FALSE; 1673 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1674 } 1675 1676 /* 1677 * Get the rbr header pointer and its offset index. 1678 */ 1679 MUTEX_ENTER(&rx_rbr_p->post_lock); 1680 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1681 rx_rbr_p->rbr_wrap_mask); 1682 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1683 MUTEX_EXIT(&rx_rbr_p->post_lock); 1684 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1685 rx_rbr_p->rdc, 1); 1686 1687 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1688 "<== nxge_post_page (channel %d post_next_index %d)", 1689 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1690 1691 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1692 } 1693 1694 void 1695 nxge_freeb(p_rx_msg_t rx_msg_p) 1696 { 1697 size_t size; 1698 uchar_t *buffer = NULL; 1699 int ref_cnt; 1700 boolean_t free_state = B_FALSE; 1701 1702 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1703 1704 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1705 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1706 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1707 rx_msg_p, nxge_mblks_pending)); 1708 1709 /* 1710 * First we need to get the free state, then 1711 * atomic decrement the reference count to prevent 1712 * the race condition with the interrupt thread that 1713 * is processing a loaned up buffer block. 1714 */ 1715 free_state = rx_msg_p->free; 1716 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1717 if (!ref_cnt) { 1718 atomic_dec_32(&nxge_mblks_pending); 1719 buffer = rx_msg_p->buffer; 1720 size = rx_msg_p->block_size; 1721 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1722 "will free: rx_msg_p = $%p (block pending %d)", 1723 rx_msg_p, nxge_mblks_pending)); 1724 1725 if (!rx_msg_p->use_buf_pool) { 1726 KMEM_FREE(buffer, size); 1727 } 1728 1729 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1730 1731 if (ring) { 1732 /* 1733 * Decrement the receive buffer ring's reference 1734 * count, too. 1735 */ 1736 atomic_dec_32(&ring->rbr_ref_cnt); 1737 1738 /* 1739 * Free the receive buffer ring, if 1740 * 1. all the receive buffers have been freed 1741 * 2. and we are in the proper state (that is, 1742 * we are not UNMAPPING). 1743 */ 1744 if (ring->rbr_ref_cnt == 0 && 1745 ring->rbr_state == RBR_UNMAPPED) { 1746 /* 1747 * Free receive data buffers, 1748 * buffer index information 1749 * (rxring_info) and 1750 * the message block ring. 
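                 * This path is reached only when the last
                 * loaned-up buffer is returned after the
                 * ring itself has already been unmapped.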
1751 */ 1752 NXGE_DEBUG_MSG((NULL, RX_CTL, 1753 "nxge_freeb:rx_msg_p = $%p " 1754 "(block pending %d) free buffers", 1755 rx_msg_p, nxge_mblks_pending)); 1756 nxge_rxdma_databuf_free(ring); 1757 if (ring->ring_info) { 1758 KMEM_FREE(ring->ring_info, 1759 sizeof (rxring_info_t)); 1760 } 1761 1762 if (ring->rx_msg_ring) { 1763 KMEM_FREE(ring->rx_msg_ring, 1764 ring->tnblocks * 1765 sizeof (p_rx_msg_t)); 1766 } 1767 KMEM_FREE(ring, sizeof (*ring)); 1768 } 1769 } 1770 return; 1771 } 1772 1773 /* 1774 * Repost buffer. 1775 */ 1776 if (free_state && (ref_cnt == 1) && ring) { 1777 NXGE_DEBUG_MSG((NULL, RX_CTL, 1778 "nxge_freeb: post page $%p:", rx_msg_p)); 1779 if (ring->rbr_state == RBR_POSTING) 1780 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1781 } 1782 1783 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1784 } 1785 1786 uint_t 1787 nxge_rx_intr(void *arg1, void *arg2) 1788 { 1789 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1790 p_nxge_t nxgep = (p_nxge_t)arg2; 1791 p_nxge_ldg_t ldgp; 1792 uint8_t channel; 1793 npi_handle_t handle; 1794 rx_dma_ctl_stat_t cs; 1795 1796 #ifdef NXGE_DEBUG 1797 rxdma_cfig1_t cfg; 1798 #endif 1799 uint_t serviced = DDI_INTR_UNCLAIMED; 1800 1801 if (ldvp == NULL) { 1802 NXGE_DEBUG_MSG((NULL, INT_CTL, 1803 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1804 nxgep, ldvp)); 1805 1806 return (DDI_INTR_CLAIMED); 1807 } 1808 1809 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1810 nxgep = ldvp->nxgep; 1811 } 1812 1813 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1814 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1815 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1816 "<== nxge_rx_intr: interface not started or intialized")); 1817 return (DDI_INTR_CLAIMED); 1818 } 1819 1820 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1821 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1822 nxgep, ldvp)); 1823 1824 /* 1825 * This interrupt handler is for a specific 1826 * receive dma channel. 1827 */ 1828 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1829 /* 1830 * Get the control and status for this channel. 1831 */ 1832 channel = ldvp->channel; 1833 ldgp = ldvp->ldgp; 1834 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1835 1836 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1837 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1838 channel, 1839 cs.value, 1840 cs.bits.hdw.rcrto, 1841 cs.bits.hdw.rcrthres)); 1842 1843 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1844 serviced = DDI_INTR_CLAIMED; 1845 1846 /* error events. */ 1847 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1848 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1849 } 1850 1851 nxge_intr_exit: 1852 /* 1853 * Enable the mailbox update interrupt if we want 1854 * to use mailbox. We probably don't need to use 1855 * mailbox as it only saves us one pio read. 1856 * Also write 1 to rcrthres and rcrto to clear 1857 * these two edge triggered bits. 1858 */ 1859 1860 cs.value &= RX_DMA_CTL_STAT_WR1C; 1861 cs.bits.hdw.mex = 1; 1862 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1863 cs.value); 1864 1865 /* 1866 * Rearm this logical group if this is a single device 1867 * group. 
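     * Arming writes the arm bit and the group's timer to LDGIMGN; in a
     * guest domain this is done through nxge_hio_ldgimgn() rather than
     * by a direct register write.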
1868 */ 1869 if (ldgp->nldvs == 1) { 1870 ldgimgm_t mgm; 1871 mgm.value = 0; 1872 mgm.bits.ldw.arm = 1; 1873 mgm.bits.ldw.timer = ldgp->ldg_timer; 1874 if (isLDOMguest(nxgep)) { 1875 nxge_hio_ldgimgn(nxgep, ldgp); 1876 } else { 1877 NXGE_REG_WR64(handle, 1878 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1879 mgm.value); 1880 } 1881 } 1882 1883 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1884 serviced)); 1885 return (serviced); 1886 } 1887 1888 /* 1889 * Process the packets received in the specified logical device 1890 * and pass up a chain of message blocks to the upper layer. 1891 */ 1892 static void 1893 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 1894 { 1895 p_mblk_t mp; 1896 p_rx_rcr_ring_t rcrp; 1897 1898 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1899 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 1900 if (rcrp->poll_flag) { 1901 /* It is in the poll mode */ 1902 return; 1903 } 1904 1905 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 1906 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1907 "<== nxge_rx_pkts_vring: no mp")); 1908 return; 1909 } 1910 1911 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1912 mp)); 1913 1914 #ifdef NXGE_DEBUG 1915 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1916 "==> nxge_rx_pkts_vring:calling mac_rx " 1917 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1918 "mac_handle $%p", 1919 mp->b_wptr - mp->b_rptr, 1920 mp, mp->b_cont, mp->b_next, 1921 rcrp, rcrp->rcr_mac_handle)); 1922 1923 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1924 "==> nxge_rx_pkts_vring: dump packets " 1925 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1926 mp, 1927 mp->b_rptr, 1928 mp->b_wptr, 1929 nxge_dump_packet((char *)mp->b_rptr, 1930 mp->b_wptr - mp->b_rptr))); 1931 if (mp->b_cont) { 1932 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1933 "==> nxge_rx_pkts_vring: dump b_cont packets " 1934 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1935 mp->b_cont, 1936 mp->b_cont->b_rptr, 1937 mp->b_cont->b_wptr, 1938 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1939 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1940 } 1941 if (mp->b_next) { 1942 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1943 "==> nxge_rx_pkts_vring: dump next packets " 1944 "(b_rptr $%p): %s", 1945 mp->b_next->b_rptr, 1946 nxge_dump_packet((char *)mp->b_next->b_rptr, 1947 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1948 } 1949 #endif 1950 1951 if (!isLDOMguest(nxgep)) 1952 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1953 #if defined(sun4v) 1954 else { /* isLDOMguest(nxgep) */ 1955 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1956 nxgep->nxge_hw_p->hio; 1957 nx_vio_fp_t *vio = &nhd->hio.vio; 1958 1959 if (vio->cb.vio_net_rx_cb) { 1960 (*vio->cb.vio_net_rx_cb) 1961 (nxgep->hio_vr->vhp, mp); 1962 } 1963 } 1964 #endif 1965 } 1966 1967 1968 /* 1969 * This routine is the main packet receive processing function. 1970 * It gets the packet type, error code, and buffer related 1971 * information from the receive completion entry. 1972 * How many completion entries to process is based on the number of packets 1973 * queued by the hardware, a hardware maintained tail pointer 1974 * and a configurable receive packet count. 1975 * 1976 * A chain of message blocks will be created as result of processing 1977 * the completion entries. This chain of message blocks will be returned and 1978 * a hardware control status register will be updated with the number of 1979 * packets were removed from the hardware queue. 
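 *
 * In outline (a rough sketch of the loop below, not a literal quote):
 *
 *	read qlen from the RCR;
 *	while (qlen_hw) {
 *		nxge_receive_packet(...);  (may set multi / mp_cont)
 *		chain the returned mblk(s) onto head_mp;
 *		advance comp_rd_index and rcr_desc_rd_head_p;
 *	}
 *	write pktread/ptrread back to RX_DMA_CTL_STAT;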
1980 * 1981 */ 1982 static mblk_t * 1983 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 1984 int bytes_to_pickup) 1985 { 1986 npi_handle_t handle; 1987 uint8_t channel; 1988 uint32_t comp_rd_index; 1989 p_rcr_entry_t rcr_desc_rd_head_p; 1990 p_rcr_entry_t rcr_desc_rd_head_pp; 1991 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1992 uint16_t qlen, nrcr_read, npkt_read; 1993 uint32_t qlen_hw; 1994 boolean_t multi; 1995 rcrcfig_b_t rcr_cfg_b; 1996 int totallen = 0; 1997 #if defined(_BIG_ENDIAN) 1998 npi_status_t rs = NPI_SUCCESS; 1999 #endif 2000 2001 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2002 "channel %d", rcr_p->rdc)); 2003 2004 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 2005 return (NULL); 2006 } 2007 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2008 channel = rcr_p->rdc; 2009 2010 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2011 "==> nxge_rx_pkts: START: rcr channel %d " 2012 "head_p $%p head_pp $%p index %d ", 2013 channel, rcr_p->rcr_desc_rd_head_p, 2014 rcr_p->rcr_desc_rd_head_pp, 2015 rcr_p->comp_rd_index)); 2016 2017 2018 #if !defined(_BIG_ENDIAN) 2019 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2020 #else 2021 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2022 if (rs != NPI_SUCCESS) { 2023 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2024 "channel %d, get qlen failed 0x%08x", 2025 channel, rs)); 2026 return (NULL); 2027 } 2028 #endif 2029 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2030 "qlen %d", channel, qlen)); 2031 2032 2033 2034 if (!qlen) { 2035 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2036 "==> nxge_rx_pkts:rcr channel %d " 2037 "qlen %d (no pkts)", channel, qlen)); 2038 2039 return (NULL); 2040 } 2041 2042 comp_rd_index = rcr_p->comp_rd_index; 2043 2044 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2045 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2046 nrcr_read = npkt_read = 0; 2047 2048 /* 2049 * Number of packets queued 2050 * (The jumbo or multi packet will be counted as only one 2051 * packets and it may take up more than one completion entry). 2052 */ 2053 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2054 qlen : nxge_max_rx_pkts; 2055 head_mp = NULL; 2056 tail_mp = &head_mp; 2057 nmp = mp_cont = NULL; 2058 multi = B_FALSE; 2059 2060 while (qlen_hw) { 2061 2062 #ifdef NXGE_DEBUG 2063 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2064 #endif 2065 /* 2066 * Process one completion ring entry. 
2067 */ 2068 nxge_receive_packet(nxgep, 2069 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2070 2071 /* 2072 * message chaining modes 2073 */ 2074 if (nmp) { 2075 nmp->b_next = NULL; 2076 if (!multi && !mp_cont) { /* frame fits a partition */ 2077 *tail_mp = nmp; 2078 tail_mp = &nmp->b_next; 2079 totallen += MBLKL(nmp); 2080 nmp = NULL; 2081 } else if (multi && !mp_cont) { /* first segment */ 2082 *tail_mp = nmp; 2083 tail_mp = &nmp->b_cont; 2084 totallen += MBLKL(nmp); 2085 } else if (multi && mp_cont) { /* mid of multi segs */ 2086 *tail_mp = mp_cont; 2087 tail_mp = &mp_cont->b_cont; 2088 totallen += MBLKL(mp_cont); 2089 } else if (!multi && mp_cont) { /* last segment */ 2090 *tail_mp = mp_cont; 2091 tail_mp = &nmp->b_next; 2092 totallen += MBLKL(mp_cont); 2093 nmp = NULL; 2094 } 2095 } 2096 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2097 "==> nxge_rx_pkts: loop: rcr channel %d " 2098 "before updating: multi %d " 2099 "nrcr_read %d " 2100 "npk read %d " 2101 "head_pp $%p index %d ", 2102 channel, 2103 multi, 2104 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2105 comp_rd_index)); 2106 2107 if (!multi) { 2108 qlen_hw--; 2109 npkt_read++; 2110 } 2111 2112 /* 2113 * Update the next read entry. 2114 */ 2115 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2116 rcr_p->comp_wrap_mask); 2117 2118 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2119 rcr_p->rcr_desc_first_p, 2120 rcr_p->rcr_desc_last_p); 2121 2122 nrcr_read++; 2123 2124 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2125 "<== nxge_rx_pkts: (SAM, process one packet) " 2126 "nrcr_read %d", 2127 nrcr_read)); 2128 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2129 "==> nxge_rx_pkts: loop: rcr channel %d " 2130 "multi %d " 2131 "nrcr_read %d " 2132 "npk read %d " 2133 "head_pp $%p index %d ", 2134 channel, 2135 multi, 2136 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2137 comp_rd_index)); 2138 2139 if ((bytes_to_pickup != -1) && 2140 (totallen >= bytes_to_pickup)) { 2141 break; 2142 } 2143 } 2144 2145 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2146 rcr_p->comp_rd_index = comp_rd_index; 2147 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2148 2149 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2150 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2151 rcr_p->intr_timeout = nxgep->intr_timeout; 2152 rcr_p->intr_threshold = nxgep->intr_threshold; 2153 rcr_cfg_b.value = 0x0ULL; 2154 if (rcr_p->intr_timeout) 2155 rcr_cfg_b.bits.ldw.entout = 1; 2156 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2157 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2158 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2159 channel, rcr_cfg_b.value); 2160 } 2161 2162 cs.bits.ldw.pktread = npkt_read; 2163 cs.bits.ldw.ptrread = nrcr_read; 2164 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2165 channel, cs.value); 2166 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2167 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2168 "head_pp $%p index %016llx ", 2169 channel, 2170 rcr_p->rcr_desc_rd_head_pp, 2171 rcr_p->comp_rd_index)); 2172 /* 2173 * Update RCR buffer pointer read and number of packets 2174 * read. 
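 *
 * That update is the RX_DMA_CTL_STAT write just above; informally:
 *
 *	cs.bits.ldw.pktread = npkt_read;	packets consumed this pass
 *	cs.bits.ldw.ptrread = nrcr_read;	RCR entries consumed
 *	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, cs.value);
 *
 * which is presumably what lets the hardware retire those completion
 * entries and reduce the queue length reported at the top of this
 * routine.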
2175 */ 2176 2177 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2178 return (head_mp); 2179 } 2180 2181 void 2182 nxge_receive_packet(p_nxge_t nxgep, 2183 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2184 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2185 { 2186 p_mblk_t nmp = NULL; 2187 uint64_t multi; 2188 uint64_t dcf_err; 2189 uint8_t channel; 2190 2191 boolean_t first_entry = B_TRUE; 2192 boolean_t is_tcp_udp = B_FALSE; 2193 boolean_t buffer_free = B_FALSE; 2194 boolean_t error_send_up = B_FALSE; 2195 uint8_t error_type; 2196 uint16_t l2_len; 2197 uint16_t skip_len; 2198 uint8_t pktbufsz_type; 2199 uint64_t rcr_entry; 2200 uint64_t *pkt_buf_addr_pp; 2201 uint64_t *pkt_buf_addr_p; 2202 uint32_t buf_offset; 2203 uint32_t bsize; 2204 uint32_t error_disp_cnt; 2205 uint32_t msg_index; 2206 p_rx_rbr_ring_t rx_rbr_p; 2207 p_rx_msg_t *rx_msg_ring_p; 2208 p_rx_msg_t rx_msg_p; 2209 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2210 nxge_status_t status = NXGE_OK; 2211 boolean_t is_valid = B_FALSE; 2212 p_nxge_rx_ring_stats_t rdc_stats; 2213 uint32_t bytes_read; 2214 uint64_t pkt_type; 2215 uint64_t frag; 2216 boolean_t pkt_too_long_err = B_FALSE; 2217 #ifdef NXGE_DEBUG 2218 int dump_len; 2219 #endif 2220 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2221 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2222 2223 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2224 2225 multi = (rcr_entry & RCR_MULTI_MASK); 2226 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2227 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2228 2229 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2230 frag = (rcr_entry & RCR_FRAG_MASK); 2231 2232 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2233 2234 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2235 RCR_PKTBUFSZ_SHIFT); 2236 #if defined(__i386) 2237 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2238 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2239 #else 2240 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2241 RCR_PKT_BUF_ADDR_SHIFT); 2242 #endif 2243 2244 channel = rcr_p->rdc; 2245 2246 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2247 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2248 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2249 "error_type 0x%x pkt_type 0x%x " 2250 "pktbufsz_type %d ", 2251 rcr_desc_rd_head_p, 2252 rcr_entry, pkt_buf_addr_pp, l2_len, 2253 multi, 2254 error_type, 2255 pkt_type, 2256 pktbufsz_type)); 2257 2258 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2259 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2260 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2261 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2262 rcr_entry, pkt_buf_addr_pp, l2_len, 2263 multi, 2264 error_type, 2265 pkt_type)); 2266 2267 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2268 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2269 "full pkt_buf_addr_pp $%p l2_len %d", 2270 rcr_entry, pkt_buf_addr_pp, l2_len)); 2271 2272 /* get the stats ptr */ 2273 rdc_stats = rcr_p->rdc_stats; 2274 2275 if (!l2_len) { 2276 2277 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2278 "<== nxge_receive_packet: failed: l2 length is 0.")); 2279 return; 2280 } 2281 2282 /* 2283 * Sofware workaround for BMAC hardware limitation that allows 2284 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2285 * instead of 0x2400 for jumbo. 2286 */ 2287 if (l2_len > nxgep->mac.maxframesize) { 2288 pkt_too_long_err = B_TRUE; 2289 } 2290 2291 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
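 *
 * ETHERFCSL is the 4-byte Ethernet FCS, so the subtraction below yields
 * the L2 length actually handed up the stack.  Similarly, the packet
 * buffer address extracted from the RCR entry is stored right-shifted,
 * which is why it is shifted back left by RCR_PKT_BUF_ADDR_SHIFT_FULL
 * (6 bits, per the comment below) before it can be used as an I/O
 * address; e.g. a stored value of 0x10 corresponds to an I/O address of
 * 0x400.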
*/ 2292 l2_len -= ETHERFCSL; 2293 2294 /* shift 6 bits to get the full io address */ 2295 #if defined(__i386) 2296 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2297 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2298 #else 2299 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2300 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2301 #endif 2302 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2303 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2304 "full pkt_buf_addr_pp $%p l2_len %d", 2305 rcr_entry, pkt_buf_addr_pp, l2_len)); 2306 2307 rx_rbr_p = rcr_p->rx_rbr_p; 2308 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2309 2310 if (first_entry) { 2311 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2312 RXDMA_HDR_SIZE_DEFAULT); 2313 2314 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2315 "==> nxge_receive_packet: first entry 0x%016llx " 2316 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2317 rcr_entry, pkt_buf_addr_pp, l2_len, 2318 hdr_size)); 2319 } 2320 2321 MUTEX_ENTER(&rcr_p->lock); 2322 MUTEX_ENTER(&rx_rbr_p->lock); 2323 2324 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2325 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2326 "full pkt_buf_addr_pp $%p l2_len %d", 2327 rcr_entry, pkt_buf_addr_pp, l2_len)); 2328 2329 /* 2330 * Packet buffer address in the completion entry points 2331 * to the starting buffer address (offset 0). 2332 * Use the starting buffer address to locate the corresponding 2333 * kernel address. 2334 */ 2335 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2336 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2337 &buf_offset, 2338 &msg_index); 2339 2340 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2341 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2342 "full pkt_buf_addr_pp $%p l2_len %d", 2343 rcr_entry, pkt_buf_addr_pp, l2_len)); 2344 2345 if (status != NXGE_OK) { 2346 MUTEX_EXIT(&rx_rbr_p->lock); 2347 MUTEX_EXIT(&rcr_p->lock); 2348 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2349 "<== nxge_receive_packet: found vaddr failed %d", 2350 status)); 2351 return; 2352 } 2353 2354 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2355 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2356 "full pkt_buf_addr_pp $%p l2_len %d", 2357 rcr_entry, pkt_buf_addr_pp, l2_len)); 2358 2359 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2360 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2361 "full pkt_buf_addr_pp $%p l2_len %d", 2362 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2363 2364 rx_msg_p = rx_msg_ring_p[msg_index]; 2365 2366 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2367 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2368 "full pkt_buf_addr_pp $%p l2_len %d", 2369 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2370 2371 switch (pktbufsz_type) { 2372 case RCR_PKTBUFSZ_0: 2373 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2374 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2375 "==> nxge_receive_packet: 0 buf %d", bsize)); 2376 break; 2377 case RCR_PKTBUFSZ_1: 2378 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2379 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2380 "==> nxge_receive_packet: 1 buf %d", bsize)); 2381 break; 2382 case RCR_PKTBUFSZ_2: 2383 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2384 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2385 "==> nxge_receive_packet: 2 buf %d", bsize)); 2386 break; 2387 case RCR_SINGLE_BLOCK: 2388 bsize = rx_msg_p->block_size; 2389 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2390 "==> nxge_receive_packet: single %d", bsize)); 2391 2392 break; 2393 default: 2394 MUTEX_EXIT(&rx_rbr_p->lock); 2395 MUTEX_EXIT(&rcr_p->lock); 2396 return; 2397 } 2398 2399 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2400 (buf_offset + sw_offset_bytes), 2401 (hdr_size + l2_len), 2402 DDI_DMA_SYNC_FORCPU); 2403 2404 
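    /*
     * The code below is the buffer re-use accounting.  A receive buffer
     * block can hold several packets of the current buffer size; a rough
     * sketch of the bookkeeping (using only fields that appear here):
     *
     *	first use:	max_usage_cnt = block_size / bsize (or 1 for
     *			RCR_SINGLE_BLOCK), cur_usage_cnt = 1
     *	later uses:	cur_usage_cnt++
     *	last use:	cur_usage_cnt == max_usage_cnt, so buffer_free is
     *			set and the block may be reposted via nxge_freeb()
     *
     * For example, an 8K block carved into 2K packet buffers gives a
     * max_usage_cnt of 4.  (The 8K/2K figures are only an illustration of
     * the arithmetic, not values taken from this configuration.)
     */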
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2405 "==> nxge_receive_packet: after first dump:usage count")); 2406 2407 if (rx_msg_p->cur_usage_cnt == 0) { 2408 if (rx_rbr_p->rbr_use_bcopy) { 2409 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2410 if (rx_rbr_p->rbr_consumed < 2411 rx_rbr_p->rbr_threshold_hi) { 2412 if (rx_rbr_p->rbr_threshold_lo == 0 || 2413 ((rx_rbr_p->rbr_consumed >= 2414 rx_rbr_p->rbr_threshold_lo) && 2415 (rx_rbr_p->rbr_bufsize_type >= 2416 pktbufsz_type))) { 2417 rx_msg_p->rx_use_bcopy = B_TRUE; 2418 } 2419 } else { 2420 rx_msg_p->rx_use_bcopy = B_TRUE; 2421 } 2422 } 2423 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2424 "==> nxge_receive_packet: buf %d (new block) ", 2425 bsize)); 2426 2427 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2428 rx_msg_p->pkt_buf_size = bsize; 2429 rx_msg_p->cur_usage_cnt = 1; 2430 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2431 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2432 "==> nxge_receive_packet: buf %d " 2433 "(single block) ", 2434 bsize)); 2435 /* 2436 * Buffer can be reused once the free function 2437 * is called. 2438 */ 2439 rx_msg_p->max_usage_cnt = 1; 2440 buffer_free = B_TRUE; 2441 } else { 2442 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2443 if (rx_msg_p->max_usage_cnt == 1) { 2444 buffer_free = B_TRUE; 2445 } 2446 } 2447 } else { 2448 rx_msg_p->cur_usage_cnt++; 2449 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2450 buffer_free = B_TRUE; 2451 } 2452 } 2453 2454 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2455 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2456 msg_index, l2_len, 2457 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2458 2459 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2460 rdc_stats->ierrors++; 2461 if (dcf_err) { 2462 rdc_stats->dcf_err++; 2463 #ifdef NXGE_DEBUG 2464 if (!rdc_stats->dcf_err) { 2465 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2466 "nxge_receive_packet: channel %d dcf_err rcr" 2467 " 0x%llx", channel, rcr_entry)); 2468 } 2469 #endif 2470 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2471 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2472 } else if (pkt_too_long_err) { 2473 rdc_stats->pkt_too_long_err++; 2474 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2475 " channel %d packet length [%d] > " 2476 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2477 nxgep->mac.maxframesize)); 2478 } else { 2479 /* Update error stats */ 2480 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2481 rdc_stats->errlog.compl_err_type = error_type; 2482 2483 switch (error_type) { 2484 /* 2485 * Do not send FMA ereport for RCR_L2_ERROR and 2486 * RCR_L4_CSUM_ERROR because most likely they indicate 2487 * back pressure rather than HW failures. 2488 */ 2489 case RCR_L2_ERROR: 2490 rdc_stats->l2_err++; 2491 if (rdc_stats->l2_err < 2492 error_disp_cnt) { 2493 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2494 " nxge_receive_packet:" 2495 " channel %d RCR L2_ERROR", 2496 channel)); 2497 } 2498 break; 2499 case RCR_L4_CSUM_ERROR: 2500 error_send_up = B_TRUE; 2501 rdc_stats->l4_cksum_err++; 2502 if (rdc_stats->l4_cksum_err < 2503 error_disp_cnt) { 2504 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2505 " nxge_receive_packet:" 2506 " channel %d" 2507 " RCR L4_CSUM_ERROR", channel)); 2508 } 2509 break; 2510 /* 2511 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2512 * RCR_ZCP_SOFT_ERROR because they reflect the same 2513 * FFLP and ZCP errors that have been reported by 2514 * nxge_fflp.c and nxge_zcp.c. 
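 *
 * As with the cases above, logging for these is throttled: a message is
 * emitted only while the per-type counter is still below error_disp_cnt
 * (NXGE_ERROR_SHOW_MAX), so a persistent condition does not flood the
 * log.  The pattern, repeated for each case, is simply:
 *
 *	rdc_stats-><counter>++;
 *	if (rdc_stats-><counter> < error_disp_cnt)
 *		NXGE_ERROR_MSG((...));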
             */
            case RCR_FFLP_SOFT_ERROR:
                error_send_up = B_TRUE;
                rdc_stats->fflp_soft_err++;
                if (rdc_stats->fflp_soft_err <
                    error_disp_cnt) {
                    NXGE_ERROR_MSG((nxgep,
                        NXGE_ERR_CTL,
                        " nxge_receive_packet:"
                        " channel %d"
                        " RCR FFLP_SOFT_ERROR", channel));
                }
                break;
            case RCR_ZCP_SOFT_ERROR:
                error_send_up = B_TRUE;
                rdc_stats->zcp_soft_err++;
                if (rdc_stats->zcp_soft_err <
                    error_disp_cnt)
                    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                        " nxge_receive_packet: Channel %d"
                        " RCR ZCP_SOFT_ERROR", channel));
                break;
            default:
                rdc_stats->rcr_unknown_err++;
                if (rdc_stats->rcr_unknown_err
                    < error_disp_cnt) {
                    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                        " nxge_receive_packet: Channel %d"
                        " RCR entry 0x%llx error 0x%x",
                        channel, rcr_entry, error_type));
                }
                break;
            }
        }

        /*
         * Update and repost buffer block if max usage
         * count is reached.
         */
        if (error_send_up == B_FALSE) {
            atomic_inc_32(&rx_msg_p->ref_cnt);
            if (buffer_free == B_TRUE) {
                rx_msg_p->free = B_TRUE;
            }

            MUTEX_EXIT(&rx_rbr_p->lock);
            MUTEX_EXIT(&rcr_p->lock);
            nxge_freeb(rx_msg_p);
            return;
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_receive_packet: DMA sync second "));

    bytes_read = rcr_p->rcvd_pkt_bytes;
    skip_len = sw_offset_bytes + hdr_size;
    if (!rx_msg_p->rx_use_bcopy) {
        /*
         * For loaned up buffers, the driver reference count
         * will be incremented first and then the free state.
         */
        if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
            if (first_entry) {
                nmp->b_rptr = &nmp->b_rptr[skip_len];
                if (l2_len < bsize - skip_len) {
                    nmp->b_wptr = &nmp->b_rptr[l2_len];
                } else {
                    nmp->b_wptr = &nmp->b_rptr[bsize
                        - skip_len];
                }
            } else {
                if (l2_len - bytes_read < bsize) {
                    nmp->b_wptr =
                        &nmp->b_rptr[l2_len - bytes_read];
                } else {
                    nmp->b_wptr = &nmp->b_rptr[bsize];
                }
            }
        }
    } else {
        if (first_entry) {
            nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
                l2_len < bsize - skip_len ?
                l2_len : bsize - skip_len);
        } else {
            nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
                l2_len - bytes_read < bsize ?
                l2_len - bytes_read : bsize);
        }
    }
    if (nmp != NULL) {
        if (first_entry) {
            /*
             * Jumbo packets may be received with more than one
             * buffer, increment ipackets for the first entry only.
             */
            rdc_stats->ipackets++;

            /* Update ibytes for kstat. */
            rdc_stats->ibytes += (skip_len + l2_len < bsize) ?
                l2_len : bsize;
            /*
             * Update the number of bytes read so far for the
             * current frame.
             */
            bytes_read = nmp->b_wptr - nmp->b_rptr;
        } else {
            rdc_stats->ibytes += l2_len - bytes_read < bsize ?
                l2_len - bytes_read : bsize;
            bytes_read += nmp->b_wptr - nmp->b_rptr;
        }

        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "==> nxge_receive_packet after dupb: "
            "rbr consumed %d "
            "pktbufsz_type %d "
            "nmp $%p rptr $%p wptr $%p "
            "buf_offset %d bsize %d l2_len %d skip_len %d",
            rx_rbr_p->rbr_consumed,
            pktbufsz_type,
            nmp, nmp->b_rptr, nmp->b_wptr,
            buf_offset, bsize, l2_len, skip_len));
    } else {
        cmn_err(CE_WARN, "!nxge_receive_packet: "
            "update stats (error)");
        atomic_inc_32(&rx_msg_p->ref_cnt);
        if (buffer_free == B_TRUE) {
            rx_msg_p->free = B_TRUE;
        }
        MUTEX_EXIT(&rx_rbr_p->lock);
        MUTEX_EXIT(&rcr_p->lock);
        nxge_freeb(rx_msg_p);
        return;
    }

    if (buffer_free == B_TRUE) {
        rx_msg_p->free = B_TRUE;
    }

    is_valid = (nmp != NULL);

    rcr_p->rcvd_pkt_bytes = bytes_read;

    MUTEX_EXIT(&rx_rbr_p->lock);
    MUTEX_EXIT(&rcr_p->lock);

    if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
        atomic_inc_32(&rx_msg_p->ref_cnt);
        nxge_freeb(rx_msg_p);
    }

    if (is_valid) {
        nmp->b_cont = NULL;
        if (first_entry) {
            *mp = nmp;
            *mp_cont = NULL;
        } else {
            *mp_cont = nmp;
        }
    }

    /*
     * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
     * If a packet is not fragmented and no error bit is set, then
     * L4 checksum is OK.
     */

    if (is_valid && !multi) {
        /*
         * Update hardware checksumming.
         *
         * If the checksum flag nxge_cksum_offload is 1, both TCP
         * and UDP packets can be sent up with a good checksum.
         * If the flag is 0, checksum reporting applies to TCP
         * packets only (workaround for a hardware bug).  If the
         * flag is greater than 1, neither TCP nor UDP packets
         * are reported with hardware checksum results.
         */
        if (nxge_cksum_offload == 1) {
            is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
                pkt_type == RCR_PKT_IS_UDP) ?
                B_TRUE: B_FALSE);
        } else if (!nxge_cksum_offload) {
            /* TCP checksum only. */
            is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2703 B_TRUE: B_FALSE); 2704 } 2705 2706 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2707 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2708 is_valid, multi, is_tcp_udp, frag, error_type)); 2709 2710 if (is_tcp_udp && !frag && !error_type) { 2711 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2712 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2713 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2714 "==> nxge_receive_packet: Full tcp/udp cksum " 2715 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2716 "error %d", 2717 is_valid, multi, is_tcp_udp, frag, error_type)); 2718 } 2719 } 2720 2721 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2722 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2723 2724 *multi_p = (multi == RCR_MULTI_MASK); 2725 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2726 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2727 *multi_p, nmp, *mp, *mp_cont)); 2728 } 2729 2730 /*ARGSUSED*/ 2731 static nxge_status_t 2732 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2733 { 2734 p_nxge_rx_ring_stats_t rdc_stats; 2735 npi_handle_t handle; 2736 npi_status_t rs; 2737 boolean_t rxchan_fatal = B_FALSE; 2738 boolean_t rxport_fatal = B_FALSE; 2739 uint8_t portn; 2740 nxge_status_t status = NXGE_OK; 2741 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2742 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2743 2744 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2745 portn = nxgep->mac.portnum; 2746 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2747 2748 if (cs.bits.hdw.rbr_tmout) { 2749 rdc_stats->rx_rbr_tmout++; 2750 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2751 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2752 rxchan_fatal = B_TRUE; 2753 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2754 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2755 } 2756 if (cs.bits.hdw.rsp_cnt_err) { 2757 rdc_stats->rsp_cnt_err++; 2758 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2759 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2760 rxchan_fatal = B_TRUE; 2761 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2762 "==> nxge_rx_err_evnts(channel %d): " 2763 "rsp_cnt_err", channel)); 2764 } 2765 if (cs.bits.hdw.byte_en_bus) { 2766 rdc_stats->byte_en_bus++; 2767 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2768 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2769 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2770 "==> nxge_rx_err_evnts(channel %d): " 2771 "fatal error: byte_en_bus", channel)); 2772 rxchan_fatal = B_TRUE; 2773 } 2774 if (cs.bits.hdw.rsp_dat_err) { 2775 rdc_stats->rsp_dat_err++; 2776 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2777 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2778 rxchan_fatal = B_TRUE; 2779 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2780 "==> nxge_rx_err_evnts(channel %d): " 2781 "fatal error: rsp_dat_err", channel)); 2782 } 2783 if (cs.bits.hdw.rcr_ack_err) { 2784 rdc_stats->rcr_ack_err++; 2785 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2786 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2787 rxchan_fatal = B_TRUE; 2788 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2789 "==> nxge_rx_err_evnts(channel %d): " 2790 "fatal error: rcr_ack_err", channel)); 2791 } 2792 if (cs.bits.hdw.dc_fifo_err) { 2793 rdc_stats->dc_fifo_err++; 2794 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2795 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2796 /* This is not a fatal error! 
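 *
 * "Not fatal" here means not fatal to this channel: no per-channel
 * reset is requested.  It is still treated as a port-level condition
 * (rxport_fatal is set), so recovery, when not running as an LDOMs
 * guest, is driven at the end of this routine roughly as:
 *
 *	if (rxport_fatal)
 *		status = nxge_ipp_fatal_err_recover(nxgep);
 *	if (rxchan_fatal)
 *		status = nxge_rxdma_fatal_err_recover(nxgep, channel);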
*/ 2797 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2798 "==> nxge_rx_err_evnts(channel %d): " 2799 "dc_fifo_err", channel)); 2800 rxport_fatal = B_TRUE; 2801 } 2802 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2803 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2804 &rdc_stats->errlog.pre_par, 2805 &rdc_stats->errlog.sha_par)) 2806 != NPI_SUCCESS) { 2807 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2808 "==> nxge_rx_err_evnts(channel %d): " 2809 "rcr_sha_par: get perr", channel)); 2810 return (NXGE_ERROR | rs); 2811 } 2812 if (cs.bits.hdw.rcr_sha_par) { 2813 rdc_stats->rcr_sha_par++; 2814 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2815 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2816 rxchan_fatal = B_TRUE; 2817 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2818 "==> nxge_rx_err_evnts(channel %d): " 2819 "fatal error: rcr_sha_par", channel)); 2820 } 2821 if (cs.bits.hdw.rbr_pre_par) { 2822 rdc_stats->rbr_pre_par++; 2823 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2824 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2825 rxchan_fatal = B_TRUE; 2826 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2827 "==> nxge_rx_err_evnts(channel %d): " 2828 "fatal error: rbr_pre_par", channel)); 2829 } 2830 } 2831 /* 2832 * The Following 4 status bits are for information, the system 2833 * is running fine. There is no need to send FMA ereports or 2834 * log messages. 2835 */ 2836 if (cs.bits.hdw.port_drop_pkt) { 2837 rdc_stats->port_drop_pkt++; 2838 } 2839 if (cs.bits.hdw.wred_drop) { 2840 rdc_stats->wred_drop++; 2841 } 2842 if (cs.bits.hdw.rbr_pre_empty) { 2843 rdc_stats->rbr_pre_empty++; 2844 } 2845 if (cs.bits.hdw.rcr_shadow_full) { 2846 rdc_stats->rcr_shadow_full++; 2847 } 2848 if (cs.bits.hdw.config_err) { 2849 rdc_stats->config_err++; 2850 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2851 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2852 rxchan_fatal = B_TRUE; 2853 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2854 "==> nxge_rx_err_evnts(channel %d): " 2855 "config error", channel)); 2856 } 2857 if (cs.bits.hdw.rcrincon) { 2858 rdc_stats->rcrincon++; 2859 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2860 NXGE_FM_EREPORT_RDMC_RCRINCON); 2861 rxchan_fatal = B_TRUE; 2862 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2863 "==> nxge_rx_err_evnts(channel %d): " 2864 "fatal error: rcrincon error", channel)); 2865 } 2866 if (cs.bits.hdw.rcrfull) { 2867 rdc_stats->rcrfull++; 2868 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2869 NXGE_FM_EREPORT_RDMC_RCRFULL); 2870 rxchan_fatal = B_TRUE; 2871 if (rdc_stats->rcrfull < error_disp_cnt) 2872 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2873 "==> nxge_rx_err_evnts(channel %d): " 2874 "fatal error: rcrfull error", channel)); 2875 } 2876 if (cs.bits.hdw.rbr_empty) { 2877 /* 2878 * This bit is for information, there is no need 2879 * send FMA ereport or log a message. 
2880 */ 2881 rdc_stats->rbr_empty++; 2882 } 2883 if (cs.bits.hdw.rbrfull) { 2884 rdc_stats->rbrfull++; 2885 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2886 NXGE_FM_EREPORT_RDMC_RBRFULL); 2887 rxchan_fatal = B_TRUE; 2888 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2889 "==> nxge_rx_err_evnts(channel %d): " 2890 "fatal error: rbr_full error", channel)); 2891 } 2892 if (cs.bits.hdw.rbrlogpage) { 2893 rdc_stats->rbrlogpage++; 2894 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2895 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2896 rxchan_fatal = B_TRUE; 2897 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2898 "==> nxge_rx_err_evnts(channel %d): " 2899 "fatal error: rbr logical page error", channel)); 2900 } 2901 if (cs.bits.hdw.cfiglogpage) { 2902 rdc_stats->cfiglogpage++; 2903 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2904 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2905 rxchan_fatal = B_TRUE; 2906 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2907 "==> nxge_rx_err_evnts(channel %d): " 2908 "fatal error: cfig logical page error", channel)); 2909 } 2910 2911 if (rxport_fatal) { 2912 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2913 " nxge_rx_err_evnts: fatal error on Port #%d\n", 2914 portn)); 2915 if (isLDOMguest(nxgep)) { 2916 status = NXGE_ERROR; 2917 } else { 2918 status = nxge_ipp_fatal_err_recover(nxgep); 2919 if (status == NXGE_OK) { 2920 FM_SERVICE_RESTORED(nxgep); 2921 } 2922 } 2923 } 2924 2925 if (rxchan_fatal) { 2926 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2927 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 2928 channel)); 2929 if (isLDOMguest(nxgep)) { 2930 status = NXGE_ERROR; 2931 } else { 2932 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2933 if (status == NXGE_OK) { 2934 FM_SERVICE_RESTORED(nxgep); 2935 } 2936 } 2937 } 2938 2939 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2940 2941 return (status); 2942 } 2943 2944 /* 2945 * nxge_rdc_hvio_setup 2946 * 2947 * This code appears to setup some Hypervisor variables. 2948 * 2949 * Arguments: 2950 * nxgep 2951 * channel 2952 * 2953 * Notes: 2954 * What does NIU_LP_WORKAROUND mean? 
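 *
 * Whatever the workaround covers, what the routine itself does is
 * record, per channel, the original I/O address and size of both the
 * data buffer area and the control (descriptor/mailbox) area:
 *
 *	ring->hv_rx_buf_base_ioaddr_pp	= dma_common->orig_ioaddr_pp;
 *	ring->hv_rx_buf_ioaddr_size	= dma_common->orig_alength;
 *	ring->hv_rx_cntl_base_ioaddr_pp	= dma_control->orig_ioaddr_pp;
 *	ring->hv_rx_cntl_ioaddr_size	= dma_control->orig_alength;
 *
 * hv_set is left B_FALSE; presumably a later step uses these values to
 * program the NIU logical pages through the hypervisor.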
2955 * 2956 * NPI/NXGE function calls: 2957 * na 2958 * 2959 * Context: 2960 * Any domain 2961 */ 2962 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2963 static void 2964 nxge_rdc_hvio_setup( 2965 nxge_t *nxgep, int channel) 2966 { 2967 nxge_dma_common_t *dma_common; 2968 nxge_dma_common_t *dma_control; 2969 rx_rbr_ring_t *ring; 2970 2971 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 2972 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2973 2974 ring->hv_set = B_FALSE; 2975 2976 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 2977 dma_common->orig_ioaddr_pp; 2978 ring->hv_rx_buf_ioaddr_size = (uint64_t) 2979 dma_common->orig_alength; 2980 2981 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2982 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 2983 channel, ring->hv_rx_buf_base_ioaddr_pp, 2984 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 2985 dma_common->orig_alength, dma_common->orig_alength)); 2986 2987 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2988 2989 ring->hv_rx_cntl_base_ioaddr_pp = 2990 (uint64_t)dma_control->orig_ioaddr_pp; 2991 ring->hv_rx_cntl_ioaddr_size = 2992 (uint64_t)dma_control->orig_alength; 2993 2994 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2995 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 2996 channel, ring->hv_rx_cntl_base_ioaddr_pp, 2997 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 2998 dma_control->orig_alength, dma_control->orig_alength)); 2999 } 3000 #endif 3001 3002 /* 3003 * nxge_map_rxdma 3004 * 3005 * Map an RDC into our kernel space. 3006 * 3007 * Arguments: 3008 * nxgep 3009 * channel The channel to map. 3010 * 3011 * Notes: 3012 * 1. Allocate & initialise a memory pool, if necessary. 3013 * 2. Allocate however many receive buffers are required. 3014 * 3. Setup buffers, descriptors, and mailbox. 3015 * 3016 * NPI/NXGE function calls: 3017 * nxge_alloc_rx_mem_pool() 3018 * nxge_alloc_rbb() 3019 * nxge_map_rxdma_channel() 3020 * 3021 * Registers accessed: 3022 * 3023 * Context: 3024 * Any domain 3025 */ 3026 static nxge_status_t 3027 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3028 { 3029 nxge_dma_common_t **data; 3030 nxge_dma_common_t **control; 3031 rx_rbr_ring_t **rbr_ring; 3032 rx_rcr_ring_t **rcr_ring; 3033 rx_mbox_t **mailbox; 3034 uint32_t chunks; 3035 3036 nxge_status_t status; 3037 3038 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3039 3040 if (!nxgep->rx_buf_pool_p) { 3041 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3042 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3043 "<== nxge_map_rxdma: buf not allocated")); 3044 return (NXGE_ERROR); 3045 } 3046 } 3047 3048 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3049 return (NXGE_ERROR); 3050 3051 /* 3052 * Timeout should be set based on the system clock divider. 3053 * The following timeout value of 1 assumes that the 3054 * granularity (1000) is 3 microseconds running at 300MHz. 3055 */ 3056 3057 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 3058 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 3059 3060 /* 3061 * Map descriptors from the buffer polls for each dma channel. 3062 */ 3063 3064 /* 3065 * Set up and prepare buffer blocks, descriptors 3066 * and mailbox. 
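 *
 * In outline (an informal restatement of the statements below, using
 * the same names):
 *
 *	data	= &rx_buf_pool_p->dma_buf_pool_p[channel];	buffers
 *	control	= &rx_cntl_pool_p->dma_buf_pool_p[channel];	descriptors
 *	chunks	= rx_buf_pool_p->num_chunks[channel];
 *	nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
 *	    chunks, control, rcr_ring, mailbox);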
3067 */ 3068 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3069 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3070 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3071 3072 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3073 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3074 3075 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3076 3077 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3078 chunks, control, rcr_ring, mailbox); 3079 if (status != NXGE_OK) { 3080 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3081 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3082 "returned 0x%x", 3083 channel, status)); 3084 return (status); 3085 } 3086 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3087 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3088 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3089 &nxgep->statsp->rdc_stats[channel]; 3090 3091 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3092 if (!isLDOMguest(nxgep)) 3093 nxge_rdc_hvio_setup(nxgep, channel); 3094 #endif 3095 3096 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3097 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3098 3099 return (status); 3100 } 3101 3102 static void 3103 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3104 { 3105 rx_rbr_ring_t *rbr_ring; 3106 rx_rcr_ring_t *rcr_ring; 3107 rx_mbox_t *mailbox; 3108 3109 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3110 3111 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3112 !nxgep->rx_mbox_areas_p) 3113 return; 3114 3115 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3116 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3117 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3118 3119 if (!rbr_ring || !rcr_ring || !mailbox) 3120 return; 3121 3122 (void) nxge_unmap_rxdma_channel( 3123 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3124 3125 nxge_free_rxb(nxgep, channel); 3126 3127 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3128 } 3129 3130 nxge_status_t 3131 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3132 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3133 uint32_t num_chunks, 3134 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3135 p_rx_mbox_t *rx_mbox_p) 3136 { 3137 int status = NXGE_OK; 3138 3139 /* 3140 * Set up and prepare buffer blocks, descriptors 3141 * and mailbox. 3142 */ 3143 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3144 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3145 /* 3146 * Receive buffer blocks 3147 */ 3148 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3149 dma_buf_p, rbr_p, num_chunks); 3150 if (status != NXGE_OK) { 3151 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3152 "==> nxge_map_rxdma_channel (channel %d): " 3153 "map buffer failed 0x%x", channel, status)); 3154 goto nxge_map_rxdma_channel_exit; 3155 } 3156 3157 /* 3158 * Receive block ring, completion ring and mailbox. 
3159 */ 3160 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3161 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3162 if (status != NXGE_OK) { 3163 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3164 "==> nxge_map_rxdma_channel (channel %d): " 3165 "map config failed 0x%x", channel, status)); 3166 goto nxge_map_rxdma_channel_fail2; 3167 } 3168 3169 goto nxge_map_rxdma_channel_exit; 3170 3171 nxge_map_rxdma_channel_fail3: 3172 /* Free rbr, rcr */ 3173 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3174 "==> nxge_map_rxdma_channel: free rbr/rcr " 3175 "(status 0x%x channel %d)", 3176 status, channel)); 3177 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3178 *rcr_p, *rx_mbox_p); 3179 3180 nxge_map_rxdma_channel_fail2: 3181 /* Free buffer blocks */ 3182 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3183 "==> nxge_map_rxdma_channel: free rx buffers" 3184 "(nxgep 0x%x status 0x%x channel %d)", 3185 nxgep, status, channel)); 3186 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3187 3188 status = NXGE_ERROR; 3189 3190 nxge_map_rxdma_channel_exit: 3191 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3192 "<== nxge_map_rxdma_channel: " 3193 "(nxgep 0x%x status 0x%x channel %d)", 3194 nxgep, status, channel)); 3195 3196 return (status); 3197 } 3198 3199 /*ARGSUSED*/ 3200 static void 3201 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3202 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3203 { 3204 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3205 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3206 3207 /* 3208 * unmap receive block ring, completion ring and mailbox. 3209 */ 3210 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3211 rcr_p, rx_mbox_p); 3212 3213 /* unmap buffer blocks */ 3214 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3215 3216 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3217 } 3218 3219 /*ARGSUSED*/ 3220 static nxge_status_t 3221 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3222 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3223 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3224 { 3225 p_rx_rbr_ring_t rbrp; 3226 p_rx_rcr_ring_t rcrp; 3227 p_rx_mbox_t mboxp; 3228 p_nxge_dma_common_t cntl_dmap; 3229 p_nxge_dma_common_t dmap; 3230 p_rx_msg_t *rx_msg_ring; 3231 p_rx_msg_t rx_msg_p; 3232 p_rbr_cfig_a_t rcfga_p; 3233 p_rbr_cfig_b_t rcfgb_p; 3234 p_rcrcfig_a_t cfga_p; 3235 p_rcrcfig_b_t cfgb_p; 3236 p_rxdma_cfig1_t cfig1_p; 3237 p_rxdma_cfig2_t cfig2_p; 3238 p_rbr_kick_t kick_p; 3239 uint32_t dmaaddrp; 3240 uint32_t *rbr_vaddrp; 3241 uint32_t bkaddr; 3242 nxge_status_t status = NXGE_OK; 3243 int i; 3244 uint32_t nxge_port_rcr_size; 3245 3246 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3247 "==> nxge_map_rxdma_channel_cfg_ring")); 3248 3249 cntl_dmap = *dma_cntl_p; 3250 3251 /* Map in the receive block ring */ 3252 rbrp = *rbr_p; 3253 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3254 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3255 /* 3256 * Zero out buffer block ring descriptors. 
3257 */ 3258 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3259 3260 rcfga_p = &(rbrp->rbr_cfga); 3261 rcfgb_p = &(rbrp->rbr_cfgb); 3262 kick_p = &(rbrp->rbr_kick); 3263 rcfga_p->value = 0; 3264 rcfgb_p->value = 0; 3265 kick_p->value = 0; 3266 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3267 rcfga_p->value = (rbrp->rbr_addr & 3268 (RBR_CFIG_A_STDADDR_MASK | 3269 RBR_CFIG_A_STDADDR_BASE_MASK)); 3270 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3271 3272 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3273 rcfgb_p->bits.ldw.vld0 = 1; 3274 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3275 rcfgb_p->bits.ldw.vld1 = 1; 3276 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3277 rcfgb_p->bits.ldw.vld2 = 1; 3278 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3279 3280 /* 3281 * For each buffer block, enter receive block address to the ring. 3282 */ 3283 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3284 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3285 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3286 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3287 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3288 3289 rx_msg_ring = rbrp->rx_msg_ring; 3290 for (i = 0; i < rbrp->tnblocks; i++) { 3291 rx_msg_p = rx_msg_ring[i]; 3292 rx_msg_p->nxgep = nxgep; 3293 rx_msg_p->rx_rbr_p = rbrp; 3294 bkaddr = (uint32_t) 3295 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3296 >> RBR_BKADDR_SHIFT)); 3297 rx_msg_p->free = B_FALSE; 3298 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3299 3300 *rbr_vaddrp++ = bkaddr; 3301 } 3302 3303 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3304 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3305 3306 rbrp->rbr_rd_index = 0; 3307 3308 rbrp->rbr_consumed = 0; 3309 rbrp->rbr_use_bcopy = B_TRUE; 3310 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3311 /* 3312 * Do bcopy on packets greater than bcopy size once 3313 * the lo threshold is reached. 3314 * This lo threshold should be less than the hi threshold. 3315 * 3316 * Do bcopy on every packet once the hi threshold is reached. 
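 *
 * The thresholds themselves are derived from the tunables as a fraction
 * of the ring size (see the two switch statements below):
 *
 *	rbr_threshold_hi = rbb_max * nxge_rx_threshold_hi /
 *	    NXGE_RX_BCOPY_SCALE;
 *	rbr_threshold_lo = rbb_max * nxge_rx_threshold_lo /
 *	    NXGE_RX_BCOPY_SCALE;
 *
 * For instance, with an rbb_max of 4096, a scale of 8 and a "hi"
 * setting of NXGE_RX_COPY_6, rbr_threshold_hi works out to 3072
 * consumed buffers.  (These numbers are only an example of the
 * arithmetic, not values taken from this driver's defaults.)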
3317 */ 3318 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3319 /* default it to use hi */ 3320 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3321 } 3322 3323 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3324 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3325 } 3326 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3327 3328 switch (nxge_rx_threshold_hi) { 3329 default: 3330 case NXGE_RX_COPY_NONE: 3331 /* Do not do bcopy at all */ 3332 rbrp->rbr_use_bcopy = B_FALSE; 3333 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3334 break; 3335 3336 case NXGE_RX_COPY_1: 3337 case NXGE_RX_COPY_2: 3338 case NXGE_RX_COPY_3: 3339 case NXGE_RX_COPY_4: 3340 case NXGE_RX_COPY_5: 3341 case NXGE_RX_COPY_6: 3342 case NXGE_RX_COPY_7: 3343 rbrp->rbr_threshold_hi = 3344 rbrp->rbb_max * 3345 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3346 break; 3347 3348 case NXGE_RX_COPY_ALL: 3349 rbrp->rbr_threshold_hi = 0; 3350 break; 3351 } 3352 3353 switch (nxge_rx_threshold_lo) { 3354 default: 3355 case NXGE_RX_COPY_NONE: 3356 /* Do not do bcopy at all */ 3357 if (rbrp->rbr_use_bcopy) { 3358 rbrp->rbr_use_bcopy = B_FALSE; 3359 } 3360 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3361 break; 3362 3363 case NXGE_RX_COPY_1: 3364 case NXGE_RX_COPY_2: 3365 case NXGE_RX_COPY_3: 3366 case NXGE_RX_COPY_4: 3367 case NXGE_RX_COPY_5: 3368 case NXGE_RX_COPY_6: 3369 case NXGE_RX_COPY_7: 3370 rbrp->rbr_threshold_lo = 3371 rbrp->rbb_max * 3372 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3373 break; 3374 3375 case NXGE_RX_COPY_ALL: 3376 rbrp->rbr_threshold_lo = 0; 3377 break; 3378 } 3379 3380 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3381 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3382 "rbb_max %d " 3383 "rbrp->rbr_bufsize_type %d " 3384 "rbb_threshold_hi %d " 3385 "rbb_threshold_lo %d", 3386 dma_channel, 3387 rbrp->rbb_max, 3388 rbrp->rbr_bufsize_type, 3389 rbrp->rbr_threshold_hi, 3390 rbrp->rbr_threshold_lo)); 3391 3392 rbrp->page_valid.value = 0; 3393 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3394 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3395 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3396 rbrp->page_hdl.value = 0; 3397 3398 rbrp->page_valid.bits.ldw.page0 = 1; 3399 rbrp->page_valid.bits.ldw.page1 = 1; 3400 3401 /* Map in the receive completion ring */ 3402 rcrp = (p_rx_rcr_ring_t) 3403 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3404 rcrp->rdc = dma_channel; 3405 3406 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3407 rcrp->comp_size = nxge_port_rcr_size; 3408 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3409 3410 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3411 3412 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3413 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3414 sizeof (rcr_entry_t)); 3415 rcrp->comp_rd_index = 0; 3416 rcrp->comp_wt_index = 0; 3417 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3418 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3419 #if defined(__i386) 3420 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3421 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3422 #else 3423 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3424 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3425 #endif 3426 3427 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3428 (nxge_port_rcr_size - 1); 3429 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3430 (nxge_port_rcr_size - 1); 3431 3432 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3433 "==> nxge_map_rxdma_channel_cfg_ring: " 3434 "channel %d " 3435 "rbr_vaddrp $%p " 3436 "rcr_desc_rd_head_p $%p " 3437 "rcr_desc_rd_head_pp 
$%p " 3438 "rcr_desc_rd_last_p $%p " 3439 "rcr_desc_rd_last_pp $%p ", 3440 dma_channel, 3441 rbr_vaddrp, 3442 rcrp->rcr_desc_rd_head_p, 3443 rcrp->rcr_desc_rd_head_pp, 3444 rcrp->rcr_desc_last_p, 3445 rcrp->rcr_desc_last_pp)); 3446 3447 /* 3448 * Zero out buffer block ring descriptors. 3449 */ 3450 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3451 rcrp->intr_timeout = nxgep->intr_timeout; 3452 rcrp->intr_threshold = nxgep->intr_threshold; 3453 rcrp->full_hdr_flag = B_FALSE; 3454 rcrp->sw_priv_hdr_len = 0; 3455 3456 cfga_p = &(rcrp->rcr_cfga); 3457 cfgb_p = &(rcrp->rcr_cfgb); 3458 cfga_p->value = 0; 3459 cfgb_p->value = 0; 3460 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3461 cfga_p->value = (rcrp->rcr_addr & 3462 (RCRCFIG_A_STADDR_MASK | 3463 RCRCFIG_A_STADDR_BASE_MASK)); 3464 3465 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3466 RCRCFIG_A_LEN_SHIF); 3467 3468 /* 3469 * Timeout should be set based on the system clock divider. 3470 * The following timeout value of 1 assumes that the 3471 * granularity (1000) is 3 microseconds running at 300MHz. 3472 */ 3473 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3474 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3475 cfgb_p->bits.ldw.entout = 1; 3476 3477 /* Map in the mailbox */ 3478 mboxp = (p_rx_mbox_t) 3479 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3480 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3481 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3482 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3483 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3484 cfig1_p->value = cfig2_p->value = 0; 3485 3486 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3487 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3488 "==> nxge_map_rxdma_channel_cfg_ring: " 3489 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3490 dma_channel, cfig1_p->value, cfig2_p->value, 3491 mboxp->mbox_addr)); 3492 3493 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3494 & 0xfff); 3495 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3496 3497 3498 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3499 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3500 RXDMA_CFIG2_MBADDR_L_MASK); 3501 3502 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3503 3504 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3505 "==> nxge_map_rxdma_channel_cfg_ring: " 3506 "channel %d damaddrp $%p " 3507 "cfg1 0x%016llx cfig2 0x%016llx", 3508 dma_channel, dmaaddrp, 3509 cfig1_p->value, cfig2_p->value)); 3510 3511 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3512 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3513 3514 rbrp->rx_rcr_p = rcrp; 3515 rcrp->rx_rbr_p = rbrp; 3516 *rcr_p = rcrp; 3517 *rx_mbox_p = mboxp; 3518 3519 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3520 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3521 3522 return (status); 3523 } 3524 3525 /*ARGSUSED*/ 3526 static void 3527 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3528 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3529 { 3530 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3531 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3532 rcr_p->rdc)); 3533 3534 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3535 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3536 3537 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3538 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3539 } 3540 3541 static nxge_status_t 3542 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3543 p_nxge_dma_common_t *dma_buf_p, 3544 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3545 { 3546 p_rx_rbr_ring_t rbrp; 3547 
p_nxge_dma_common_t dma_bufp, tmp_bufp; 3548 p_rx_msg_t *rx_msg_ring; 3549 p_rx_msg_t rx_msg_p; 3550 p_mblk_t mblk_p; 3551 3552 rxring_info_t *ring_info; 3553 nxge_status_t status = NXGE_OK; 3554 int i, j, index; 3555 uint32_t size, bsize, nblocks, nmsgs; 3556 3557 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3558 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3559 channel)); 3560 3561 dma_bufp = tmp_bufp = *dma_buf_p; 3562 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3563 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3564 "chunks bufp 0x%016llx", 3565 channel, num_chunks, dma_bufp)); 3566 3567 nmsgs = 0; 3568 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3569 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3570 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3571 "bufp 0x%016llx nblocks %d nmsgs %d", 3572 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3573 nmsgs += tmp_bufp->nblocks; 3574 } 3575 if (!nmsgs) { 3576 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3577 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3578 "no msg blocks", 3579 channel)); 3580 status = NXGE_ERROR; 3581 goto nxge_map_rxdma_channel_buf_ring_exit; 3582 } 3583 3584 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3585 3586 size = nmsgs * sizeof (p_rx_msg_t); 3587 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3588 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3589 KM_SLEEP); 3590 3591 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3592 (void *)nxgep->interrupt_cookie); 3593 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3594 (void *)nxgep->interrupt_cookie); 3595 rbrp->rdc = channel; 3596 rbrp->num_blocks = num_chunks; 3597 rbrp->tnblocks = nmsgs; 3598 rbrp->rbb_max = nmsgs; 3599 rbrp->rbr_max_size = nmsgs; 3600 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3601 3602 /* 3603 * Buffer sizes suggested by NIU architect. 3604 * 256, 512 and 2K. 3605 */ 3606 3607 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3608 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3609 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3610 3611 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3612 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3613 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3614 3615 rbrp->block_size = nxgep->rx_default_block_size; 3616 3617 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3618 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3619 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3620 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3621 } else { 3622 if (rbrp->block_size >= 0x2000) { 3623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3624 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3625 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3626 } else { 3627 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3628 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3629 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3630 } 3631 } 3632 3633 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3634 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3635 "actual rbr max %d rbb_max %d nmsgs %d " 3636 "rbrp->block_size %d default_block_size %d " 3637 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3638 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3639 rbrp->block_size, nxgep->rx_default_block_size, 3640 nxge_rbr_size, nxge_rbr_spare_size)); 3641 3642 /* Map in buffers from the buffer pool. 
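 *
 * The ring_info->buffer[] entries filled in below record, per chunk,
 * the DVMA base address, kernel virtual base, chunk size and the index
 * of its first block.  That table is prepared further by
 * nxge_rxbuf_index_info_init() at the end of this routine and is
 * presumably what nxge_rxbuf_pp_to_vp() consults on the receive path,
 * i.e. roughly:
 *
 *	RCR packet buffer address --> (ring_info lookup) -->
 *	    kernel virtual address + rx_msg ring index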
*/ 3643 index = 0; 3644 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3645 bsize = dma_bufp->block_size; 3646 nblocks = dma_bufp->nblocks; 3647 #if defined(__i386) 3648 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3649 #else 3650 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3651 #endif 3652 ring_info->buffer[i].buf_index = i; 3653 ring_info->buffer[i].buf_size = dma_bufp->alength; 3654 ring_info->buffer[i].start_index = index; 3655 #if defined(__i386) 3656 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3657 #else 3658 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3659 #endif 3660 3661 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3662 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3663 "chunk %d" 3664 " nblocks %d chunk_size %x block_size 0x%x " 3665 "dma_bufp $%p", channel, i, 3666 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3667 dma_bufp)); 3668 3669 for (j = 0; j < nblocks; j++) { 3670 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3671 dma_bufp)) == NULL) { 3672 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3673 "allocb failed (index %d i %d j %d)", 3674 index, i, j)); 3675 goto nxge_map_rxdma_channel_buf_ring_fail1; 3676 } 3677 rx_msg_ring[index] = rx_msg_p; 3678 rx_msg_p->block_index = index; 3679 rx_msg_p->shifted_addr = (uint32_t) 3680 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3681 RBR_BKADDR_SHIFT)); 3682 3683 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3684 "index %d j %d rx_msg_p $%p mblk %p", 3685 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3686 3687 mblk_p = rx_msg_p->rx_mblk_p; 3688 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3689 3690 rbrp->rbr_ref_cnt++; 3691 index++; 3692 rx_msg_p->buf_dma.dma_channel = channel; 3693 } 3694 3695 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3696 if (dma_bufp->contig_alloc_type) { 3697 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3698 } 3699 3700 if (dma_bufp->kmem_alloc_type) { 3701 rbrp->rbr_alloc_type = KMEM_ALLOC; 3702 } 3703 3704 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3705 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3706 "chunk %d" 3707 " nblocks %d chunk_size %x block_size 0x%x " 3708 "dma_bufp $%p", 3709 channel, i, 3710 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3711 dma_bufp)); 3712 } 3713 if (i < rbrp->num_blocks) { 3714 goto nxge_map_rxdma_channel_buf_ring_fail1; 3715 } 3716 3717 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3718 "nxge_map_rxdma_channel_buf_ring: done buf init " 3719 "channel %d msg block entries %d", 3720 channel, index)); 3721 ring_info->block_size_mask = bsize - 1; 3722 rbrp->rx_msg_ring = rx_msg_ring; 3723 rbrp->dma_bufp = dma_buf_p; 3724 rbrp->ring_info = ring_info; 3725 3726 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3727 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3728 " nxge_map_rxdma_channel_buf_ring: " 3729 "channel %d done buf info init", channel)); 3730 3731 /* 3732 * Finally, permit nxge_freeb() to call nxge_post_page(). 
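 *
 * rbr_state acts as a small lifecycle flag for this ring (see also
 * nxge_unmap_rxdma_channel_buf_ring() below):
 *
 *	RBR_POSTING	normal operation; nxge_freeb() may repost pages
 *	RBR_UNMAPPING	tear-down in progress; posting must stop
 *	RBR_UNMAPPED	ring torn down; the last nxge_freeb() reference
 *			is allowed to free <rbr_p> itself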
3733 */ 3734 rbrp->rbr_state = RBR_POSTING; 3735 3736 *rbr_p = rbrp; 3737 goto nxge_map_rxdma_channel_buf_ring_exit; 3738 3739 nxge_map_rxdma_channel_buf_ring_fail1: 3740 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3741 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3742 channel, status)); 3743 3744 index--; 3745 for (; index >= 0; index--) { 3746 rx_msg_p = rx_msg_ring[index]; 3747 if (rx_msg_p != NULL) { 3748 freeb(rx_msg_p->rx_mblk_p); 3749 rx_msg_ring[index] = NULL; 3750 } 3751 } 3752 nxge_map_rxdma_channel_buf_ring_fail: 3753 MUTEX_DESTROY(&rbrp->post_lock); 3754 MUTEX_DESTROY(&rbrp->lock); 3755 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3756 KMEM_FREE(rx_msg_ring, size); 3757 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3758 3759 status = NXGE_ERROR; 3760 3761 nxge_map_rxdma_channel_buf_ring_exit: 3762 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3763 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3764 3765 return (status); 3766 } 3767 3768 /*ARGSUSED*/ 3769 static void 3770 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3771 p_rx_rbr_ring_t rbr_p) 3772 { 3773 p_rx_msg_t *rx_msg_ring; 3774 p_rx_msg_t rx_msg_p; 3775 rxring_info_t *ring_info; 3776 int i; 3777 uint32_t size; 3778 #ifdef NXGE_DEBUG 3779 int num_chunks; 3780 #endif 3781 3782 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3783 "==> nxge_unmap_rxdma_channel_buf_ring")); 3784 if (rbr_p == NULL) { 3785 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3786 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3787 return; 3788 } 3789 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3790 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3791 rbr_p->rdc)); 3792 3793 rx_msg_ring = rbr_p->rx_msg_ring; 3794 ring_info = rbr_p->ring_info; 3795 3796 if (rx_msg_ring == NULL || ring_info == NULL) { 3797 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3798 "<== nxge_unmap_rxdma_channel_buf_ring: " 3799 "rx_msg_ring $%p ring_info $%p", 3800 rx_msg_p, ring_info)); 3801 return; 3802 } 3803 3804 #ifdef NXGE_DEBUG 3805 num_chunks = rbr_p->num_blocks; 3806 #endif 3807 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3808 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3809 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3810 "tnblocks %d (max %d) size ptrs %d ", 3811 rbr_p->rdc, num_chunks, 3812 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3813 3814 for (i = 0; i < rbr_p->tnblocks; i++) { 3815 rx_msg_p = rx_msg_ring[i]; 3816 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3817 " nxge_unmap_rxdma_channel_buf_ring: " 3818 "rx_msg_p $%p", 3819 rx_msg_p)); 3820 if (rx_msg_p != NULL) { 3821 freeb(rx_msg_p->rx_mblk_p); 3822 rx_msg_ring[i] = NULL; 3823 } 3824 } 3825 3826 /* 3827 * We no longer may use the mutex <post_lock>. By setting 3828 * <rbr_state> to anything but POSTING, we prevent 3829 * nxge_post_page() from accessing a dead mutex. 3830 */ 3831 rbr_p->rbr_state = RBR_UNMAPPING; 3832 MUTEX_DESTROY(&rbr_p->post_lock); 3833 3834 MUTEX_DESTROY(&rbr_p->lock); 3835 3836 if (rbr_p->rbr_ref_cnt == 0) { 3837 /* 3838 * This is the normal state of affairs. 3839 * Need to free the following buffers: 3840 * - data buffers 3841 * - rx_msg ring 3842 * - ring_info 3843 * - rbr ring 3844 */ 3845 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3846 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 3847 nxge_rxdma_databuf_free(rbr_p); 3848 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3849 KMEM_FREE(rx_msg_ring, size); 3850 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3851 } else { 3852 /* 3853 * Some of our buffers are still being used. 3854 * Therefore, tell nxge_freeb() this ring is 3855 * unmapped, so it may free <rbr_p> for us. 
3856 */ 3857 rbr_p->rbr_state = RBR_UNMAPPED; 3858 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3859 "unmap_rxdma_buf_ring: %d %s outstanding.", 3860 rbr_p->rbr_ref_cnt, 3861 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 3862 } 3863 3864 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3865 "<== nxge_unmap_rxdma_channel_buf_ring")); 3866 } 3867 3868 /* 3869 * nxge_rxdma_hw_start_common 3870 * 3871 * Arguments: 3872 * nxgep 3873 * 3874 * Notes: 3875 * 3876 * NPI/NXGE function calls: 3877 * nxge_init_fzc_rx_common(); 3878 * nxge_init_fzc_rxdma_port(); 3879 * 3880 * Registers accessed: 3881 * 3882 * Context: 3883 * Service domain 3884 */ 3885 static nxge_status_t 3886 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3887 { 3888 nxge_status_t status = NXGE_OK; 3889 3890 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3891 3892 /* 3893 * Load the sharable parameters by writing to the 3894 * function zero control registers. These FZC registers 3895 * should be initialized only once for the entire chip. 3896 */ 3897 (void) nxge_init_fzc_rx_common(nxgep); 3898 3899 /* 3900 * Initialize the RXDMA port specific FZC control configurations. 3901 * These FZC registers are pertaining to each port. 3902 */ 3903 (void) nxge_init_fzc_rxdma_port(nxgep); 3904 3905 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3906 3907 return (status); 3908 } 3909 3910 static nxge_status_t 3911 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 3912 { 3913 int i, ndmas; 3914 p_rx_rbr_rings_t rx_rbr_rings; 3915 p_rx_rbr_ring_t *rbr_rings; 3916 p_rx_rcr_rings_t rx_rcr_rings; 3917 p_rx_rcr_ring_t *rcr_rings; 3918 p_rx_mbox_areas_t rx_mbox_areas_p; 3919 p_rx_mbox_t *rx_mbox_p; 3920 nxge_status_t status = NXGE_OK; 3921 3922 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3923 3924 rx_rbr_rings = nxgep->rx_rbr_rings; 3925 rx_rcr_rings = nxgep->rx_rcr_rings; 3926 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3927 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3928 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3929 return (NXGE_ERROR); 3930 } 3931 ndmas = rx_rbr_rings->ndmas; 3932 if (ndmas == 0) { 3933 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3934 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3935 return (NXGE_ERROR); 3936 } 3937 3938 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3939 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3940 3941 rbr_rings = rx_rbr_rings->rbr_rings; 3942 rcr_rings = rx_rcr_rings->rcr_rings; 3943 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3944 if (rx_mbox_areas_p) { 3945 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3946 } 3947 3948 i = channel; 3949 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3950 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3951 ndmas, channel)); 3952 status = nxge_rxdma_start_channel(nxgep, channel, 3953 (p_rx_rbr_ring_t)rbr_rings[i], 3954 (p_rx_rcr_ring_t)rcr_rings[i], 3955 (p_rx_mbox_t)rx_mbox_p[i]); 3956 if (status != NXGE_OK) { 3957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3958 "==> nxge_rxdma_hw_start: disable " 3959 "(status 0x%x channel %d)", status, channel)); 3960 return (status); 3961 } 3962 3963 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3964 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3965 rx_rbr_rings, rx_rcr_rings)); 3966 3967 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3968 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3969 3970 return (status); 3971 } 3972 3973 static void 3974 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 3975 { 3976 p_rx_rbr_rings_t rx_rbr_rings; 3977 p_rx_rcr_rings_t rx_rcr_rings; 3978 3979 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
static void
nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
{
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_hw_stop: NULL ring pointers"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_hw_stop(channel %d)", channel));
	(void) nxge_rxdma_stop_channel(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
	    rx_rbr_rings, rx_rcr_rings));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
}

static nxge_status_t
nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	/* Reset RXDMA channel, but not if you're a guest. */
	if (!isLDOMguest(nxgep)) {
		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
		if (rs != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rxdma_start_channel: "
			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
			    channel, rs));
			return (NXGE_ERROR | rs);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: reset done: channel %d",
		    channel));
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (isLDOMguest(nxgep))
		(void) nxge_rdc_lp_conf(nxgep, channel);
#endif

	/*
	 * Initialize the RXDMA channel specific FZC control
	 * configurations. These FZC registers pertain to each
	 * RX channel (logical pages).
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rxdma_start_channel: "
			    "init fzc rxdma failed (0x%08x channel %d)",
			    status, channel));
			return (status);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: fzc done"));
	}
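	/*
	 * The remainder of channel start-up, in order: program the
	 * interrupt event masks, seed the control/status register
	 * (mex, rcrthres, rcrto, rbr_empty), then load the
	 * descriptors and mailbox and enable the channel via
	 * nxge_enable_rxdma_channel().
	 */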
	/* Set up the interrupt event masks. */
	ent_mask.value = 0;
	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed "
		    "(0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: "
	    "event done: channel %d (mask 0x%016llx)",
	    channel, ent_mask.value));

	/* Initialize the receive DMA control and status register */
	cs.value = 0;
	cs.bits.hdw.mex = 1;
	cs.bits.hdw.rcrthres = 1;
	cs.bits.hdw.rcrto = 1;
	cs.bits.hdw.rbr_empty = 1;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma control register failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	/*
	 * Load the RXDMA descriptors, buffers and mailbox,
	 * then initialize and enable the receive DMA channel.
	 */
	status = nxge_enable_rxdma_channel(nxgep,
	    channel, rbr_p, rcr_p, mbox_p);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_start_channel: "
		    " enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));
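	/*
	 * In a guest (LDoms) domain the channel's receive interrupt is
	 * registered through the hybrid I/O support code below rather
	 * than programmed directly; see nxge_hio_intr_add().
	 */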
	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel. */
		status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_rxdma_start_channel: "
			    " nxge_hio_intr_add failed (0x%08x channel %d)",
			    status, channel));
		}
	}

	ent_mask.value = 0;
	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));

	return (NXGE_OK);
}

static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	if (!isLDOMguest(nxgep)) {
		/*
		 * Stop RxMAC = A.9.2.6
		 */
		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: "
			    "Failed to disable RxMAC"));
		}

		/*
		 * Drain IPP Port = A.9.3.6
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/*
	 * Initialize the receive DMA control and status register
	 */
	cs.value = 0;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));
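	/*
	 * At this point the channel has been reset, all of its events
	 * are masked and the control/status register is back to its
	 * power-on default; what remains is to disable the channel
	 * itself and, on a service domain, re-enable the RxMAC.
	 */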
4232 */ 4233 status = nxge_disable_rxdma_channel(nxgep, channel); 4234 if (status != NXGE_OK) { 4235 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4236 " nxge_rxdma_stop_channel: " 4237 " init enable rxdma failed (0x%08x channel %d)", 4238 status, channel)); 4239 return (status); 4240 } 4241 4242 if (!isLDOMguest(nxgep)) { 4243 /* 4244 * Enable RxMAC = A.9.2.10 4245 */ 4246 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4247 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4248 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4249 } 4250 } 4251 4252 NXGE_DEBUG_MSG((nxgep, 4253 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4254 4255 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4256 4257 return (NXGE_OK); 4258 } 4259 4260 nxge_status_t 4261 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4262 { 4263 npi_handle_t handle; 4264 p_nxge_rdc_sys_stats_t statsp; 4265 rx_ctl_dat_fifo_stat_t stat; 4266 uint32_t zcp_err_status; 4267 uint32_t ipp_err_status; 4268 nxge_status_t status = NXGE_OK; 4269 npi_status_t rs = NPI_SUCCESS; 4270 boolean_t my_err = B_FALSE; 4271 4272 handle = nxgep->npi_handle; 4273 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4274 4275 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4276 4277 if (rs != NPI_SUCCESS) 4278 return (NXGE_ERROR | rs); 4279 4280 if (stat.bits.ldw.id_mismatch) { 4281 statsp->id_mismatch++; 4282 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4283 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4284 /* Global fatal error encountered */ 4285 } 4286 4287 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4288 switch (nxgep->mac.portnum) { 4289 case 0: 4290 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4291 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4292 my_err = B_TRUE; 4293 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4294 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4295 } 4296 break; 4297 case 1: 4298 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4299 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4300 my_err = B_TRUE; 4301 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4302 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4303 } 4304 break; 4305 case 2: 4306 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4307 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4308 my_err = B_TRUE; 4309 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4310 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4311 } 4312 break; 4313 case 3: 4314 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4315 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4316 my_err = B_TRUE; 4317 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4318 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4319 } 4320 break; 4321 default: 4322 return (NXGE_ERROR); 4323 } 4324 } 4325 4326 if (my_err) { 4327 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4328 zcp_err_status); 4329 if (status != NXGE_OK) 4330 return (status); 4331 } 4332 4333 return (NXGE_OK); 4334 } 4335 4336 static nxge_status_t 4337 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4338 uint32_t zcp_status) 4339 { 4340 boolean_t rxport_fatal = B_FALSE; 4341 p_nxge_rdc_sys_stats_t statsp; 4342 nxge_status_t status = NXGE_OK; 4343 uint8_t portn; 4344 4345 portn = nxgep->mac.portnum; 4346 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4347 4348 if (ipp_status & (0x1 << portn)) { 4349 statsp->ipp_eop_err++; 4350 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4351 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4352 rxport_fatal = B_TRUE; 4353 } 4354 4355 if (zcp_status & (0x1 << 
static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
    uint32_t zcp_status)
{
	boolean_t		rxport_fatal = B_FALSE;
	p_nxge_rdc_sys_stats_t	statsp;
	nxge_status_t		status = NXGE_OK;
	uint8_t			portn;

	portn = nxgep->mac.portnum;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	if (ipp_status & (0x1 << portn)) {
		statsp->ipp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (zcp_status & (0x1 << portn)) {
		statsp->zcp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_handle_port_error: "
		    " fatal error on Port #%d\n", portn));
		status = nxge_rx_port_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	return (status);
}

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	int			ring_idx;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the DMA channel and wait for the stop-done bit.
	 * If the stop-done bit is not set, report an error.
	 */

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];

	MUTEX_ENTER(&rcrp->lock);
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);
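	/*
	 * Zero the completion ring contents and walk the buffer ring:
	 * any buffer whose usage counts show it has been fully consumed
	 * is marked free so that it can be re-posted when the channel
	 * is restarted below.
	 */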
	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* Buffer can be re-posted */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored", channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));

	return (NXGE_OK);
fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));

	return (NXGE_ERROR | rs);
}

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
	nxge_grp_set_t	*set = &nxgep->rx_set;
	nxge_status_t	status = NXGE_OK;
	int		rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));

	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DELAY(1000);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			if (nxge_rxdma_fatal_err_recover(nxgep, rdc)
			    != NXGE_OK) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "Could not recover channel %d", rdc));
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));

	/* Reset IPP */
	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

	/* Reset RxMAC */
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

	/* Re-Initialize IPP */
	if (nxge_ipp_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
	/* Re-enable RxMAC */
	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to enable RxMAC"));
		goto fail;
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status);
}

void
nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
{
	rx_dma_ctl_stat_t	cs;
	rx_ctl_dat_fifo_stat_t	cdfs;

	switch (err_id) {
	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
	case NXGE_FM_EREPORT_RDMC_RCRINCON:
	case NXGE_FM_EREPORT_RDMC_RCRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, &cs.value);
		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
			cs.bits.hdw.rcr_ack_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
			cs.bits.hdw.dc_fifo_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
			cs.bits.hdw.rcr_sha_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
			cs.bits.hdw.rbr_pre_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
			cs.bits.hdw.rbr_tmout = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
			cs.bits.hdw.rsp_cnt_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
			cs.bits.hdw.byte_en_bus = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
			cs.bits.hdw.rsp_dat_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
			cs.bits.hdw.config_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
			cs.bits.hdw.rcrincon = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
			cs.bits.hdw.rcrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
			cs.bits.hdw.rbrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
			cs.bits.hdw.rbrlogpage = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
			cs.bits.hdw.cfiglogpage = 1;
#if defined(__i386)
		cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
#else
		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
#endif
		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, cs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
		cdfs.value = 0;
		if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
#if defined(__i386)
		cmn_err(CE_NOTE,
		    "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
#else
		cmn_err(CE_NOTE,
		    "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
#endif
		NXGE_REG_WR64(nxgep->npi_handle,
		    RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
		break;
	case NXGE_FM_EREPORT_RDMC_RCR_ERR:
		break;
	}
}

static void
nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
{
	rxring_info_t	*ring_info;
	int		index;
	uint32_t	chunk_size;
	uint64_t	kaddr;
	uint_t		num_blocks;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));

	if (rbr_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
		return;
	}

	if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: DDI"));
		return;
	}

	ring_info = rbr_p->ring_info;
	if (ring_info == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: NULL ring info"));
		return;
	}
	num_blocks = rbr_p->num_blocks;
	for (index = 0; index < num_blocks; index++) {
		kaddr = ring_info->buffer[index].kaddr;
		chunk_size = ring_info->buffer[index].buf_size;
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_rxdma_databuf_free: free chunk %d "
		    "kaddrp $%p chunk size %d",
		    index, kaddr, chunk_size));
		if (kaddr == NULL)
			continue;
		nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
		ring_info->buffer[index].kaddr = NULL;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
}

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
extern void contig_mem_free(void *, size_t);
#endif

void
nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
{
	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));

	if (kaddr == NULL || !buf_size) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_free_buf: invalid kaddr $%p size to free %d",
		    kaddr, buf_size));
		return;
	}

	switch (alloc_type) {
	case KMEM_ALLOC:
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_free_buf: freeing kmem $%p size %d",
		    kaddr, buf_size));
#if defined(__i386)
		KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
#else
		KMEM_FREE((void *)kaddr, buf_size);
#endif
		break;

#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case CONTIG_MEM_ALLOC:
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
		    kaddr, buf_size));
		contig_mem_free((void *)kaddr, buf_size);
		break;
#endif

	default:
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_free_buf: unsupported alloc type %d",
		    alloc_type));
		return;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
}