1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/nxge/nxge_impl.h> 27 #include <sys/nxge/nxge_rxdma.h> 28 #include <sys/nxge/nxge_hio.h> 29 30 #if !defined(_BIG_ENDIAN) 31 #include <npi_rx_rd32.h> 32 #endif 33 #include <npi_rx_rd64.h> 34 #include <npi_rx_wr64.h> 35 36 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 37 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 38 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 39 (rdc + nxgep->pt_config.hw_config.start_rdc) 40 41 /* 42 * Globals: tunable parameters (/etc/system or adb) 43 * 44 */ 45 extern uint32_t nxge_rbr_size; 46 extern uint32_t nxge_rcr_size; 47 extern uint32_t nxge_rbr_spare_size; 48 49 extern uint32_t nxge_mblks_pending; 50 51 /* 52 * Tunable to reduce the amount of time spent in the 53 * ISR doing Rx Processing. 54 */ 55 extern uint32_t nxge_max_rx_pkts; 56 boolean_t nxge_jumbo_enable; 57 58 /* 59 * Tunables to manage the receive buffer blocks. 60 * 61 * nxge_rx_threshold_hi: copy all buffers. 62 * nxge_rx_bcopy_size_type: receive buffer block size type. 63 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
64 */ 65 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 66 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 67 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 68 69 extern uint32_t nxge_cksum_offload; 70 71 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 72 static void nxge_unmap_rxdma(p_nxge_t, int); 73 74 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 75 76 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 77 static void nxge_rxdma_hw_stop(p_nxge_t, int); 78 79 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 80 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 81 uint32_t, 82 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 83 p_rx_mbox_t *); 84 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 85 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 86 87 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 88 uint16_t, 89 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 90 p_rx_rcr_ring_t *, p_rx_mbox_t *); 91 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 92 p_rx_rcr_ring_t, p_rx_mbox_t); 93 94 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 95 uint16_t, 96 p_nxge_dma_common_t *, 97 p_rx_rbr_ring_t *, uint32_t); 98 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 99 p_rx_rbr_ring_t); 100 101 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 102 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 103 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 104 105 static mblk_t * 106 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 107 108 static void nxge_receive_packet(p_nxge_t, 109 p_rx_rcr_ring_t, 110 p_rcr_entry_t, 111 boolean_t *, 112 mblk_t **, mblk_t **); 113 114 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 115 116 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 117 static void nxge_freeb(p_rx_msg_t); 118 static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 120 121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 122 uint32_t, uint32_t); 123 124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 125 p_rx_rbr_ring_t); 126 127 128 static nxge_status_t 129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 130 131 nxge_status_t 132 nxge_rx_port_fatal_err_recover(p_nxge_t); 133 134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 135 136 nxge_status_t 137 nxge_init_rxdma_channels(p_nxge_t nxgep) 138 { 139 nxge_grp_set_t *set = &nxgep->rx_set; 140 int i, count; 141 142 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 143 144 if (!isLDOMguest(nxgep)) { 145 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 146 cmn_err(CE_NOTE, "hw_start_common"); 147 return (NXGE_ERROR); 148 } 149 } 150 151 /* 152 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 153 * We only have 8 hardware RDC tables, but we may have 154 * up to 16 logical (software-defined) groups of RDCS, 155 * if we make use of layer 3 & 4 hardware classification. 
156 */ 157 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 158 if ((1 << i) & set->lg.map) { 159 int channel; 160 nxge_grp_t *group = set->group[i]; 161 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 162 if ((1 << channel) & group->map) { 163 if ((nxge_grp_dc_add(nxgep, 164 group, VP_BOUND_RX, channel))) 165 return (NXGE_ERROR); 166 } 167 } 168 } 169 if (++count == set->lg.count) 170 break; 171 } 172 173 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 174 175 return (NXGE_OK); 176 } 177 178 nxge_status_t 179 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 180 { 181 nxge_status_t status; 182 183 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 184 185 status = nxge_map_rxdma(nxge, channel); 186 if (status != NXGE_OK) { 187 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 188 "<== nxge_init_rxdma: status 0x%x", status)); 189 return (status); 190 } 191 192 status = nxge_rxdma_hw_start(nxge, channel); 193 if (status != NXGE_OK) { 194 nxge_unmap_rxdma(nxge, channel); 195 } 196 197 if (!nxge->statsp->rdc_ksp[channel]) 198 nxge_setup_rdc_kstats(nxge, channel); 199 200 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 201 "<== nxge_init_rxdma_channel: status 0x%x", status)); 202 203 return (status); 204 } 205 206 void 207 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 208 { 209 nxge_grp_set_t *set = &nxgep->rx_set; 210 int rdc; 211 212 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 213 214 if (set->owned.map == 0) { 215 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 216 "nxge_uninit_rxdma_channels: no channels")); 217 return; 218 } 219 220 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 221 if ((1 << rdc) & set->owned.map) { 222 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 223 } 224 } 225 226 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 227 } 228 229 void 230 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 231 { 232 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 233 234 if (nxgep->statsp->rdc_ksp[channel]) { 235 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 236 nxgep->statsp->rdc_ksp[channel] = 0; 237 } 238 239 nxge_rxdma_hw_stop(nxgep, channel); 240 nxge_unmap_rxdma(nxgep, channel); 241 242 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 243 } 244 245 nxge_status_t 246 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 247 { 248 npi_handle_t handle; 249 npi_status_t rs = NPI_SUCCESS; 250 nxge_status_t status = NXGE_OK; 251 252 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 253 254 handle = NXGE_DEV_NPI_HANDLE(nxgep); 255 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 256 257 if (rs != NPI_SUCCESS) { 258 status = NXGE_ERROR | rs; 259 } 260 261 return (status); 262 } 263 264 void 265 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 266 { 267 nxge_grp_set_t *set = &nxgep->rx_set; 268 int rdc; 269 270 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 271 272 if (!isLDOMguest(nxgep)) { 273 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 274 (void) npi_rxdma_dump_fzc_regs(handle); 275 } 276 277 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 278 NXGE_DEBUG_MSG((nxgep, TX_CTL, 279 "nxge_rxdma_regs_dump_channels: " 280 "NULL ring pointer(s)")); 281 return; 282 } 283 284 if (set->owned.map == 0) { 285 NXGE_DEBUG_MSG((nxgep, RX_CTL, 286 "nxge_rxdma_regs_dump_channels: no channels")); 287 return; 288 } 289 290 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 291 if ((1 << rdc) & set->owned.map) { 292 rx_rbr_ring_t *ring = 293 
nxgep->rx_rbr_rings->rbr_rings[rdc]; 294 if (ring) { 295 (void) nxge_dump_rxdma_channel(nxgep, rdc); 296 } 297 } 298 } 299 300 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 301 } 302 303 nxge_status_t 304 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 305 { 306 npi_handle_t handle; 307 npi_status_t rs = NPI_SUCCESS; 308 nxge_status_t status = NXGE_OK; 309 310 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 311 312 handle = NXGE_DEV_NPI_HANDLE(nxgep); 313 rs = npi_rxdma_dump_rdc_regs(handle, channel); 314 315 if (rs != NPI_SUCCESS) { 316 status = NXGE_ERROR | rs; 317 } 318 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 319 return (status); 320 } 321 322 nxge_status_t 323 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 324 p_rx_dma_ent_msk_t mask_p) 325 { 326 npi_handle_t handle; 327 npi_status_t rs = NPI_SUCCESS; 328 nxge_status_t status = NXGE_OK; 329 330 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 331 "<== nxge_init_rxdma_channel_event_mask")); 332 333 handle = NXGE_DEV_NPI_HANDLE(nxgep); 334 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 335 if (rs != NPI_SUCCESS) { 336 status = NXGE_ERROR | rs; 337 } 338 339 return (status); 340 } 341 342 nxge_status_t 343 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 344 p_rx_dma_ctl_stat_t cs_p) 345 { 346 npi_handle_t handle; 347 npi_status_t rs = NPI_SUCCESS; 348 nxge_status_t status = NXGE_OK; 349 350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 351 "<== nxge_init_rxdma_channel_cntl_stat")); 352 353 handle = NXGE_DEV_NPI_HANDLE(nxgep); 354 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 355 356 if (rs != NPI_SUCCESS) { 357 status = NXGE_ERROR | rs; 358 } 359 360 return (status); 361 } 362 363 /* 364 * nxge_rxdma_cfg_rdcgrp_default_rdc 365 * 366 * Set the default RDC for an RDC Group (Table) 367 * 368 * Arguments: 369 * nxgep 370 * rdcgrp The group to modify 371 * rdc The new default RDC. 372 * 373 * Notes: 374 * 375 * NPI/NXGE function calls: 376 * npi_rxdma_cfg_rdc_table_default_rdc() 377 * 378 * Registers accessed: 379 * RDC_TBL_REG: FZC_ZCP + 0x10000 380 * 381 * Context: 382 * Service domain 383 */ 384 nxge_status_t 385 nxge_rxdma_cfg_rdcgrp_default_rdc( 386 p_nxge_t nxgep, 387 uint8_t rdcgrp, 388 uint8_t rdc) 389 { 390 npi_handle_t handle; 391 npi_status_t rs = NPI_SUCCESS; 392 p_nxge_dma_pt_cfg_t p_dma_cfgp; 393 p_nxge_rdc_grp_t rdc_grp_p; 394 uint8_t actual_rdcgrp, actual_rdc; 395 396 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 397 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 398 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 399 400 handle = NXGE_DEV_NPI_HANDLE(nxgep); 401 402 /* 403 * This has to be rewritten. Do we even allow this anymore? 
404 */ 405 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 406 RDC_MAP_IN(rdc_grp_p->map, rdc); 407 rdc_grp_p->def_rdc = rdc; 408 409 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 410 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 411 412 rs = npi_rxdma_cfg_rdc_table_default_rdc( 413 handle, actual_rdcgrp, actual_rdc); 414 415 if (rs != NPI_SUCCESS) { 416 return (NXGE_ERROR | rs); 417 } 418 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 419 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 420 return (NXGE_OK); 421 } 422 423 nxge_status_t 424 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 425 { 426 npi_handle_t handle; 427 428 uint8_t actual_rdc; 429 npi_status_t rs = NPI_SUCCESS; 430 431 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 432 " ==> nxge_rxdma_cfg_port_default_rdc")); 433 434 handle = NXGE_DEV_NPI_HANDLE(nxgep); 435 actual_rdc = rdc; /* XXX Hack! */ 436 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 437 438 439 if (rs != NPI_SUCCESS) { 440 return (NXGE_ERROR | rs); 441 } 442 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 443 " <== nxge_rxdma_cfg_port_default_rdc")); 444 445 return (NXGE_OK); 446 } 447 448 nxge_status_t 449 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 450 uint16_t pkts) 451 { 452 npi_status_t rs = NPI_SUCCESS; 453 npi_handle_t handle; 454 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 455 " ==> nxge_rxdma_cfg_rcr_threshold")); 456 handle = NXGE_DEV_NPI_HANDLE(nxgep); 457 458 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 459 460 if (rs != NPI_SUCCESS) { 461 return (NXGE_ERROR | rs); 462 } 463 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 464 return (NXGE_OK); 465 } 466 467 nxge_status_t 468 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 469 uint16_t tout, uint8_t enable) 470 { 471 npi_status_t rs = NPI_SUCCESS; 472 npi_handle_t handle; 473 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 474 handle = NXGE_DEV_NPI_HANDLE(nxgep); 475 if (enable == 0) { 476 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 477 } else { 478 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 479 tout); 480 } 481 482 if (rs != NPI_SUCCESS) { 483 return (NXGE_ERROR | rs); 484 } 485 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 486 return (NXGE_OK); 487 } 488 489 nxge_status_t 490 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 491 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 492 { 493 npi_handle_t handle; 494 rdc_desc_cfg_t rdc_desc; 495 p_rcrcfig_b_t cfgb_p; 496 npi_status_t rs = NPI_SUCCESS; 497 498 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 499 handle = NXGE_DEV_NPI_HANDLE(nxgep); 500 /* 501 * Use configuration data composed at init time. 502 * Write to hardware the receive ring configurations. 
503 */ 504 rdc_desc.mbox_enable = 1; 505 rdc_desc.mbox_addr = mbox_p->mbox_addr; 506 NXGE_DEBUG_MSG((nxgep, RX_CTL, 507 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 508 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 509 510 rdc_desc.rbr_len = rbr_p->rbb_max; 511 rdc_desc.rbr_addr = rbr_p->rbr_addr; 512 513 switch (nxgep->rx_bksize_code) { 514 case RBR_BKSIZE_4K: 515 rdc_desc.page_size = SIZE_4KB; 516 break; 517 case RBR_BKSIZE_8K: 518 rdc_desc.page_size = SIZE_8KB; 519 break; 520 case RBR_BKSIZE_16K: 521 rdc_desc.page_size = SIZE_16KB; 522 break; 523 case RBR_BKSIZE_32K: 524 rdc_desc.page_size = SIZE_32KB; 525 break; 526 } 527 528 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 529 rdc_desc.valid0 = 1; 530 531 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 532 rdc_desc.valid1 = 1; 533 534 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 535 rdc_desc.valid2 = 1; 536 537 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 538 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 539 540 rdc_desc.rcr_len = rcr_p->comp_size; 541 rdc_desc.rcr_addr = rcr_p->rcr_addr; 542 543 cfgb_p = &(rcr_p->rcr_cfgb); 544 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 545 /* For now, disable this timeout in a guest domain. */ 546 if (isLDOMguest(nxgep)) { 547 rdc_desc.rcr_timeout = 0; 548 rdc_desc.rcr_timeout_enable = 0; 549 } else { 550 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 551 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 552 } 553 554 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 555 "rbr_len qlen %d pagesize code %d rcr_len %d", 556 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 557 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 558 "size 0 %d size 1 %d size 2 %d", 559 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 560 rbr_p->npi_pkt_buf_size2)); 561 562 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 563 if (rs != NPI_SUCCESS) { 564 return (NXGE_ERROR | rs); 565 } 566 567 /* 568 * Enable the timeout and threshold. 569 */ 570 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 571 rdc_desc.rcr_threshold); 572 if (rs != NPI_SUCCESS) { 573 return (NXGE_ERROR | rs); 574 } 575 576 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 577 rdc_desc.rcr_timeout); 578 if (rs != NPI_SUCCESS) { 579 return (NXGE_ERROR | rs); 580 } 581 582 /* Enable the DMA */ 583 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 584 if (rs != NPI_SUCCESS) { 585 return (NXGE_ERROR | rs); 586 } 587 588 /* Kick the DMA engine. 
*/ 589 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 590 /* Clear the rbr empty bit */ 591 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 592 593 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 594 595 return (NXGE_OK); 596 } 597 598 nxge_status_t 599 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 600 { 601 npi_handle_t handle; 602 npi_status_t rs = NPI_SUCCESS; 603 604 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 605 handle = NXGE_DEV_NPI_HANDLE(nxgep); 606 607 /* disable the DMA */ 608 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 609 if (rs != NPI_SUCCESS) { 610 NXGE_DEBUG_MSG((nxgep, RX_CTL, 611 "<== nxge_disable_rxdma_channel:failed (0x%x)", 612 rs)); 613 return (NXGE_ERROR | rs); 614 } 615 616 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 617 return (NXGE_OK); 618 } 619 620 nxge_status_t 621 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 622 { 623 npi_handle_t handle; 624 nxge_status_t status = NXGE_OK; 625 626 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 627 "<== nxge_init_rxdma_channel_rcrflush")); 628 629 handle = NXGE_DEV_NPI_HANDLE(nxgep); 630 npi_rxdma_rdc_rcr_flush(handle, channel); 631 632 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 633 "<== nxge_init_rxdma_channel_rcrflsh")); 634 return (status); 635 636 } 637 638 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 639 640 #define TO_LEFT -1 641 #define TO_RIGHT 1 642 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 643 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 644 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 645 #define NO_HINT 0xffffffff 646 647 /*ARGSUSED*/ 648 nxge_status_t 649 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 650 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 651 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 652 { 653 int bufsize; 654 uint64_t pktbuf_pp; 655 uint64_t dvma_addr; 656 rxring_info_t *ring_info; 657 int base_side, end_side; 658 int r_index, l_index, anchor_index; 659 int found, search_done; 660 uint32_t offset, chunk_size, block_size, page_size_mask; 661 uint32_t chunk_index, block_index, total_index; 662 int max_iterations, iteration; 663 rxbuf_index_info_t *bufinfo; 664 665 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 666 667 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 668 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 669 pkt_buf_addr_pp, 670 pktbufsz_type)); 671 #if defined(__i386) 672 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 673 #else 674 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 675 #endif 676 677 switch (pktbufsz_type) { 678 case 0: 679 bufsize = rbr_p->pkt_buf_size0; 680 break; 681 case 1: 682 bufsize = rbr_p->pkt_buf_size1; 683 break; 684 case 2: 685 bufsize = rbr_p->pkt_buf_size2; 686 break; 687 case RCR_SINGLE_BLOCK: 688 bufsize = 0; 689 anchor_index = 0; 690 break; 691 default: 692 return (NXGE_ERROR); 693 } 694 695 if (rbr_p->num_blocks == 1) { 696 anchor_index = 0; 697 ring_info = rbr_p->ring_info; 698 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 699 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 700 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 701 "buf_pp $%p btype %d anchor_index %d " 702 "bufinfo $%p", 703 pkt_buf_addr_pp, 704 pktbufsz_type, 705 anchor_index, 706 bufinfo)); 707 708 goto found_index; 709 } 710 711 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 712 "==> nxge_rxbuf_pp_to_vp: " 713 "buf_pp $%p btype %d anchor_index %d", 714 pkt_buf_addr_pp, 715 pktbufsz_type, 716 anchor_index)); 717 718 ring_info = rbr_p->ring_info; 719 found = B_FALSE; 720 bufinfo = (rxbuf_index_info_t 
*)ring_info->buffer; 721 iteration = 0; 722 max_iterations = ring_info->max_iterations; 723 /* 724 * First check if this block has been seen 725 * recently. This is indicated by a hint which 726 * is initialized when the first buffer of the block 727 * is seen. The hint is reset when the last buffer of 728 * the block has been processed. 729 * As three block sizes are supported, three hints 730 * are kept. The idea behind the hints is that once 731 * the hardware uses a block for a buffer of that 732 * size, it will use it exclusively for that size 733 * and will use it until it is exhausted. It is assumed 734 * that there would a single block being used for the same 735 * buffer sizes at any given time. 736 */ 737 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 738 anchor_index = ring_info->hint[pktbufsz_type]; 739 dvma_addr = bufinfo[anchor_index].dvma_addr; 740 chunk_size = bufinfo[anchor_index].buf_size; 741 if ((pktbuf_pp >= dvma_addr) && 742 (pktbuf_pp < (dvma_addr + chunk_size))) { 743 found = B_TRUE; 744 /* 745 * check if this is the last buffer in the block 746 * If so, then reset the hint for the size; 747 */ 748 749 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 750 ring_info->hint[pktbufsz_type] = NO_HINT; 751 } 752 } 753 754 if (found == B_FALSE) { 755 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 756 "==> nxge_rxbuf_pp_to_vp: (!found)" 757 "buf_pp $%p btype %d anchor_index %d", 758 pkt_buf_addr_pp, 759 pktbufsz_type, 760 anchor_index)); 761 762 /* 763 * This is the first buffer of the block of this 764 * size. Need to search the whole information 765 * array. 766 * the search algorithm uses a binary tree search 767 * algorithm. It assumes that the information is 768 * already sorted with increasing order 769 * info[0] < info[1] < info[2] .... 
< info[n-1] 770 * where n is the size of the information array 771 */ 772 r_index = rbr_p->num_blocks - 1; 773 l_index = 0; 774 search_done = B_FALSE; 775 anchor_index = MID_INDEX(r_index, l_index); 776 while (search_done == B_FALSE) { 777 if ((r_index == l_index) || 778 (iteration >= max_iterations)) 779 search_done = B_TRUE; 780 end_side = TO_RIGHT; /* to the right */ 781 base_side = TO_LEFT; /* to the left */ 782 /* read the DVMA address information and sort it */ 783 dvma_addr = bufinfo[anchor_index].dvma_addr; 784 chunk_size = bufinfo[anchor_index].buf_size; 785 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 786 "==> nxge_rxbuf_pp_to_vp: (searching)" 787 "buf_pp $%p btype %d " 788 "anchor_index %d chunk_size %d dvmaaddr $%p", 789 pkt_buf_addr_pp, 790 pktbufsz_type, 791 anchor_index, 792 chunk_size, 793 dvma_addr)); 794 795 if (pktbuf_pp >= dvma_addr) 796 base_side = TO_RIGHT; /* to the right */ 797 if (pktbuf_pp < (dvma_addr + chunk_size)) 798 end_side = TO_LEFT; /* to the left */ 799 800 switch (base_side + end_side) { 801 case IN_MIDDLE: 802 /* found */ 803 found = B_TRUE; 804 search_done = B_TRUE; 805 if ((pktbuf_pp + bufsize) < 806 (dvma_addr + chunk_size)) 807 ring_info->hint[pktbufsz_type] = 808 bufinfo[anchor_index].buf_index; 809 break; 810 case BOTH_RIGHT: 811 /* not found: go to the right */ 812 l_index = anchor_index + 1; 813 anchor_index = MID_INDEX(r_index, l_index); 814 break; 815 816 case BOTH_LEFT: 817 /* not found: go to the left */ 818 r_index = anchor_index - 1; 819 anchor_index = MID_INDEX(r_index, l_index); 820 break; 821 default: /* should not come here */ 822 return (NXGE_ERROR); 823 } 824 iteration++; 825 } 826 827 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 828 "==> nxge_rxbuf_pp_to_vp: (search done)" 829 "buf_pp $%p btype %d anchor_index %d", 830 pkt_buf_addr_pp, 831 pktbufsz_type, 832 anchor_index)); 833 } 834 835 if (found == B_FALSE) { 836 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 837 "==> nxge_rxbuf_pp_to_vp: (search failed)" 838 "buf_pp $%p btype %d anchor_index %d", 839 pkt_buf_addr_pp, 840 pktbufsz_type, 841 anchor_index)); 842 return (NXGE_ERROR); 843 } 844 845 found_index: 846 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 847 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 848 "buf_pp $%p btype %d bufsize %d anchor_index %d", 849 pkt_buf_addr_pp, 850 pktbufsz_type, 851 bufsize, 852 anchor_index)); 853 854 /* index of the first block in this chunk */ 855 chunk_index = bufinfo[anchor_index].start_index; 856 dvma_addr = bufinfo[anchor_index].dvma_addr; 857 page_size_mask = ring_info->block_size_mask; 858 859 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 860 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 861 "buf_pp $%p btype %d bufsize %d " 862 "anchor_index %d chunk_index %d dvma $%p", 863 pkt_buf_addr_pp, 864 pktbufsz_type, 865 bufsize, 866 anchor_index, 867 chunk_index, 868 dvma_addr)); 869 870 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 871 block_size = rbr_p->block_size; /* System block(page) size */ 872 873 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 874 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 875 "buf_pp $%p btype %d bufsize %d " 876 "anchor_index %d chunk_index %d dvma $%p " 877 "offset %d block_size %d", 878 pkt_buf_addr_pp, 879 pktbufsz_type, 880 bufsize, 881 anchor_index, 882 chunk_index, 883 dvma_addr, 884 offset, 885 block_size)); 886 887 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 888 889 block_index = (offset / block_size); /* index within chunk */ 890 total_index = chunk_index + block_index; 891 892 893 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 894 "==> nxge_rxbuf_pp_to_vp: " 895 
"total_index %d dvma_addr $%p " 896 "offset %d block_size %d " 897 "block_index %d ", 898 total_index, dvma_addr, 899 offset, block_size, 900 block_index)); 901 #if defined(__i386) 902 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 903 (uint32_t)offset); 904 #else 905 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 906 (uint64_t)offset); 907 #endif 908 909 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 910 "==> nxge_rxbuf_pp_to_vp: " 911 "total_index %d dvma_addr $%p " 912 "offset %d block_size %d " 913 "block_index %d " 914 "*pkt_buf_addr_p $%p", 915 total_index, dvma_addr, 916 offset, block_size, 917 block_index, 918 *pkt_buf_addr_p)); 919 920 921 *msg_index = total_index; 922 *bufoffset = (offset & page_size_mask); 923 924 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 925 "==> nxge_rxbuf_pp_to_vp: get msg index: " 926 "msg_index %d bufoffset_index %d", 927 *msg_index, 928 *bufoffset)); 929 930 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 931 932 return (NXGE_OK); 933 } 934 935 /* 936 * used by quick sort (qsort) function 937 * to perform comparison 938 */ 939 static int 940 nxge_sort_compare(const void *p1, const void *p2) 941 { 942 943 rxbuf_index_info_t *a, *b; 944 945 a = (rxbuf_index_info_t *)p1; 946 b = (rxbuf_index_info_t *)p2; 947 948 if (a->dvma_addr > b->dvma_addr) 949 return (1); 950 if (a->dvma_addr < b->dvma_addr) 951 return (-1); 952 return (0); 953 } 954 955 956 957 /* 958 * grabbed this sort implementation from common/syscall/avl.c 959 * 960 */ 961 /* 962 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 963 * v = Ptr to array/vector of objs 964 * n = # objs in the array 965 * s = size of each obj (must be multiples of a word size) 966 * f = ptr to function to compare two objs 967 * returns (-1 = less than, 0 = equal, 1 = greater than 968 */ 969 void 970 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 971 { 972 int g, i, j, ii; 973 unsigned int *p1, *p2; 974 unsigned int tmp; 975 976 /* No work to do */ 977 if (v == NULL || n <= 1) 978 return; 979 /* Sanity check on arguments */ 980 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 981 ASSERT(s > 0); 982 983 for (g = n / 2; g > 0; g /= 2) { 984 for (i = g; i < n; i++) { 985 for (j = i - g; j >= 0 && 986 (*f)(v + j * s, v + (j + g) * s) == 1; 987 j -= g) { 988 p1 = (unsigned *)(v + j * s); 989 p2 = (unsigned *)(v + (j + g) * s); 990 for (ii = 0; ii < s / 4; ii++) { 991 tmp = *p1; 992 *p1++ = *p2; 993 *p2++ = tmp; 994 } 995 } 996 } 997 } 998 } 999 1000 /* 1001 * Initialize data structures required for rxdma 1002 * buffer dvma->vmem address lookup 1003 */ 1004 /*ARGSUSED*/ 1005 static nxge_status_t 1006 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1007 { 1008 1009 int index; 1010 rxring_info_t *ring_info; 1011 int max_iteration = 0, max_index = 0; 1012 1013 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1014 1015 ring_info = rbrp->ring_info; 1016 ring_info->hint[0] = NO_HINT; 1017 ring_info->hint[1] = NO_HINT; 1018 ring_info->hint[2] = NO_HINT; 1019 max_index = rbrp->num_blocks; 1020 1021 /* read the DVMA address information and sort it */ 1022 /* do init of the information array */ 1023 1024 1025 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1026 " nxge_rxbuf_index_info_init Sort ptrs")); 1027 1028 /* sort the array */ 1029 nxge_ksort((void *)ring_info->buffer, max_index, 1030 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1031 1032 1033 1034 for (index = 0; index < max_index; index++) { 1035 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1036 " 
nxge_rxbuf_index_info_init: sorted chunk %d " 1037 " ioaddr $%p kaddr $%p size %x", 1038 index, ring_info->buffer[index].dvma_addr, 1039 ring_info->buffer[index].kaddr, 1040 ring_info->buffer[index].buf_size)); 1041 } 1042 1043 max_iteration = 0; 1044 while (max_index >= (1ULL << max_iteration)) 1045 max_iteration++; 1046 ring_info->max_iterations = max_iteration + 1; 1047 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1048 " nxge_rxbuf_index_info_init Find max iter %d", 1049 ring_info->max_iterations)); 1050 1051 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1052 return (NXGE_OK); 1053 } 1054 1055 /* ARGSUSED */ 1056 void 1057 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1058 { 1059 #ifdef NXGE_DEBUG 1060 1061 uint32_t bptr; 1062 uint64_t pp; 1063 1064 bptr = entry_p->bits.hdw.pkt_buf_addr; 1065 1066 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1067 "\trcr entry $%p " 1068 "\trcr entry 0x%0llx " 1069 "\trcr entry 0x%08x " 1070 "\trcr entry 0x%08x " 1071 "\tvalue 0x%0llx\n" 1072 "\tmulti = %d\n" 1073 "\tpkt_type = 0x%x\n" 1074 "\tzero_copy = %d\n" 1075 "\tnoport = %d\n" 1076 "\tpromis = %d\n" 1077 "\terror = 0x%04x\n" 1078 "\tdcf_err = 0x%01x\n" 1079 "\tl2_len = %d\n" 1080 "\tpktbufsize = %d\n" 1081 "\tpkt_buf_addr = $%p\n" 1082 "\tpkt_buf_addr (<< 6) = $%p\n", 1083 entry_p, 1084 *(int64_t *)entry_p, 1085 *(int32_t *)entry_p, 1086 *(int32_t *)((char *)entry_p + 32), 1087 entry_p->value, 1088 entry_p->bits.hdw.multi, 1089 entry_p->bits.hdw.pkt_type, 1090 entry_p->bits.hdw.zero_copy, 1091 entry_p->bits.hdw.noport, 1092 entry_p->bits.hdw.promis, 1093 entry_p->bits.hdw.error, 1094 entry_p->bits.hdw.dcf_err, 1095 entry_p->bits.hdw.l2_len, 1096 entry_p->bits.hdw.pktbufsz, 1097 bptr, 1098 entry_p->bits.ldw.pkt_buf_addr)); 1099 1100 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1101 RCR_PKT_BUF_ADDR_SHIFT; 1102 1103 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1104 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1105 #endif 1106 } 1107 1108 void 1109 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1110 { 1111 npi_handle_t handle; 1112 rbr_stat_t rbr_stat; 1113 addr44_t hd_addr; 1114 addr44_t tail_addr; 1115 uint16_t qlen; 1116 1117 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1118 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1119 1120 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1121 1122 /* RBR head */ 1123 hd_addr.addr = 0; 1124 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1125 #if defined(__i386) 1126 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1127 (void *)(uint32_t)hd_addr.addr); 1128 #else 1129 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1130 (void *)hd_addr.addr); 1131 #endif 1132 1133 /* RBR stats */ 1134 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1135 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1136 1137 /* RCR tail */ 1138 tail_addr.addr = 0; 1139 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1140 #if defined(__i386) 1141 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1142 (void *)(uint32_t)tail_addr.addr); 1143 #else 1144 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1145 (void *)tail_addr.addr); 1146 #endif 1147 1148 /* RCR qlen */ 1149 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1150 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1151 1152 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1153 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1154 } 1155 1156 void 1157 nxge_rxdma_stop(p_nxge_t nxgep) 1158 { 1159 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1160 1161 (void) 
nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1162 (void) nxge_rx_mac_disable(nxgep); 1163 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1164 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 1165 } 1166 1167 void 1168 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 1169 { 1170 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 1171 1172 (void) nxge_rxdma_stop(nxgep); 1173 (void) nxge_uninit_rxdma_channels(nxgep); 1174 (void) nxge_init_rxdma_channels(nxgep); 1175 1176 #ifndef AXIS_DEBUG_LB 1177 (void) nxge_xcvr_init(nxgep); 1178 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1179 #endif 1180 (void) nxge_rx_mac_enable(nxgep); 1181 1182 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 1183 } 1184 1185 nxge_status_t 1186 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1187 { 1188 nxge_grp_set_t *set = &nxgep->rx_set; 1189 nxge_status_t status; 1190 npi_status_t rs; 1191 int rdc; 1192 1193 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1194 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1195 1196 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1197 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1198 "<== nxge_rxdma_mode: not initialized")); 1199 return (NXGE_ERROR); 1200 } 1201 1202 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1203 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1204 "<== nxge_tx_port_fatal_err_recover: " 1205 "NULL ring pointer(s)")); 1206 return (NXGE_ERROR); 1207 } 1208 1209 if (set->owned.map == 0) { 1210 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1211 "nxge_rxdma_regs_dump_channels: no channels")); 1212 return (NULL); 1213 } 1214 1215 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1216 if ((1 << rdc) & set->owned.map) { 1217 rx_rbr_ring_t *ring = 1218 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1219 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1220 if (ring) { 1221 if (enable) { 1222 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1223 "==> nxge_rxdma_hw_mode: " 1224 "channel %d (enable)", rdc)); 1225 rs = npi_rxdma_cfg_rdc_enable 1226 (handle, rdc); 1227 } else { 1228 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1229 "==> nxge_rxdma_hw_mode: " 1230 "channel %d disable)", rdc)); 1231 rs = npi_rxdma_cfg_rdc_disable 1232 (handle, rdc); 1233 } 1234 } 1235 } 1236 } 1237 1238 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1239 1240 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1241 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1242 1243 return (status); 1244 } 1245 1246 void 1247 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1248 { 1249 npi_handle_t handle; 1250 1251 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1252 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1253 1254 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1255 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1256 1257 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1258 } 1259 1260 void 1261 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1262 { 1263 npi_handle_t handle; 1264 1265 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1266 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1267 1268 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1269 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1270 1271 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1272 } 1273 1274 void 1275 nxge_hw_start_rx(p_nxge_t nxgep) 1276 { 1277 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1278 1279 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1280 (void) nxge_rx_mac_enable(nxgep); 1281 1282 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1283 } 1284 1285 /*ARGSUSED*/ 1286 void 1287 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1288 { 1289 nxge_grp_set_t *set = &nxgep->rx_set; 1290 int rdc; 1291 1292 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1293 1294 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1295 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1296 "<== nxge_tx_port_fatal_err_recover: " 1297 "NULL ring pointer(s)")); 1298 return; 1299 } 1300 1301 if (set->owned.map == 0) { 1302 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1303 "nxge_rxdma_regs_dump_channels: no channels")); 1304 return; 1305 } 1306 1307 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1308 if ((1 << rdc) & set->owned.map) { 1309 rx_rbr_ring_t *ring = 1310 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1311 if (ring) { 1312 nxge_rxdma_hw_stop(nxgep, rdc); 1313 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1314 "==> nxge_fixup_rxdma_rings: " 1315 "channel %d ring $%px", 1316 rdc, ring)); 1317 (void) nxge_rxdma_fixup_channel 1318 (nxgep, rdc, rdc); 1319 } 1320 } 1321 } 1322 1323 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1324 } 1325 1326 void 1327 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1328 { 1329 int i; 1330 1331 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1332 i = nxge_rxdma_get_ring_index(nxgep, channel); 1333 if (i < 0) { 1334 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1335 "<== nxge_rxdma_fix_channel: no entry found")); 1336 return; 1337 } 1338 1339 nxge_rxdma_fixup_channel(nxgep, channel, i); 1340 1341 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 1342 } 1343 1344 void 1345 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1346 { 1347 int ndmas; 1348 p_rx_rbr_rings_t rx_rbr_rings; 1349 p_rx_rbr_ring_t *rbr_rings; 1350 p_rx_rcr_rings_t rx_rcr_rings; 1351 p_rx_rcr_ring_t *rcr_rings; 1352 p_rx_mbox_areas_t rx_mbox_areas_p; 1353 p_rx_mbox_t *rx_mbox_p; 1354 p_nxge_dma_pool_t dma_buf_poolp; 1355 p_nxge_dma_pool_t dma_cntl_poolp; 1356 p_rx_rbr_ring_t rbrp; 1357 p_rx_rcr_ring_t rcrp; 1358 p_rx_mbox_t mboxp; 1359 p_nxge_dma_common_t dmap; 1360 nxge_status_t status = NXGE_OK; 1361 1362 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 1363 1364 (void) nxge_rxdma_stop_channel(nxgep, channel); 1365 1366 dma_buf_poolp = nxgep->rx_buf_pool_p; 1367 
dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1368 1369 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1370 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1371 "<== nxge_rxdma_fixup_channel: buf not allocated")); 1372 return; 1373 } 1374 1375 ndmas = dma_buf_poolp->ndmas; 1376 if (!ndmas) { 1377 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1378 "<== nxge_rxdma_fixup_channel: no dma allocated")); 1379 return; 1380 } 1381 1382 rx_rbr_rings = nxgep->rx_rbr_rings; 1383 rx_rcr_rings = nxgep->rx_rcr_rings; 1384 rbr_rings = rx_rbr_rings->rbr_rings; 1385 rcr_rings = rx_rcr_rings->rcr_rings; 1386 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1387 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1388 1389 /* Reinitialize the receive block and completion rings */ 1390 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 1391 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 1392 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 1393 1394 1395 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1396 rbrp->rbr_rd_index = 0; 1397 rcrp->comp_rd_index = 0; 1398 rcrp->comp_wt_index = 0; 1399 1400 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1401 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1402 1403 status = nxge_rxdma_start_channel(nxgep, channel, 1404 rbrp, rcrp, mboxp); 1405 if (status != NXGE_OK) { 1406 goto nxge_rxdma_fixup_channel_fail; 1407 } 1408 if (status != NXGE_OK) { 1409 goto nxge_rxdma_fixup_channel_fail; 1410 } 1411 1412 nxge_rxdma_fixup_channel_fail: 1413 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1414 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 1415 1416 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 1417 } 1418 1419 /* ARGSUSED */ 1420 int 1421 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1422 { 1423 return (channel); 1424 } 1425 1426 p_rx_rbr_ring_t 1427 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1428 { 1429 nxge_grp_set_t *set = &nxgep->rx_set; 1430 nxge_channel_t rdc; 1431 1432 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1433 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1434 1435 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1436 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1437 "<== nxge_rxdma_get_rbr_ring: " 1438 "NULL ring pointer(s)")); 1439 return (NULL); 1440 } 1441 1442 if (set->owned.map == 0) { 1443 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1444 "<== nxge_rxdma_get_rbr_ring: no channels")); 1445 return (NULL); 1446 } 1447 1448 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1449 if ((1 << rdc) & set->owned.map) { 1450 rx_rbr_ring_t *ring = 1451 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1452 if (ring) { 1453 if (channel == ring->rdc) { 1454 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1455 "==> nxge_rxdma_get_rbr_ring: " 1456 "channel %d ring $%p", rdc, ring)); 1457 return (ring); 1458 } 1459 } 1460 } 1461 } 1462 1463 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1464 "<== nxge_rxdma_get_rbr_ring: not found")); 1465 1466 return (NULL); 1467 } 1468 1469 p_rx_rcr_ring_t 1470 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1471 { 1472 nxge_grp_set_t *set = &nxgep->rx_set; 1473 nxge_channel_t rdc; 1474 1475 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1476 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1477 1478 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1479 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1480 "<== nxge_rxdma_get_rcr_ring: " 1481 "NULL ring pointer(s)")); 1482 return (NULL); 1483 } 1484 1485 if (set->owned.map == 0) { 1486 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1487 "<== nxge_rxdma_get_rbr_ring: no channels")); 1488 return (NULL); 1489 } 1490 1491 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1492 if ((1 << 
rdc) & set->owned.map) { 1493 rx_rcr_ring_t *ring = 1494 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1495 if (ring) { 1496 if (channel == ring->rdc) { 1497 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1498 "==> nxge_rxdma_get_rcr_ring: " 1499 "channel %d ring $%p", rdc, ring)); 1500 return (ring); 1501 } 1502 } 1503 } 1504 } 1505 1506 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1507 "<== nxge_rxdma_get_rcr_ring: not found")); 1508 1509 return (NULL); 1510 } 1511 1512 /* 1513 * Static functions start here. 1514 */ 1515 static p_rx_msg_t 1516 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1517 { 1518 p_rx_msg_t nxge_mp = NULL; 1519 p_nxge_dma_common_t dmamsg_p; 1520 uchar_t *buffer; 1521 1522 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1523 if (nxge_mp == NULL) { 1524 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1525 "Allocation of a rx msg failed.")); 1526 goto nxge_allocb_exit; 1527 } 1528 1529 nxge_mp->use_buf_pool = B_FALSE; 1530 if (dmabuf_p) { 1531 nxge_mp->use_buf_pool = B_TRUE; 1532 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1533 *dmamsg_p = *dmabuf_p; 1534 dmamsg_p->nblocks = 1; 1535 dmamsg_p->block_size = size; 1536 dmamsg_p->alength = size; 1537 buffer = (uchar_t *)dmabuf_p->kaddrp; 1538 1539 dmabuf_p->kaddrp = (void *) 1540 ((char *)dmabuf_p->kaddrp + size); 1541 dmabuf_p->ioaddr_pp = (void *) 1542 ((char *)dmabuf_p->ioaddr_pp + size); 1543 dmabuf_p->alength -= size; 1544 dmabuf_p->offset += size; 1545 dmabuf_p->dma_cookie.dmac_laddress += size; 1546 dmabuf_p->dma_cookie.dmac_size -= size; 1547 1548 } else { 1549 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1550 if (buffer == NULL) { 1551 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1552 "Allocation of a receive page failed.")); 1553 goto nxge_allocb_fail1; 1554 } 1555 } 1556 1557 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1558 if (nxge_mp->rx_mblk_p == NULL) { 1559 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1560 goto nxge_allocb_fail2; 1561 } 1562 1563 nxge_mp->buffer = buffer; 1564 nxge_mp->block_size = size; 1565 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1566 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1567 nxge_mp->ref_cnt = 1; 1568 nxge_mp->free = B_TRUE; 1569 nxge_mp->rx_use_bcopy = B_FALSE; 1570 1571 atomic_inc_32(&nxge_mblks_pending); 1572 1573 goto nxge_allocb_exit; 1574 1575 nxge_allocb_fail2: 1576 if (!nxge_mp->use_buf_pool) { 1577 KMEM_FREE(buffer, size); 1578 } 1579 1580 nxge_allocb_fail1: 1581 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1582 nxge_mp = NULL; 1583 1584 nxge_allocb_exit: 1585 return (nxge_mp); 1586 } 1587 1588 p_mblk_t 1589 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1590 { 1591 p_mblk_t mp; 1592 1593 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1594 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1595 "offset = 0x%08X " 1596 "size = 0x%08X", 1597 nxge_mp, offset, size)); 1598 1599 mp = desballoc(&nxge_mp->buffer[offset], size, 1600 0, &nxge_mp->freeb); 1601 if (mp == NULL) { 1602 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1603 goto nxge_dupb_exit; 1604 } 1605 atomic_inc_32(&nxge_mp->ref_cnt); 1606 1607 1608 nxge_dupb_exit: 1609 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1610 nxge_mp)); 1611 return (mp); 1612 } 1613 1614 p_mblk_t 1615 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1616 { 1617 p_mblk_t mp; 1618 uchar_t *dp; 1619 1620 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1621 if (mp == NULL) { 1622 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1623 goto nxge_dupb_bcopy_exit; 1624 } 1625 dp = mp->b_rptr = 
mp->b_rptr + NXGE_RXBUF_EXTRA; 1626 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1627 mp->b_wptr = dp + size; 1628 1629 nxge_dupb_bcopy_exit: 1630 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1631 nxge_mp)); 1632 return (mp); 1633 } 1634 1635 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1636 p_rx_msg_t rx_msg_p); 1637 1638 void 1639 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1640 { 1641 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1642 1643 /* Reuse this buffer */ 1644 rx_msg_p->free = B_FALSE; 1645 rx_msg_p->cur_usage_cnt = 0; 1646 rx_msg_p->max_usage_cnt = 0; 1647 rx_msg_p->pkt_buf_size = 0; 1648 1649 if (rx_rbr_p->rbr_use_bcopy) { 1650 rx_msg_p->rx_use_bcopy = B_FALSE; 1651 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1652 } 1653 1654 /* 1655 * Get the rbr header pointer and its offset index. 1656 */ 1657 MUTEX_ENTER(&rx_rbr_p->post_lock); 1658 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1659 rx_rbr_p->rbr_wrap_mask); 1660 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1661 MUTEX_EXIT(&rx_rbr_p->post_lock); 1662 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1663 rx_rbr_p->rdc, 1); 1664 1665 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1666 "<== nxge_post_page (channel %d post_next_index %d)", 1667 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1668 1669 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1670 } 1671 1672 void 1673 nxge_freeb(p_rx_msg_t rx_msg_p) 1674 { 1675 size_t size; 1676 uchar_t *buffer = NULL; 1677 int ref_cnt; 1678 boolean_t free_state = B_FALSE; 1679 1680 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1681 1682 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1683 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1684 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1685 rx_msg_p, nxge_mblks_pending)); 1686 1687 /* 1688 * First we need to get the free state, then 1689 * atomic decrement the reference count to prevent 1690 * the race condition with the interrupt thread that 1691 * is processing a loaned up buffer block. 1692 */ 1693 free_state = rx_msg_p->free; 1694 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1695 if (!ref_cnt) { 1696 atomic_dec_32(&nxge_mblks_pending); 1697 buffer = rx_msg_p->buffer; 1698 size = rx_msg_p->block_size; 1699 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1700 "will free: rx_msg_p = $%p (block pending %d)", 1701 rx_msg_p, nxge_mblks_pending)); 1702 1703 if (!rx_msg_p->use_buf_pool) { 1704 KMEM_FREE(buffer, size); 1705 } 1706 1707 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1708 1709 if (ring) { 1710 /* 1711 * Decrement the receive buffer ring's reference 1712 * count, too. 1713 */ 1714 atomic_dec_32(&ring->rbr_ref_cnt); 1715 1716 /* 1717 * Free the receive buffer ring, if 1718 * 1. all the receive buffers have been freed 1719 * 2. and we are in the proper state (that is, 1720 * we are not UNMAPPING). 1721 */ 1722 if (ring->rbr_ref_cnt == 0 && 1723 ring->rbr_state == RBR_UNMAPPED) { 1724 /* 1725 * Free receive data buffers, 1726 * buffer index information 1727 * (rxring_info) and 1728 * the message block ring. 
1729 */ 1730 NXGE_DEBUG_MSG((NULL, RX_CTL, 1731 "nxge_freeb:rx_msg_p = $%p " 1732 "(block pending %d) free buffers", 1733 rx_msg_p, nxge_mblks_pending)); 1734 nxge_rxdma_databuf_free(ring); 1735 if (ring->ring_info) { 1736 KMEM_FREE(ring->ring_info, 1737 sizeof (rxring_info_t)); 1738 } 1739 1740 if (ring->rx_msg_ring) { 1741 KMEM_FREE(ring->rx_msg_ring, 1742 ring->tnblocks * 1743 sizeof (p_rx_msg_t)); 1744 } 1745 KMEM_FREE(ring, sizeof (*ring)); 1746 } 1747 } 1748 return; 1749 } 1750 1751 /* 1752 * Repost buffer. 1753 */ 1754 if (free_state && (ref_cnt == 1) && ring) { 1755 NXGE_DEBUG_MSG((NULL, RX_CTL, 1756 "nxge_freeb: post page $%p:", rx_msg_p)); 1757 if (ring->rbr_state == RBR_POSTING) 1758 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1759 } 1760 1761 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1762 } 1763 1764 uint_t 1765 nxge_rx_intr(void *arg1, void *arg2) 1766 { 1767 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1768 p_nxge_t nxgep = (p_nxge_t)arg2; 1769 p_nxge_ldg_t ldgp; 1770 uint8_t channel; 1771 npi_handle_t handle; 1772 rx_dma_ctl_stat_t cs; 1773 1774 #ifdef NXGE_DEBUG 1775 rxdma_cfig1_t cfg; 1776 #endif 1777 uint_t serviced = DDI_INTR_UNCLAIMED; 1778 1779 if (ldvp == NULL) { 1780 NXGE_DEBUG_MSG((NULL, INT_CTL, 1781 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1782 nxgep, ldvp)); 1783 1784 return (DDI_INTR_CLAIMED); 1785 } 1786 1787 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1788 nxgep = ldvp->nxgep; 1789 } 1790 1791 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1792 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1793 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1794 "<== nxge_rx_intr: interface not started or intialized")); 1795 return (DDI_INTR_CLAIMED); 1796 } 1797 1798 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1799 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1800 nxgep, ldvp)); 1801 1802 /* 1803 * This interrupt handler is for a specific 1804 * receive dma channel. 1805 */ 1806 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1807 /* 1808 * Get the control and status for this channel. 1809 */ 1810 channel = ldvp->channel; 1811 ldgp = ldvp->ldgp; 1812 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1813 1814 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1815 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1816 channel, 1817 cs.value, 1818 cs.bits.hdw.rcrto, 1819 cs.bits.hdw.rcrthres)); 1820 1821 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1822 serviced = DDI_INTR_CLAIMED; 1823 1824 /* error events. */ 1825 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1826 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1827 } 1828 1829 nxge_intr_exit: 1830 /* 1831 * Enable the mailbox update interrupt if we want 1832 * to use mailbox. We probably don't need to use 1833 * mailbox as it only saves us one pio read. 1834 * Also write 1 to rcrthres and rcrto to clear 1835 * these two edge triggered bits. 1836 */ 1837 1838 cs.value &= RX_DMA_CTL_STAT_WR1C; 1839 cs.bits.hdw.mex = 1; 1840 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1841 cs.value); 1842 1843 /* 1844 * Rearm this logical group if this is a single device 1845 * group. 
1846 */ 1847 if (ldgp->nldvs == 1) { 1848 ldgimgm_t mgm; 1849 mgm.value = 0; 1850 mgm.bits.ldw.arm = 1; 1851 mgm.bits.ldw.timer = ldgp->ldg_timer; 1852 if (isLDOMguest(nxgep)) { 1853 nxge_hio_ldgimgn(nxgep, ldgp); 1854 } else { 1855 NXGE_REG_WR64(handle, 1856 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1857 mgm.value); 1858 } 1859 } 1860 1861 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1862 serviced)); 1863 return (serviced); 1864 } 1865 1866 /* 1867 * Process the packets received in the specified logical device 1868 * and pass up a chain of message blocks to the upper layer. 1869 */ 1870 static void 1871 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 1872 { 1873 p_mblk_t mp; 1874 p_rx_rcr_ring_t rcrp; 1875 1876 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1877 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 1878 if (rcrp->poll_flag) { 1879 /* It is in the poll mode */ 1880 return; 1881 } 1882 1883 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 1884 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1885 "<== nxge_rx_pkts_vring: no mp")); 1886 return; 1887 } 1888 1889 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1890 mp)); 1891 1892 #ifdef NXGE_DEBUG 1893 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1894 "==> nxge_rx_pkts_vring:calling mac_rx " 1895 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1896 "mac_handle $%p", 1897 mp->b_wptr - mp->b_rptr, 1898 mp, mp->b_cont, mp->b_next, 1899 rcrp, rcrp->rcr_mac_handle)); 1900 1901 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1902 "==> nxge_rx_pkts_vring: dump packets " 1903 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1904 mp, 1905 mp->b_rptr, 1906 mp->b_wptr, 1907 nxge_dump_packet((char *)mp->b_rptr, 1908 mp->b_wptr - mp->b_rptr))); 1909 if (mp->b_cont) { 1910 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1911 "==> nxge_rx_pkts_vring: dump b_cont packets " 1912 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1913 mp->b_cont, 1914 mp->b_cont->b_rptr, 1915 mp->b_cont->b_wptr, 1916 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1917 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1918 } 1919 if (mp->b_next) { 1920 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1921 "==> nxge_rx_pkts_vring: dump next packets " 1922 "(b_rptr $%p): %s", 1923 mp->b_next->b_rptr, 1924 nxge_dump_packet((char *)mp->b_next->b_rptr, 1925 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1926 } 1927 #endif 1928 1929 if (!isLDOMguest(nxgep)) 1930 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1931 #if defined(sun4v) 1932 else { /* isLDOMguest(nxgep) */ 1933 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1934 nxgep->nxge_hw_p->hio; 1935 nx_vio_fp_t *vio = &nhd->hio.vio; 1936 1937 if (vio->cb.vio_net_rx_cb) { 1938 (*vio->cb.vio_net_rx_cb) 1939 (nxgep->hio_vr->vhp, mp); 1940 } 1941 } 1942 #endif 1943 } 1944 1945 1946 /* 1947 * This routine is the main packet receive processing function. 1948 * It gets the packet type, error code, and buffer related 1949 * information from the receive completion entry. 1950 * How many completion entries to process is based on the number of packets 1951 * queued by the hardware, a hardware maintained tail pointer 1952 * and a configurable receive packet count. 1953 * 1954 * A chain of message blocks will be created as result of processing 1955 * the completion entries. This chain of message blocks will be returned and 1956 * a hardware control status register will be updated with the number of 1957 * packets were removed from the hardware queue. 
1958 * 1959 */ 1960 static mblk_t * 1961 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 1962 int bytes_to_pickup) 1963 { 1964 npi_handle_t handle; 1965 uint8_t channel; 1966 uint32_t comp_rd_index; 1967 p_rcr_entry_t rcr_desc_rd_head_p; 1968 p_rcr_entry_t rcr_desc_rd_head_pp; 1969 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1970 uint16_t qlen, nrcr_read, npkt_read; 1971 uint32_t qlen_hw; 1972 boolean_t multi; 1973 rcrcfig_b_t rcr_cfg_b; 1974 int totallen = 0; 1975 #if defined(_BIG_ENDIAN) 1976 npi_status_t rs = NPI_SUCCESS; 1977 #endif 1978 1979 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 1980 "channel %d", rcr_p->rdc)); 1981 1982 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1983 return (NULL); 1984 } 1985 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1986 channel = rcr_p->rdc; 1987 1988 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1989 "==> nxge_rx_pkts: START: rcr channel %d " 1990 "head_p $%p head_pp $%p index %d ", 1991 channel, rcr_p->rcr_desc_rd_head_p, 1992 rcr_p->rcr_desc_rd_head_pp, 1993 rcr_p->comp_rd_index)); 1994 1995 1996 #if !defined(_BIG_ENDIAN) 1997 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 1998 #else 1999 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2000 if (rs != NPI_SUCCESS) { 2001 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2002 "channel %d, get qlen failed 0x%08x", 2003 channel, rs)); 2004 return (NULL); 2005 } 2006 #endif 2007 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2008 "qlen %d", channel, qlen)); 2009 2010 2011 2012 if (!qlen) { 2013 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2014 "==> nxge_rx_pkts:rcr channel %d " 2015 "qlen %d (no pkts)", channel, qlen)); 2016 2017 return (NULL); 2018 } 2019 2020 comp_rd_index = rcr_p->comp_rd_index; 2021 2022 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2023 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2024 nrcr_read = npkt_read = 0; 2025 2026 /* 2027 * Number of packets queued 2028 * (The jumbo or multi packet will be counted as only one 2029 * packets and it may take up more than one completion entry). 2030 */ 2031 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2032 qlen : nxge_max_rx_pkts; 2033 head_mp = NULL; 2034 tail_mp = &head_mp; 2035 nmp = mp_cont = NULL; 2036 multi = B_FALSE; 2037 2038 while (qlen_hw) { 2039 2040 #ifdef NXGE_DEBUG 2041 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2042 #endif 2043 /* 2044 * Process one completion ring entry. 
2045 */ 2046 nxge_receive_packet(nxgep, 2047 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2048 2049 /* 2050 * message chaining modes 2051 */ 2052 if (nmp) { 2053 nmp->b_next = NULL; 2054 if (!multi && !mp_cont) { /* frame fits a partition */ 2055 *tail_mp = nmp; 2056 tail_mp = &nmp->b_next; 2057 totallen += MBLKL(nmp); 2058 nmp = NULL; 2059 } else if (multi && !mp_cont) { /* first segment */ 2060 *tail_mp = nmp; 2061 tail_mp = &nmp->b_cont; 2062 totallen += MBLKL(nmp); 2063 } else if (multi && mp_cont) { /* mid of multi segs */ 2064 *tail_mp = mp_cont; 2065 tail_mp = &mp_cont->b_cont; 2066 totallen += MBLKL(mp_cont); 2067 } else if (!multi && mp_cont) { /* last segment */ 2068 *tail_mp = mp_cont; 2069 tail_mp = &nmp->b_next; 2070 totallen += MBLKL(mp_cont); 2071 nmp = NULL; 2072 } 2073 } 2074 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2075 "==> nxge_rx_pkts: loop: rcr channel %d " 2076 "before updating: multi %d " 2077 "nrcr_read %d " 2078 "npk read %d " 2079 "head_pp $%p index %d ", 2080 channel, 2081 multi, 2082 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2083 comp_rd_index)); 2084 2085 if (!multi) { 2086 qlen_hw--; 2087 npkt_read++; 2088 } 2089 2090 /* 2091 * Update the next read entry. 2092 */ 2093 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2094 rcr_p->comp_wrap_mask); 2095 2096 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2097 rcr_p->rcr_desc_first_p, 2098 rcr_p->rcr_desc_last_p); 2099 2100 nrcr_read++; 2101 2102 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2103 "<== nxge_rx_pkts: (SAM, process one packet) " 2104 "nrcr_read %d", 2105 nrcr_read)); 2106 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2107 "==> nxge_rx_pkts: loop: rcr channel %d " 2108 "multi %d " 2109 "nrcr_read %d " 2110 "npk read %d " 2111 "head_pp $%p index %d ", 2112 channel, 2113 multi, 2114 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2115 comp_rd_index)); 2116 2117 if ((bytes_to_pickup != -1) && 2118 (totallen >= bytes_to_pickup)) { 2119 break; 2120 } 2121 } 2122 2123 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2124 rcr_p->comp_rd_index = comp_rd_index; 2125 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2126 2127 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2128 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2129 rcr_p->intr_timeout = nxgep->intr_timeout; 2130 rcr_p->intr_threshold = nxgep->intr_threshold; 2131 rcr_cfg_b.value = 0x0ULL; 2132 if (rcr_p->intr_timeout) 2133 rcr_cfg_b.bits.ldw.entout = 1; 2134 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2135 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2136 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2137 channel, rcr_cfg_b.value); 2138 } 2139 2140 cs.bits.ldw.pktread = npkt_read; 2141 cs.bits.ldw.ptrread = nrcr_read; 2142 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2143 channel, cs.value); 2144 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2145 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2146 "head_pp $%p index %016llx ", 2147 channel, 2148 rcr_p->rcr_desc_rd_head_pp, 2149 rcr_p->comp_rd_index)); 2150 /* 2151 * Update RCR buffer pointer read and number of packets 2152 * read. 
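 * (The pktread and ptrread fields written to RX_DMA_CTL_STAT above
 * report how many packets and completion entries were consumed in
 * this pass.)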
2153 */ 2154 2155 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2156 return (head_mp); 2157 } 2158 2159 void 2160 nxge_receive_packet(p_nxge_t nxgep, 2161 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2162 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2163 { 2164 p_mblk_t nmp = NULL; 2165 uint64_t multi; 2166 uint64_t dcf_err; 2167 uint8_t channel; 2168 2169 boolean_t first_entry = B_TRUE; 2170 boolean_t is_tcp_udp = B_FALSE; 2171 boolean_t buffer_free = B_FALSE; 2172 boolean_t error_send_up = B_FALSE; 2173 uint8_t error_type; 2174 uint16_t l2_len; 2175 uint16_t skip_len; 2176 uint8_t pktbufsz_type; 2177 uint64_t rcr_entry; 2178 uint64_t *pkt_buf_addr_pp; 2179 uint64_t *pkt_buf_addr_p; 2180 uint32_t buf_offset; 2181 uint32_t bsize; 2182 uint32_t error_disp_cnt; 2183 uint32_t msg_index; 2184 p_rx_rbr_ring_t rx_rbr_p; 2185 p_rx_msg_t *rx_msg_ring_p; 2186 p_rx_msg_t rx_msg_p; 2187 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2188 nxge_status_t status = NXGE_OK; 2189 boolean_t is_valid = B_FALSE; 2190 p_nxge_rx_ring_stats_t rdc_stats; 2191 uint32_t bytes_read; 2192 uint64_t pkt_type; 2193 uint64_t frag; 2194 boolean_t pkt_too_long_err = B_FALSE; 2195 #ifdef NXGE_DEBUG 2196 int dump_len; 2197 #endif 2198 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2199 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2200 2201 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2202 2203 multi = (rcr_entry & RCR_MULTI_MASK); 2204 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2205 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2206 2207 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2208 frag = (rcr_entry & RCR_FRAG_MASK); 2209 2210 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2211 2212 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2213 RCR_PKTBUFSZ_SHIFT); 2214 #if defined(__i386) 2215 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2216 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2217 #else 2218 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2219 RCR_PKT_BUF_ADDR_SHIFT); 2220 #endif 2221 2222 channel = rcr_p->rdc; 2223 2224 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2225 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2226 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2227 "error_type 0x%x pkt_type 0x%x " 2228 "pktbufsz_type %d ", 2229 rcr_desc_rd_head_p, 2230 rcr_entry, pkt_buf_addr_pp, l2_len, 2231 multi, 2232 error_type, 2233 pkt_type, 2234 pktbufsz_type)); 2235 2236 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2237 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2238 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2239 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2240 rcr_entry, pkt_buf_addr_pp, l2_len, 2241 multi, 2242 error_type, 2243 pkt_type)); 2244 2245 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2246 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2247 "full pkt_buf_addr_pp $%p l2_len %d", 2248 rcr_entry, pkt_buf_addr_pp, l2_len)); 2249 2250 /* get the stats ptr */ 2251 rdc_stats = rcr_p->rdc_stats; 2252 2253 if (!l2_len) { 2254 2255 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2256 "<== nxge_receive_packet: failed: l2 length is 0.")); 2257 return; 2258 } 2259 2260 /* 2261 * Sofware workaround for BMAC hardware limitation that allows 2262 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2263 * instead of 0x2400 for jumbo. 2264 */ 2265 if (l2_len > nxgep->mac.maxframesize) { 2266 pkt_too_long_err = B_TRUE; 2267 } 2268 2269 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
*/ 2270 l2_len -= ETHERFCSL; 2271 2272 /* shift 6 bits to get the full io address */ 2273 #if defined(__i386) 2274 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2275 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2276 #else 2277 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2278 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2279 #endif 2280 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2281 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2282 "full pkt_buf_addr_pp $%p l2_len %d", 2283 rcr_entry, pkt_buf_addr_pp, l2_len)); 2284 2285 rx_rbr_p = rcr_p->rx_rbr_p; 2286 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2287 2288 if (first_entry) { 2289 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2290 RXDMA_HDR_SIZE_DEFAULT); 2291 2292 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2293 "==> nxge_receive_packet: first entry 0x%016llx " 2294 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2295 rcr_entry, pkt_buf_addr_pp, l2_len, 2296 hdr_size)); 2297 } 2298 2299 MUTEX_ENTER(&rcr_p->lock); 2300 MUTEX_ENTER(&rx_rbr_p->lock); 2301 2302 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2303 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2304 "full pkt_buf_addr_pp $%p l2_len %d", 2305 rcr_entry, pkt_buf_addr_pp, l2_len)); 2306 2307 /* 2308 * Packet buffer address in the completion entry points 2309 * to the starting buffer address (offset 0). 2310 * Use the starting buffer address to locate the corresponding 2311 * kernel address. 2312 */ 2313 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2314 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2315 &buf_offset, 2316 &msg_index); 2317 2318 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2319 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2320 "full pkt_buf_addr_pp $%p l2_len %d", 2321 rcr_entry, pkt_buf_addr_pp, l2_len)); 2322 2323 if (status != NXGE_OK) { 2324 MUTEX_EXIT(&rx_rbr_p->lock); 2325 MUTEX_EXIT(&rcr_p->lock); 2326 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2327 "<== nxge_receive_packet: found vaddr failed %d", 2328 status)); 2329 return; 2330 } 2331 2332 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2333 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2334 "full pkt_buf_addr_pp $%p l2_len %d", 2335 rcr_entry, pkt_buf_addr_pp, l2_len)); 2336 2337 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2338 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2339 "full pkt_buf_addr_pp $%p l2_len %d", 2340 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2341 2342 rx_msg_p = rx_msg_ring_p[msg_index]; 2343 2344 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2345 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2346 "full pkt_buf_addr_pp $%p l2_len %d", 2347 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2348 2349 switch (pktbufsz_type) { 2350 case RCR_PKTBUFSZ_0: 2351 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2352 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2353 "==> nxge_receive_packet: 0 buf %d", bsize)); 2354 break; 2355 case RCR_PKTBUFSZ_1: 2356 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2357 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2358 "==> nxge_receive_packet: 1 buf %d", bsize)); 2359 break; 2360 case RCR_PKTBUFSZ_2: 2361 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2362 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2363 "==> nxge_receive_packet: 2 buf %d", bsize)); 2364 break; 2365 case RCR_SINGLE_BLOCK: 2366 bsize = rx_msg_p->block_size; 2367 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2368 "==> nxge_receive_packet: single %d", bsize)); 2369 2370 break; 2371 default: 2372 MUTEX_EXIT(&rx_rbr_p->lock); 2373 MUTEX_EXIT(&rcr_p->lock); 2374 return; 2375 } 2376 2377 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2378 (buf_offset + sw_offset_bytes), 2379 (hdr_size + l2_len), 2380 DDI_DMA_SYNC_FORCPU); 2381 2382 
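/*
 * The DMA_COMMON_SYNC_OFFSET() call above syncs the received bytes
 * (the header plus l2_len, starting at buf_offset) for CPU access
 * before the buffer is inspected or copied below.
 */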
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2383 "==> nxge_receive_packet: after first dump:usage count")); 2384 2385 if (rx_msg_p->cur_usage_cnt == 0) { 2386 if (rx_rbr_p->rbr_use_bcopy) { 2387 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2388 if (rx_rbr_p->rbr_consumed < 2389 rx_rbr_p->rbr_threshold_hi) { 2390 if (rx_rbr_p->rbr_threshold_lo == 0 || 2391 ((rx_rbr_p->rbr_consumed >= 2392 rx_rbr_p->rbr_threshold_lo) && 2393 (rx_rbr_p->rbr_bufsize_type >= 2394 pktbufsz_type))) { 2395 rx_msg_p->rx_use_bcopy = B_TRUE; 2396 } 2397 } else { 2398 rx_msg_p->rx_use_bcopy = B_TRUE; 2399 } 2400 } 2401 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2402 "==> nxge_receive_packet: buf %d (new block) ", 2403 bsize)); 2404 2405 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2406 rx_msg_p->pkt_buf_size = bsize; 2407 rx_msg_p->cur_usage_cnt = 1; 2408 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2409 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2410 "==> nxge_receive_packet: buf %d " 2411 "(single block) ", 2412 bsize)); 2413 /* 2414 * Buffer can be reused once the free function 2415 * is called. 2416 */ 2417 rx_msg_p->max_usage_cnt = 1; 2418 buffer_free = B_TRUE; 2419 } else { 2420 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2421 if (rx_msg_p->max_usage_cnt == 1) { 2422 buffer_free = B_TRUE; 2423 } 2424 } 2425 } else { 2426 rx_msg_p->cur_usage_cnt++; 2427 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2428 buffer_free = B_TRUE; 2429 } 2430 } 2431 2432 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2433 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2434 msg_index, l2_len, 2435 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2436 2437 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2438 rdc_stats->ierrors++; 2439 if (dcf_err) { 2440 rdc_stats->dcf_err++; 2441 #ifdef NXGE_DEBUG 2442 if (!rdc_stats->dcf_err) { 2443 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2444 "nxge_receive_packet: channel %d dcf_err rcr" 2445 " 0x%llx", channel, rcr_entry)); 2446 } 2447 #endif 2448 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2449 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2450 } else if (pkt_too_long_err) { 2451 rdc_stats->pkt_too_long_err++; 2452 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2453 " channel %d packet length [%d] > " 2454 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2455 nxgep->mac.maxframesize)); 2456 } else { 2457 /* Update error stats */ 2458 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2459 rdc_stats->errlog.compl_err_type = error_type; 2460 2461 switch (error_type) { 2462 /* 2463 * Do not send FMA ereport for RCR_L2_ERROR and 2464 * RCR_L4_CSUM_ERROR because most likely they indicate 2465 * back pressure rather than HW failures. 2466 */ 2467 case RCR_L2_ERROR: 2468 rdc_stats->l2_err++; 2469 if (rdc_stats->l2_err < 2470 error_disp_cnt) { 2471 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2472 " nxge_receive_packet:" 2473 " channel %d RCR L2_ERROR", 2474 channel)); 2475 } 2476 break; 2477 case RCR_L4_CSUM_ERROR: 2478 error_send_up = B_TRUE; 2479 rdc_stats->l4_cksum_err++; 2480 if (rdc_stats->l4_cksum_err < 2481 error_disp_cnt) { 2482 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2483 " nxge_receive_packet:" 2484 " channel %d" 2485 " RCR L4_CSUM_ERROR", channel)); 2486 } 2487 break; 2488 /* 2489 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2490 * RCR_ZCP_SOFT_ERROR because they reflect the same 2491 * FFLP and ZCP errors that have been reported by 2492 * nxge_fflp.c and nxge_zcp.c. 
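 * These frames are still counted in rdc_stats and, since error_send_up
 * is set, are passed up the stack rather than dropped here.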
2493 */ 2494 case RCR_FFLP_SOFT_ERROR: 2495 error_send_up = B_TRUE; 2496 rdc_stats->fflp_soft_err++; 2497 if (rdc_stats->fflp_soft_err < 2498 error_disp_cnt) { 2499 NXGE_ERROR_MSG((nxgep, 2500 NXGE_ERR_CTL, 2501 " nxge_receive_packet:" 2502 " channel %d" 2503 " RCR FFLP_SOFT_ERROR", channel)); 2504 } 2505 break; 2506 case RCR_ZCP_SOFT_ERROR: 2507 error_send_up = B_TRUE; 2508 rdc_stats->zcp_soft_err++; 2509 if (rdc_stats->zcp_soft_err < 2510 error_disp_cnt) 2511 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2512 " nxge_receive_packet: Channel %d" 2513 " RCR ZCP_SOFT_ERROR", channel)); 2514 break; 2515 default: 2516 rdc_stats->rcr_unknown_err++; 2517 if (rdc_stats->rcr_unknown_err 2518 < error_disp_cnt) { 2519 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2520 " nxge_receive_packet: Channel %d" 2521 " RCR entry 0x%llx error 0x%x", 2522 channel, rcr_entry, error_type)); 2523 } 2524 break; 2525 } 2526 } 2527 2528 /* 2529 * Update and repost buffer block if max usage 2530 * count is reached. 2531 */ 2532 if (error_send_up == B_FALSE) { 2533 atomic_inc_32(&rx_msg_p->ref_cnt); 2534 if (buffer_free == B_TRUE) { 2535 rx_msg_p->free = B_TRUE; 2536 } 2537 2538 MUTEX_EXIT(&rx_rbr_p->lock); 2539 MUTEX_EXIT(&rcr_p->lock); 2540 nxge_freeb(rx_msg_p); 2541 return; 2542 } 2543 } 2544 2545 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2546 "==> nxge_receive_packet: DMA sync second ")); 2547 2548 bytes_read = rcr_p->rcvd_pkt_bytes; 2549 skip_len = sw_offset_bytes + hdr_size; 2550 if (!rx_msg_p->rx_use_bcopy) { 2551 /* 2552 * For loaned-up buffers, the driver reference count is 2553 * incremented first and the free state is set afterwards. 2554 */ 2555 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2556 if (first_entry) { 2557 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2558 if (l2_len < bsize - skip_len) { 2559 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2560 } else { 2561 nmp->b_wptr = &nmp->b_rptr[bsize 2562 - skip_len]; 2563 } 2564 } else { 2565 if (l2_len - bytes_read < bsize) { 2566 nmp->b_wptr = 2567 &nmp->b_rptr[l2_len - bytes_read]; 2568 } else { 2569 nmp->b_wptr = &nmp->b_rptr[bsize]; 2570 } 2571 } 2572 } 2573 } else { 2574 if (first_entry) { 2575 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2576 l2_len < bsize - skip_len ? 2577 l2_len : bsize - skip_len); 2578 } else { 2579 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2580 l2_len - bytes_read < bsize ? 2581 l2_len - bytes_read : bsize); 2582 } 2583 } 2584 if (nmp != NULL) { 2585 if (first_entry) { 2586 /* 2587 * Jumbo packets may be received with more than one 2588 * buffer; increment ipackets for the first entry only. 2589 */ 2590 rdc_stats->ipackets++; 2591 2592 /* Update ibytes for kstat. */ 2593 rdc_stats->ibytes += skip_len 2594 + l2_len < bsize ? l2_len : bsize; 2595 /* 2596 * Update the number of bytes read so far for the 2597 * current frame. 2598 */ 2599 bytes_read = nmp->b_wptr - nmp->b_rptr; 2600 } else { 2601 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2602 l2_len - bytes_read : bsize; 2603 bytes_read += nmp->b_wptr - nmp->b_rptr; 2604 } 2605 2606 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2607 "==> nxge_receive_packet after dupb: " 2608 "rbr consumed %d " 2609 "pktbufsz_type %d " 2610 "nmp $%p rptr $%p wptr $%p " 2611 "buf_offset %d bsize %d l2_len %d skip_len %d", 2612 rx_rbr_p->rbr_consumed, 2613 pktbufsz_type, 2614 nmp, nmp->b_rptr, nmp->b_wptr, 2615 buf_offset, bsize, l2_len, skip_len)); 2616 } else { 2617 cmn_err(CE_WARN, "!nxge_receive_packet: " 2618 "update stats (error)"); 2619 atomic_inc_32(&rx_msg_p->ref_cnt); 2620 if (buffer_free == B_TRUE) { 2621 rx_msg_p->free = B_TRUE; 2622 } 2623 MUTEX_EXIT(&rx_rbr_p->lock); 2624 MUTEX_EXIT(&rcr_p->lock); 2625 nxge_freeb(rx_msg_p); 2626 return; 2627 } 2628 2629 if (buffer_free == B_TRUE) { 2630 rx_msg_p->free = B_TRUE; 2631 } 2632 2633 is_valid = (nmp != NULL); 2634 2635 rcr_p->rcvd_pkt_bytes = bytes_read; 2636 2637 MUTEX_EXIT(&rx_rbr_p->lock); 2638 MUTEX_EXIT(&rcr_p->lock); 2639 2640 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2641 atomic_inc_32(&rx_msg_p->ref_cnt); 2642 nxge_freeb(rx_msg_p); 2643 } 2644 2645 if (is_valid) { 2646 nmp->b_cont = NULL; 2647 if (first_entry) { 2648 *mp = nmp; 2649 *mp_cont = NULL; 2650 } else { 2651 *mp_cont = nmp; 2652 } 2653 } 2654 2655 /* 2656 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2657 * If a packet is not fragmented and no error bit is set, then 2658 * L4 checksum is OK. 2659 */ 2660 2661 if (is_valid && !multi) { 2662 /* 2663 * Update hardware checksumming. 2664 * 2665 * If the checksum flag nxge_cksum_offload 2666 * is 1, TCP and UDP packets can be sent 2667 * up with good checksum. If the checksum flag 2668 * is set to 0, checksum reporting will apply to 2669 * TCP packets only (workaround for a hardware bug). 2670 * If the checksum flag nxge_cksum_offload is 2671 * greater than 1, neither TCP nor UDP packets 2672 * will have their hardware checksum results reported. 2673 */ 2674 if (nxge_cksum_offload == 1) { 2675 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2676 pkt_type == RCR_PKT_IS_UDP) ? 2677 B_TRUE: B_FALSE); 2678 } else if (!nxge_cksum_offload) { 2679 /* TCP checksum only. */ 2680 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2681 B_TRUE: B_FALSE); 2682 } 2683 2684 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2685 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2686 is_valid, multi, is_tcp_udp, frag, error_type)); 2687 2688 if (is_tcp_udp && !frag && !error_type) { 2689 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2690 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2691 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2692 "==> nxge_receive_packet: Full tcp/udp cksum " 2693 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2694 "error %d", 2695 is_valid, multi, is_tcp_udp, frag, error_type)); 2696 } 2697 } 2698 2699 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2700 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2701 2702 *multi_p = (multi == RCR_MULTI_MASK); 2703 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2704 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2705 *multi_p, nmp, *mp, *mp_cont)); 2706 } 2707 2708 /*ARGSUSED*/ 2709 static nxge_status_t 2710 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2711 { 2712 p_nxge_rx_ring_stats_t rdc_stats; 2713 npi_handle_t handle; 2714 npi_status_t rs; 2715 boolean_t rxchan_fatal = B_FALSE; 2716 boolean_t rxport_fatal = B_FALSE; 2717 uint8_t portn; 2718 nxge_status_t status = NXGE_OK; 2719 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2720 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2721 2722 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2723 portn = nxgep->mac.portnum; 2724 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2725 2726 if (cs.bits.hdw.rbr_tmout) { 2727 rdc_stats->rx_rbr_tmout++; 2728 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2729 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2730 rxchan_fatal = B_TRUE; 2731 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2732 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2733 } 2734 if (cs.bits.hdw.rsp_cnt_err) { 2735 rdc_stats->rsp_cnt_err++; 2736 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2737 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2738 rxchan_fatal = B_TRUE; 2739 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2740 "==> nxge_rx_err_evnts(channel %d): " 2741 "rsp_cnt_err", channel)); 2742 } 2743 if (cs.bits.hdw.byte_en_bus) { 2744 rdc_stats->byte_en_bus++; 2745 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2746 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2747 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2748 "==> nxge_rx_err_evnts(channel %d): " 2749 "fatal error: byte_en_bus", channel)); 2750 rxchan_fatal = B_TRUE; 2751 } 2752 if (cs.bits.hdw.rsp_dat_err) { 2753 rdc_stats->rsp_dat_err++; 2754 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2755 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2756 rxchan_fatal = B_TRUE; 2757 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2758 "==> nxge_rx_err_evnts(channel %d): " 2759 "fatal error: rsp_dat_err", channel)); 2760 } 2761 if (cs.bits.hdw.rcr_ack_err) { 2762 rdc_stats->rcr_ack_err++; 2763 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2764 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2765 rxchan_fatal = B_TRUE; 2766 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2767 "==> nxge_rx_err_evnts(channel %d): " 2768 "fatal error: rcr_ack_err", channel)); 2769 } 2770 if (cs.bits.hdw.dc_fifo_err) { 2771 rdc_stats->dc_fifo_err++; 2772 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2773 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2774 /* This is not a fatal error! 
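 * (Not fatal to this channel, that is. dc_fifo_err is treated as a
 * port-level error: rxport_fatal is set and port recovery is
 * attempted further below.)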
*/ 2775 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2776 "==> nxge_rx_err_evnts(channel %d): " 2777 "dc_fifo_err", channel)); 2778 rxport_fatal = B_TRUE; 2779 } 2780 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2781 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2782 &rdc_stats->errlog.pre_par, 2783 &rdc_stats->errlog.sha_par)) 2784 != NPI_SUCCESS) { 2785 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2786 "==> nxge_rx_err_evnts(channel %d): " 2787 "rcr_sha_par: get perr", channel)); 2788 return (NXGE_ERROR | rs); 2789 } 2790 if (cs.bits.hdw.rcr_sha_par) { 2791 rdc_stats->rcr_sha_par++; 2792 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2793 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2794 rxchan_fatal = B_TRUE; 2795 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2796 "==> nxge_rx_err_evnts(channel %d): " 2797 "fatal error: rcr_sha_par", channel)); 2798 } 2799 if (cs.bits.hdw.rbr_pre_par) { 2800 rdc_stats->rbr_pre_par++; 2801 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2802 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2803 rxchan_fatal = B_TRUE; 2804 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2805 "==> nxge_rx_err_evnts(channel %d): " 2806 "fatal error: rbr_pre_par", channel)); 2807 } 2808 } 2809 /* 2810 * The Following 4 status bits are for information, the system 2811 * is running fine. There is no need to send FMA ereports or 2812 * log messages. 2813 */ 2814 if (cs.bits.hdw.port_drop_pkt) { 2815 rdc_stats->port_drop_pkt++; 2816 } 2817 if (cs.bits.hdw.wred_drop) { 2818 rdc_stats->wred_drop++; 2819 } 2820 if (cs.bits.hdw.rbr_pre_empty) { 2821 rdc_stats->rbr_pre_empty++; 2822 } 2823 if (cs.bits.hdw.rcr_shadow_full) { 2824 rdc_stats->rcr_shadow_full++; 2825 } 2826 if (cs.bits.hdw.config_err) { 2827 rdc_stats->config_err++; 2828 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2829 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2830 rxchan_fatal = B_TRUE; 2831 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2832 "==> nxge_rx_err_evnts(channel %d): " 2833 "config error", channel)); 2834 } 2835 if (cs.bits.hdw.rcrincon) { 2836 rdc_stats->rcrincon++; 2837 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2838 NXGE_FM_EREPORT_RDMC_RCRINCON); 2839 rxchan_fatal = B_TRUE; 2840 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2841 "==> nxge_rx_err_evnts(channel %d): " 2842 "fatal error: rcrincon error", channel)); 2843 } 2844 if (cs.bits.hdw.rcrfull) { 2845 rdc_stats->rcrfull++; 2846 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2847 NXGE_FM_EREPORT_RDMC_RCRFULL); 2848 rxchan_fatal = B_TRUE; 2849 if (rdc_stats->rcrfull < error_disp_cnt) 2850 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2851 "==> nxge_rx_err_evnts(channel %d): " 2852 "fatal error: rcrfull error", channel)); 2853 } 2854 if (cs.bits.hdw.rbr_empty) { 2855 /* 2856 * This bit is for information, there is no need 2857 * send FMA ereport or log a message. 
2858 */ 2859 rdc_stats->rbr_empty++; 2860 } 2861 if (cs.bits.hdw.rbrfull) { 2862 rdc_stats->rbrfull++; 2863 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2864 NXGE_FM_EREPORT_RDMC_RBRFULL); 2865 rxchan_fatal = B_TRUE; 2866 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2867 "==> nxge_rx_err_evnts(channel %d): " 2868 "fatal error: rbr_full error", channel)); 2869 } 2870 if (cs.bits.hdw.rbrlogpage) { 2871 rdc_stats->rbrlogpage++; 2872 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2873 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2874 rxchan_fatal = B_TRUE; 2875 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2876 "==> nxge_rx_err_evnts(channel %d): " 2877 "fatal error: rbr logical page error", channel)); 2878 } 2879 if (cs.bits.hdw.cfiglogpage) { 2880 rdc_stats->cfiglogpage++; 2881 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2882 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2883 rxchan_fatal = B_TRUE; 2884 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2885 "==> nxge_rx_err_evnts(channel %d): " 2886 "fatal error: cfig logical page error", channel)); 2887 } 2888 2889 if (rxport_fatal) { 2890 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2891 " nxge_rx_err_evnts: fatal error on Port #%d\n", 2892 portn)); 2893 if (isLDOMguest(nxgep)) { 2894 status = NXGE_ERROR; 2895 } else { 2896 status = nxge_ipp_fatal_err_recover(nxgep); 2897 if (status == NXGE_OK) { 2898 FM_SERVICE_RESTORED(nxgep); 2899 } 2900 } 2901 } 2902 2903 if (rxchan_fatal) { 2904 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2905 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 2906 channel)); 2907 if (isLDOMguest(nxgep)) { 2908 status = NXGE_ERROR; 2909 } else { 2910 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2911 if (status == NXGE_OK) { 2912 FM_SERVICE_RESTORED(nxgep); 2913 } 2914 } 2915 } 2916 2917 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2918 2919 return (status); 2920 } 2921 2922 /* 2923 * nxge_rdc_hvio_setup 2924 * 2925 * This code appears to setup some Hypervisor variables. 2926 * 2927 * Arguments: 2928 * nxgep 2929 * channel 2930 * 2931 * Notes: 2932 * What does NIU_LP_WORKAROUND mean? 
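 * It appears to gate the sun4v logical-page setup done through
 * hypervisor calls elsewhere in the driver; the buffer and control
 * base I/O addresses and sizes saved below are presumably consumed
 * by that code.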
2933 * 2934 * NPI/NXGE function calls: 2935 * na 2936 * 2937 * Context: 2938 * Any domain 2939 */ 2940 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2941 static void 2942 nxge_rdc_hvio_setup( 2943 nxge_t *nxgep, int channel) 2944 { 2945 nxge_dma_common_t *dma_common; 2946 nxge_dma_common_t *dma_control; 2947 rx_rbr_ring_t *ring; 2948 2949 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 2950 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 2951 2952 ring->hv_set = B_FALSE; 2953 2954 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 2955 dma_common->orig_ioaddr_pp; 2956 ring->hv_rx_buf_ioaddr_size = (uint64_t) 2957 dma_common->orig_alength; 2958 2959 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2960 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 2961 channel, ring->hv_rx_buf_base_ioaddr_pp, 2962 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 2963 dma_common->orig_alength, dma_common->orig_alength)); 2964 2965 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 2966 2967 ring->hv_rx_cntl_base_ioaddr_pp = 2968 (uint64_t)dma_control->orig_ioaddr_pp; 2969 ring->hv_rx_cntl_ioaddr_size = 2970 (uint64_t)dma_control->orig_alength; 2971 2972 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 2973 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 2974 channel, ring->hv_rx_cntl_base_ioaddr_pp, 2975 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 2976 dma_control->orig_alength, dma_control->orig_alength)); 2977 } 2978 #endif 2979 2980 /* 2981 * nxge_map_rxdma 2982 * 2983 * Map an RDC into our kernel space. 2984 * 2985 * Arguments: 2986 * nxgep 2987 * channel The channel to map. 2988 * 2989 * Notes: 2990 * 1. Allocate & initialise a memory pool, if necessary. 2991 * 2. Allocate however many receive buffers are required. 2992 * 3. Setup buffers, descriptors, and mailbox. 2993 * 2994 * NPI/NXGE function calls: 2995 * nxge_alloc_rx_mem_pool() 2996 * nxge_alloc_rbb() 2997 * nxge_map_rxdma_channel() 2998 * 2999 * Registers accessed: 3000 * 3001 * Context: 3002 * Any domain 3003 */ 3004 static nxge_status_t 3005 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3006 { 3007 nxge_dma_common_t **data; 3008 nxge_dma_common_t **control; 3009 rx_rbr_ring_t **rbr_ring; 3010 rx_rcr_ring_t **rcr_ring; 3011 rx_mbox_t **mailbox; 3012 uint32_t chunks; 3013 3014 nxge_status_t status; 3015 3016 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3017 3018 if (!nxgep->rx_buf_pool_p) { 3019 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3020 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3021 "<== nxge_map_rxdma: buf not allocated")); 3022 return (NXGE_ERROR); 3023 } 3024 } 3025 3026 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3027 return (NXGE_ERROR); 3028 3029 /* 3030 * Timeout should be set based on the system clock divider. 3031 * The following timeout value of 1 assumes that the 3032 * granularity (1000) is 3 microseconds running at 300MHz. 3033 */ 3034 3035 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 3036 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 3037 3038 /* 3039 * Map descriptors from the buffer polls for each dma channel. 3040 */ 3041 3042 /* 3043 * Set up and prepare buffer blocks, descriptors 3044 * and mailbox. 
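 * 'data' and 'control' below point at this channel's entries in the
 * receive buffer and control DMA pools; 'chunks' is the number of
 * memory chunks backing the channel's receive buffers.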
3045 */ 3046 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3047 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3048 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3049 3050 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3051 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3052 3053 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3054 3055 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3056 chunks, control, rcr_ring, mailbox); 3057 if (status != NXGE_OK) { 3058 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3059 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3060 "returned 0x%x", 3061 channel, status)); 3062 return (status); 3063 } 3064 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3065 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3066 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3067 &nxgep->statsp->rdc_stats[channel]; 3068 3069 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3070 if (!isLDOMguest(nxgep)) 3071 nxge_rdc_hvio_setup(nxgep, channel); 3072 #endif 3073 3074 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3075 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3076 3077 return (status); 3078 } 3079 3080 static void 3081 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3082 { 3083 rx_rbr_ring_t *rbr_ring; 3084 rx_rcr_ring_t *rcr_ring; 3085 rx_mbox_t *mailbox; 3086 3087 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3088 3089 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3090 !nxgep->rx_mbox_areas_p) 3091 return; 3092 3093 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3094 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3095 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3096 3097 if (!rbr_ring || !rcr_ring || !mailbox) 3098 return; 3099 3100 (void) nxge_unmap_rxdma_channel( 3101 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3102 3103 nxge_free_rxb(nxgep, channel); 3104 3105 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3106 } 3107 3108 nxge_status_t 3109 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3110 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3111 uint32_t num_chunks, 3112 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3113 p_rx_mbox_t *rx_mbox_p) 3114 { 3115 int status = NXGE_OK; 3116 3117 /* 3118 * Set up and prepare buffer blocks, descriptors 3119 * and mailbox. 3120 */ 3121 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3122 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3123 /* 3124 * Receive buffer blocks 3125 */ 3126 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3127 dma_buf_p, rbr_p, num_chunks); 3128 if (status != NXGE_OK) { 3129 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3130 "==> nxge_map_rxdma_channel (channel %d): " 3131 "map buffer failed 0x%x", channel, status)); 3132 goto nxge_map_rxdma_channel_exit; 3133 } 3134 3135 /* 3136 * Receive block ring, completion ring and mailbox. 
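 * If this step fails, the buffer ring mapped above is unwound via
 * the nxge_map_rxdma_channel_fail2 path.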
3137 */ 3138 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3139 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3140 if (status != NXGE_OK) { 3141 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3142 "==> nxge_map_rxdma_channel (channel %d): " 3143 "map config failed 0x%x", channel, status)); 3144 goto nxge_map_rxdma_channel_fail2; 3145 } 3146 3147 goto nxge_map_rxdma_channel_exit; 3148 3149 nxge_map_rxdma_channel_fail3: 3150 /* Free rbr, rcr */ 3151 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3152 "==> nxge_map_rxdma_channel: free rbr/rcr " 3153 "(status 0x%x channel %d)", 3154 status, channel)); 3155 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3156 *rcr_p, *rx_mbox_p); 3157 3158 nxge_map_rxdma_channel_fail2: 3159 /* Free buffer blocks */ 3160 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3161 "==> nxge_map_rxdma_channel: free rx buffers" 3162 "(nxgep 0x%x status 0x%x channel %d)", 3163 nxgep, status, channel)); 3164 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3165 3166 status = NXGE_ERROR; 3167 3168 nxge_map_rxdma_channel_exit: 3169 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3170 "<== nxge_map_rxdma_channel: " 3171 "(nxgep 0x%x status 0x%x channel %d)", 3172 nxgep, status, channel)); 3173 3174 return (status); 3175 } 3176 3177 /*ARGSUSED*/ 3178 static void 3179 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3180 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3181 { 3182 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3183 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3184 3185 /* 3186 * unmap receive block ring, completion ring and mailbox. 3187 */ 3188 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3189 rcr_p, rx_mbox_p); 3190 3191 /* unmap buffer blocks */ 3192 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3193 3194 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3195 } 3196 3197 /*ARGSUSED*/ 3198 static nxge_status_t 3199 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3200 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3201 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3202 { 3203 p_rx_rbr_ring_t rbrp; 3204 p_rx_rcr_ring_t rcrp; 3205 p_rx_mbox_t mboxp; 3206 p_nxge_dma_common_t cntl_dmap; 3207 p_nxge_dma_common_t dmap; 3208 p_rx_msg_t *rx_msg_ring; 3209 p_rx_msg_t rx_msg_p; 3210 p_rbr_cfig_a_t rcfga_p; 3211 p_rbr_cfig_b_t rcfgb_p; 3212 p_rcrcfig_a_t cfga_p; 3213 p_rcrcfig_b_t cfgb_p; 3214 p_rxdma_cfig1_t cfig1_p; 3215 p_rxdma_cfig2_t cfig2_p; 3216 p_rbr_kick_t kick_p; 3217 uint32_t dmaaddrp; 3218 uint32_t *rbr_vaddrp; 3219 uint32_t bkaddr; 3220 nxge_status_t status = NXGE_OK; 3221 int i; 3222 uint32_t nxge_port_rcr_size; 3223 3224 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3225 "==> nxge_map_rxdma_channel_cfg_ring")); 3226 3227 cntl_dmap = *dma_cntl_p; 3228 3229 /* Map in the receive block ring */ 3230 rbrp = *rbr_p; 3231 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3232 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3233 /* 3234 * Zero out buffer block ring descriptors. 
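 * (dmap describes the RBR descriptor area that nxge_setup_dma_common()
 * above carved out of the control DMA space: rbb_max entries of
 * 4 bytes each.)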
3235 */ 3236 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3237 3238 rcfga_p = &(rbrp->rbr_cfga); 3239 rcfgb_p = &(rbrp->rbr_cfgb); 3240 kick_p = &(rbrp->rbr_kick); 3241 rcfga_p->value = 0; 3242 rcfgb_p->value = 0; 3243 kick_p->value = 0; 3244 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3245 rcfga_p->value = (rbrp->rbr_addr & 3246 (RBR_CFIG_A_STDADDR_MASK | 3247 RBR_CFIG_A_STDADDR_BASE_MASK)); 3248 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3249 3250 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3251 rcfgb_p->bits.ldw.vld0 = 1; 3252 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3253 rcfgb_p->bits.ldw.vld1 = 1; 3254 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3255 rcfgb_p->bits.ldw.vld2 = 1; 3256 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3257 3258 /* 3259 * For each buffer block, enter receive block address to the ring. 3260 */ 3261 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3262 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3263 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3264 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3265 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3266 3267 rx_msg_ring = rbrp->rx_msg_ring; 3268 for (i = 0; i < rbrp->tnblocks; i++) { 3269 rx_msg_p = rx_msg_ring[i]; 3270 rx_msg_p->nxgep = nxgep; 3271 rx_msg_p->rx_rbr_p = rbrp; 3272 bkaddr = (uint32_t) 3273 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3274 >> RBR_BKADDR_SHIFT)); 3275 rx_msg_p->free = B_FALSE; 3276 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3277 3278 *rbr_vaddrp++ = bkaddr; 3279 } 3280 3281 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3282 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3283 3284 rbrp->rbr_rd_index = 0; 3285 3286 rbrp->rbr_consumed = 0; 3287 rbrp->rbr_use_bcopy = B_TRUE; 3288 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3289 /* 3290 * Do bcopy on packets greater than bcopy size once 3291 * the lo threshold is reached. 3292 * This lo threshold should be less than the hi threshold. 3293 * 3294 * Do bcopy on every packet once the hi threshold is reached. 
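 * For example, with rbb_max of 2048 and nxge_rx_threshold_hi set to
 * NXGE_RX_COPY_6, the switch below yields
 * rbr_threshold_hi = 2048 * 6 / NXGE_RX_BCOPY_SCALE (1536 consumed
 * buffers if the scale is 8, which is assumed here for illustration);
 * NXGE_RX_COPY_ALL maps to 0 and NXGE_RX_COPY_NONE disables bcopy.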
3295 */ 3296 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3297 /* default it to use hi */ 3298 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3299 } 3300 3301 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3302 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3303 } 3304 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3305 3306 switch (nxge_rx_threshold_hi) { 3307 default: 3308 case NXGE_RX_COPY_NONE: 3309 /* Do not do bcopy at all */ 3310 rbrp->rbr_use_bcopy = B_FALSE; 3311 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3312 break; 3313 3314 case NXGE_RX_COPY_1: 3315 case NXGE_RX_COPY_2: 3316 case NXGE_RX_COPY_3: 3317 case NXGE_RX_COPY_4: 3318 case NXGE_RX_COPY_5: 3319 case NXGE_RX_COPY_6: 3320 case NXGE_RX_COPY_7: 3321 rbrp->rbr_threshold_hi = 3322 rbrp->rbb_max * 3323 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3324 break; 3325 3326 case NXGE_RX_COPY_ALL: 3327 rbrp->rbr_threshold_hi = 0; 3328 break; 3329 } 3330 3331 switch (nxge_rx_threshold_lo) { 3332 default: 3333 case NXGE_RX_COPY_NONE: 3334 /* Do not do bcopy at all */ 3335 if (rbrp->rbr_use_bcopy) { 3336 rbrp->rbr_use_bcopy = B_FALSE; 3337 } 3338 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3339 break; 3340 3341 case NXGE_RX_COPY_1: 3342 case NXGE_RX_COPY_2: 3343 case NXGE_RX_COPY_3: 3344 case NXGE_RX_COPY_4: 3345 case NXGE_RX_COPY_5: 3346 case NXGE_RX_COPY_6: 3347 case NXGE_RX_COPY_7: 3348 rbrp->rbr_threshold_lo = 3349 rbrp->rbb_max * 3350 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3351 break; 3352 3353 case NXGE_RX_COPY_ALL: 3354 rbrp->rbr_threshold_lo = 0; 3355 break; 3356 } 3357 3358 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3359 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3360 "rbb_max %d " 3361 "rbrp->rbr_bufsize_type %d " 3362 "rbb_threshold_hi %d " 3363 "rbb_threshold_lo %d", 3364 dma_channel, 3365 rbrp->rbb_max, 3366 rbrp->rbr_bufsize_type, 3367 rbrp->rbr_threshold_hi, 3368 rbrp->rbr_threshold_lo)); 3369 3370 rbrp->page_valid.value = 0; 3371 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3372 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3373 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3374 rbrp->page_hdl.value = 0; 3375 3376 rbrp->page_valid.bits.ldw.page0 = 1; 3377 rbrp->page_valid.bits.ldw.page1 = 1; 3378 3379 /* Map in the receive completion ring */ 3380 rcrp = (p_rx_rcr_ring_t) 3381 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3382 rcrp->rdc = dma_channel; 3383 3384 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3385 rcrp->comp_size = nxge_port_rcr_size; 3386 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3387 3388 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3389 3390 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3391 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3392 sizeof (rcr_entry_t)); 3393 rcrp->comp_rd_index = 0; 3394 rcrp->comp_wt_index = 0; 3395 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3396 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3397 #if defined(__i386) 3398 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3399 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3400 #else 3401 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3402 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3403 #endif 3404 3405 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3406 (nxge_port_rcr_size - 1); 3407 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3408 (nxge_port_rcr_size - 1); 3409 3410 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3411 "==> nxge_map_rxdma_channel_cfg_ring: " 3412 "channel %d " 3413 "rbr_vaddrp $%p " 3414 "rcr_desc_rd_head_p $%p " 3415 "rcr_desc_rd_head_pp 
$%p " 3416 "rcr_desc_rd_last_p $%p " 3417 "rcr_desc_rd_last_pp $%p ", 3418 dma_channel, 3419 rbr_vaddrp, 3420 rcrp->rcr_desc_rd_head_p, 3421 rcrp->rcr_desc_rd_head_pp, 3422 rcrp->rcr_desc_last_p, 3423 rcrp->rcr_desc_last_pp)); 3424 3425 /* 3426 * Zero out buffer block ring descriptors. 3427 */ 3428 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3429 rcrp->intr_timeout = nxgep->intr_timeout; 3430 rcrp->intr_threshold = nxgep->intr_threshold; 3431 rcrp->full_hdr_flag = B_FALSE; 3432 rcrp->sw_priv_hdr_len = 0; 3433 3434 cfga_p = &(rcrp->rcr_cfga); 3435 cfgb_p = &(rcrp->rcr_cfgb); 3436 cfga_p->value = 0; 3437 cfgb_p->value = 0; 3438 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3439 cfga_p->value = (rcrp->rcr_addr & 3440 (RCRCFIG_A_STADDR_MASK | 3441 RCRCFIG_A_STADDR_BASE_MASK)); 3442 3443 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3444 RCRCFIG_A_LEN_SHIF); 3445 3446 /* 3447 * Timeout should be set based on the system clock divider. 3448 * The following timeout value of 1 assumes that the 3449 * granularity (1000) is 3 microseconds running at 300MHz. 3450 */ 3451 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3452 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3453 cfgb_p->bits.ldw.entout = 1; 3454 3455 /* Map in the mailbox */ 3456 mboxp = (p_rx_mbox_t) 3457 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3458 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3459 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3460 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3461 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3462 cfig1_p->value = cfig2_p->value = 0; 3463 3464 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3465 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3466 "==> nxge_map_rxdma_channel_cfg_ring: " 3467 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3468 dma_channel, cfig1_p->value, cfig2_p->value, 3469 mboxp->mbox_addr)); 3470 3471 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3472 & 0xfff); 3473 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3474 3475 3476 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3477 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3478 RXDMA_CFIG2_MBADDR_L_MASK); 3479 3480 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3481 3482 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3483 "==> nxge_map_rxdma_channel_cfg_ring: " 3484 "channel %d damaddrp $%p " 3485 "cfg1 0x%016llx cfig2 0x%016llx", 3486 dma_channel, dmaaddrp, 3487 cfig1_p->value, cfig2_p->value)); 3488 3489 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3490 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3491 3492 rbrp->rx_rcr_p = rcrp; 3493 rcrp->rx_rbr_p = rbrp; 3494 *rcr_p = rcrp; 3495 *rx_mbox_p = mboxp; 3496 3497 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3498 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3499 3500 return (status); 3501 } 3502 3503 /*ARGSUSED*/ 3504 static void 3505 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3506 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3507 { 3508 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3509 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3510 rcr_p->rdc)); 3511 3512 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3513 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3514 3515 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3516 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3517 } 3518 3519 static nxge_status_t 3520 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3521 p_nxge_dma_common_t *dma_buf_p, 3522 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3523 { 3524 p_rx_rbr_ring_t rbrp; 3525 
p_nxge_dma_common_t dma_bufp, tmp_bufp; 3526 p_rx_msg_t *rx_msg_ring; 3527 p_rx_msg_t rx_msg_p; 3528 p_mblk_t mblk_p; 3529 3530 rxring_info_t *ring_info; 3531 nxge_status_t status = NXGE_OK; 3532 int i, j, index; 3533 uint32_t size, bsize, nblocks, nmsgs; 3534 3535 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3536 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3537 channel)); 3538 3539 dma_bufp = tmp_bufp = *dma_buf_p; 3540 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3541 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3542 "chunks bufp 0x%016llx", 3543 channel, num_chunks, dma_bufp)); 3544 3545 nmsgs = 0; 3546 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3547 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3548 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3549 "bufp 0x%016llx nblocks %d nmsgs %d", 3550 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3551 nmsgs += tmp_bufp->nblocks; 3552 } 3553 if (!nmsgs) { 3554 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3555 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3556 "no msg blocks", 3557 channel)); 3558 status = NXGE_ERROR; 3559 goto nxge_map_rxdma_channel_buf_ring_exit; 3560 } 3561 3562 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3563 3564 size = nmsgs * sizeof (p_rx_msg_t); 3565 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3566 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3567 KM_SLEEP); 3568 3569 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3570 (void *)nxgep->interrupt_cookie); 3571 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3572 (void *)nxgep->interrupt_cookie); 3573 rbrp->rdc = channel; 3574 rbrp->num_blocks = num_chunks; 3575 rbrp->tnblocks = nmsgs; 3576 rbrp->rbb_max = nmsgs; 3577 rbrp->rbr_max_size = nmsgs; 3578 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3579 3580 /* 3581 * Buffer sizes suggested by NIU architect: 3582 * 256 bytes, 1K and 2K (4K or 8K when jumbo frames are enabled). 3583 */ 3584 3585 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3586 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3587 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3588 3589 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3590 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3591 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3592 3593 rbrp->block_size = nxgep->rx_default_block_size; 3594 3595 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3596 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3597 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3598 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3599 } else { 3600 if (rbrp->block_size >= 0x2000) { 3601 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3602 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3603 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3604 } else { 3605 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3606 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3607 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3608 } 3609 } 3610 3611 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3612 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3613 "actual rbr max %d rbb_max %d nmsgs %d " 3614 "rbrp->block_size %d default_block_size %d " 3615 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3616 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3617 rbrp->block_size, nxgep->rx_default_block_size, 3618 nxge_rbr_size, nxge_rbr_spare_size)); 3619 3620 /* Map in buffers from the buffer pool.
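 * Each DMA chunk contributes 'nblocks' blocks; every block is wrapped
 * in an rx_msg_t by nxge_allocb(), and ring_info records each chunk's
 * DVMA range and kernel address for the packet-address lookups done
 * at receive time (nxge_rxbuf_pp_to_vp()).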
*/ 3621 index = 0; 3622 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3623 bsize = dma_bufp->block_size; 3624 nblocks = dma_bufp->nblocks; 3625 #if defined(__i386) 3626 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3627 #else 3628 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3629 #endif 3630 ring_info->buffer[i].buf_index = i; 3631 ring_info->buffer[i].buf_size = dma_bufp->alength; 3632 ring_info->buffer[i].start_index = index; 3633 #if defined(__i386) 3634 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3635 #else 3636 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3637 #endif 3638 3639 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3640 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3641 "chunk %d" 3642 " nblocks %d chunk_size %x block_size 0x%x " 3643 "dma_bufp $%p", channel, i, 3644 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3645 dma_bufp)); 3646 3647 for (j = 0; j < nblocks; j++) { 3648 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3649 dma_bufp)) == NULL) { 3650 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3651 "allocb failed (index %d i %d j %d)", 3652 index, i, j)); 3653 goto nxge_map_rxdma_channel_buf_ring_fail1; 3654 } 3655 rx_msg_ring[index] = rx_msg_p; 3656 rx_msg_p->block_index = index; 3657 rx_msg_p->shifted_addr = (uint32_t) 3658 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3659 RBR_BKADDR_SHIFT)); 3660 3661 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3662 "index %d j %d rx_msg_p $%p mblk %p", 3663 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3664 3665 mblk_p = rx_msg_p->rx_mblk_p; 3666 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3667 3668 rbrp->rbr_ref_cnt++; 3669 index++; 3670 rx_msg_p->buf_dma.dma_channel = channel; 3671 } 3672 3673 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3674 if (dma_bufp->contig_alloc_type) { 3675 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3676 } 3677 3678 if (dma_bufp->kmem_alloc_type) { 3679 rbrp->rbr_alloc_type = KMEM_ALLOC; 3680 } 3681 3682 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3683 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3684 "chunk %d" 3685 " nblocks %d chunk_size %x block_size 0x%x " 3686 "dma_bufp $%p", 3687 channel, i, 3688 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3689 dma_bufp)); 3690 } 3691 if (i < rbrp->num_blocks) { 3692 goto nxge_map_rxdma_channel_buf_ring_fail1; 3693 } 3694 3695 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3696 "nxge_map_rxdma_channel_buf_ring: done buf init " 3697 "channel %d msg block entries %d", 3698 channel, index)); 3699 ring_info->block_size_mask = bsize - 1; 3700 rbrp->rx_msg_ring = rx_msg_ring; 3701 rbrp->dma_bufp = dma_buf_p; 3702 rbrp->ring_info = ring_info; 3703 3704 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3705 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3706 " nxge_map_rxdma_channel_buf_ring: " 3707 "channel %d done buf info init", channel)); 3708 3709 /* 3710 * Finally, permit nxge_freeb() to call nxge_post_page(). 
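 * rbr_ref_cnt was bumped once per block above; nxge_freeb() checks
 * rbr_state before reposting a page, so setting RBR_POSTING here is
 * what opens that gate.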
3711 */ 3712 rbrp->rbr_state = RBR_POSTING; 3713 3714 *rbr_p = rbrp; 3715 goto nxge_map_rxdma_channel_buf_ring_exit; 3716 3717 nxge_map_rxdma_channel_buf_ring_fail1: 3718 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3719 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3720 channel, status)); 3721 3722 index--; 3723 for (; index >= 0; index--) { 3724 rx_msg_p = rx_msg_ring[index]; 3725 if (rx_msg_p != NULL) { 3726 freeb(rx_msg_p->rx_mblk_p); 3727 rx_msg_ring[index] = NULL; 3728 } 3729 } 3730 nxge_map_rxdma_channel_buf_ring_fail: 3731 MUTEX_DESTROY(&rbrp->post_lock); 3732 MUTEX_DESTROY(&rbrp->lock); 3733 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3734 KMEM_FREE(rx_msg_ring, size); 3735 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3736 3737 status = NXGE_ERROR; 3738 3739 nxge_map_rxdma_channel_buf_ring_exit: 3740 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3741 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3742 3743 return (status); 3744 } 3745 3746 /*ARGSUSED*/ 3747 static void 3748 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3749 p_rx_rbr_ring_t rbr_p) 3750 { 3751 p_rx_msg_t *rx_msg_ring; 3752 p_rx_msg_t rx_msg_p; 3753 rxring_info_t *ring_info; 3754 int i; 3755 uint32_t size; 3756 #ifdef NXGE_DEBUG 3757 int num_chunks; 3758 #endif 3759 3760 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3761 "==> nxge_unmap_rxdma_channel_buf_ring")); 3762 if (rbr_p == NULL) { 3763 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3764 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3765 return; 3766 } 3767 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3768 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3769 rbr_p->rdc)); 3770 3771 rx_msg_ring = rbr_p->rx_msg_ring; 3772 ring_info = rbr_p->ring_info; 3773 3774 if (rx_msg_ring == NULL || ring_info == NULL) { 3775 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3776 "<== nxge_unmap_rxdma_channel_buf_ring: " 3777 "rx_msg_ring $%p ring_info $%p", 3778 rx_msg_p, ring_info)); 3779 return; 3780 } 3781 3782 #ifdef NXGE_DEBUG 3783 num_chunks = rbr_p->num_blocks; 3784 #endif 3785 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3786 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3787 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3788 "tnblocks %d (max %d) size ptrs %d ", 3789 rbr_p->rdc, num_chunks, 3790 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3791 3792 for (i = 0; i < rbr_p->tnblocks; i++) { 3793 rx_msg_p = rx_msg_ring[i]; 3794 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3795 " nxge_unmap_rxdma_channel_buf_ring: " 3796 "rx_msg_p $%p", 3797 rx_msg_p)); 3798 if (rx_msg_p != NULL) { 3799 freeb(rx_msg_p->rx_mblk_p); 3800 rx_msg_ring[i] = NULL; 3801 } 3802 } 3803 3804 /* 3805 * We no longer may use the mutex <post_lock>. By setting 3806 * <rbr_state> to anything but POSTING, we prevent 3807 * nxge_post_page() from accessing a dead mutex. 3808 */ 3809 rbr_p->rbr_state = RBR_UNMAPPING; 3810 MUTEX_DESTROY(&rbr_p->post_lock); 3811 3812 MUTEX_DESTROY(&rbr_p->lock); 3813 3814 if (rbr_p->rbr_ref_cnt == 0) { 3815 /* 3816 * This is the normal state of affairs. 3817 * Need to free the following buffers: 3818 * - data buffers 3819 * - rx_msg ring 3820 * - ring_info 3821 * - rbr ring 3822 */ 3823 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3824 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 3825 nxge_rxdma_databuf_free(rbr_p); 3826 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3827 KMEM_FREE(rx_msg_ring, size); 3828 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3829 } else { 3830 /* 3831 * Some of our buffers are still being used. 3832 * Therefore, tell nxge_freeb() this ring is 3833 * unmapped, so it may free <rbr_p> for us. 
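 * (Presumably the nxge_freeb() call that drops rbr_ref_cnt to zero
 * then frees the data buffers, rx_msg ring, ring_info and <rbr_p>
 * itself, mirroring the immediate-free path above.)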
3834 */ 3835 rbr_p->rbr_state = RBR_UNMAPPED; 3836 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3837 "unmap_rxdma_buf_ring: %d %s outstanding.", 3838 rbr_p->rbr_ref_cnt, 3839 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 3840 } 3841 3842 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3843 "<== nxge_unmap_rxdma_channel_buf_ring")); 3844 } 3845 3846 /* 3847 * nxge_rxdma_hw_start_common 3848 * 3849 * Arguments: 3850 * nxgep 3851 * 3852 * Notes: 3853 * 3854 * NPI/NXGE function calls: 3855 * nxge_init_fzc_rx_common(); 3856 * nxge_init_fzc_rxdma_port(); 3857 * 3858 * Registers accessed: 3859 * 3860 * Context: 3861 * Service domain 3862 */ 3863 static nxge_status_t 3864 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3865 { 3866 nxge_status_t status = NXGE_OK; 3867 3868 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3869 3870 /* 3871 * Load the sharable parameters by writing to the 3872 * function zero control registers. These FZC registers 3873 * should be initialized only once for the entire chip. 3874 */ 3875 (void) nxge_init_fzc_rx_common(nxgep); 3876 3877 /* 3878 * Initialize the RXDMA port specific FZC control configurations. 3879 * These FZC registers are pertaining to each port. 3880 */ 3881 (void) nxge_init_fzc_rxdma_port(nxgep); 3882 3883 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3884 3885 return (status); 3886 } 3887 3888 static nxge_status_t 3889 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 3890 { 3891 int i, ndmas; 3892 p_rx_rbr_rings_t rx_rbr_rings; 3893 p_rx_rbr_ring_t *rbr_rings; 3894 p_rx_rcr_rings_t rx_rcr_rings; 3895 p_rx_rcr_ring_t *rcr_rings; 3896 p_rx_mbox_areas_t rx_mbox_areas_p; 3897 p_rx_mbox_t *rx_mbox_p; 3898 nxge_status_t status = NXGE_OK; 3899 3900 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3901 3902 rx_rbr_rings = nxgep->rx_rbr_rings; 3903 rx_rcr_rings = nxgep->rx_rcr_rings; 3904 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3905 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3906 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3907 return (NXGE_ERROR); 3908 } 3909 ndmas = rx_rbr_rings->ndmas; 3910 if (ndmas == 0) { 3911 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3912 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3913 return (NXGE_ERROR); 3914 } 3915 3916 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3917 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3918 3919 rbr_rings = rx_rbr_rings->rbr_rings; 3920 rcr_rings = rx_rcr_rings->rcr_rings; 3921 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3922 if (rx_mbox_areas_p) { 3923 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3924 } 3925 3926 i = channel; 3927 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3928 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3929 ndmas, channel)); 3930 status = nxge_rxdma_start_channel(nxgep, channel, 3931 (p_rx_rbr_ring_t)rbr_rings[i], 3932 (p_rx_rcr_ring_t)rcr_rings[i], 3933 (p_rx_mbox_t)rx_mbox_p[i]); 3934 if (status != NXGE_OK) { 3935 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3936 "==> nxge_rxdma_hw_start: disable " 3937 "(status 0x%x channel %d)", status, channel)); 3938 return (status); 3939 } 3940 3941 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3942 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3943 rx_rbr_rings, rx_rcr_rings)); 3944 3945 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3946 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3947 3948 return (status); 3949 } 3950 3951 static void 3952 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 3953 { 3954 p_rx_rbr_rings_t rx_rbr_rings; 3955 p_rx_rcr_rings_t rx_rcr_rings; 3956 3957 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
nxge_rxdma_hw_stop")); 3958 3959 rx_rbr_rings = nxgep->rx_rbr_rings; 3960 rx_rcr_rings = nxgep->rx_rcr_rings; 3961 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3962 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3963 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 3964 return; 3965 } 3966 3967 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3968 "==> nxge_rxdma_hw_stop(channel %d)", 3969 channel)); 3970 (void) nxge_rxdma_stop_channel(nxgep, channel); 3971 3972 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3973 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3974 rx_rbr_rings, rx_rcr_rings)); 3975 3976 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 3977 } 3978 3979 3980 static nxge_status_t 3981 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 3982 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3983 3984 { 3985 npi_handle_t handle; 3986 npi_status_t rs = NPI_SUCCESS; 3987 rx_dma_ctl_stat_t cs; 3988 rx_dma_ent_msk_t ent_mask; 3989 nxge_status_t status = NXGE_OK; 3990 3991 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 3992 3993 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3994 3995 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 3996 "npi handle addr $%p acc $%p", 3997 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 3998 3999 /* Reset RXDMA channel, but not if you're a guest. */ 4000 if (!isLDOMguest(nxgep)) { 4001 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4002 if (rs != NPI_SUCCESS) { 4003 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4004 "==> nxge_init_fzc_rdc: " 4005 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4006 channel, rs)); 4007 return (NXGE_ERROR | rs); 4008 } 4009 4010 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4011 "==> nxge_rxdma_start_channel: reset done: channel %d", 4012 channel)); 4013 } 4014 4015 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4016 if (isLDOMguest(nxgep)) 4017 (void) nxge_rdc_lp_conf(nxgep, channel); 4018 #endif 4019 4020 /* 4021 * Initialize the RXDMA channel specific FZC control 4022 * configurations. These FZC registers are pertaining 4023 * to each RX channel (logical pages). 4024 */ 4025 if (!isLDOMguest(nxgep)) { 4026 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4027 if (status != NXGE_OK) { 4028 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4029 "==> nxge_rxdma_start_channel: " 4030 "init fzc rxdma failed (0x%08x channel %d)", 4031 status, channel)); 4032 return (status); 4033 } 4034 4035 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4036 "==> nxge_rxdma_start_channel: fzc done")); 4037 } 4038 4039 /* Set up the interrupt event masks. 
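Only the RBR-empty event is masked off at this point; all other events remain enabled so that problems during channel bring-up are still reported. The mask is rewritten further below, once the channel has been enabled, to mask only the WRED and port-drop events.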
*/ 4040 ent_mask.value = 0; 4041 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 4042 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4043 &ent_mask); 4044 if (rs != NPI_SUCCESS) { 4045 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4046 "==> nxge_rxdma_start_channel: " 4047 "init rxdma event masks failed " 4048 "(0x%08x channel %d)", 4049 rs, channel)); 4050 return (NXGE_ERROR | rs); 4051 } 4052 4053 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4054 "==> nxge_rxdma_start_channel: " 4055 "event done: channel %d (mask 0x%016llx)", 4056 channel, ent_mask.value)); 4057 4058 /* Initialize the receive DMA control and status register */ 4059 cs.value = 0; 4060 cs.bits.hdw.mex = 1; 4061 cs.bits.hdw.rcrthres = 1; 4062 cs.bits.hdw.rcrto = 1; 4063 cs.bits.hdw.rbr_empty = 1; 4064 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4065 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4066 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value)); 4067 if (status != NXGE_OK) { 4068 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4069 "==> nxge_rxdma_start_channel: " 4070 "init rxdma control register failed (0x%08x channel %d)", 4071 status, channel)); 4072 return (status); 4073 } 4074 4075 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4076 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4077 4078 /* 4079 * Load RXDMA descriptors, buffers, mailbox, 4080 * initialize the receive DMA channels and 4081 * enable each DMA channel. 4082 */ 4083 status = nxge_enable_rxdma_channel(nxgep, 4084 channel, rbr_p, rcr_p, mbox_p); 4085 4086 if (status != NXGE_OK) { 4087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4088 " nxge_rxdma_start_channel: " 4089 " enable rxdma failed (0x%08x channel %d)", 4090 status, channel)); 4091 return (status); 4092 } 4093 4094 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4095 "==> nxge_rxdma_start_channel: enabled channel %d", channel)); 4096 4097 if (isLDOMguest(nxgep)) { 4098 /* Add interrupt handler for this channel. 
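A guest domain has no access to the function-zero interrupt resources, so the handler is registered through the Hybrid I/O support code (nxge_hio_intr_add()) instead.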
*/ 4099 if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4100 != NXGE_OK) { 4101 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4102 " nxge_rxdma_start_channel: " 4103 " nxge_hio_intr_add failed (channel %d)", 4104 channel)); 4105 } 4106 } 4107 4108 ent_mask.value = 0; 4109 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4110 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4111 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4112 &ent_mask); 4113 if (rs != NPI_SUCCESS) { 4114 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4115 "==> nxge_rxdma_start_channel: " 4116 "init rxdma event masks failed (0x%08x channel %d)", 4117 rs, channel)); 4118 return (NXGE_ERROR | rs); 4119 } 4120 4121 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4122 "final event mask done - channel %d cs 0x%016llx", channel, cs.value)); 4123 4124 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4125 4126 return (NXGE_OK); 4127 } 4128 4129 static nxge_status_t 4130 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4131 { 4132 npi_handle_t handle; 4133 npi_status_t rs = NPI_SUCCESS; 4134 rx_dma_ctl_stat_t cs; 4135 rx_dma_ent_msk_t ent_mask; 4136 nxge_status_t status = NXGE_OK; 4137 4138 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4139 4140 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4141 4142 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4143 "npi handle addr $%p acc $%p", 4144 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4145 4146 /* Reset RXDMA channel */ 4147 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4148 if (rs != NPI_SUCCESS) { 4149 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4150 " nxge_rxdma_stop_channel: " 4151 " reset rxdma failed (0x%08x channel %d)", 4152 rs, channel)); 4153 return (NXGE_ERROR | rs); 4154 } 4155 4156 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4157 "==> nxge_rxdma_stop_channel: reset done")); 4158 4159 /* Set up the interrupt event masks. 
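All events are masked (RX_DMA_ENT_MSK_ALL) so that the channel raises no further interrupts while it is being quiesced and disabled.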
*/ 4160 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4161 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4162 &ent_mask); 4163 if (rs != NPI_SUCCESS) { 4164 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4165 "==> nxge_rxdma_stop_channel: " 4166 "set rxdma event masks failed (0x%08x channel %d)", 4167 rs, channel)); 4168 return (NXGE_ERROR | rs); 4169 } 4170 4171 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4172 "==> nxge_rxdma_stop_channel: event done")); 4173 4174 /* Initialize the receive DMA control and status register */ 4175 cs.value = 0; 4176 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, 4177 &cs); 4178 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4179 " to default (all 0s) 0x%08x", cs.value)); 4180 if (status != NXGE_OK) { 4181 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4182 " nxge_rxdma_stop_channel: init rxdma" 4183 " control register failed (0x%08x channel %d", 4184 status, channel)); 4185 return (status); 4186 } 4187 4188 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4189 "==> nxge_rxdma_stop_channel: control done")); 4190 4191 /* disable dma channel */ 4192 status = nxge_disable_rxdma_channel(nxgep, channel); 4193 4194 if (status != NXGE_OK) { 4195 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4196 " nxge_rxdma_stop_channel: " 4197 " init enable rxdma failed (0x%08x channel %d)", 4198 status, channel)); 4199 return (status); 4200 } 4201 4202 NXGE_DEBUG_MSG((nxgep, 4203 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4204 4205 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4206 4207 return (NXGE_OK); 4208 } 4209 4210 nxge_status_t 4211 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4212 { 4213 npi_handle_t handle; 4214 p_nxge_rdc_sys_stats_t statsp; 4215 rx_ctl_dat_fifo_stat_t stat; 4216 uint32_t zcp_err_status; 4217 uint32_t ipp_err_status; 4218 nxge_status_t status = NXGE_OK; 4219 npi_status_t rs = NPI_SUCCESS; 4220 boolean_t my_err = B_FALSE; 4221 4222 handle = nxgep->npi_handle; 4223 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4224 4225 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4226 4227 if (rs != NPI_SUCCESS) 4228 return (NXGE_ERROR | rs); 4229 4230 if (stat.bits.ldw.id_mismatch) { 4231 statsp->id_mismatch++; 4232 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4233 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4234 /* Global fatal error encountered */ 4235 } 4236 4237 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4238 switch (nxgep->mac.portnum) { 4239 case 0: 4240 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4241 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4242 my_err = B_TRUE; 4243 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4244 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4245 } 4246 break; 4247 case 1: 4248 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4249 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4250 my_err = B_TRUE; 4251 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4252 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4253 } 4254 break; 4255 case 2: 4256 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4257 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4258 my_err = B_TRUE; 4259 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4260 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4261 } 4262 break; 4263 case 3: 4264 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4265 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4266 my_err = B_TRUE; 4267 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4268 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4269 } 4270 break; 4271 default: 4272 return (NXGE_ERROR); 4273 
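/*
 * Note that zcp_eop_err and ipp_eop_err are per-port bit masks
 * (FIFO_EOP_PORT0..FIFO_EOP_PORT3 above); each case therefore
 * tests only the bit that belongs to this instance's port.
 */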
} 4274 } 4275 4276 if (my_err) { 4277 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4278 zcp_err_status); 4279 if (status != NXGE_OK) 4280 return (status); 4281 } 4282 4283 return (NXGE_OK); 4284 } 4285 4286 static nxge_status_t 4287 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4288 uint32_t zcp_status) 4289 { 4290 boolean_t rxport_fatal = B_FALSE; 4291 p_nxge_rdc_sys_stats_t statsp; 4292 nxge_status_t status = NXGE_OK; 4293 uint8_t portn; 4294 4295 portn = nxgep->mac.portnum; 4296 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4297 4298 if (ipp_status & (0x1 << portn)) { 4299 statsp->ipp_eop_err++; 4300 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4301 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4302 rxport_fatal = B_TRUE; 4303 } 4304 4305 if (zcp_status & (0x1 << portn)) { 4306 statsp->zcp_eop_err++; 4307 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4308 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4309 rxport_fatal = B_TRUE; 4310 } 4311 4312 if (rxport_fatal) { 4313 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4314 " nxge_rxdma_handle_port_error: " 4315 " fatal error on Port #%d\n", 4316 portn)); 4317 status = nxge_rx_port_fatal_err_recover(nxgep); 4318 if (status == NXGE_OK) { 4319 FM_SERVICE_RESTORED(nxgep); 4320 } 4321 } 4322 4323 return (status); 4324 } 4325 4326 static nxge_status_t 4327 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4328 { 4329 npi_handle_t handle; 4330 npi_status_t rs = NPI_SUCCESS; 4331 nxge_status_t status = NXGE_OK; 4332 p_rx_rbr_ring_t rbrp; 4333 p_rx_rcr_ring_t rcrp; 4334 p_rx_mbox_t mboxp; 4335 rx_dma_ent_msk_t ent_mask; 4336 p_nxge_dma_common_t dmap; 4337 int ring_idx; 4338 uint32_t ref_cnt; 4339 p_rx_msg_t rx_msg_p; 4340 int i; 4341 uint32_t nxge_port_rcr_size; 4342 4343 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4344 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4345 "Recovering from RxDMAChannel#%d error...", channel)); 4346 4347 /* 4348 * Stop the dma channel waits for the stop done. 4349 * If the stop done bit is not set, then create 4350 * an error. 
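 * The whole recovery sequence runs with the RCR, RBR and post
 * locks held: disable the channel, mask all events, reset the
 * channel, re-initialize the completion ring and buffer
 * bookkeeping, and finally restart the channel through
 * nxge_rxdma_start_channel().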
4351 */ 4352 4353 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4354 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4355 4356 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4357 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4358 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4359 4360 MUTEX_ENTER(&rcrp->lock); 4361 MUTEX_ENTER(&rbrp->lock); 4362 MUTEX_ENTER(&rbrp->post_lock); 4363 4364 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4365 4366 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4367 if (rs != NPI_SUCCESS) { 4368 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4369 "nxge_disable_rxdma_channel:failed")); 4370 goto fail; 4371 } 4372 4373 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4374 4375 /* Disable interrupt */ 4376 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4377 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4378 if (rs != NPI_SUCCESS) { 4379 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4380 "nxge_rxdma_stop_channel: " 4381 "set rxdma event masks failed (channel %d)", 4382 channel)); 4383 } 4384 4385 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4386 4387 /* Reset RXDMA channel */ 4388 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4389 if (rs != NPI_SUCCESS) { 4390 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4391 "nxge_rxdma_fatal_err_recover: " 4392 " reset rxdma failed (channel %d)", channel)); 4393 goto fail; 4394 } 4395 4396 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4397 4398 mboxp = 4399 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4400 4401 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4402 rbrp->rbr_rd_index = 0; 4403 4404 rcrp->comp_rd_index = 0; 4405 rcrp->comp_wt_index = 0; 4406 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4407 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4408 #if defined(__i386) 4409 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4410 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4411 #else 4412 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4413 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4414 #endif 4415 4416 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4417 (nxge_port_rcr_size - 1); 4418 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4419 (nxge_port_rcr_size - 1); 4420 4421 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4422 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4423 4424 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4425 4426 for (i = 0; i < rbrp->rbr_max_size; i++) { 4427 rx_msg_p = rbrp->rx_msg_ring[i]; 4428 ref_cnt = rx_msg_p->ref_cnt; 4429 if (ref_cnt != 1) { 4430 if (rx_msg_p->cur_usage_cnt != 4431 rx_msg_p->max_usage_cnt) { 4432 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4433 "buf[%d]: cur_usage_cnt = %d " 4434 "max_usage_cnt = %d\n", i, 4435 rx_msg_p->cur_usage_cnt, 4436 rx_msg_p->max_usage_cnt)); 4437 } else { 4438 /* Buffer can be re-posted */ 4439 rx_msg_p->free = B_TRUE; 4440 rx_msg_p->cur_usage_cnt = 0; 4441 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4442 rx_msg_p->pkt_buf_size = 0; 4443 } 4444 } 4445 } 4446 4447 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4448 4449 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4450 if (status != NXGE_OK) { 4451 goto fail; 4452 } 4453 4454 MUTEX_EXIT(&rbrp->post_lock); 4455 MUTEX_EXIT(&rbrp->lock); 4456 MUTEX_EXIT(&rcrp->lock); 4457 4458 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4459 "Recovery Successful, RxDMAChannel#%d Restored", 4460 channel)); 4461 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> 
nxge_rxdma_fatal_err_recover")); 4462 4463 return (NXGE_OK); 4464 fail: 4465 MUTEX_EXIT(&rbrp->post_lock); 4466 MUTEX_EXIT(&rbrp->lock); 4467 MUTEX_EXIT(&rcrp->lock); 4468 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4469 4470 return (NXGE_ERROR | rs); 4471 } 4472 4473 nxge_status_t 4474 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4475 { 4476 nxge_grp_set_t *set = &nxgep->rx_set; 4477 nxge_status_t status = NXGE_OK; 4478 int rdc; 4479 4480 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4481 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4482 "Recovering from RxPort error...")); 4483 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4484 4485 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4486 goto fail; 4487 4488 NXGE_DELAY(1000); 4489 4490 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4491 4492 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4493 if ((1 << rdc) & set->owned.map) { 4494 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4495 != NXGE_OK) { 4496 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4497 "Could not recover channel %d", rdc)); 4498 } 4499 } 4500 } 4501 4502 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4503 4504 /* Reset IPP */ 4505 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4506 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4507 "nxge_rx_port_fatal_err_recover: " 4508 "Failed to reset IPP")); 4509 goto fail; 4510 } 4511 4512 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4513 4514 /* Reset RxMAC */ 4515 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4516 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4517 "nxge_rx_port_fatal_err_recover: " 4518 "Failed to reset RxMAC")); 4519 goto fail; 4520 } 4521 4522 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4523 4524 /* Re-Initialize IPP */ 4525 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4526 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4527 "nxge_rx_port_fatal_err_recover: " 4528 "Failed to init IPP")); 4529 goto fail; 4530 } 4531 4532 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4533 4534 /* Re-Initialize RxMAC */ 4535 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4536 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4537 "nxge_rx_port_fatal_err_recover: " 4538 "Failed to reset RxMAC")); 4539 goto fail; 4540 } 4541 4542 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4543 4544 /* Re-enable RxMAC */ 4545 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4546 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4547 "nxge_rx_port_fatal_err_recover: " 4548 "Failed to enable RxMAC")); 4549 goto fail; 4550 } 4551 4552 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4553 "Recovery Successful, RxPort Restored")); 4554 4555 return (NXGE_OK); 4556 fail: 4557 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4558 return (status); 4559 } 4560 4561 void 4562 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4563 { 4564 rx_dma_ctl_stat_t cs; 4565 rx_ctl_dat_fifo_stat_t cdfs; 4566 4567 switch (err_id) { 4568 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4569 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4570 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4571 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4572 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4573 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4574 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4575 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4576 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4577 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4578 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4579 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4580 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4581 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4582 
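/*
 * Channel-scoped RDMC errors: read the RX_DMA_CTL_STAT debug
 * register for this channel, set the bit corresponding to the
 * requested ereport, and write the value back to simulate that
 * error condition.
 */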
RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4583 chan, &cs.value); 4584 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4585 cs.bits.hdw.rcr_ack_err = 1; 4586 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4587 cs.bits.hdw.dc_fifo_err = 1; 4588 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4589 cs.bits.hdw.rcr_sha_par = 1; 4590 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4591 cs.bits.hdw.rbr_pre_par = 1; 4592 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4593 cs.bits.hdw.rbr_tmout = 1; 4594 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4595 cs.bits.hdw.rsp_cnt_err = 1; 4596 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4597 cs.bits.hdw.byte_en_bus = 1; 4598 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4599 cs.bits.hdw.rsp_dat_err = 1; 4600 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4601 cs.bits.hdw.config_err = 1; 4602 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4603 cs.bits.hdw.rcrincon = 1; 4604 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4605 cs.bits.hdw.rcrfull = 1; 4606 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4607 cs.bits.hdw.rbrfull = 1; 4608 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4609 cs.bits.hdw.rbrlogpage = 1; 4610 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4611 cs.bits.hdw.cfiglogpage = 1; 4612 #if defined(__i386) 4613 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4614 cs.value); 4615 #else 4616 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4617 cs.value); 4618 #endif 4619 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4620 chan, cs.value); 4621 break; 4622 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4623 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4624 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4625 cdfs.value = 0; 4626 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4627 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4628 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4629 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4630 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4631 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4632 #if defined(__i386) 4633 cmn_err(CE_NOTE, 4634 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4635 cdfs.value); 4636 #else 4637 cmn_err(CE_NOTE, 4638 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4639 cdfs.value); 4640 #endif 4641 NXGE_REG_WR64(nxgep->npi_handle, 4642 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4643 break; 4644 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4645 break; 4646 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4647 break; 4648 } 4649 } 4650 4651 static void 4652 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4653 { 4654 rxring_info_t *ring_info; 4655 int index; 4656 uint32_t chunk_size; 4657 uint64_t kaddr; 4658 uint_t num_blocks; 4659 4660 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4661 4662 if (rbr_p == NULL) { 4663 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4664 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4665 return; 4666 } 4667 4668 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4669 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4670 "==> nxge_rxdma_databuf_free: DDI")); 4671 return; 4672 } 4673 4674 ring_info = rbr_p->ring_info; 4675 if (ring_info == NULL) { 4676 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4677 "==> nxge_rxdma_databuf_free: NULL ring info")); 4678 return; 4679 } 4680 num_blocks = rbr_p->num_blocks; 4681 for (index = 0; index < num_blocks; index++) { 4682 kaddr = ring_info->buffer[index].kaddr; 4683 chunk_size = 
ring_info->buffer[index].buf_size; 4684 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4685 "==> nxge_rxdma_databuf_free: free chunk %d " 4686 "kaddrp $%p chunk size %d", 4687 index, kaddr, chunk_size)); 4688 if (kaddr == NULL) continue; 4689 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4690 ring_info->buffer[index].kaddr = NULL; 4691 } 4692 4693 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4694 } 4695 4696 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4697 extern void contig_mem_free(void *, size_t); 4698 #endif 4699 4700 void 4701 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4702 { 4703 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4704 4705 if (kaddr == NULL || !buf_size) { 4706 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4707 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4708 kaddr, buf_size)); 4709 return; 4710 } 4711 4712 switch (alloc_type) { 4713 case KMEM_ALLOC: 4714 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4715 "==> nxge_free_buf: freeing kmem $%p size %d", 4716 kaddr, buf_size)); 4717 #if defined(__i386) 4718 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4719 #else 4720 KMEM_FREE((void *)kaddr, buf_size); 4721 #endif 4722 break; 4723 4724 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4725 case CONTIG_MEM_ALLOC: 4726 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4727 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4728 kaddr, buf_size)); 4729 contig_mem_free((void *)kaddr, buf_size); 4730 break; 4731 #endif 4732 4733 default: 4734 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4735 "<== nxge_free_buf: unsupported alloc type %d", 4736 alloc_type)); 4737 return; 4738 } 4739 4740 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 4741 } 4742
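/*
 * Teardown note: nxge_rxdma_databuf_free() skips rings whose buffers
 * were allocated through DDI (rbr_alloc_type == DDI_MEM_ALLOC); those
 * chunks are presumed to be released together with their DDI DMA
 * resources elsewhere in the driver.  For KMEM_ALLOC and (on sun4v)
 * CONTIG_MEM_ALLOC chunks, nxge_free_buf() dispatches on the ring's
 * recorded allocation type.
 */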