1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/nxge/nxge_impl.h> 27 #include <sys/nxge/nxge_rxdma.h> 28 #include <sys/nxge/nxge_hio.h> 29 30 #if !defined(_BIG_ENDIAN) 31 #include <npi_rx_rd32.h> 32 #endif 33 #include <npi_rx_rd64.h> 34 #include <npi_rx_wr64.h> 35 36 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 37 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 38 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 39 (rdc + nxgep->pt_config.hw_config.start_rdc) 40 41 /* 42 * Globals: tunable parameters (/etc/system or adb) 43 * 44 */ 45 extern uint32_t nxge_rbr_size; 46 extern uint32_t nxge_rcr_size; 47 extern uint32_t nxge_rbr_spare_size; 48 49 extern uint32_t nxge_mblks_pending; 50 51 /* 52 * Tunable to reduce the amount of time spent in the 53 * ISR doing Rx Processing. 54 */ 55 extern uint32_t nxge_max_rx_pkts; 56 boolean_t nxge_jumbo_enable; 57 58 /* 59 * Tunables to manage the receive buffer blocks. 60 * 61 * nxge_rx_threshold_hi: copy all buffers. 62 * nxge_rx_bcopy_size_type: receive buffer block size type. 63 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
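 *
 * (A rough reading of how these three tunables interact, inferred from the
 * descriptions above rather than stated in the original comment: below
 * nxge_rx_threshold_lo, receive buffers are loaned up to the stack as-is;
 * between the low and high thresholds only packets that fit the configured
 * buffer size type are bcopied; once consumption reaches
 * nxge_rx_threshold_hi every packet is bcopied, presumably so the RBR is
 * not starved of buffers.)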
64 */ 65 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 66 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 67 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 68 69 extern uint32_t nxge_cksum_offload; 70 71 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 72 static void nxge_unmap_rxdma(p_nxge_t, int); 73 74 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 75 76 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 77 static void nxge_rxdma_hw_stop(p_nxge_t, int); 78 79 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 80 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 81 uint32_t, 82 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 83 p_rx_mbox_t *); 84 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 85 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 86 87 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 88 uint16_t, 89 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 90 p_rx_rcr_ring_t *, p_rx_mbox_t *); 91 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 92 p_rx_rcr_ring_t, p_rx_mbox_t); 93 94 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 95 uint16_t, 96 p_nxge_dma_common_t *, 97 p_rx_rbr_ring_t *, uint32_t); 98 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 99 p_rx_rbr_ring_t); 100 101 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 102 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 103 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 104 105 static mblk_t * 106 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 107 108 static void nxge_receive_packet(p_nxge_t, 109 p_rx_rcr_ring_t, 110 p_rcr_entry_t, 111 boolean_t *, 112 mblk_t **, mblk_t **); 113 114 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 115 116 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 117 static void nxge_freeb(p_rx_msg_t); 118 static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 120 121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 122 uint32_t, uint32_t); 123 124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 125 p_rx_rbr_ring_t); 126 127 128 static nxge_status_t 129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 130 131 nxge_status_t 132 nxge_rx_port_fatal_err_recover(p_nxge_t); 133 134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 135 136 nxge_status_t 137 nxge_init_rxdma_channels(p_nxge_t nxgep) 138 { 139 nxge_grp_set_t *set = &nxgep->rx_set; 140 int i, count, channel; 141 nxge_grp_t *group; 142 dc_map_t map; 143 int dev_gindex; 144 145 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 146 147 if (!isLDOMguest(nxgep)) { 148 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 149 cmn_err(CE_NOTE, "hw_start_common"); 150 return (NXGE_ERROR); 151 } 152 } 153 154 /* 155 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 156 * We only have 8 hardware RDC tables, but we may have 157 * up to 16 logical (software-defined) groups of RDCS, 158 * if we make use of layer 3 & 4 hardware classification. 
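 *
 * Concretely, logical group i maps to device RDC group
 * (def_mac_rxdma_grpid + i), and each bit set in that group's
 * rdc_grps[].map selects one DMA channel to add to the group via
 * nxge_grp_dc_add().  For example (illustrative numbers only): with
 * def_mac_rxdma_grpid == 4, logical group 2 uses device RDC group 6.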
159 */ 160 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 161 if ((1 << i) & set->lg.map) { 162 group = set->group[i]; 163 dev_gindex = 164 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 165 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 166 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 167 if ((1 << channel) & map) { 168 if ((nxge_grp_dc_add(nxgep, 169 group, VP_BOUND_RX, channel))) 170 goto init_rxdma_channels_exit; 171 } 172 } 173 } 174 if (++count == set->lg.count) 175 break; 176 } 177 178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 179 return (NXGE_OK); 180 181 init_rxdma_channels_exit: 182 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 183 if ((1 << i) & set->lg.map) { 184 group = set->group[i]; 185 dev_gindex = 186 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 187 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 188 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 189 if ((1 << channel) & map) { 190 nxge_grp_dc_remove(nxgep, 191 VP_BOUND_RX, channel); 192 } 193 } 194 } 195 if (++count == set->lg.count) 196 break; 197 } 198 199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 200 return (NXGE_ERROR); 201 } 202 203 nxge_status_t 204 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 205 { 206 nxge_status_t status; 207 208 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 209 210 status = nxge_map_rxdma(nxge, channel); 211 if (status != NXGE_OK) { 212 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 213 "<== nxge_init_rxdma: status 0x%x", status)); 214 return (status); 215 } 216 217 #if defined(sun4v) 218 if (isLDOMguest(nxge)) { 219 /* set rcr_ring */ 220 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 221 222 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 223 if (status != NXGE_OK) { 224 nxge_unmap_rxdma(nxge, channel); 225 return (status); 226 } 227 } 228 #endif 229 230 status = nxge_rxdma_hw_start(nxge, channel); 231 if (status != NXGE_OK) { 232 nxge_unmap_rxdma(nxge, channel); 233 } 234 235 if (!nxge->statsp->rdc_ksp[channel]) 236 nxge_setup_rdc_kstats(nxge, channel); 237 238 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 239 "<== nxge_init_rxdma_channel: status 0x%x", status)); 240 241 return (status); 242 } 243 244 void 245 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 246 { 247 nxge_grp_set_t *set = &nxgep->rx_set; 248 int rdc; 249 250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 251 252 if (set->owned.map == 0) { 253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 254 "nxge_uninit_rxdma_channels: no channels")); 255 return; 256 } 257 258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 259 if ((1 << rdc) & set->owned.map) { 260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 261 } 262 } 263 264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 265 } 266 267 void 268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 269 { 270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 271 272 if (nxgep->statsp->rdc_ksp[channel]) { 273 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 274 nxgep->statsp->rdc_ksp[channel] = 0; 275 } 276 277 nxge_rxdma_hw_stop(nxgep, channel); 278 nxge_unmap_rxdma(nxgep, channel); 279 280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 281 } 282 283 nxge_status_t 284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 285 { 286 npi_handle_t handle; 287 npi_status_t rs = NPI_SUCCESS; 288 nxge_status_t status = NXGE_OK; 289 290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 
291 292 handle = NXGE_DEV_NPI_HANDLE(nxgep); 293 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 294 295 if (rs != NPI_SUCCESS) { 296 status = NXGE_ERROR | rs; 297 } 298 299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 300 301 return (status); 302 } 303 304 void 305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 306 { 307 nxge_grp_set_t *set = &nxgep->rx_set; 308 int rdc; 309 310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 311 312 if (!isLDOMguest(nxgep)) { 313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 314 (void) npi_rxdma_dump_fzc_regs(handle); 315 } 316 317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 318 NXGE_DEBUG_MSG((nxgep, TX_CTL, 319 "nxge_rxdma_regs_dump_channels: " 320 "NULL ring pointer(s)")); 321 return; 322 } 323 324 if (set->owned.map == 0) { 325 NXGE_DEBUG_MSG((nxgep, RX_CTL, 326 "nxge_rxdma_regs_dump_channels: no channels")); 327 return; 328 } 329 330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 331 if ((1 << rdc) & set->owned.map) { 332 rx_rbr_ring_t *ring = 333 nxgep->rx_rbr_rings->rbr_rings[rdc]; 334 if (ring) { 335 (void) nxge_dump_rxdma_channel(nxgep, rdc); 336 } 337 } 338 } 339 340 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 341 } 342 343 nxge_status_t 344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 345 { 346 npi_handle_t handle; 347 npi_status_t rs = NPI_SUCCESS; 348 nxge_status_t status = NXGE_OK; 349 350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 351 352 handle = NXGE_DEV_NPI_HANDLE(nxgep); 353 rs = npi_rxdma_dump_rdc_regs(handle, channel); 354 355 if (rs != NPI_SUCCESS) { 356 status = NXGE_ERROR | rs; 357 } 358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 359 return (status); 360 } 361 362 nxge_status_t 363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 364 p_rx_dma_ent_msk_t mask_p) 365 { 366 npi_handle_t handle; 367 npi_status_t rs = NPI_SUCCESS; 368 nxge_status_t status = NXGE_OK; 369 370 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 371 "<== nxge_init_rxdma_channel_event_mask")); 372 373 handle = NXGE_DEV_NPI_HANDLE(nxgep); 374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 375 if (rs != NPI_SUCCESS) { 376 status = NXGE_ERROR | rs; 377 } 378 379 return (status); 380 } 381 382 nxge_status_t 383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 384 p_rx_dma_ctl_stat_t cs_p) 385 { 386 npi_handle_t handle; 387 npi_status_t rs = NPI_SUCCESS; 388 nxge_status_t status = NXGE_OK; 389 390 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 391 "<== nxge_init_rxdma_channel_cntl_stat")); 392 393 handle = NXGE_DEV_NPI_HANDLE(nxgep); 394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 395 396 if (rs != NPI_SUCCESS) { 397 status = NXGE_ERROR | rs; 398 } 399 400 return (status); 401 } 402 403 /* 404 * nxge_rxdma_cfg_rdcgrp_default_rdc 405 * 406 * Set the default RDC for an RDC Group (Table) 407 * 408 * Arguments: 409 * nxgep 410 * rdcgrp The group to modify 411 * rdc The new default RDC. 
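 *
 * Both rdcgrp and rdc are port-relative values; they are translated to
 * absolute hardware numbers with the NXGE_ACTUAL_RDCGRP() and
 * NXGE_ACTUAL_RDC() macros before npi_rxdma_cfg_rdc_table_default_rdc()
 * is called.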
412 * 413 * Notes: 414 * 415 * NPI/NXGE function calls: 416 * npi_rxdma_cfg_rdc_table_default_rdc() 417 * 418 * Registers accessed: 419 * RDC_TBL_REG: FZC_ZCP + 0x10000 420 * 421 * Context: 422 * Service domain 423 */ 424 nxge_status_t 425 nxge_rxdma_cfg_rdcgrp_default_rdc( 426 p_nxge_t nxgep, 427 uint8_t rdcgrp, 428 uint8_t rdc) 429 { 430 npi_handle_t handle; 431 npi_status_t rs = NPI_SUCCESS; 432 p_nxge_dma_pt_cfg_t p_dma_cfgp; 433 p_nxge_rdc_grp_t rdc_grp_p; 434 uint8_t actual_rdcgrp, actual_rdc; 435 436 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 439 440 handle = NXGE_DEV_NPI_HANDLE(nxgep); 441 442 /* 443 * This has to be rewritten. Do we even allow this anymore? 444 */ 445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 446 RDC_MAP_IN(rdc_grp_p->map, rdc); 447 rdc_grp_p->def_rdc = rdc; 448 449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 451 452 rs = npi_rxdma_cfg_rdc_table_default_rdc( 453 handle, actual_rdcgrp, actual_rdc); 454 455 if (rs != NPI_SUCCESS) { 456 return (NXGE_ERROR | rs); 457 } 458 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 460 return (NXGE_OK); 461 } 462 463 nxge_status_t 464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 465 { 466 npi_handle_t handle; 467 468 uint8_t actual_rdc; 469 npi_status_t rs = NPI_SUCCESS; 470 471 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 472 " ==> nxge_rxdma_cfg_port_default_rdc")); 473 474 handle = NXGE_DEV_NPI_HANDLE(nxgep); 475 actual_rdc = rdc; /* XXX Hack! */ 476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 477 478 479 if (rs != NPI_SUCCESS) { 480 return (NXGE_ERROR | rs); 481 } 482 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 483 " <== nxge_rxdma_cfg_port_default_rdc")); 484 485 return (NXGE_OK); 486 } 487 488 nxge_status_t 489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 490 uint16_t pkts) 491 { 492 npi_status_t rs = NPI_SUCCESS; 493 npi_handle_t handle; 494 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 495 " ==> nxge_rxdma_cfg_rcr_threshold")); 496 handle = NXGE_DEV_NPI_HANDLE(nxgep); 497 498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 499 500 if (rs != NPI_SUCCESS) { 501 return (NXGE_ERROR | rs); 502 } 503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 504 return (NXGE_OK); 505 } 506 507 nxge_status_t 508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 509 uint16_t tout, uint8_t enable) 510 { 511 npi_status_t rs = NPI_SUCCESS; 512 npi_handle_t handle; 513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 514 handle = NXGE_DEV_NPI_HANDLE(nxgep); 515 if (enable == 0) { 516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 517 } else { 518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 519 tout); 520 } 521 522 if (rs != NPI_SUCCESS) { 523 return (NXGE_ERROR | rs); 524 } 525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 526 return (NXGE_OK); 527 } 528 529 nxge_status_t 530 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 531 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 532 { 533 npi_handle_t handle; 534 rdc_desc_cfg_t rdc_desc; 535 p_rcrcfig_b_t cfgb_p; 536 npi_status_t rs = NPI_SUCCESS; 537 538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 539 handle = NXGE_DEV_NPI_HANDLE(nxgep); 540 /* 541 * Use configuration data composed at init time. 
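 * The rdc_desc built below gathers that data: the mailbox DMA address,
 * the RBR and RCR ring addresses and lengths, the RBR page size code,
 * the three packet buffer sizes, and the RCR threshold/timeout values.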
542 * Write to hardware the receive ring configurations. 543 */ 544 rdc_desc.mbox_enable = 1; 545 rdc_desc.mbox_addr = mbox_p->mbox_addr; 546 NXGE_DEBUG_MSG((nxgep, RX_CTL, 547 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 548 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 549 550 rdc_desc.rbr_len = rbr_p->rbb_max; 551 rdc_desc.rbr_addr = rbr_p->rbr_addr; 552 553 switch (nxgep->rx_bksize_code) { 554 case RBR_BKSIZE_4K: 555 rdc_desc.page_size = SIZE_4KB; 556 break; 557 case RBR_BKSIZE_8K: 558 rdc_desc.page_size = SIZE_8KB; 559 break; 560 case RBR_BKSIZE_16K: 561 rdc_desc.page_size = SIZE_16KB; 562 break; 563 case RBR_BKSIZE_32K: 564 rdc_desc.page_size = SIZE_32KB; 565 break; 566 } 567 568 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 569 rdc_desc.valid0 = 1; 570 571 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 572 rdc_desc.valid1 = 1; 573 574 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 575 rdc_desc.valid2 = 1; 576 577 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 578 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 579 580 rdc_desc.rcr_len = rcr_p->comp_size; 581 rdc_desc.rcr_addr = rcr_p->rcr_addr; 582 583 cfgb_p = &(rcr_p->rcr_cfgb); 584 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 585 /* For now, disable this timeout in a guest domain. */ 586 if (isLDOMguest(nxgep)) { 587 rdc_desc.rcr_timeout = 0; 588 rdc_desc.rcr_timeout_enable = 0; 589 } else { 590 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 591 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 592 } 593 594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 595 "rbr_len qlen %d pagesize code %d rcr_len %d", 596 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 598 "size 0 %d size 1 %d size 2 %d", 599 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 600 rbr_p->npi_pkt_buf_size2)); 601 602 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 603 if (rs != NPI_SUCCESS) { 604 return (NXGE_ERROR | rs); 605 } 606 607 /* 608 * Enable the timeout and threshold. 609 */ 610 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 611 rdc_desc.rcr_threshold); 612 if (rs != NPI_SUCCESS) { 613 return (NXGE_ERROR | rs); 614 } 615 616 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 617 rdc_desc.rcr_timeout); 618 if (rs != NPI_SUCCESS) { 619 return (NXGE_ERROR | rs); 620 } 621 622 if (!isLDOMguest(nxgep)) { 623 /* Enable the DMA */ 624 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 625 if (rs != NPI_SUCCESS) { 626 return (NXGE_ERROR | rs); 627 } 628 } 629 630 /* Kick the DMA engine. 
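 * Posting rbb_max buffers through the kick register tells the hardware
 * how many RBR entries were pre-filled during buffer mapping, so the
 * channel starts out with a full ring of posted receive buffers.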
*/ 631 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 632 633 if (!isLDOMguest(nxgep)) { 634 /* Clear the rbr empty bit */ 635 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 636 } 637 638 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 639 640 return (NXGE_OK); 641 } 642 643 nxge_status_t 644 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 645 { 646 npi_handle_t handle; 647 npi_status_t rs = NPI_SUCCESS; 648 649 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 650 handle = NXGE_DEV_NPI_HANDLE(nxgep); 651 652 /* disable the DMA */ 653 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 654 if (rs != NPI_SUCCESS) { 655 NXGE_DEBUG_MSG((nxgep, RX_CTL, 656 "<== nxge_disable_rxdma_channel:failed (0x%x)", 657 rs)); 658 return (NXGE_ERROR | rs); 659 } 660 661 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 662 return (NXGE_OK); 663 } 664 665 nxge_status_t 666 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 667 { 668 npi_handle_t handle; 669 nxge_status_t status = NXGE_OK; 670 671 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 672 "<== nxge_init_rxdma_channel_rcrflush")); 673 674 handle = NXGE_DEV_NPI_HANDLE(nxgep); 675 npi_rxdma_rdc_rcr_flush(handle, channel); 676 677 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 678 "<== nxge_init_rxdma_channel_rcrflsh")); 679 return (status); 680 681 } 682 683 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 684 685 #define TO_LEFT -1 686 #define TO_RIGHT 1 687 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 688 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 689 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 690 #define NO_HINT 0xffffffff 691 692 /*ARGSUSED*/ 693 nxge_status_t 694 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 695 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 696 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 697 { 698 int bufsize; 699 uint64_t pktbuf_pp; 700 uint64_t dvma_addr; 701 rxring_info_t *ring_info; 702 int base_side, end_side; 703 int r_index, l_index, anchor_index; 704 int found, search_done; 705 uint32_t offset, chunk_size, block_size, page_size_mask; 706 uint32_t chunk_index, block_index, total_index; 707 int max_iterations, iteration; 708 rxbuf_index_info_t *bufinfo; 709 710 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 711 712 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 713 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 714 pkt_buf_addr_pp, 715 pktbufsz_type)); 716 #if defined(__i386) 717 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 718 #else 719 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 720 #endif 721 722 switch (pktbufsz_type) { 723 case 0: 724 bufsize = rbr_p->pkt_buf_size0; 725 break; 726 case 1: 727 bufsize = rbr_p->pkt_buf_size1; 728 break; 729 case 2: 730 bufsize = rbr_p->pkt_buf_size2; 731 break; 732 case RCR_SINGLE_BLOCK: 733 bufsize = 0; 734 anchor_index = 0; 735 break; 736 default: 737 return (NXGE_ERROR); 738 } 739 740 if (rbr_p->num_blocks == 1) { 741 anchor_index = 0; 742 ring_info = rbr_p->ring_info; 743 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 744 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 745 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 746 "buf_pp $%p btype %d anchor_index %d " 747 "bufinfo $%p", 748 pkt_buf_addr_pp, 749 pktbufsz_type, 750 anchor_index, 751 bufinfo)); 752 753 goto found_index; 754 } 755 756 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 757 "==> nxge_rxbuf_pp_to_vp: " 758 "buf_pp $%p btype %d anchor_index %d", 759 pkt_buf_addr_pp, 760 pktbufsz_type, 761 anchor_index)); 762 763 ring_info = rbr_p->ring_info; 764 found = B_FALSE; 
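	/*
	 * Lookup strategy (illustrative summary of the code below): the
	 * per-size hint is tried first, and only on a miss is a binary
	 * search done over bufinfo[], which is sorted by ascending
	 * dvma_addr.  Each probe classifies pktbuf_pp against the anchor
	 * chunk, roughly:
	 *
	 *	base_side = (pktbuf_pp >= dvma_addr) ? TO_RIGHT : TO_LEFT;
	 *	end_side  = (pktbuf_pp < dvma_addr + chunk_size) ?
	 *	    TO_LEFT : TO_RIGHT;
	 *
	 * base_side + end_side is then IN_MIDDLE (hit), BOTH_RIGHT (search
	 * the upper half) or BOTH_LEFT (search the lower half), bounded by
	 * ring_info->max_iterations probes.
	 */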
765 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 766 iteration = 0; 767 max_iterations = ring_info->max_iterations; 768 /* 769 * First check if this block has been seen 770 * recently. This is indicated by a hint which 771 * is initialized when the first buffer of the block 772 * is seen. The hint is reset when the last buffer of 773 * the block has been processed. 774 * As three block sizes are supported, three hints 775 * are kept. The idea behind the hints is that once 776 * the hardware uses a block for a buffer of that 777 * size, it will use it exclusively for that size 778 * and will use it until it is exhausted. It is assumed 779 * that there would be a single block in use for the same 780 * buffer size at any given time. 781 */ 782 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 783 anchor_index = ring_info->hint[pktbufsz_type]; 784 dvma_addr = bufinfo[anchor_index].dvma_addr; 785 chunk_size = bufinfo[anchor_index].buf_size; 786 if ((pktbuf_pp >= dvma_addr) && 787 (pktbuf_pp < (dvma_addr + chunk_size))) { 788 found = B_TRUE; 789 /* 790 * Check if this is the last buffer in the block. 791 * If so, reset the hint for this size. 792 */ 793 794 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 795 ring_info->hint[pktbufsz_type] = NO_HINT; 796 } 797 } 798 799 if (found == B_FALSE) { 800 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 801 "==> nxge_rxbuf_pp_to_vp: (!found)" 802 "buf_pp $%p btype %d anchor_index %d", 803 pkt_buf_addr_pp, 804 pktbufsz_type, 805 anchor_index)); 806 807 /* 808 * This is the first buffer of the block of this 809 * size. Need to search the whole information 810 * array. 811 * The search uses a binary search 812 * algorithm. It assumes that the information is 813 * already sorted in increasing order 814 * info[0] < info[1] < info[2] ....
< info[n-1] 815 * where n is the size of the information array 816 */ 817 r_index = rbr_p->num_blocks - 1; 818 l_index = 0; 819 search_done = B_FALSE; 820 anchor_index = MID_INDEX(r_index, l_index); 821 while (search_done == B_FALSE) { 822 if ((r_index == l_index) || 823 (iteration >= max_iterations)) 824 search_done = B_TRUE; 825 end_side = TO_RIGHT; /* to the right */ 826 base_side = TO_LEFT; /* to the left */ 827 /* read the DVMA address information and sort it */ 828 dvma_addr = bufinfo[anchor_index].dvma_addr; 829 chunk_size = bufinfo[anchor_index].buf_size; 830 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 831 "==> nxge_rxbuf_pp_to_vp: (searching)" 832 "buf_pp $%p btype %d " 833 "anchor_index %d chunk_size %d dvmaaddr $%p", 834 pkt_buf_addr_pp, 835 pktbufsz_type, 836 anchor_index, 837 chunk_size, 838 dvma_addr)); 839 840 if (pktbuf_pp >= dvma_addr) 841 base_side = TO_RIGHT; /* to the right */ 842 if (pktbuf_pp < (dvma_addr + chunk_size)) 843 end_side = TO_LEFT; /* to the left */ 844 845 switch (base_side + end_side) { 846 case IN_MIDDLE: 847 /* found */ 848 found = B_TRUE; 849 search_done = B_TRUE; 850 if ((pktbuf_pp + bufsize) < 851 (dvma_addr + chunk_size)) 852 ring_info->hint[pktbufsz_type] = 853 bufinfo[anchor_index].buf_index; 854 break; 855 case BOTH_RIGHT: 856 /* not found: go to the right */ 857 l_index = anchor_index + 1; 858 anchor_index = MID_INDEX(r_index, l_index); 859 break; 860 861 case BOTH_LEFT: 862 /* not found: go to the left */ 863 r_index = anchor_index - 1; 864 anchor_index = MID_INDEX(r_index, l_index); 865 break; 866 default: /* should not come here */ 867 return (NXGE_ERROR); 868 } 869 iteration++; 870 } 871 872 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 873 "==> nxge_rxbuf_pp_to_vp: (search done)" 874 "buf_pp $%p btype %d anchor_index %d", 875 pkt_buf_addr_pp, 876 pktbufsz_type, 877 anchor_index)); 878 } 879 880 if (found == B_FALSE) { 881 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 882 "==> nxge_rxbuf_pp_to_vp: (search failed)" 883 "buf_pp $%p btype %d anchor_index %d", 884 pkt_buf_addr_pp, 885 pktbufsz_type, 886 anchor_index)); 887 return (NXGE_ERROR); 888 } 889 890 found_index: 891 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 892 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 893 "buf_pp $%p btype %d bufsize %d anchor_index %d", 894 pkt_buf_addr_pp, 895 pktbufsz_type, 896 bufsize, 897 anchor_index)); 898 899 /* index of the first block in this chunk */ 900 chunk_index = bufinfo[anchor_index].start_index; 901 dvma_addr = bufinfo[anchor_index].dvma_addr; 902 page_size_mask = ring_info->block_size_mask; 903 904 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 905 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 906 "buf_pp $%p btype %d bufsize %d " 907 "anchor_index %d chunk_index %d dvma $%p", 908 pkt_buf_addr_pp, 909 pktbufsz_type, 910 bufsize, 911 anchor_index, 912 chunk_index, 913 dvma_addr)); 914 915 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 916 block_size = rbr_p->block_size; /* System block(page) size */ 917 918 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 919 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 920 "buf_pp $%p btype %d bufsize %d " 921 "anchor_index %d chunk_index %d dvma $%p " 922 "offset %d block_size %d", 923 pkt_buf_addr_pp, 924 pktbufsz_type, 925 bufsize, 926 anchor_index, 927 chunk_index, 928 dvma_addr, 929 offset, 930 block_size)); 931 932 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 933 934 block_index = (offset / block_size); /* index within chunk */ 935 total_index = chunk_index + block_index; 936 937 938 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 939 "==> nxge_rxbuf_pp_to_vp: " 940 
"total_index %d dvma_addr $%p " 941 "offset %d block_size %d " 942 "block_index %d ", 943 total_index, dvma_addr, 944 offset, block_size, 945 block_index)); 946 #if defined(__i386) 947 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 948 (uint32_t)offset); 949 #else 950 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 951 (uint64_t)offset); 952 #endif 953 954 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 955 "==> nxge_rxbuf_pp_to_vp: " 956 "total_index %d dvma_addr $%p " 957 "offset %d block_size %d " 958 "block_index %d " 959 "*pkt_buf_addr_p $%p", 960 total_index, dvma_addr, 961 offset, block_size, 962 block_index, 963 *pkt_buf_addr_p)); 964 965 966 *msg_index = total_index; 967 *bufoffset = (offset & page_size_mask); 968 969 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 970 "==> nxge_rxbuf_pp_to_vp: get msg index: " 971 "msg_index %d bufoffset_index %d", 972 *msg_index, 973 *bufoffset)); 974 975 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 976 977 return (NXGE_OK); 978 } 979 980 /* 981 * used by quick sort (qsort) function 982 * to perform comparison 983 */ 984 static int 985 nxge_sort_compare(const void *p1, const void *p2) 986 { 987 988 rxbuf_index_info_t *a, *b; 989 990 a = (rxbuf_index_info_t *)p1; 991 b = (rxbuf_index_info_t *)p2; 992 993 if (a->dvma_addr > b->dvma_addr) 994 return (1); 995 if (a->dvma_addr < b->dvma_addr) 996 return (-1); 997 return (0); 998 } 999 1000 1001 1002 /* 1003 * grabbed this sort implementation from common/syscall/avl.c 1004 * 1005 */ 1006 /* 1007 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 1008 * v = Ptr to array/vector of objs 1009 * n = # objs in the array 1010 * s = size of each obj (must be multiples of a word size) 1011 * f = ptr to function to compare two objs 1012 * returns (-1 = less than, 0 = equal, 1 = greater than 1013 */ 1014 void 1015 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1016 { 1017 int g, i, j, ii; 1018 unsigned int *p1, *p2; 1019 unsigned int tmp; 1020 1021 /* No work to do */ 1022 if (v == NULL || n <= 1) 1023 return; 1024 /* Sanity check on arguments */ 1025 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1026 ASSERT(s > 0); 1027 1028 for (g = n / 2; g > 0; g /= 2) { 1029 for (i = g; i < n; i++) { 1030 for (j = i - g; j >= 0 && 1031 (*f)(v + j * s, v + (j + g) * s) == 1; 1032 j -= g) { 1033 p1 = (unsigned *)(v + j * s); 1034 p2 = (unsigned *)(v + (j + g) * s); 1035 for (ii = 0; ii < s / 4; ii++) { 1036 tmp = *p1; 1037 *p1++ = *p2; 1038 *p2++ = tmp; 1039 } 1040 } 1041 } 1042 } 1043 } 1044 1045 /* 1046 * Initialize data structures required for rxdma 1047 * buffer dvma->vmem address lookup 1048 */ 1049 /*ARGSUSED*/ 1050 static nxge_status_t 1051 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1052 { 1053 1054 int index; 1055 rxring_info_t *ring_info; 1056 int max_iteration = 0, max_index = 0; 1057 1058 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1059 1060 ring_info = rbrp->ring_info; 1061 ring_info->hint[0] = NO_HINT; 1062 ring_info->hint[1] = NO_HINT; 1063 ring_info->hint[2] = NO_HINT; 1064 max_index = rbrp->num_blocks; 1065 1066 /* read the DVMA address information and sort it */ 1067 /* do init of the information array */ 1068 1069 1070 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1071 " nxge_rxbuf_index_info_init Sort ptrs")); 1072 1073 /* sort the array */ 1074 nxge_ksort((void *)ring_info->buffer, max_index, 1075 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1076 1077 1078 1079 for (index = 0; index < max_index; index++) { 1080 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1081 " nxge_rxbuf_index_info_init: sorted chunk %d " 1082 " ioaddr $%p kaddr $%p size %x", 1083 index, ring_info->buffer[index].dvma_addr, 1084 ring_info->buffer[index].kaddr, 1085 ring_info->buffer[index].buf_size)); 1086 } 1087 1088 max_iteration = 0; 1089 while (max_index >= (1ULL << max_iteration)) 1090 max_iteration++; 1091 ring_info->max_iterations = max_iteration + 1; 1092 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1093 " nxge_rxbuf_index_info_init Find max iter %d", 1094 ring_info->max_iterations)); 1095 1096 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1097 return (NXGE_OK); 1098 } 1099 1100 /* ARGSUSED */ 1101 void 1102 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1103 { 1104 #ifdef NXGE_DEBUG 1105 1106 uint32_t bptr; 1107 uint64_t pp; 1108 1109 bptr = entry_p->bits.hdw.pkt_buf_addr; 1110 1111 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1112 "\trcr entry $%p " 1113 "\trcr entry 0x%0llx " 1114 "\trcr entry 0x%08x " 1115 "\trcr entry 0x%08x " 1116 "\tvalue 0x%0llx\n" 1117 "\tmulti = %d\n" 1118 "\tpkt_type = 0x%x\n" 1119 "\tzero_copy = %d\n" 1120 "\tnoport = %d\n" 1121 "\tpromis = %d\n" 1122 "\terror = 0x%04x\n" 1123 "\tdcf_err = 0x%01x\n" 1124 "\tl2_len = %d\n" 1125 "\tpktbufsize = %d\n" 1126 "\tpkt_buf_addr = $%p\n" 1127 "\tpkt_buf_addr (<< 6) = $%p\n", 1128 entry_p, 1129 *(int64_t *)entry_p, 1130 *(int32_t *)entry_p, 1131 *(int32_t *)((char *)entry_p + 32), 1132 entry_p->value, 1133 entry_p->bits.hdw.multi, 1134 entry_p->bits.hdw.pkt_type, 1135 entry_p->bits.hdw.zero_copy, 1136 entry_p->bits.hdw.noport, 1137 entry_p->bits.hdw.promis, 1138 entry_p->bits.hdw.error, 1139 entry_p->bits.hdw.dcf_err, 1140 entry_p->bits.hdw.l2_len, 1141 entry_p->bits.hdw.pktbufsz, 1142 bptr, 1143 entry_p->bits.ldw.pkt_buf_addr)); 1144 1145 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1146 RCR_PKT_BUF_ADDR_SHIFT; 1147 1148 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1149 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1150 #endif 1151 } 1152 1153 void 1154 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1155 { 1156 npi_handle_t handle; 1157 rbr_stat_t rbr_stat; 1158 addr44_t hd_addr; 1159 addr44_t tail_addr; 1160 uint16_t qlen; 1161 1162 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1163 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1164 1165 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1166 1167 /* RBR head */ 1168 hd_addr.addr = 0; 1169 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1170 #if defined(__i386) 1171 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1172 (void *)(uint32_t)hd_addr.addr); 1173 #else 1174 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1175 (void *)hd_addr.addr); 1176 #endif 1177 1178 /* RBR stats */ 1179 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1180 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1181 1182 /* RCR tail */ 1183 tail_addr.addr = 0; 1184 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1185 #if defined(__i386) 1186 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1187 (void *)(uint32_t)tail_addr.addr); 1188 #else 1189 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1190 (void *)tail_addr.addr); 1191 #endif 1192 1193 /* RCR qlen */ 1194 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1195 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1196 1197 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1198 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1199 } 1200 1201 nxge_status_t 1202 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1203 { 1204 nxge_grp_set_t 
*set = &nxgep->rx_set; 1205 nxge_status_t status; 1206 npi_status_t rs; 1207 int rdc; 1208 1209 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1210 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1211 1212 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1213 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1214 "<== nxge_rxdma_mode: not initialized")); 1215 return (NXGE_ERROR); 1216 } 1217 1218 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1219 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1220 "<== nxge_tx_port_fatal_err_recover: " 1221 "NULL ring pointer(s)")); 1222 return (NXGE_ERROR); 1223 } 1224 1225 if (set->owned.map == 0) { 1226 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1227 "nxge_rxdma_regs_dump_channels: no channels")); 1228 return (NULL); 1229 } 1230 1231 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1232 if ((1 << rdc) & set->owned.map) { 1233 rx_rbr_ring_t *ring = 1234 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1235 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1236 if (ring) { 1237 if (enable) { 1238 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1239 "==> nxge_rxdma_hw_mode: " 1240 "channel %d (enable)", rdc)); 1241 rs = npi_rxdma_cfg_rdc_enable 1242 (handle, rdc); 1243 } else { 1244 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1245 "==> nxge_rxdma_hw_mode: " 1246 "channel %d disable)", rdc)); 1247 rs = npi_rxdma_cfg_rdc_disable 1248 (handle, rdc); 1249 } 1250 } 1251 } 1252 } 1253 1254 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1255 1256 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1257 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1258 1259 return (status); 1260 } 1261 1262 void 1263 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1264 { 1265 npi_handle_t handle; 1266 1267 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1268 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1269 1270 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1271 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1272 1273 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1274 } 1275 1276 void 1277 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1278 { 1279 npi_handle_t handle; 1280 1281 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1282 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1283 1284 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1285 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1286 1287 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1288 } 1289 1290 void 1291 nxge_hw_start_rx(p_nxge_t nxgep) 1292 { 1293 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1294 1295 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1296 (void) nxge_rx_mac_enable(nxgep); 1297 1298 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1299 } 1300 1301 /*ARGSUSED*/ 1302 void 1303 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1304 { 1305 nxge_grp_set_t *set = &nxgep->rx_set; 1306 int rdc; 1307 1308 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1309 1310 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1311 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1312 "<== nxge_tx_port_fatal_err_recover: " 1313 "NULL ring pointer(s)")); 1314 return; 1315 } 1316 1317 if (set->owned.map == 0) { 1318 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1319 "nxge_rxdma_regs_dump_channels: no channels")); 1320 return; 1321 } 1322 1323 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1324 if ((1 << rdc) & set->owned.map) { 1325 rx_rbr_ring_t *ring = 1326 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1327 if (ring) { 1328 nxge_rxdma_hw_stop(nxgep, rdc); 1329 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1330 "==> nxge_fixup_rxdma_rings: " 1331 "channel %d ring $%px", 1332 
rdc, ring)); 1333 (void) nxge_rxdma_fixup_channel 1334 (nxgep, rdc, rdc); 1335 } 1336 } 1337 } 1338 1339 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1340 } 1341 1342 void 1343 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1344 { 1345 int i; 1346 1347 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1348 i = nxge_rxdma_get_ring_index(nxgep, channel); 1349 if (i < 0) { 1350 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1351 "<== nxge_rxdma_fix_channel: no entry found")); 1352 return; 1353 } 1354 1355 nxge_rxdma_fixup_channel(nxgep, channel, i); 1356 1357 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 1358 } 1359 1360 void 1361 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1362 { 1363 int ndmas; 1364 p_rx_rbr_rings_t rx_rbr_rings; 1365 p_rx_rbr_ring_t *rbr_rings; 1366 p_rx_rcr_rings_t rx_rcr_rings; 1367 p_rx_rcr_ring_t *rcr_rings; 1368 p_rx_mbox_areas_t rx_mbox_areas_p; 1369 p_rx_mbox_t *rx_mbox_p; 1370 p_nxge_dma_pool_t dma_buf_poolp; 1371 p_nxge_dma_pool_t dma_cntl_poolp; 1372 p_rx_rbr_ring_t rbrp; 1373 p_rx_rcr_ring_t rcrp; 1374 p_rx_mbox_t mboxp; 1375 p_nxge_dma_common_t dmap; 1376 nxge_status_t status = NXGE_OK; 1377 1378 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 1379 1380 (void) nxge_rxdma_stop_channel(nxgep, channel); 1381 1382 dma_buf_poolp = nxgep->rx_buf_pool_p; 1383 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1384 1385 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1386 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1387 "<== nxge_rxdma_fixup_channel: buf not allocated")); 1388 return; 1389 } 1390 1391 ndmas = dma_buf_poolp->ndmas; 1392 if (!ndmas) { 1393 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1394 "<== nxge_rxdma_fixup_channel: no dma allocated")); 1395 return; 1396 } 1397 1398 rx_rbr_rings = nxgep->rx_rbr_rings; 1399 rx_rcr_rings = nxgep->rx_rcr_rings; 1400 rbr_rings = rx_rbr_rings->rbr_rings; 1401 rcr_rings = rx_rcr_rings->rcr_rings; 1402 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1403 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1404 1405 /* Reinitialize the receive block and completion rings */ 1406 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 1407 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 1408 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 1409 1410 1411 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1412 rbrp->rbr_rd_index = 0; 1413 rcrp->comp_rd_index = 0; 1414 rcrp->comp_wt_index = 0; 1415 1416 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1417 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1418 1419 status = nxge_rxdma_start_channel(nxgep, channel, 1420 rbrp, rcrp, mboxp); 1421 if (status != NXGE_OK) { 1422 goto nxge_rxdma_fixup_channel_fail; 1423 } 1424 if (status != NXGE_OK) { 1425 goto nxge_rxdma_fixup_channel_fail; 1426 } 1427 1428 nxge_rxdma_fixup_channel_fail: 1429 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1430 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 1431 1432 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 1433 } 1434 1435 /* 1436 * Convert an absolute RDC number to a Receive Buffer Ring index. That is, 1437 * map <channel> to an index into nxgep->rx_rbr_rings. 
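 * The lookup is a linear scan of rx_rbr_rings->rbr_rings[], comparing
 * each ring's rdc field against <channel>; -1 is returned when no ring
 * owns the channel.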
1438 * (device ring index -> port ring index) 1439 */ 1440 int 1441 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1442 { 1443 int i, ndmas; 1444 uint16_t rdc; 1445 p_rx_rbr_rings_t rx_rbr_rings; 1446 p_rx_rbr_ring_t *rbr_rings; 1447 1448 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1449 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1450 1451 rx_rbr_rings = nxgep->rx_rbr_rings; 1452 if (rx_rbr_rings == NULL) { 1453 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1454 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1455 return (-1); 1456 } 1457 ndmas = rx_rbr_rings->ndmas; 1458 if (!ndmas) { 1459 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1460 "<== nxge_rxdma_get_ring_index: no channel")); 1461 return (-1); 1462 } 1463 1464 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1465 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1466 1467 rbr_rings = rx_rbr_rings->rbr_rings; 1468 for (i = 0; i < ndmas; i++) { 1469 rdc = rbr_rings[i]->rdc; 1470 if (channel == rdc) { 1471 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1472 "==> nxge_rxdma_get_rbr_ring: channel %d " 1473 "(index %d) ring %d", channel, i, rbr_rings[i])); 1474 return (i); 1475 } 1476 } 1477 1478 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1479 "<== nxge_rxdma_get_rbr_ring_index: not found")); 1480 1481 return (-1); 1482 } 1483 1484 p_rx_rbr_ring_t 1485 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1486 { 1487 nxge_grp_set_t *set = &nxgep->rx_set; 1488 nxge_channel_t rdc; 1489 1490 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1491 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1492 1493 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1494 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1495 "<== nxge_rxdma_get_rbr_ring: " 1496 "NULL ring pointer(s)")); 1497 return (NULL); 1498 } 1499 1500 if (set->owned.map == 0) { 1501 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1502 "<== nxge_rxdma_get_rbr_ring: no channels")); 1503 return (NULL); 1504 } 1505 1506 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1507 if ((1 << rdc) & set->owned.map) { 1508 rx_rbr_ring_t *ring = 1509 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1510 if (ring) { 1511 if (channel == ring->rdc) { 1512 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1513 "==> nxge_rxdma_get_rbr_ring: " 1514 "channel %d ring $%p", rdc, ring)); 1515 return (ring); 1516 } 1517 } 1518 } 1519 } 1520 1521 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1522 "<== nxge_rxdma_get_rbr_ring: not found")); 1523 1524 return (NULL); 1525 } 1526 1527 p_rx_rcr_ring_t 1528 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1529 { 1530 nxge_grp_set_t *set = &nxgep->rx_set; 1531 nxge_channel_t rdc; 1532 1533 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1534 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1535 1536 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1537 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1538 "<== nxge_rxdma_get_rcr_ring: " 1539 "NULL ring pointer(s)")); 1540 return (NULL); 1541 } 1542 1543 if (set->owned.map == 0) { 1544 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1545 "<== nxge_rxdma_get_rbr_ring: no channels")); 1546 return (NULL); 1547 } 1548 1549 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1550 if ((1 << rdc) & set->owned.map) { 1551 rx_rcr_ring_t *ring = 1552 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1553 if (ring) { 1554 if (channel == ring->rdc) { 1555 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1556 "==> nxge_rxdma_get_rcr_ring: " 1557 "channel %d ring $%p", rdc, ring)); 1558 return (ring); 1559 } 1560 } 1561 } 1562 } 1563 1564 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1565 "<== nxge_rxdma_get_rcr_ring: not found")); 1566 1567 return (NULL); 1568 } 1569 1570 /* 1571 * Static functions start here. 
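 *
 * Roughly, the helpers that follow: nxge_allocb()/nxge_freeb() manage
 * the reference-counted rx_msg_t receive buffers, nxge_post_page()
 * returns a buffer to the RBR, nxge_rx_pkts() walks the RCR completion
 * ring, and nxge_receive_packet() turns one completion entry into an
 * mblk.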
1572 */ 1573 static p_rx_msg_t 1574 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1575 { 1576 p_rx_msg_t nxge_mp = NULL; 1577 p_nxge_dma_common_t dmamsg_p; 1578 uchar_t *buffer; 1579 1580 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1581 if (nxge_mp == NULL) { 1582 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1583 "Allocation of a rx msg failed.")); 1584 goto nxge_allocb_exit; 1585 } 1586 1587 nxge_mp->use_buf_pool = B_FALSE; 1588 if (dmabuf_p) { 1589 nxge_mp->use_buf_pool = B_TRUE; 1590 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1591 *dmamsg_p = *dmabuf_p; 1592 dmamsg_p->nblocks = 1; 1593 dmamsg_p->block_size = size; 1594 dmamsg_p->alength = size; 1595 buffer = (uchar_t *)dmabuf_p->kaddrp; 1596 1597 dmabuf_p->kaddrp = (void *) 1598 ((char *)dmabuf_p->kaddrp + size); 1599 dmabuf_p->ioaddr_pp = (void *) 1600 ((char *)dmabuf_p->ioaddr_pp + size); 1601 dmabuf_p->alength -= size; 1602 dmabuf_p->offset += size; 1603 dmabuf_p->dma_cookie.dmac_laddress += size; 1604 dmabuf_p->dma_cookie.dmac_size -= size; 1605 1606 } else { 1607 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1608 if (buffer == NULL) { 1609 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1610 "Allocation of a receive page failed.")); 1611 goto nxge_allocb_fail1; 1612 } 1613 } 1614 1615 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1616 if (nxge_mp->rx_mblk_p == NULL) { 1617 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1618 goto nxge_allocb_fail2; 1619 } 1620 1621 nxge_mp->buffer = buffer; 1622 nxge_mp->block_size = size; 1623 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1624 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1625 nxge_mp->ref_cnt = 1; 1626 nxge_mp->free = B_TRUE; 1627 nxge_mp->rx_use_bcopy = B_FALSE; 1628 1629 atomic_inc_32(&nxge_mblks_pending); 1630 1631 goto nxge_allocb_exit; 1632 1633 nxge_allocb_fail2: 1634 if (!nxge_mp->use_buf_pool) { 1635 KMEM_FREE(buffer, size); 1636 } 1637 1638 nxge_allocb_fail1: 1639 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1640 nxge_mp = NULL; 1641 1642 nxge_allocb_exit: 1643 return (nxge_mp); 1644 } 1645 1646 p_mblk_t 1647 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1648 { 1649 p_mblk_t mp; 1650 1651 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1652 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1653 "offset = 0x%08X " 1654 "size = 0x%08X", 1655 nxge_mp, offset, size)); 1656 1657 mp = desballoc(&nxge_mp->buffer[offset], size, 1658 0, &nxge_mp->freeb); 1659 if (mp == NULL) { 1660 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1661 goto nxge_dupb_exit; 1662 } 1663 atomic_inc_32(&nxge_mp->ref_cnt); 1664 1665 1666 nxge_dupb_exit: 1667 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1668 nxge_mp)); 1669 return (mp); 1670 } 1671 1672 p_mblk_t 1673 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1674 { 1675 p_mblk_t mp; 1676 uchar_t *dp; 1677 1678 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1679 if (mp == NULL) { 1680 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1681 goto nxge_dupb_bcopy_exit; 1682 } 1683 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1684 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1685 mp->b_wptr = dp + size; 1686 1687 nxge_dupb_bcopy_exit: 1688 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1689 nxge_mp)); 1690 return (mp); 1691 } 1692 1693 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1694 p_rx_msg_t rx_msg_p); 1695 1696 void 1697 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1698 { 1699 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, "==> nxge_post_page")); 1700 1701 /* Reuse this buffer */ 1702 rx_msg_p->free = B_FALSE; 1703 rx_msg_p->cur_usage_cnt = 0; 1704 rx_msg_p->max_usage_cnt = 0; 1705 rx_msg_p->pkt_buf_size = 0; 1706 1707 if (rx_rbr_p->rbr_use_bcopy) { 1708 rx_msg_p->rx_use_bcopy = B_FALSE; 1709 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1710 } 1711 1712 /* 1713 * Get the rbr header pointer and its offset index. 1714 */ 1715 MUTEX_ENTER(&rx_rbr_p->post_lock); 1716 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1717 rx_rbr_p->rbr_wrap_mask); 1718 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1719 MUTEX_EXIT(&rx_rbr_p->post_lock); 1720 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1721 rx_rbr_p->rdc, 1); 1722 1723 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1724 "<== nxge_post_page (channel %d post_next_index %d)", 1725 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1726 1727 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1728 } 1729 1730 void 1731 nxge_freeb(p_rx_msg_t rx_msg_p) 1732 { 1733 size_t size; 1734 uchar_t *buffer = NULL; 1735 int ref_cnt; 1736 boolean_t free_state = B_FALSE; 1737 1738 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1739 1740 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1741 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1742 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1743 rx_msg_p, nxge_mblks_pending)); 1744 1745 /* 1746 * First we need to get the free state, then 1747 * atomic decrement the reference count to prevent 1748 * the race condition with the interrupt thread that 1749 * is processing a loaned up buffer block. 1750 */ 1751 free_state = rx_msg_p->free; 1752 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1753 if (!ref_cnt) { 1754 atomic_dec_32(&nxge_mblks_pending); 1755 buffer = rx_msg_p->buffer; 1756 size = rx_msg_p->block_size; 1757 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1758 "will free: rx_msg_p = $%p (block pending %d)", 1759 rx_msg_p, nxge_mblks_pending)); 1760 1761 if (!rx_msg_p->use_buf_pool) { 1762 KMEM_FREE(buffer, size); 1763 } 1764 1765 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1766 1767 if (ring) { 1768 /* 1769 * Decrement the receive buffer ring's reference 1770 * count, too. 1771 */ 1772 atomic_dec_32(&ring->rbr_ref_cnt); 1773 1774 /* 1775 * Free the receive buffer ring, if 1776 * 1. all the receive buffers have been freed 1777 * 2. and we are in the proper state (that is, 1778 * we are not UNMAPPING). 1779 */ 1780 if (ring->rbr_ref_cnt == 0 && 1781 ring->rbr_state == RBR_UNMAPPED) { 1782 /* 1783 * Free receive data buffers, 1784 * buffer index information 1785 * (rxring_info) and 1786 * the message block ring. 1787 */ 1788 NXGE_DEBUG_MSG((NULL, RX_CTL, 1789 "nxge_freeb:rx_msg_p = $%p " 1790 "(block pending %d) free buffers", 1791 rx_msg_p, nxge_mblks_pending)); 1792 nxge_rxdma_databuf_free(ring); 1793 if (ring->ring_info) { 1794 KMEM_FREE(ring->ring_info, 1795 sizeof (rxring_info_t)); 1796 } 1797 1798 if (ring->rx_msg_ring) { 1799 KMEM_FREE(ring->rx_msg_ring, 1800 ring->tnblocks * 1801 sizeof (p_rx_msg_t)); 1802 } 1803 KMEM_FREE(ring, sizeof (*ring)); 1804 } 1805 } 1806 return; 1807 } 1808 1809 /* 1810 * Repost buffer. 
1811 */ 1812 if (free_state && (ref_cnt == 1) && ring) { 1813 NXGE_DEBUG_MSG((NULL, RX_CTL, 1814 "nxge_freeb: post page $%p:", rx_msg_p)); 1815 if (ring->rbr_state == RBR_POSTING) 1816 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1817 } 1818 1819 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1820 } 1821 1822 uint_t 1823 nxge_rx_intr(void *arg1, void *arg2) 1824 { 1825 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1826 p_nxge_t nxgep = (p_nxge_t)arg2; 1827 p_nxge_ldg_t ldgp; 1828 uint8_t channel; 1829 npi_handle_t handle; 1830 rx_dma_ctl_stat_t cs; 1831 p_rx_rcr_ring_t rcr_ring; 1832 mblk_t *mp; 1833 1834 if (ldvp == NULL) { 1835 NXGE_DEBUG_MSG((NULL, INT_CTL, 1836 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1837 nxgep, ldvp)); 1838 return (DDI_INTR_CLAIMED); 1839 } 1840 1841 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1842 nxgep = ldvp->nxgep; 1843 } 1844 1845 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1846 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1847 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1848 "<== nxge_rx_intr: interface not started or intialized")); 1849 return (DDI_INTR_CLAIMED); 1850 } 1851 1852 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1853 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1854 nxgep, ldvp)); 1855 1856 /* 1857 * Get the PIO handle. 1858 */ 1859 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1860 1861 /* 1862 * Get the ring to enable us to process packets. 1863 */ 1864 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1865 1866 /* 1867 * The RCR ring lock must be held when packets 1868 * are being processed and the hardware registers are 1869 * being read or written to prevent race condition 1870 * among the interrupt thread, the polling thread 1871 * (will cause fatal errors such as rcrincon bit set) 1872 * and the setting of the poll_flag. 1873 */ 1874 MUTEX_ENTER(&rcr_ring->lock); 1875 1876 /* 1877 * Get the control and status for this channel. 1878 */ 1879 channel = ldvp->channel; 1880 ldgp = ldvp->ldgp; 1881 1882 if (!isLDOMguest(nxgep) && (!nxgep->rx_channel_started[channel])) { 1883 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1884 "<== nxge_rx_intr: channel is not started")); 1885 1886 /* 1887 * We received an interrupt before the ring is started. 1888 */ 1889 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, 1890 &cs.value); 1891 cs.value &= RX_DMA_CTL_STAT_WR1C; 1892 cs.bits.hdw.mex = 1; 1893 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1894 cs.value); 1895 1896 /* 1897 * Rearm this logical group if this is a single device 1898 * group. 1899 */ 1900 if (ldgp->nldvs == 1) { 1901 if (isLDOMguest(nxgep)) { 1902 nxge_hio_ldgimgn(nxgep, ldgp); 1903 } else { 1904 ldgimgm_t mgm; 1905 1906 mgm.value = 0; 1907 mgm.bits.ldw.arm = 1; 1908 mgm.bits.ldw.timer = ldgp->ldg_timer; 1909 1910 NXGE_REG_WR64(handle, 1911 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1912 mgm.value); 1913 } 1914 } 1915 MUTEX_EXIT(&rcr_ring->lock); 1916 return (DDI_INTR_CLAIMED); 1917 } 1918 1919 ASSERT(rcr_ring->ldgp == ldgp); 1920 ASSERT(rcr_ring->ldvp == ldvp); 1921 1922 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1923 1924 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1925 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1926 channel, 1927 cs.value, 1928 cs.bits.hdw.rcrto, 1929 cs.bits.hdw.rcrthres)); 1930 1931 mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1932 1933 /* error events. 
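 * Any bit covered by RX_DMA_CTL_STAT_ERROR flags a channel error;
 * nxge_rx_err_evnts() below records it in the channel statistics and,
 * for fatal errors, may start channel recovery.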
*/ 1934 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1935 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1936 } 1937 1938 /* 1939 * Enable the mailbox update interrupt if we want 1940 * to use mailbox. We probably don't need to use 1941 * mailbox as it only saves us one pio read. 1942 * Also write 1 to rcrthres and rcrto to clear 1943 * these two edge triggered bits. 1944 */ 1945 cs.value &= RX_DMA_CTL_STAT_WR1C; 1946 cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 1947 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1948 cs.value); 1949 1950 /* 1951 * If the polling mode is enabled, disable the interrupt. 1952 */ 1953 if (rcr_ring->poll_flag) { 1954 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1955 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1956 "(disabling interrupts)", channel, ldgp, ldvp)); 1957 /* 1958 * Disarm this logical group if this is a single device 1959 * group. 1960 */ 1961 if (ldgp->nldvs == 1) { 1962 ldgimgm_t mgm; 1963 mgm.value = 0; 1964 mgm.bits.ldw.arm = 0; 1965 NXGE_REG_WR64(handle, 1966 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 1967 } 1968 } else { 1969 /* 1970 * Rearm this logical group if this is a single device 1971 * group. 1972 */ 1973 if (ldgp->nldvs == 1) { 1974 if (isLDOMguest(nxgep)) { 1975 nxge_hio_ldgimgn(nxgep, ldgp); 1976 } else { 1977 ldgimgm_t mgm; 1978 1979 mgm.value = 0; 1980 mgm.bits.ldw.arm = 1; 1981 mgm.bits.ldw.timer = ldgp->ldg_timer; 1982 1983 NXGE_REG_WR64(handle, 1984 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1985 mgm.value); 1986 } 1987 } 1988 1989 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1990 "==> nxge_rx_intr: rdc %d ldgp $%p " 1991 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1992 } 1993 MUTEX_EXIT(&rcr_ring->lock); 1994 1995 if (mp) { 1996 if (!isLDOMguest(nxgep)) 1997 mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 1998 rcr_ring->rcr_gen_num); 1999 #if defined(sun4v) 2000 else { /* isLDOMguest(nxgep) */ 2001 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 2002 nxgep->nxge_hw_p->hio; 2003 nx_vio_fp_t *vio = &nhd->hio.vio; 2004 2005 if (vio->cb.vio_net_rx_cb) { 2006 (*vio->cb.vio_net_rx_cb) 2007 (nxgep->hio_vr->vhp, mp); 2008 } 2009 } 2010 #endif 2011 } 2012 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 2013 return (DDI_INTR_CLAIMED); 2014 } 2015 2016 /* 2017 * Process the packets received in the specified logical device 2018 * and pass up a chain of message blocks to the upper layer. 2019 * The RCR ring lock must be held before calling this function. 
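 * This wrapper simply looks up the RCR ring for the logical device and
 * calls nxge_rx_pkts() with a byte budget of -1, i.e. with no limit on
 * how much data is picked up in one pass.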
2020 */ 2021 static mblk_t * 2022 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 2023 { 2024 p_mblk_t mp; 2025 p_rx_rcr_ring_t rcrp; 2026 2027 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 2028 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 2029 2030 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2031 "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d " 2032 "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle)); 2033 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 2034 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2035 "<== nxge_rx_pkts_vring: no mp")); 2036 return (NULL); 2037 } 2038 2039 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 2040 mp)); 2041 2042 #ifdef NXGE_DEBUG 2043 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2044 "==> nxge_rx_pkts_vring:calling mac_rx " 2045 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 2046 "mac_handle $%p", 2047 mp->b_wptr - mp->b_rptr, 2048 mp, mp->b_cont, mp->b_next, 2049 rcrp, rcrp->rcr_mac_handle)); 2050 2051 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2052 "==> nxge_rx_pkts_vring: dump packets " 2053 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 2054 mp, 2055 mp->b_rptr, 2056 mp->b_wptr, 2057 nxge_dump_packet((char *)mp->b_rptr, 2058 mp->b_wptr - mp->b_rptr))); 2059 if (mp->b_cont) { 2060 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2061 "==> nxge_rx_pkts_vring: dump b_cont packets " 2062 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 2063 mp->b_cont, 2064 mp->b_cont->b_rptr, 2065 mp->b_cont->b_wptr, 2066 nxge_dump_packet((char *)mp->b_cont->b_rptr, 2067 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 2068 } 2069 if (mp->b_next) { 2070 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2071 "==> nxge_rx_pkts_vring: dump next packets " 2072 "(b_rptr $%p): %s", 2073 mp->b_next->b_rptr, 2074 nxge_dump_packet((char *)mp->b_next->b_rptr, 2075 mp->b_next->b_wptr - mp->b_next->b_rptr))); 2076 } 2077 #endif 2078 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2079 "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ", 2080 rcrp->rdc, rcrp->rcr_mac_handle)); 2081 2082 return (mp); 2083 } 2084 2085 2086 /* 2087 * This routine is the main packet receive processing function. 2088 * It gets the packet type, error code, and buffer related 2089 * information from the receive completion entry. 2090 * How many completion entries to process is based on the number of packets 2091 * queued by the hardware, a hardware maintained tail pointer 2092 * and a configurable receive packet count. 2093 * 2094 * A chain of message blocks will be created as result of processing 2095 * the completion entries. This chain of message blocks will be returned and 2096 * a hardware control status register will be updated with the number of 2097 * packets were removed from the hardware queue. 2098 * 2099 * The RCR ring lock is held when entering this function. 
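 *
 * In outline (a sketch of the code that follows): read the RCR qlen,
 * clamp it to nxge_max_rx_pkts, build the mblk chain one completion
 * entry at a time via nxge_receive_packet(), and finally write the
 * pktread/ptrread counts back to the RX_DMA_CTL_STAT register so the
 * hardware can reclaim the processed entries.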
2100 */ 2101 static mblk_t * 2102 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 2103 int bytes_to_pickup) 2104 { 2105 npi_handle_t handle; 2106 uint8_t channel; 2107 uint32_t comp_rd_index; 2108 p_rcr_entry_t rcr_desc_rd_head_p; 2109 p_rcr_entry_t rcr_desc_rd_head_pp; 2110 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 2111 uint16_t qlen, nrcr_read, npkt_read; 2112 uint32_t qlen_hw; 2113 boolean_t multi; 2114 rcrcfig_b_t rcr_cfg_b; 2115 int totallen = 0; 2116 #if defined(_BIG_ENDIAN) 2117 npi_status_t rs = NPI_SUCCESS; 2118 #endif 2119 2120 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 2121 "channel %d", rcr_p->rdc)); 2122 2123 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 2124 return (NULL); 2125 } 2126 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2127 channel = rcr_p->rdc; 2128 2129 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2130 "==> nxge_rx_pkts: START: rcr channel %d " 2131 "head_p $%p head_pp $%p index %d ", 2132 channel, rcr_p->rcr_desc_rd_head_p, 2133 rcr_p->rcr_desc_rd_head_pp, 2134 rcr_p->comp_rd_index)); 2135 2136 2137 #if !defined(_BIG_ENDIAN) 2138 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2139 #else 2140 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2141 if (rs != NPI_SUCCESS) { 2142 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2143 "channel %d, get qlen failed 0x%08x", 2144 channel, rs)); 2145 return (NULL); 2146 } 2147 #endif 2148 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2149 "qlen %d", channel, qlen)); 2150 2151 2152 2153 if (!qlen) { 2154 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2155 "==> nxge_rx_pkts:rcr channel %d " 2156 "qlen %d (no pkts)", channel, qlen)); 2157 2158 return (NULL); 2159 } 2160 2161 comp_rd_index = rcr_p->comp_rd_index; 2162 2163 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2164 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2165 nrcr_read = npkt_read = 0; 2166 2167 /* 2168 * Number of packets queued 2169 * (The jumbo or multi packet will be counted as only one 2170 * packets and it may take up more than one completion entry). 2171 */ 2172 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2173 qlen : nxge_max_rx_pkts; 2174 head_mp = NULL; 2175 tail_mp = &head_mp; 2176 nmp = mp_cont = NULL; 2177 multi = B_FALSE; 2178 2179 while (qlen_hw) { 2180 2181 #ifdef NXGE_DEBUG 2182 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2183 #endif 2184 /* 2185 * Process one completion ring entry. 
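 * nxge_receive_packet() returns its data through &nmp (the first
 * buffer of a frame) and &mp_cont (a continuation buffer), and sets
 * `multi' while further completion entries still belong to the same
 * frame.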
2186 */ 2187 nxge_receive_packet(nxgep, 2188 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2189 2190 /* 2191 * message chaining modes 2192 */ 2193 if (nmp) { 2194 nmp->b_next = NULL; 2195 if (!multi && !mp_cont) { /* frame fits a partition */ 2196 *tail_mp = nmp; 2197 tail_mp = &nmp->b_next; 2198 totallen += MBLKL(nmp); 2199 nmp = NULL; 2200 } else if (multi && !mp_cont) { /* first segment */ 2201 *tail_mp = nmp; 2202 tail_mp = &nmp->b_cont; 2203 totallen += MBLKL(nmp); 2204 } else if (multi && mp_cont) { /* mid of multi segs */ 2205 *tail_mp = mp_cont; 2206 tail_mp = &mp_cont->b_cont; 2207 totallen += MBLKL(mp_cont); 2208 } else if (!multi && mp_cont) { /* last segment */ 2209 *tail_mp = mp_cont; 2210 tail_mp = &nmp->b_next; 2211 totallen += MBLKL(mp_cont); 2212 nmp = NULL; 2213 } 2214 } 2215 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2216 "==> nxge_rx_pkts: loop: rcr channel %d " 2217 "before updating: multi %d " 2218 "nrcr_read %d " 2219 "npk read %d " 2220 "head_pp $%p index %d ", 2221 channel, 2222 multi, 2223 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2224 comp_rd_index)); 2225 2226 if (!multi) { 2227 qlen_hw--; 2228 npkt_read++; 2229 } 2230 2231 /* 2232 * Update the next read entry. 2233 */ 2234 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2235 rcr_p->comp_wrap_mask); 2236 2237 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2238 rcr_p->rcr_desc_first_p, 2239 rcr_p->rcr_desc_last_p); 2240 2241 nrcr_read++; 2242 2243 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2244 "<== nxge_rx_pkts: (SAM, process one packet) " 2245 "nrcr_read %d", 2246 nrcr_read)); 2247 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2248 "==> nxge_rx_pkts: loop: rcr channel %d " 2249 "multi %d " 2250 "nrcr_read %d " 2251 "npk read %d " 2252 "head_pp $%p index %d ", 2253 channel, 2254 multi, 2255 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2256 comp_rd_index)); 2257 2258 if ((bytes_to_pickup != -1) && 2259 (totallen >= bytes_to_pickup)) { 2260 break; 2261 } 2262 } 2263 2264 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2265 rcr_p->comp_rd_index = comp_rd_index; 2266 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2267 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2268 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2269 2270 rcr_p->intr_timeout = (nxgep->intr_timeout < 2271 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2272 nxgep->intr_timeout; 2273 2274 rcr_p->intr_threshold = (nxgep->intr_threshold < 2275 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2276 nxgep->intr_threshold; 2277 2278 rcr_cfg_b.value = 0x0ULL; 2279 rcr_cfg_b.bits.ldw.entout = 1; 2280 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2281 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2282 2283 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2284 channel, rcr_cfg_b.value); 2285 } 2286 2287 cs.bits.ldw.pktread = npkt_read; 2288 cs.bits.ldw.ptrread = nrcr_read; 2289 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2290 channel, cs.value); 2291 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2292 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2293 "head_pp $%p index %016llx ", 2294 channel, 2295 rcr_p->rcr_desc_rd_head_pp, 2296 rcr_p->comp_rd_index)); 2297 /* 2298 * Update RCR buffer pointer read and number of packets 2299 * read. 
2300 */ 2301 2302 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2303 "channel %d", rcr_p->rdc)); 2304 2305 return (head_mp); 2306 } 2307 2308 void 2309 nxge_receive_packet(p_nxge_t nxgep, 2310 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2311 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2312 { 2313 p_mblk_t nmp = NULL; 2314 uint64_t multi; 2315 uint64_t dcf_err; 2316 uint8_t channel; 2317 2318 boolean_t first_entry = B_TRUE; 2319 boolean_t is_tcp_udp = B_FALSE; 2320 boolean_t buffer_free = B_FALSE; 2321 boolean_t error_send_up = B_FALSE; 2322 uint8_t error_type; 2323 uint16_t l2_len; 2324 uint16_t skip_len; 2325 uint8_t pktbufsz_type; 2326 uint64_t rcr_entry; 2327 uint64_t *pkt_buf_addr_pp; 2328 uint64_t *pkt_buf_addr_p; 2329 uint32_t buf_offset; 2330 uint32_t bsize; 2331 uint32_t error_disp_cnt; 2332 uint32_t msg_index; 2333 p_rx_rbr_ring_t rx_rbr_p; 2334 p_rx_msg_t *rx_msg_ring_p; 2335 p_rx_msg_t rx_msg_p; 2336 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2337 nxge_status_t status = NXGE_OK; 2338 boolean_t is_valid = B_FALSE; 2339 p_nxge_rx_ring_stats_t rdc_stats; 2340 uint32_t bytes_read; 2341 uint64_t pkt_type; 2342 uint64_t frag; 2343 boolean_t pkt_too_long_err = B_FALSE; 2344 #ifdef NXGE_DEBUG 2345 int dump_len; 2346 #endif 2347 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2348 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2349 2350 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2351 2352 multi = (rcr_entry & RCR_MULTI_MASK); 2353 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2354 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2355 2356 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2357 frag = (rcr_entry & RCR_FRAG_MASK); 2358 2359 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2360 2361 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2362 RCR_PKTBUFSZ_SHIFT); 2363 #if defined(__i386) 2364 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2365 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2366 #else 2367 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2368 RCR_PKT_BUF_ADDR_SHIFT); 2369 #endif 2370 2371 channel = rcr_p->rdc; 2372 2373 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2374 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2375 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2376 "error_type 0x%x pkt_type 0x%x " 2377 "pktbufsz_type %d ", 2378 rcr_desc_rd_head_p, 2379 rcr_entry, pkt_buf_addr_pp, l2_len, 2380 multi, 2381 error_type, 2382 pkt_type, 2383 pktbufsz_type)); 2384 2385 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2386 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2387 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2388 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2389 rcr_entry, pkt_buf_addr_pp, l2_len, 2390 multi, 2391 error_type, 2392 pkt_type)); 2393 2394 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2395 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2396 "full pkt_buf_addr_pp $%p l2_len %d", 2397 rcr_entry, pkt_buf_addr_pp, l2_len)); 2398 2399 /* get the stats ptr */ 2400 rdc_stats = rcr_p->rdc_stats; 2401 2402 if (!l2_len) { 2403 2404 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2405 "<== nxge_receive_packet: failed: l2 length is 0.")); 2406 return; 2407 } 2408 2409 /* 2410 * Software workaround for BMAC hardware limitation that allows 2411 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2412 * instead of 0x2400 for jumbo. 
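 * Because of this, a frame slightly longer than mac.maxframesize can
 * still reach the driver; the check below flags it (pkt_too_long_err),
 * and it is then counted as an input error and dropped later in this
 * function rather than being passed up.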
2413 */ 2414 if (l2_len > nxgep->mac.maxframesize) { 2415 pkt_too_long_err = B_TRUE; 2416 } 2417 2418 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2419 l2_len -= ETHERFCSL; 2420 2421 /* shift 6 bits to get the full io address */ 2422 #if defined(__i386) 2423 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2424 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2425 #else 2426 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2427 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2428 #endif 2429 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2430 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2431 "full pkt_buf_addr_pp $%p l2_len %d", 2432 rcr_entry, pkt_buf_addr_pp, l2_len)); 2433 2434 rx_rbr_p = rcr_p->rx_rbr_p; 2435 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2436 2437 if (first_entry) { 2438 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2439 RXDMA_HDR_SIZE_DEFAULT); 2440 2441 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2442 "==> nxge_receive_packet: first entry 0x%016llx " 2443 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2444 rcr_entry, pkt_buf_addr_pp, l2_len, 2445 hdr_size)); 2446 } 2447 2448 MUTEX_ENTER(&rx_rbr_p->lock); 2449 2450 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2451 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2452 "full pkt_buf_addr_pp $%p l2_len %d", 2453 rcr_entry, pkt_buf_addr_pp, l2_len)); 2454 2455 /* 2456 * Packet buffer address in the completion entry points 2457 * to the starting buffer address (offset 0). 2458 * Use the starting buffer address to locate the corresponding 2459 * kernel address. 2460 */ 2461 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2462 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2463 &buf_offset, 2464 &msg_index); 2465 2466 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2467 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2468 "full pkt_buf_addr_pp $%p l2_len %d", 2469 rcr_entry, pkt_buf_addr_pp, l2_len)); 2470 2471 if (status != NXGE_OK) { 2472 MUTEX_EXIT(&rx_rbr_p->lock); 2473 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2474 "<== nxge_receive_packet: found vaddr failed %d", 2475 status)); 2476 return; 2477 } 2478 2479 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2480 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2481 "full pkt_buf_addr_pp $%p l2_len %d", 2482 rcr_entry, pkt_buf_addr_pp, l2_len)); 2483 2484 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2485 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2486 "full pkt_buf_addr_pp $%p l2_len %d", 2487 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2488 2489 rx_msg_p = rx_msg_ring_p[msg_index]; 2490 2491 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2492 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2493 "full pkt_buf_addr_pp $%p l2_len %d", 2494 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2495 2496 switch (pktbufsz_type) { 2497 case RCR_PKTBUFSZ_0: 2498 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2499 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2500 "==> nxge_receive_packet: 0 buf %d", bsize)); 2501 break; 2502 case RCR_PKTBUFSZ_1: 2503 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2504 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2505 "==> nxge_receive_packet: 1 buf %d", bsize)); 2506 break; 2507 case RCR_PKTBUFSZ_2: 2508 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2509 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2510 "==> nxge_receive_packet: 2 buf %d", bsize)); 2511 break; 2512 case RCR_SINGLE_BLOCK: 2513 bsize = rx_msg_p->block_size; 2514 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2515 "==> nxge_receive_packet: single %d", bsize)); 2516 2517 break; 2518 default: 2519 MUTEX_EXIT(&rx_rbr_p->lock); 2520 return; 2521 } 2522 2523 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2524 (buf_offset + sw_offset_bytes), 
2525 (hdr_size + l2_len), 2526 DDI_DMA_SYNC_FORCPU); 2527 2528 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2529 "==> nxge_receive_packet: after first dump:usage count")); 2530 2531 if (rx_msg_p->cur_usage_cnt == 0) { 2532 if (rx_rbr_p->rbr_use_bcopy) { 2533 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2534 if (rx_rbr_p->rbr_consumed < 2535 rx_rbr_p->rbr_threshold_hi) { 2536 if (rx_rbr_p->rbr_threshold_lo == 0 || 2537 ((rx_rbr_p->rbr_consumed >= 2538 rx_rbr_p->rbr_threshold_lo) && 2539 (rx_rbr_p->rbr_bufsize_type >= 2540 pktbufsz_type))) { 2541 rx_msg_p->rx_use_bcopy = B_TRUE; 2542 } 2543 } else { 2544 rx_msg_p->rx_use_bcopy = B_TRUE; 2545 } 2546 } 2547 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2548 "==> nxge_receive_packet: buf %d (new block) ", 2549 bsize)); 2550 2551 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2552 rx_msg_p->pkt_buf_size = bsize; 2553 rx_msg_p->cur_usage_cnt = 1; 2554 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2555 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2556 "==> nxge_receive_packet: buf %d " 2557 "(single block) ", 2558 bsize)); 2559 /* 2560 * Buffer can be reused once the free function 2561 * is called. 2562 */ 2563 rx_msg_p->max_usage_cnt = 1; 2564 buffer_free = B_TRUE; 2565 } else { 2566 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2567 if (rx_msg_p->max_usage_cnt == 1) { 2568 buffer_free = B_TRUE; 2569 } 2570 } 2571 } else { 2572 rx_msg_p->cur_usage_cnt++; 2573 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2574 buffer_free = B_TRUE; 2575 } 2576 } 2577 2578 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2579 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2580 msg_index, l2_len, 2581 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2582 2583 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2584 rdc_stats->ierrors++; 2585 if (dcf_err) { 2586 rdc_stats->dcf_err++; 2587 #ifdef NXGE_DEBUG 2588 if (!rdc_stats->dcf_err) { 2589 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2590 "nxge_receive_packet: channel %d dcf_err rcr" 2591 " 0x%llx", channel, rcr_entry)); 2592 } 2593 #endif 2594 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2595 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2596 } else if (pkt_too_long_err) { 2597 rdc_stats->pkt_too_long_err++; 2598 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2599 " channel %d packet length [%d] > " 2600 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2601 nxgep->mac.maxframesize)); 2602 } else { 2603 /* Update error stats */ 2604 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2605 rdc_stats->errlog.compl_err_type = error_type; 2606 2607 switch (error_type) { 2608 /* 2609 * Do not send FMA ereport for RCR_L2_ERROR and 2610 * RCR_L4_CSUM_ERROR because most likely they indicate 2611 * back pressure rather than HW failures. 2612 */ 2613 case RCR_L2_ERROR: 2614 rdc_stats->l2_err++; 2615 if (rdc_stats->l2_err < 2616 error_disp_cnt) { 2617 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2618 " nxge_receive_packet:" 2619 " channel %d RCR L2_ERROR", 2620 channel)); 2621 } 2622 break; 2623 case RCR_L4_CSUM_ERROR: 2624 error_send_up = B_TRUE; 2625 rdc_stats->l4_cksum_err++; 2626 if (rdc_stats->l4_cksum_err < 2627 error_disp_cnt) { 2628 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2629 " nxge_receive_packet:" 2630 " channel %d" 2631 " RCR L4_CSUM_ERROR", channel)); 2632 } 2633 break; 2634 /* 2635 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2636 * RCR_ZCP_SOFT_ERROR because they reflect the same 2637 * FFLP and ZCP errors that have been reported by 2638 * nxge_fflp.c and nxge_zcp.c. 
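 * Both of these soft errors set error_send_up, so the frame itself is
 * still handed up the stack; only the per-channel counters (and a
 * rate-limited log message) are updated here.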
*/ 2640 case RCR_FFLP_SOFT_ERROR: 2641 error_send_up = B_TRUE; 2642 rdc_stats->fflp_soft_err++; 2643 if (rdc_stats->fflp_soft_err < 2644 error_disp_cnt) { 2645 NXGE_ERROR_MSG((nxgep, 2646 NXGE_ERR_CTL, 2647 " nxge_receive_packet:" 2648 " channel %d" 2649 " RCR FFLP_SOFT_ERROR", channel)); 2650 } 2651 break; 2652 case RCR_ZCP_SOFT_ERROR: 2653 error_send_up = B_TRUE; 2654 rdc_stats->zcp_soft_err++; 2655 if (rdc_stats->zcp_soft_err < 2656 error_disp_cnt) 2657 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2658 " nxge_receive_packet: Channel %d" 2659 " RCR ZCP_SOFT_ERROR", channel)); 2660 break; 2661 default: 2662 rdc_stats->rcr_unknown_err++; 2663 if (rdc_stats->rcr_unknown_err 2664 < error_disp_cnt) { 2665 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2666 " nxge_receive_packet: Channel %d" 2667 " RCR entry 0x%llx error 0x%x", 2668 channel, rcr_entry, error_type)); 2669 } 2670 break; 2671 } 2672 } 2673 2674 /* 2675 * Update and repost buffer block if max usage 2676 * count is reached. 2677 */ 2678 if (error_send_up == B_FALSE) { 2679 atomic_inc_32(&rx_msg_p->ref_cnt); 2680 if (buffer_free == B_TRUE) { 2681 rx_msg_p->free = B_TRUE; 2682 } 2683 2684 MUTEX_EXIT(&rx_rbr_p->lock); 2685 nxge_freeb(rx_msg_p); 2686 return; 2687 } 2688 } 2689 2690 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2691 "==> nxge_receive_packet: DMA sync second ")); 2692 2693 bytes_read = rcr_p->rcvd_pkt_bytes; 2694 skip_len = sw_offset_bytes + hdr_size; 2695 if (!rx_msg_p->rx_use_bcopy) { 2696 /* 2697 * For loaned-up buffers, the driver reference count 2698 * is incremented first and then the free flag is set. 2699 */ 2700 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2701 if (first_entry) { 2702 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2703 if (l2_len < bsize - skip_len) { 2704 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2705 } else { 2706 nmp->b_wptr = &nmp->b_rptr[bsize 2707 - skip_len]; 2708 } 2709 } else { 2710 if (l2_len - bytes_read < bsize) { 2711 nmp->b_wptr = 2712 &nmp->b_rptr[l2_len - bytes_read]; 2713 } else { 2714 nmp->b_wptr = &nmp->b_rptr[bsize]; 2715 } 2716 } 2717 } 2718 } else { 2719 if (first_entry) { 2720 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2721 l2_len < bsize - skip_len ? 2722 l2_len : bsize - skip_len); 2723 } else { 2724 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2725 l2_len - bytes_read < bsize ? 2726 l2_len - bytes_read : bsize); 2727 } 2728 } 2729 if (nmp != NULL) { 2730 if (first_entry) { 2731 /* 2732 * Jumbo packets may be received with more than one 2733 * buffer; increment ipackets for the first entry only. 2734 */ 2735 rdc_stats->ipackets++; 2736 2737 /* Update ibytes for kstat. */ 2738 rdc_stats->ibytes += skip_len 2739 + l2_len < bsize ? l2_len : bsize; 2740 /* 2741 * Update the number of bytes read so far for the 2742 * current frame. 2743 */ 2744 bytes_read = nmp->b_wptr - nmp->b_rptr; 2745 } else { 2746 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2747 l2_len - bytes_read : bsize; 2748 bytes_read += nmp->b_wptr - nmp->b_rptr; 2749 } 2750 2751 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2752 "==> nxge_receive_packet after dupb: " 2753 "rbr consumed %d " 2754 "pktbufsz_type %d " 2755 "nmp $%p rptr $%p wptr $%p " 2756 "buf_offset %d bzise %d l2_len %d skip_len %d", 2757 rx_rbr_p->rbr_consumed, 2758 pktbufsz_type, 2759 nmp, nmp->b_rptr, nmp->b_wptr, 2760 buf_offset, bsize, l2_len, skip_len)); 2761 } else { 2762 cmn_err(CE_WARN, "!nxge_receive_packet: " 2763 "update stats (error)"); 2764 atomic_inc_32(&rx_msg_p->ref_cnt); 2765 if (buffer_free == B_TRUE) { 2766 rx_msg_p->free = B_TRUE; 2767 } 2768 MUTEX_EXIT(&rx_rbr_p->lock); 2769 nxge_freeb(rx_msg_p); 2770 return; 2771 } 2772 2773 if (buffer_free == B_TRUE) { 2774 rx_msg_p->free = B_TRUE; 2775 } 2776 2777 is_valid = (nmp != NULL); 2778 2779 rcr_p->rcvd_pkt_bytes = bytes_read; 2780 2781 MUTEX_EXIT(&rx_rbr_p->lock); 2782 2783 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2784 atomic_inc_32(&rx_msg_p->ref_cnt); 2785 nxge_freeb(rx_msg_p); 2786 } 2787 2788 if (is_valid) { 2789 nmp->b_cont = NULL; 2790 if (first_entry) { 2791 *mp = nmp; 2792 *mp_cont = NULL; 2793 } else { 2794 *mp_cont = nmp; 2795 } 2796 } 2797 2798 /* 2799 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2800 * If a packet is not fragmented and no error bit is set, then 2801 * L4 checksum is OK. 2802 */ 2803 2804 if (is_valid && !multi) { 2805 /* 2806 * If the checksum flag nxge_chksum_offload 2807 * is 1, TCP and UDP packets can be sent 2808 * up with good checksum. If the checksum flag 2809 * is set to 0, checksum reporting will apply to 2810 * TCP packets only (workaround for a hardware bug). 2811 * If the checksum flag nxge_cksum_offload is 2812 * greater than 1, both TCP and UDP packets 2813 * will not be reported its hardware checksum results. 2814 */ 2815 if (nxge_cksum_offload == 1) { 2816 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2817 pkt_type == RCR_PKT_IS_UDP) ? 2818 B_TRUE: B_FALSE); 2819 } else if (!nxge_cksum_offload) { 2820 /* TCP checksum only. */ 2821 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2822 B_TRUE: B_FALSE); 2823 } 2824 2825 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2826 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2827 is_valid, multi, is_tcp_udp, frag, error_type)); 2828 2829 if (is_tcp_udp && !frag && !error_type) { 2830 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2831 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2832 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2833 "==> nxge_receive_packet: Full tcp/udp cksum " 2834 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2835 "error %d", 2836 is_valid, multi, is_tcp_udp, frag, error_type)); 2837 } 2838 } 2839 2840 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2841 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2842 2843 *multi_p = (multi == RCR_MULTI_MASK); 2844 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2845 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2846 *multi_p, nmp, *mp, *mp_cont)); 2847 } 2848 2849 /* 2850 * Enable polling for a ring. Interrupt for the ring is disabled when 2851 * the nxge interrupt comes (see nxge_rx_intr). 
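 *
 * Note that nxge_enable_poll() only sets ringp->poll_flag under the
 * ring lock; the logical device group is not disarmed here. That
 * happens the next time nxge_rx_intr() runs and sees poll_flag set.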
2852 */ 2853 int 2854 nxge_enable_poll(void *arg) 2855 { 2856 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2857 p_rx_rcr_ring_t ringp; 2858 p_nxge_t nxgep; 2859 p_nxge_ldg_t ldgp; 2860 uint32_t channel; 2861 2862 if (ring_handle == NULL) { 2863 return (0); 2864 } 2865 2866 nxgep = ring_handle->nxgep; 2867 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2868 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2869 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2870 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2871 ldgp = ringp->ldgp; 2872 if (ldgp == NULL) { 2873 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2874 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2875 ringp->rdc)); 2876 return (0); 2877 } 2878 2879 MUTEX_ENTER(&ringp->lock); 2880 /* enable polling */ 2881 if (ringp->poll_flag == 0) { 2882 ringp->poll_flag = 1; 2883 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2884 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2885 ringp->rdc)); 2886 } 2887 2888 MUTEX_EXIT(&ringp->lock); 2889 return (0); 2890 } 2891 /* 2892 * Disable polling for a ring and enable its interrupt. 2893 */ 2894 int 2895 nxge_disable_poll(void *arg) 2896 { 2897 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2898 p_rx_rcr_ring_t ringp; 2899 p_nxge_t nxgep; 2900 uint32_t channel; 2901 2902 if (ring_handle == NULL) { 2903 return (0); 2904 } 2905 2906 nxgep = ring_handle->nxgep; 2907 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2908 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2909 2910 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2911 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2912 2913 MUTEX_ENTER(&ringp->lock); 2914 2915 /* disable polling: enable interrupt */ 2916 if (ringp->poll_flag) { 2917 npi_handle_t handle; 2918 rx_dma_ctl_stat_t cs; 2919 uint8_t channel; 2920 p_nxge_ldg_t ldgp; 2921 2922 /* 2923 * Get the control and status for this channel. 2924 */ 2925 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2926 channel = ringp->rdc; 2927 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2928 channel, &cs.value); 2929 2930 /* 2931 * Enable mailbox update 2932 * Since packets were not read and the hardware uses 2933 * bits pktread and ptrread to update the queue 2934 * length, we need to set both bits to 0. 2935 */ 2936 cs.bits.ldw.pktread = 0; 2937 cs.bits.ldw.ptrread = 0; 2938 cs.bits.hdw.mex = 1; 2939 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2940 cs.value); 2941 2942 /* 2943 * Rearm this logical group if this is a single device 2944 * group. 2945 */ 2946 ldgp = ringp->ldgp; 2947 if (ldgp == NULL) { 2948 ringp->poll_flag = 0; 2949 MUTEX_EXIT(&ringp->lock); 2950 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2951 "==> nxge_disable_poll: no ldgp rdc %d " 2952 "(still set poll to 0", ringp->rdc)); 2953 return (0); 2954 } 2955 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2956 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2957 ringp->rdc, ldgp)); 2958 if (ldgp->nldvs == 1) { 2959 ldgimgm_t mgm; 2960 mgm.value = 0; 2961 mgm.bits.ldw.arm = 1; 2962 mgm.bits.ldw.timer = ldgp->ldg_timer; 2963 NXGE_REG_WR64(handle, 2964 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 2965 } 2966 ringp->poll_flag = 0; 2967 } 2968 2969 MUTEX_EXIT(&ringp->lock); 2970 return (0); 2971 } 2972 2973 /* 2974 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
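 *
 * The expected sequence (a sketch; the MAC layer drives these calls)
 * is roughly:
 *
 *	nxge_enable_poll(ring);			disable ring interrupt
 *	mp = nxge_rx_poll(ring, nbytes);	pull up to nbytes of packets
 *	...					repeat as needed
 *	nxge_disable_poll(ring);		re-arm the interrupt
 *
 * which is why this routine asserts that poll_flag is already set.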
2975 */ 2976 mblk_t * 2977 nxge_rx_poll(void *arg, int bytes_to_pickup) 2978 { 2979 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2980 p_rx_rcr_ring_t rcr_p; 2981 p_nxge_t nxgep; 2982 npi_handle_t handle; 2983 rx_dma_ctl_stat_t cs; 2984 mblk_t *mblk; 2985 p_nxge_ldv_t ldvp; 2986 uint32_t channel; 2987 2988 nxgep = ring_handle->nxgep; 2989 2990 /* 2991 * Get the control and status for this channel. 2992 */ 2993 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2994 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2995 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2996 MUTEX_ENTER(&rcr_p->lock); 2997 ASSERT(rcr_p->poll_flag == 1); 2998 2999 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 3000 3001 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3002 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 3003 rcr_p->rdc, rcr_p->poll_flag)); 3004 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 3005 3006 ldvp = rcr_p->ldvp; 3007 /* error events. */ 3008 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 3009 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 3010 } 3011 3012 MUTEX_EXIT(&rcr_p->lock); 3013 3014 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3015 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 3016 return (mblk); 3017 } 3018 3019 3020 /*ARGSUSED*/ 3021 static nxge_status_t 3022 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 3023 { 3024 p_nxge_rx_ring_stats_t rdc_stats; 3025 npi_handle_t handle; 3026 npi_status_t rs; 3027 boolean_t rxchan_fatal = B_FALSE; 3028 boolean_t rxport_fatal = B_FALSE; 3029 uint8_t portn; 3030 nxge_status_t status = NXGE_OK; 3031 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 3032 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 3033 3034 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3035 portn = nxgep->mac.portnum; 3036 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 3037 3038 if (cs.bits.hdw.rbr_tmout) { 3039 rdc_stats->rx_rbr_tmout++; 3040 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3041 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 3042 rxchan_fatal = B_TRUE; 3043 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3044 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 3045 } 3046 if (cs.bits.hdw.rsp_cnt_err) { 3047 rdc_stats->rsp_cnt_err++; 3048 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3049 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 3050 rxchan_fatal = B_TRUE; 3051 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3052 "==> nxge_rx_err_evnts(channel %d): " 3053 "rsp_cnt_err", channel)); 3054 } 3055 if (cs.bits.hdw.byte_en_bus) { 3056 rdc_stats->byte_en_bus++; 3057 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3058 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 3059 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3060 "==> nxge_rx_err_evnts(channel %d): " 3061 "fatal error: byte_en_bus", channel)); 3062 rxchan_fatal = B_TRUE; 3063 } 3064 if (cs.bits.hdw.rsp_dat_err) { 3065 rdc_stats->rsp_dat_err++; 3066 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3067 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 3068 rxchan_fatal = B_TRUE; 3069 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3070 "==> nxge_rx_err_evnts(channel %d): " 3071 "fatal error: rsp_dat_err", channel)); 3072 } 3073 if (cs.bits.hdw.rcr_ack_err) { 3074 rdc_stats->rcr_ack_err++; 3075 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3076 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 3077 rxchan_fatal = B_TRUE; 3078 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3079 "==> nxge_rx_err_evnts(channel %d): " 3080 "fatal error: rcr_ack_err", channel)); 3081 } 3082 if (cs.bits.hdw.dc_fifo_err) { 3083 rdc_stats->dc_fifo_err++; 3084 
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3085 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 3086 /* This is not a fatal error! */ 3087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3088 "==> nxge_rx_err_evnts(channel %d): " 3089 "dc_fifo_err", channel)); 3090 rxport_fatal = B_TRUE; 3091 } 3092 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 3093 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 3094 &rdc_stats->errlog.pre_par, 3095 &rdc_stats->errlog.sha_par)) 3096 != NPI_SUCCESS) { 3097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3098 "==> nxge_rx_err_evnts(channel %d): " 3099 "rcr_sha_par: get perr", channel)); 3100 return (NXGE_ERROR | rs); 3101 } 3102 if (cs.bits.hdw.rcr_sha_par) { 3103 rdc_stats->rcr_sha_par++; 3104 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3105 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 3106 rxchan_fatal = B_TRUE; 3107 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3108 "==> nxge_rx_err_evnts(channel %d): " 3109 "fatal error: rcr_sha_par", channel)); 3110 } 3111 if (cs.bits.hdw.rbr_pre_par) { 3112 rdc_stats->rbr_pre_par++; 3113 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3114 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 3115 rxchan_fatal = B_TRUE; 3116 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3117 "==> nxge_rx_err_evnts(channel %d): " 3118 "fatal error: rbr_pre_par", channel)); 3119 } 3120 } 3121 /* 3122 * The Following 4 status bits are for information, the system 3123 * is running fine. There is no need to send FMA ereports or 3124 * log messages. 3125 */ 3126 if (cs.bits.hdw.port_drop_pkt) { 3127 rdc_stats->port_drop_pkt++; 3128 } 3129 if (cs.bits.hdw.wred_drop) { 3130 rdc_stats->wred_drop++; 3131 } 3132 if (cs.bits.hdw.rbr_pre_empty) { 3133 rdc_stats->rbr_pre_empty++; 3134 } 3135 if (cs.bits.hdw.rcr_shadow_full) { 3136 rdc_stats->rcr_shadow_full++; 3137 } 3138 if (cs.bits.hdw.config_err) { 3139 rdc_stats->config_err++; 3140 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3141 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3142 rxchan_fatal = B_TRUE; 3143 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3144 "==> nxge_rx_err_evnts(channel %d): " 3145 "config error", channel)); 3146 } 3147 if (cs.bits.hdw.rcrincon) { 3148 rdc_stats->rcrincon++; 3149 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3150 NXGE_FM_EREPORT_RDMC_RCRINCON); 3151 rxchan_fatal = B_TRUE; 3152 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3153 "==> nxge_rx_err_evnts(channel %d): " 3154 "fatal error: rcrincon error", channel)); 3155 } 3156 if (cs.bits.hdw.rcrfull) { 3157 rdc_stats->rcrfull++; 3158 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3159 NXGE_FM_EREPORT_RDMC_RCRFULL); 3160 rxchan_fatal = B_TRUE; 3161 if (rdc_stats->rcrfull < error_disp_cnt) 3162 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3163 "==> nxge_rx_err_evnts(channel %d): " 3164 "fatal error: rcrfull error", channel)); 3165 } 3166 if (cs.bits.hdw.rbr_empty) { 3167 /* 3168 * This bit is for information, there is no need 3169 * send FMA ereport or log a message. 
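 * (rbr_empty presumably just means the RBR momentarily had no posted
 * receive buffers; the counter below is the only action taken.)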
3170 */ 3171 rdc_stats->rbr_empty++; 3172 } 3173 if (cs.bits.hdw.rbrfull) { 3174 rdc_stats->rbrfull++; 3175 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3176 NXGE_FM_EREPORT_RDMC_RBRFULL); 3177 rxchan_fatal = B_TRUE; 3178 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3179 "==> nxge_rx_err_evnts(channel %d): " 3180 "fatal error: rbr_full error", channel)); 3181 } 3182 if (cs.bits.hdw.rbrlogpage) { 3183 rdc_stats->rbrlogpage++; 3184 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3185 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3186 rxchan_fatal = B_TRUE; 3187 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3188 "==> nxge_rx_err_evnts(channel %d): " 3189 "fatal error: rbr logical page error", channel)); 3190 } 3191 if (cs.bits.hdw.cfiglogpage) { 3192 rdc_stats->cfiglogpage++; 3193 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3194 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3195 rxchan_fatal = B_TRUE; 3196 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3197 "==> nxge_rx_err_evnts(channel %d): " 3198 "fatal error: cfig logical page error", channel)); 3199 } 3200 3201 if (rxport_fatal) { 3202 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3203 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3204 portn)); 3205 if (isLDOMguest(nxgep)) { 3206 status = NXGE_ERROR; 3207 } else { 3208 status = nxge_ipp_fatal_err_recover(nxgep); 3209 if (status == NXGE_OK) { 3210 FM_SERVICE_RESTORED(nxgep); 3211 } 3212 } 3213 } 3214 3215 if (rxchan_fatal) { 3216 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3217 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3218 channel)); 3219 if (isLDOMguest(nxgep)) { 3220 status = NXGE_ERROR; 3221 } else { 3222 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3223 if (status == NXGE_OK) { 3224 FM_SERVICE_RESTORED(nxgep); 3225 } 3226 } 3227 } 3228 3229 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3230 3231 return (status); 3232 } 3233 3234 /* 3235 * nxge_rdc_hvio_setup 3236 * 3237 * This code appears to setup some Hypervisor variables. 3238 * 3239 * Arguments: 3240 * nxgep 3241 * channel 3242 * 3243 * Notes: 3244 * What does NIU_LP_WORKAROUND mean? 
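 *	This routine only records the original IO addresses and sizes
 *	of the channel's data-buffer and control DMA areas in the RBR
 *	ring (the hv_rx_buf_* and hv_rx_cntl_* fields); presumably a
 *	later step hands these to the hypervisor to establish the
 *	logical page mappings.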
3245 * 3246 * NPI/NXGE function calls: 3247 * na 3248 * 3249 * Context: 3250 * Any domain 3251 */ 3252 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3253 static void 3254 nxge_rdc_hvio_setup( 3255 nxge_t *nxgep, int channel) 3256 { 3257 nxge_dma_common_t *dma_common; 3258 nxge_dma_common_t *dma_control; 3259 rx_rbr_ring_t *ring; 3260 3261 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3262 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3263 3264 ring->hv_set = B_FALSE; 3265 3266 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3267 dma_common->orig_ioaddr_pp; 3268 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3269 dma_common->orig_alength; 3270 3271 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3272 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3273 channel, ring->hv_rx_buf_base_ioaddr_pp, 3274 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3275 dma_common->orig_alength, dma_common->orig_alength)); 3276 3277 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3278 3279 ring->hv_rx_cntl_base_ioaddr_pp = 3280 (uint64_t)dma_control->orig_ioaddr_pp; 3281 ring->hv_rx_cntl_ioaddr_size = 3282 (uint64_t)dma_control->orig_alength; 3283 3284 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3285 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3286 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3287 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3288 dma_control->orig_alength, dma_control->orig_alength)); 3289 } 3290 #endif 3291 3292 /* 3293 * nxge_map_rxdma 3294 * 3295 * Map an RDC into our kernel space. 3296 * 3297 * Arguments: 3298 * nxgep 3299 * channel The channel to map. 3300 * 3301 * Notes: 3302 * 1. Allocate & initialise a memory pool, if necessary. 3303 * 2. Allocate however many receive buffers are required. 3304 * 3. Setup buffers, descriptors, and mailbox. 3305 * 3306 * NPI/NXGE function calls: 3307 * nxge_alloc_rx_mem_pool() 3308 * nxge_alloc_rbb() 3309 * nxge_map_rxdma_channel() 3310 * 3311 * Registers accessed: 3312 * 3313 * Context: 3314 * Any domain 3315 */ 3316 static nxge_status_t 3317 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3318 { 3319 nxge_dma_common_t **data; 3320 nxge_dma_common_t **control; 3321 rx_rbr_ring_t **rbr_ring; 3322 rx_rcr_ring_t **rcr_ring; 3323 rx_mbox_t **mailbox; 3324 uint32_t chunks; 3325 3326 nxge_status_t status; 3327 3328 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3329 3330 if (!nxgep->rx_buf_pool_p) { 3331 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3332 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3333 "<== nxge_map_rxdma: buf not allocated")); 3334 return (NXGE_ERROR); 3335 } 3336 } 3337 3338 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3339 return (NXGE_ERROR); 3340 3341 /* 3342 * Map descriptors from the buffer polls for each dma channel. 3343 */ 3344 3345 /* 3346 * Set up and prepare buffer blocks, descriptors 3347 * and mailbox. 
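 * The per-channel slots used below (dma_buf_pool_p[], rbr_rings[],
 * rcr_rings[], num_chunks[] and rxmbox_areas[]) are all indexed by
 * the hardware channel number.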
3348 */ 3349 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3350 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3351 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3352 3353 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3354 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3355 3356 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3357 3358 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3359 chunks, control, rcr_ring, mailbox); 3360 if (status != NXGE_OK) { 3361 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3362 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3363 "returned 0x%x", 3364 channel, status)); 3365 return (status); 3366 } 3367 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3368 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3369 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3370 &nxgep->statsp->rdc_stats[channel]; 3371 3372 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3373 if (!isLDOMguest(nxgep)) 3374 nxge_rdc_hvio_setup(nxgep, channel); 3375 #endif 3376 3377 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3378 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3379 3380 return (status); 3381 } 3382 3383 static void 3384 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3385 { 3386 rx_rbr_ring_t *rbr_ring; 3387 rx_rcr_ring_t *rcr_ring; 3388 rx_mbox_t *mailbox; 3389 3390 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3391 3392 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3393 !nxgep->rx_mbox_areas_p) 3394 return; 3395 3396 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3397 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3398 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3399 3400 if (!rbr_ring || !rcr_ring || !mailbox) 3401 return; 3402 3403 (void) nxge_unmap_rxdma_channel( 3404 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3405 3406 nxge_free_rxb(nxgep, channel); 3407 3408 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3409 } 3410 3411 nxge_status_t 3412 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3413 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3414 uint32_t num_chunks, 3415 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3416 p_rx_mbox_t *rx_mbox_p) 3417 { 3418 int status = NXGE_OK; 3419 3420 /* 3421 * Set up and prepare buffer blocks, descriptors 3422 * and mailbox. 3423 */ 3424 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3425 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3426 /* 3427 * Receive buffer blocks 3428 */ 3429 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3430 dma_buf_p, rbr_p, num_chunks); 3431 if (status != NXGE_OK) { 3432 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3433 "==> nxge_map_rxdma_channel (channel %d): " 3434 "map buffer failed 0x%x", channel, status)); 3435 goto nxge_map_rxdma_channel_exit; 3436 } 3437 3438 /* 3439 * Receive block ring, completion ring and mailbox. 
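 * If this step fails we unwind through the fail2 label, which frees
 * the buffer ring mapped above. The fail3 label further down appears
 * to be unreachable at present, since nothing else is mapped after
 * the configuration rings.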
3440 */ 3441 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3442 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3443 if (status != NXGE_OK) { 3444 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3445 "==> nxge_map_rxdma_channel (channel %d): " 3446 "map config failed 0x%x", channel, status)); 3447 goto nxge_map_rxdma_channel_fail2; 3448 } 3449 3450 goto nxge_map_rxdma_channel_exit; 3451 3452 nxge_map_rxdma_channel_fail3: 3453 /* Free rbr, rcr */ 3454 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3455 "==> nxge_map_rxdma_channel: free rbr/rcr " 3456 "(status 0x%x channel %d)", 3457 status, channel)); 3458 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3459 *rcr_p, *rx_mbox_p); 3460 3461 nxge_map_rxdma_channel_fail2: 3462 /* Free buffer blocks */ 3463 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3464 "==> nxge_map_rxdma_channel: free rx buffers" 3465 "(nxgep 0x%x status 0x%x channel %d)", 3466 nxgep, status, channel)); 3467 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3468 3469 status = NXGE_ERROR; 3470 3471 nxge_map_rxdma_channel_exit: 3472 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3473 "<== nxge_map_rxdma_channel: " 3474 "(nxgep 0x%x status 0x%x channel %d)", 3475 nxgep, status, channel)); 3476 3477 return (status); 3478 } 3479 3480 /*ARGSUSED*/ 3481 static void 3482 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3483 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3484 { 3485 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3486 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3487 3488 /* 3489 * unmap receive block ring, completion ring and mailbox. 3490 */ 3491 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3492 rcr_p, rx_mbox_p); 3493 3494 /* unmap buffer blocks */ 3495 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3496 3497 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3498 } 3499 3500 /*ARGSUSED*/ 3501 static nxge_status_t 3502 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3503 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3504 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3505 { 3506 p_rx_rbr_ring_t rbrp; 3507 p_rx_rcr_ring_t rcrp; 3508 p_rx_mbox_t mboxp; 3509 p_nxge_dma_common_t cntl_dmap; 3510 p_nxge_dma_common_t dmap; 3511 p_rx_msg_t *rx_msg_ring; 3512 p_rx_msg_t rx_msg_p; 3513 p_rbr_cfig_a_t rcfga_p; 3514 p_rbr_cfig_b_t rcfgb_p; 3515 p_rcrcfig_a_t cfga_p; 3516 p_rcrcfig_b_t cfgb_p; 3517 p_rxdma_cfig1_t cfig1_p; 3518 p_rxdma_cfig2_t cfig2_p; 3519 p_rbr_kick_t kick_p; 3520 uint32_t dmaaddrp; 3521 uint32_t *rbr_vaddrp; 3522 uint32_t bkaddr; 3523 nxge_status_t status = NXGE_OK; 3524 int i; 3525 uint32_t nxge_port_rcr_size; 3526 3527 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3528 "==> nxge_map_rxdma_channel_cfg_ring")); 3529 3530 cntl_dmap = *dma_cntl_p; 3531 3532 /* Map in the receive block ring */ 3533 rbrp = *rbr_p; 3534 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3535 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3536 /* 3537 * Zero out buffer block ring descriptors. 
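 * (Each RBR descriptor is a 4-byte entry -- hence the element size of
 * 4 passed to nxge_setup_dma_common() above -- holding the buffer
 * block's DMA address shifted right by RBR_BKADDR_SHIFT.)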
3538 */ 3539 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3540 3541 rcfga_p = &(rbrp->rbr_cfga); 3542 rcfgb_p = &(rbrp->rbr_cfgb); 3543 kick_p = &(rbrp->rbr_kick); 3544 rcfga_p->value = 0; 3545 rcfgb_p->value = 0; 3546 kick_p->value = 0; 3547 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3548 rcfga_p->value = (rbrp->rbr_addr & 3549 (RBR_CFIG_A_STDADDR_MASK | 3550 RBR_CFIG_A_STDADDR_BASE_MASK)); 3551 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3552 3553 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3554 rcfgb_p->bits.ldw.vld0 = 1; 3555 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3556 rcfgb_p->bits.ldw.vld1 = 1; 3557 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3558 rcfgb_p->bits.ldw.vld2 = 1; 3559 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3560 3561 /* 3562 * For each buffer block, enter receive block address to the ring. 3563 */ 3564 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3565 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3566 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3567 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3568 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3569 3570 rx_msg_ring = rbrp->rx_msg_ring; 3571 for (i = 0; i < rbrp->tnblocks; i++) { 3572 rx_msg_p = rx_msg_ring[i]; 3573 rx_msg_p->nxgep = nxgep; 3574 rx_msg_p->rx_rbr_p = rbrp; 3575 bkaddr = (uint32_t) 3576 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3577 >> RBR_BKADDR_SHIFT)); 3578 rx_msg_p->free = B_FALSE; 3579 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3580 3581 *rbr_vaddrp++ = bkaddr; 3582 } 3583 3584 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3585 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3586 3587 rbrp->rbr_rd_index = 0; 3588 3589 rbrp->rbr_consumed = 0; 3590 rbrp->rbr_use_bcopy = B_TRUE; 3591 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3592 /* 3593 * Do bcopy on packets greater than bcopy size once 3594 * the lo threshold is reached. 3595 * This lo threshold should be less than the hi threshold. 3596 * 3597 * Do bcopy on every packet once the hi threshold is reached. 
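 *
 * Both thresholds are expressed as a fraction of rbb_max:
 * NXGE_RX_COPY_n scales to (n / NXGE_RX_BCOPY_SCALE) of the ring
 * (so, assuming NXGE_RX_BCOPY_SCALE is 8, NXGE_RX_COPY_4 places the
 * threshold at half of rbb_max). NXGE_RX_COPY_NONE disables bcopy
 * altogether and NXGE_RX_COPY_ALL forces it for every packet.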
3598 */ 3599 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3600 /* default it to use hi */ 3601 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3602 } 3603 3604 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3605 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3606 } 3607 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3608 3609 switch (nxge_rx_threshold_hi) { 3610 default: 3611 case NXGE_RX_COPY_NONE: 3612 /* Do not do bcopy at all */ 3613 rbrp->rbr_use_bcopy = B_FALSE; 3614 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3615 break; 3616 3617 case NXGE_RX_COPY_1: 3618 case NXGE_RX_COPY_2: 3619 case NXGE_RX_COPY_3: 3620 case NXGE_RX_COPY_4: 3621 case NXGE_RX_COPY_5: 3622 case NXGE_RX_COPY_6: 3623 case NXGE_RX_COPY_7: 3624 rbrp->rbr_threshold_hi = 3625 rbrp->rbb_max * 3626 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3627 break; 3628 3629 case NXGE_RX_COPY_ALL: 3630 rbrp->rbr_threshold_hi = 0; 3631 break; 3632 } 3633 3634 switch (nxge_rx_threshold_lo) { 3635 default: 3636 case NXGE_RX_COPY_NONE: 3637 /* Do not do bcopy at all */ 3638 if (rbrp->rbr_use_bcopy) { 3639 rbrp->rbr_use_bcopy = B_FALSE; 3640 } 3641 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3642 break; 3643 3644 case NXGE_RX_COPY_1: 3645 case NXGE_RX_COPY_2: 3646 case NXGE_RX_COPY_3: 3647 case NXGE_RX_COPY_4: 3648 case NXGE_RX_COPY_5: 3649 case NXGE_RX_COPY_6: 3650 case NXGE_RX_COPY_7: 3651 rbrp->rbr_threshold_lo = 3652 rbrp->rbb_max * 3653 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3654 break; 3655 3656 case NXGE_RX_COPY_ALL: 3657 rbrp->rbr_threshold_lo = 0; 3658 break; 3659 } 3660 3661 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3662 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3663 "rbb_max %d " 3664 "rbrp->rbr_bufsize_type %d " 3665 "rbb_threshold_hi %d " 3666 "rbb_threshold_lo %d", 3667 dma_channel, 3668 rbrp->rbb_max, 3669 rbrp->rbr_bufsize_type, 3670 rbrp->rbr_threshold_hi, 3671 rbrp->rbr_threshold_lo)); 3672 3673 rbrp->page_valid.value = 0; 3674 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3675 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3676 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3677 rbrp->page_hdl.value = 0; 3678 3679 rbrp->page_valid.bits.ldw.page0 = 1; 3680 rbrp->page_valid.bits.ldw.page1 = 1; 3681 3682 /* Map in the receive completion ring */ 3683 rcrp = (p_rx_rcr_ring_t) 3684 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3685 rcrp->rdc = dma_channel; 3686 3687 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3688 rcrp->comp_size = nxge_port_rcr_size; 3689 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3690 3691 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3692 3693 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3694 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3695 sizeof (rcr_entry_t)); 3696 rcrp->comp_rd_index = 0; 3697 rcrp->comp_wt_index = 0; 3698 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3699 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3700 #if defined(__i386) 3701 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3702 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3703 #else 3704 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3705 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3706 #endif 3707 3708 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3709 (nxge_port_rcr_size - 1); 3710 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3711 (nxge_port_rcr_size - 1); 3712 3713 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3714 "==> nxge_map_rxdma_channel_cfg_ring: " 3715 "channel %d " 3716 "rbr_vaddrp $%p " 3717 "rcr_desc_rd_head_p $%p " 3718 "rcr_desc_rd_head_pp 
$%p " 3719 "rcr_desc_rd_last_p $%p " 3720 "rcr_desc_rd_last_pp $%p ", 3721 dma_channel, 3722 rbr_vaddrp, 3723 rcrp->rcr_desc_rd_head_p, 3724 rcrp->rcr_desc_rd_head_pp, 3725 rcrp->rcr_desc_last_p, 3726 rcrp->rcr_desc_last_pp)); 3727 3728 /* 3729 * Zero out buffer block ring descriptors. 3730 */ 3731 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3732 3733 rcrp->intr_timeout = (nxgep->intr_timeout < 3734 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3735 nxgep->intr_timeout; 3736 3737 rcrp->intr_threshold = (nxgep->intr_threshold < 3738 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3739 nxgep->intr_threshold; 3740 3741 rcrp->full_hdr_flag = B_FALSE; 3742 rcrp->sw_priv_hdr_len = 0; 3743 3744 cfga_p = &(rcrp->rcr_cfga); 3745 cfgb_p = &(rcrp->rcr_cfgb); 3746 cfga_p->value = 0; 3747 cfgb_p->value = 0; 3748 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3749 cfga_p->value = (rcrp->rcr_addr & 3750 (RCRCFIG_A_STADDR_MASK | 3751 RCRCFIG_A_STADDR_BASE_MASK)); 3752 3753 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3754 RCRCFIG_A_LEN_SHIF); 3755 3756 /* 3757 * Timeout should be set based on the system clock divider. 3758 * A timeout value of 1 assumes that the 3759 * granularity (1000) is 3 microseconds running at 300MHz. 3760 */ 3761 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3762 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3763 cfgb_p->bits.ldw.entout = 1; 3764 3765 /* Map in the mailbox */ 3766 mboxp = (p_rx_mbox_t) 3767 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3768 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3769 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3770 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3771 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3772 cfig1_p->value = cfig2_p->value = 0; 3773 3774 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3775 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3776 "==> nxge_map_rxdma_channel_cfg_ring: " 3777 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3778 dma_channel, cfig1_p->value, cfig2_p->value, 3779 mboxp->mbox_addr)); 3780 3781 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3782 & 0xfff); 3783 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3784 3785 3786 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3787 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3788 RXDMA_CFIG2_MBADDR_L_MASK); 3789 3790 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3791 3792 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3793 "==> nxge_map_rxdma_channel_cfg_ring: " 3794 "channel %d damaddrp $%p " 3795 "cfg1 0x%016llx cfig2 0x%016llx", 3796 dma_channel, dmaaddrp, 3797 cfig1_p->value, cfig2_p->value)); 3798 3799 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3800 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3801 3802 rbrp->rx_rcr_p = rcrp; 3803 rcrp->rx_rbr_p = rbrp; 3804 *rcr_p = rcrp; 3805 *rx_mbox_p = mboxp; 3806 3807 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3808 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3809 3810 return (status); 3811 } 3812 3813 /*ARGSUSED*/ 3814 static void 3815 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3816 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3817 { 3818 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3819 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3820 rcr_p->rdc)); 3821 3822 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3823 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3824 3825 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3826 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3827 } 3828 3829 static nxge_status_t 3830 
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3831 p_nxge_dma_common_t *dma_buf_p, 3832 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3833 { 3834 p_rx_rbr_ring_t rbrp; 3835 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3836 p_rx_msg_t *rx_msg_ring; 3837 p_rx_msg_t rx_msg_p; 3838 p_mblk_t mblk_p; 3839 3840 rxring_info_t *ring_info; 3841 nxge_status_t status = NXGE_OK; 3842 int i, j, index; 3843 uint32_t size, bsize, nblocks, nmsgs; 3844 3845 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3846 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3847 channel)); 3848 3849 dma_bufp = tmp_bufp = *dma_buf_p; 3850 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3851 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3852 "chunks bufp 0x%016llx", 3853 channel, num_chunks, dma_bufp)); 3854 3855 nmsgs = 0; 3856 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3857 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3858 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3859 "bufp 0x%016llx nblocks %d nmsgs %d", 3860 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3861 nmsgs += tmp_bufp->nblocks; 3862 } 3863 if (!nmsgs) { 3864 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3865 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3866 "no msg blocks", 3867 channel)); 3868 status = NXGE_ERROR; 3869 goto nxge_map_rxdma_channel_buf_ring_exit; 3870 } 3871 3872 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3873 3874 size = nmsgs * sizeof (p_rx_msg_t); 3875 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3876 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3877 KM_SLEEP); 3878 3879 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3880 (void *)nxgep->interrupt_cookie); 3881 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3882 (void *)nxgep->interrupt_cookie); 3883 rbrp->rdc = channel; 3884 rbrp->num_blocks = num_chunks; 3885 rbrp->tnblocks = nmsgs; 3886 rbrp->rbb_max = nmsgs; 3887 rbrp->rbr_max_size = nmsgs; 3888 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3889 3890 /* 3891 * Buffer sizes suggested by NIU architect. 3892 * 256, 512 and 2K. 3893 */ 3894 3895 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3896 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3897 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3898 3899 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3900 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3901 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3902 3903 rbrp->block_size = nxgep->rx_default_block_size; 3904 3905 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3906 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3907 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3908 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3909 } else { 3910 if (rbrp->block_size >= 0x2000) { 3911 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3912 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3913 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3914 } else { 3915 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3916 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3917 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3918 } 3919 } 3920 3921 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3922 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3923 "actual rbr max %d rbb_max %d nmsgs %d " 3924 "rbrp->block_size %d default_block_size %d " 3925 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3926 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3927 rbrp->block_size, nxgep->rx_default_block_size, 3928 nxge_rbr_size, nxge_rbr_spare_size)); 3929 3930 /* Map in buffers from the buffer pool. 
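 * For each chunk, the loop below records its DVMA address, kernel
 * address and starting block index in ring_info->buffer[];
 * nxge_rxbuf_pp_to_vp() later uses this table to translate the packet
 * buffer address found in an RCR entry back to a kernel virtual
 * address.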
*/ 3931 index = 0; 3932 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3933 bsize = dma_bufp->block_size; 3934 nblocks = dma_bufp->nblocks; 3935 #if defined(__i386) 3936 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3937 #else 3938 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3939 #endif 3940 ring_info->buffer[i].buf_index = i; 3941 ring_info->buffer[i].buf_size = dma_bufp->alength; 3942 ring_info->buffer[i].start_index = index; 3943 #if defined(__i386) 3944 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3945 #else 3946 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3947 #endif 3948 3949 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3950 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3951 "chunk %d" 3952 " nblocks %d chunk_size %x block_size 0x%x " 3953 "dma_bufp $%p", channel, i, 3954 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3955 dma_bufp)); 3956 3957 for (j = 0; j < nblocks; j++) { 3958 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3959 dma_bufp)) == NULL) { 3960 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3961 "allocb failed (index %d i %d j %d)", 3962 index, i, j)); 3963 goto nxge_map_rxdma_channel_buf_ring_fail1; 3964 } 3965 rx_msg_ring[index] = rx_msg_p; 3966 rx_msg_p->block_index = index; 3967 rx_msg_p->shifted_addr = (uint32_t) 3968 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3969 RBR_BKADDR_SHIFT)); 3970 3971 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3972 "index %d j %d rx_msg_p $%p mblk %p", 3973 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3974 3975 mblk_p = rx_msg_p->rx_mblk_p; 3976 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3977 3978 rbrp->rbr_ref_cnt++; 3979 index++; 3980 rx_msg_p->buf_dma.dma_channel = channel; 3981 } 3982 3983 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3984 if (dma_bufp->contig_alloc_type) { 3985 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3986 } 3987 3988 if (dma_bufp->kmem_alloc_type) { 3989 rbrp->rbr_alloc_type = KMEM_ALLOC; 3990 } 3991 3992 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3993 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3994 "chunk %d" 3995 " nblocks %d chunk_size %x block_size 0x%x " 3996 "dma_bufp $%p", 3997 channel, i, 3998 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3999 dma_bufp)); 4000 } 4001 if (i < rbrp->num_blocks) { 4002 goto nxge_map_rxdma_channel_buf_ring_fail1; 4003 } 4004 4005 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4006 "nxge_map_rxdma_channel_buf_ring: done buf init " 4007 "channel %d msg block entries %d", 4008 channel, index)); 4009 ring_info->block_size_mask = bsize - 1; 4010 rbrp->rx_msg_ring = rx_msg_ring; 4011 rbrp->dma_bufp = dma_buf_p; 4012 rbrp->ring_info = ring_info; 4013 4014 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 4015 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4016 " nxge_map_rxdma_channel_buf_ring: " 4017 "channel %d done buf info init", channel)); 4018 4019 /* 4020 * Finally, permit nxge_freeb() to call nxge_post_page(). 
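 * (Every message block allocated above took a reference on the ring
 * via rbr_ref_cnt; nxge_unmap_rxdma_channel_buf_ring() uses that
 * count to decide whether it may free the ring immediately or must
 * leave that to the last nxge_freeb() call.)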
         */
        rbrp->rbr_state = RBR_POSTING;

        *rbr_p = rbrp;
        goto nxge_map_rxdma_channel_buf_ring_exit;

nxge_map_rxdma_channel_buf_ring_fail1:
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            " nxge_map_rxdma_channel_buf_ring: failed channel (%d)",
            channel));

        index--;
        for (; index >= 0; index--) {
                rx_msg_p = rx_msg_ring[index];
                if (rx_msg_p != NULL) {
                        freeb(rx_msg_p->rx_mblk_p);
                        rx_msg_ring[index] = NULL;
                }
        }
nxge_map_rxdma_channel_buf_ring_fail:
        MUTEX_DESTROY(&rbrp->post_lock);
        MUTEX_DESTROY(&rbrp->lock);
        KMEM_FREE(ring_info, sizeof (rxring_info_t));
        KMEM_FREE(rx_msg_ring, size);
        KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));

        status = NXGE_ERROR;

nxge_map_rxdma_channel_buf_ring_exit:
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));

        return (status);
}

/*ARGSUSED*/
static void
nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
    p_rx_rbr_ring_t rbr_p)
{
        p_rx_msg_t      *rx_msg_ring;
        p_rx_msg_t      rx_msg_p;
        rxring_info_t   *ring_info;
        int             i;
        uint32_t        size;
#ifdef NXGE_DEBUG
        int             num_chunks;
#endif

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_unmap_rxdma_channel_buf_ring"));
        if (rbr_p == NULL) {
                NXGE_DEBUG_MSG((nxgep, RX_CTL,
                    "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
                return;
        }
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
            rbr_p->rdc));

        rx_msg_ring = rbr_p->rx_msg_ring;
        ring_info = rbr_p->ring_info;

        if (rx_msg_ring == NULL || ring_info == NULL) {
                NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                    "<== nxge_unmap_rxdma_channel_buf_ring: "
                    "rx_msg_ring $%p ring_info $%p",
                    rx_msg_ring, ring_info));
                return;
        }

#ifdef NXGE_DEBUG
        num_chunks = rbr_p->num_blocks;
#endif
        size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
            "tnblocks %d (max %d) size ptrs %d ",
            rbr_p->rdc, num_chunks,
            rbr_p->tnblocks, rbr_p->rbr_max_size, size));

        for (i = 0; i < rbr_p->tnblocks; i++) {
                rx_msg_p = rx_msg_ring[i];
                NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                    " nxge_unmap_rxdma_channel_buf_ring: "
                    "rx_msg_p $%p",
                    rx_msg_p));
                if (rx_msg_p != NULL) {
                        freeb(rx_msg_p->rx_mblk_p);
                        rx_msg_ring[i] = NULL;
                }
        }

        /*
         * We may no longer use the <post_lock> mutex. By setting
         * <rbr_state> to anything but POSTING, we prevent
         * nxge_post_page() from accessing a dead mutex.
         */
        rbr_p->rbr_state = RBR_UNMAPPING;
        MUTEX_DESTROY(&rbr_p->post_lock);

        MUTEX_DESTROY(&rbr_p->lock);

        if (rbr_p->rbr_ref_cnt == 0) {
                /*
                 * This is the normal state of affairs.
                 * Need to free the following buffers:
                 *  - data buffers
                 *  - rx_msg ring
                 *  - ring_info
                 *  - rbr ring
                 */
                NXGE_DEBUG_MSG((nxgep, RX_CTL,
                    "unmap_rxdma_buf_ring: No outstanding - freeing "));
                nxge_rxdma_databuf_free(rbr_p);
                KMEM_FREE(ring_info, sizeof (rxring_info_t));
                KMEM_FREE(rx_msg_ring, size);
                KMEM_FREE(rbr_p, sizeof (*rbr_p));
        } else {
                /*
                 * Some of our buffers are still being used.
                 * Therefore, tell nxge_freeb() this ring is
                 * unmapped, so it may free <rbr_p> for us.
                 */
                rbr_p->rbr_state = RBR_UNMAPPED;
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "unmap_rxdma_buf_ring: %d %s outstanding.",
                    rbr_p->rbr_ref_cnt,
                    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_unmap_rxdma_channel_buf_ring"));
}

/*
 * nxge_rxdma_hw_start_common
 *
 * Arguments:
 *      nxgep
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *      nxge_init_fzc_rx_common();
 *      nxge_init_fzc_rxdma_port();
 *
 * Registers accessed:
 *
 * Context:
 *      Service domain
 */
static nxge_status_t
nxge_rxdma_hw_start_common(p_nxge_t nxgep)
{
        nxge_status_t status = NXGE_OK;

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));

        /*
         * Load the sharable parameters by writing to the
         * function zero control registers. These FZC registers
         * should be initialized only once for the entire chip.
         */
        (void) nxge_init_fzc_rx_common(nxgep);

        /*
         * Initialize the RXDMA port specific FZC control configurations.
         * These FZC registers pertain to each port.
         */
        (void) nxge_init_fzc_rxdma_port(nxgep);

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));

        return (status);
}

static nxge_status_t
nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
{
        int                     i, ndmas;
        p_rx_rbr_rings_t        rx_rbr_rings;
        p_rx_rbr_ring_t         *rbr_rings;
        p_rx_rcr_rings_t        rx_rcr_rings;
        p_rx_rcr_ring_t         *rcr_rings;
        p_rx_mbox_areas_t       rx_mbox_areas_p;
        p_rx_mbox_t             *rx_mbox_p = NULL;
        nxge_status_t           status = NXGE_OK;

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));

        rx_rbr_rings = nxgep->rx_rbr_rings;
        rx_rcr_rings = nxgep->rx_rcr_rings;
        if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
                NXGE_DEBUG_MSG((nxgep, RX_CTL,
                    "<== nxge_rxdma_hw_start: NULL ring pointers"));
                return (NXGE_ERROR);
        }
        ndmas = rx_rbr_rings->ndmas;
        if (ndmas == 0) {
                NXGE_DEBUG_MSG((nxgep, RX_CTL,
                    "<== nxge_rxdma_hw_start: no dma channel allocated"));
                return (NXGE_ERROR);
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));

        rbr_rings = rx_rbr_rings->rbr_rings;
        rcr_rings = rx_rcr_rings->rcr_rings;
        rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
        if (rx_mbox_areas_p) {
                rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
        }
        if (rx_mbox_p == NULL) {
                NXGE_DEBUG_MSG((nxgep, RX_CTL,
                    "<== nxge_rxdma_hw_start: NULL mailbox pointer"));
                return (NXGE_ERROR);
        }

        i = channel;
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
            ndmas, channel));
        status = nxge_rxdma_start_channel(nxgep, channel,
            (p_rx_rbr_ring_t)rbr_rings[i],
            (p_rx_rcr_ring_t)rcr_rings[i],
            (p_rx_mbox_t)rx_mbox_p[i]);
        if (status != NXGE_OK) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "==> nxge_rxdma_hw_start: disable "
                    "(status 0x%x channel %d)", status, channel));
                return (status);
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
            "rx_rbr_rings 0x%016llx rx_rcr_rings 0x%016llx",
            rx_rbr_rings, rx_rcr_rings));

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_rxdma_hw_start: (status 0x%x)", status));

        return (status);
}

static void
nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
{
        p_rx_rbr_rings_t rx_rbr_rings;
        p_rx_rcr_rings_t rx_rcr_rings;

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
nxge_rxdma_hw_stop")); 4268 4269 rx_rbr_rings = nxgep->rx_rbr_rings; 4270 rx_rcr_rings = nxgep->rx_rcr_rings; 4271 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4272 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4273 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 4274 return; 4275 } 4276 4277 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4278 "==> nxge_rxdma_hw_stop(channel %d)", 4279 channel)); 4280 (void) nxge_rxdma_stop_channel(nxgep, channel); 4281 4282 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 4283 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4284 rx_rbr_rings, rx_rcr_rings)); 4285 4286 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 4287 } 4288 4289 4290 static nxge_status_t 4291 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 4292 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 4293 4294 { 4295 npi_handle_t handle; 4296 npi_status_t rs = NPI_SUCCESS; 4297 rx_dma_ctl_stat_t cs; 4298 rx_dma_ent_msk_t ent_mask; 4299 nxge_status_t status = NXGE_OK; 4300 4301 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 4302 4303 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4304 4305 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 4306 "npi handle addr $%p acc $%p", 4307 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4308 4309 /* Reset RXDMA channel, but not if you're a guest. */ 4310 if (!isLDOMguest(nxgep)) { 4311 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4312 if (rs != NPI_SUCCESS) { 4313 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4314 "==> nxge_init_fzc_rdc: " 4315 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4316 channel, rs)); 4317 return (NXGE_ERROR | rs); 4318 } 4319 4320 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4321 "==> nxge_rxdma_start_channel: reset done: channel %d", 4322 channel)); 4323 } 4324 4325 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4326 if (isLDOMguest(nxgep)) 4327 (void) nxge_rdc_lp_conf(nxgep, channel); 4328 #endif 4329 4330 /* 4331 * Initialize the RXDMA channel specific FZC control 4332 * configurations. These FZC registers are pertaining 4333 * to each RX channel (logical pages). 4334 */ 4335 if (!isLDOMguest(nxgep)) { 4336 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4337 if (status != NXGE_OK) { 4338 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4339 "==> nxge_rxdma_start_channel: " 4340 "init fzc rxdma failed (0x%08x channel %d)", 4341 status, channel)); 4342 return (status); 4343 } 4344 4345 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4346 "==> nxge_rxdma_start_channel: fzc done")); 4347 } 4348 4349 /* Set up the interrupt event masks. 
         */
        ent_mask.value = 0;
        ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
        rs = npi_rxdma_event_mask(handle, OP_SET, channel,
            &ent_mask);
        if (rs != NPI_SUCCESS) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "==> nxge_rxdma_start_channel: "
                    "init rxdma event masks failed "
                    "(0x%08x channel %d)",
                    rs, channel));
                return (NXGE_ERROR | rs);
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_rxdma_start_channel: "
            "event done: channel %d (mask 0x%016llx)",
            channel, ent_mask.value));

        /* Initialize the receive DMA control and status register. */
        cs.value = 0;
        cs.bits.hdw.mex = 1;
        cs.bits.hdw.rcrthres = 1;
        cs.bits.hdw.rcrto = 1;
        cs.bits.hdw.rbr_empty = 1;
        status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
            "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
        if (status != NXGE_OK) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "==> nxge_rxdma_start_channel: "
                    "init rxdma control register failed (0x%08x channel %d)",
                    status, channel));
                return (status);
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
            "control done - channel %d cs 0x%016llx", channel, cs.value));

        /*
         * Load RXDMA descriptors, buffers, and mailbox;
         * initialize the receive DMA channel and
         * enable the DMA channel.
         */
        status = nxge_enable_rxdma_channel(nxgep,
            channel, rbr_p, rcr_p, mbox_p);

        if (status != NXGE_OK) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    " nxge_rxdma_start_channel: "
                    " enable rxdma failed (0x%08x channel %d)",
                    status, channel));
                return (status);
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_rxdma_start_channel: enabled channel %d", channel));

        if (isLDOMguest(nxgep)) {
                /* Add interrupt handler for this channel.
*/ 4409 if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4410 != NXGE_OK) { 4411 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4412 " nxge_rxdma_start_channel: " 4413 " nxge_hio_intr_add failed (0x%08x channel %d)", 4414 status, channel)); 4415 } 4416 } 4417 4418 ent_mask.value = 0; 4419 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4420 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4421 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4422 &ent_mask); 4423 if (rs != NPI_SUCCESS) { 4424 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4425 "==> nxge_rxdma_start_channel: " 4426 "init rxdma event masks failed (0x%08x channel %d)", 4427 status, channel)); 4428 return (NXGE_ERROR | rs); 4429 } 4430 4431 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4432 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4433 4434 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4435 4436 return (NXGE_OK); 4437 } 4438 4439 static nxge_status_t 4440 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4441 { 4442 npi_handle_t handle; 4443 npi_status_t rs = NPI_SUCCESS; 4444 rx_dma_ctl_stat_t cs; 4445 rx_dma_ent_msk_t ent_mask; 4446 nxge_status_t status = NXGE_OK; 4447 4448 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4449 4450 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4451 4452 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4453 "npi handle addr $%p acc $%p", 4454 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4455 4456 if (!isLDOMguest(nxgep)) { 4457 /* 4458 * Stop RxMAC = A.9.2.6 4459 */ 4460 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4461 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4462 "nxge_rxdma_stop_channel: " 4463 "Failed to disable RxMAC")); 4464 } 4465 4466 /* 4467 * Drain IPP Port = A.9.3.6 4468 */ 4469 (void) nxge_ipp_drain(nxgep); 4470 } 4471 4472 /* Reset RXDMA channel */ 4473 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4474 if (rs != NPI_SUCCESS) { 4475 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4476 " nxge_rxdma_stop_channel: " 4477 " reset rxdma failed (0x%08x channel %d)", 4478 rs, channel)); 4479 return (NXGE_ERROR | rs); 4480 } 4481 4482 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4483 "==> nxge_rxdma_stop_channel: reset done")); 4484 4485 /* Set up the interrupt event masks. */ 4486 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4487 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4488 &ent_mask); 4489 if (rs != NPI_SUCCESS) { 4490 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4491 "==> nxge_rxdma_stop_channel: " 4492 "set rxdma event masks failed (0x%08x channel %d)", 4493 rs, channel)); 4494 return (NXGE_ERROR | rs); 4495 } 4496 4497 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4498 "==> nxge_rxdma_stop_channel: event done")); 4499 4500 /* 4501 * Initialize the receive DMA control and status register 4502 */ 4503 cs.value = 0; 4504 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4505 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4506 " to default (all 0s) 0x%08x", cs.value)); 4507 if (status != NXGE_OK) { 4508 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4509 " nxge_rxdma_stop_channel: init rxdma" 4510 " control register failed (0x%08x channel %d", 4511 status, channel)); 4512 return (status); 4513 } 4514 4515 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4516 "==> nxge_rxdma_stop_channel: control done")); 4517 4518 /* 4519 * Make sure channel is disabled. 
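         * The channel has already been reset and all of its events
         * masked above; the explicit disable below quiesces the RDC
         * before the RxMAC is re-enabled.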
4520 */ 4521 status = nxge_disable_rxdma_channel(nxgep, channel); 4522 4523 if (status != NXGE_OK) { 4524 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4525 " nxge_rxdma_stop_channel: " 4526 " init enable rxdma failed (0x%08x channel %d)", 4527 status, channel)); 4528 return (status); 4529 } 4530 4531 if (!isLDOMguest(nxgep)) { 4532 /* 4533 * Enable RxMAC = A.9.2.10 4534 */ 4535 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4536 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4537 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4538 } 4539 } 4540 4541 NXGE_DEBUG_MSG((nxgep, 4542 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4543 4544 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4545 4546 return (NXGE_OK); 4547 } 4548 4549 nxge_status_t 4550 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4551 { 4552 npi_handle_t handle; 4553 p_nxge_rdc_sys_stats_t statsp; 4554 rx_ctl_dat_fifo_stat_t stat; 4555 uint32_t zcp_err_status; 4556 uint32_t ipp_err_status; 4557 nxge_status_t status = NXGE_OK; 4558 npi_status_t rs = NPI_SUCCESS; 4559 boolean_t my_err = B_FALSE; 4560 4561 handle = nxgep->npi_handle; 4562 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4563 4564 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4565 4566 if (rs != NPI_SUCCESS) 4567 return (NXGE_ERROR | rs); 4568 4569 if (stat.bits.ldw.id_mismatch) { 4570 statsp->id_mismatch++; 4571 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4572 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4573 /* Global fatal error encountered */ 4574 } 4575 4576 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4577 switch (nxgep->mac.portnum) { 4578 case 0: 4579 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4580 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4581 my_err = B_TRUE; 4582 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4583 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4584 } 4585 break; 4586 case 1: 4587 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4588 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4589 my_err = B_TRUE; 4590 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4591 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4592 } 4593 break; 4594 case 2: 4595 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4596 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4597 my_err = B_TRUE; 4598 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4599 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4600 } 4601 break; 4602 case 3: 4603 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4604 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4605 my_err = B_TRUE; 4606 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4607 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4608 } 4609 break; 4610 default: 4611 return (NXGE_ERROR); 4612 } 4613 } 4614 4615 if (my_err) { 4616 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4617 zcp_err_status); 4618 if (status != NXGE_OK) 4619 return (status); 4620 } 4621 4622 return (NXGE_OK); 4623 } 4624 4625 static nxge_status_t 4626 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4627 uint32_t zcp_status) 4628 { 4629 boolean_t rxport_fatal = B_FALSE; 4630 p_nxge_rdc_sys_stats_t statsp; 4631 nxge_status_t status = NXGE_OK; 4632 uint8_t portn; 4633 4634 portn = nxgep->mac.portnum; 4635 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4636 4637 if (ipp_status & (0x1 << portn)) { 4638 statsp->ipp_eop_err++; 4639 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4640 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4641 rxport_fatal = B_TRUE; 4642 } 4643 4644 if (zcp_status & (0x1 << 
portn)) { 4645 statsp->zcp_eop_err++; 4646 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4647 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4648 rxport_fatal = B_TRUE; 4649 } 4650 4651 if (rxport_fatal) { 4652 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4653 " nxge_rxdma_handle_port_error: " 4654 " fatal error on Port #%d\n", 4655 portn)); 4656 status = nxge_rx_port_fatal_err_recover(nxgep); 4657 if (status == NXGE_OK) { 4658 FM_SERVICE_RESTORED(nxgep); 4659 } 4660 } 4661 4662 return (status); 4663 } 4664 4665 static nxge_status_t 4666 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4667 { 4668 npi_handle_t handle; 4669 npi_status_t rs = NPI_SUCCESS; 4670 nxge_status_t status = NXGE_OK; 4671 p_rx_rbr_ring_t rbrp; 4672 p_rx_rcr_ring_t rcrp; 4673 p_rx_mbox_t mboxp; 4674 rx_dma_ent_msk_t ent_mask; 4675 p_nxge_dma_common_t dmap; 4676 int ring_idx; 4677 uint32_t ref_cnt; 4678 p_rx_msg_t rx_msg_p; 4679 int i; 4680 uint32_t nxge_port_rcr_size; 4681 4682 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4683 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4684 "Recovering from RxDMAChannel#%d error...", channel)); 4685 4686 /* 4687 * Stop the dma channel waits for the stop done. 4688 * If the stop done bit is not set, then create 4689 * an error. 4690 */ 4691 4692 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4693 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4694 4695 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4696 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4697 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4698 4699 MUTEX_ENTER(&rcrp->lock); 4700 MUTEX_ENTER(&rbrp->lock); 4701 MUTEX_ENTER(&rbrp->post_lock); 4702 4703 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4704 4705 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4706 if (rs != NPI_SUCCESS) { 4707 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4708 "nxge_disable_rxdma_channel:failed")); 4709 goto fail; 4710 } 4711 4712 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4713 4714 /* Disable interrupt */ 4715 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4716 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4717 if (rs != NPI_SUCCESS) { 4718 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4719 "nxge_rxdma_stop_channel: " 4720 "set rxdma event masks failed (channel %d)", 4721 channel)); 4722 } 4723 4724 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4725 4726 /* Reset RXDMA channel */ 4727 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4728 if (rs != NPI_SUCCESS) { 4729 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4730 "nxge_rxdma_fatal_err_recover: " 4731 " reset rxdma failed (channel %d)", channel)); 4732 goto fail; 4733 } 4734 4735 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4736 4737 mboxp = 4738 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4739 4740 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4741 rbrp->rbr_rd_index = 0; 4742 4743 rcrp->comp_rd_index = 0; 4744 rcrp->comp_wt_index = 0; 4745 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4746 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4747 #if defined(__i386) 4748 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4749 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4750 #else 4751 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4752 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4753 #endif 4754 4755 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4756 (nxge_port_rcr_size - 1); 4757 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4758 (nxge_port_rcr_size - 1); 4759 
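        /*
         * Zero the RCR descriptor area, then walk the RBR buffer ring:
         * buffers that are still referenced but whose usage counts show
         * they have been fully consumed are marked free so that they can
         * be re-posted when the channel is restarted below.
         */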
4760 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4761 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4762 4763 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4764 4765 for (i = 0; i < rbrp->rbr_max_size; i++) { 4766 rx_msg_p = rbrp->rx_msg_ring[i]; 4767 ref_cnt = rx_msg_p->ref_cnt; 4768 if (ref_cnt != 1) { 4769 if (rx_msg_p->cur_usage_cnt != 4770 rx_msg_p->max_usage_cnt) { 4771 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4772 "buf[%d]: cur_usage_cnt = %d " 4773 "max_usage_cnt = %d\n", i, 4774 rx_msg_p->cur_usage_cnt, 4775 rx_msg_p->max_usage_cnt)); 4776 } else { 4777 /* Buffer can be re-posted */ 4778 rx_msg_p->free = B_TRUE; 4779 rx_msg_p->cur_usage_cnt = 0; 4780 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4781 rx_msg_p->pkt_buf_size = 0; 4782 } 4783 } 4784 } 4785 4786 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4787 4788 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4789 if (status != NXGE_OK) { 4790 goto fail; 4791 } 4792 4793 MUTEX_EXIT(&rbrp->post_lock); 4794 MUTEX_EXIT(&rbrp->lock); 4795 MUTEX_EXIT(&rcrp->lock); 4796 4797 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4798 "Recovery Successful, RxDMAChannel#%d Restored", 4799 channel)); 4800 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4801 4802 return (NXGE_OK); 4803 fail: 4804 MUTEX_EXIT(&rbrp->post_lock); 4805 MUTEX_EXIT(&rbrp->lock); 4806 MUTEX_EXIT(&rcrp->lock); 4807 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4808 4809 return (NXGE_ERROR | rs); 4810 } 4811 4812 nxge_status_t 4813 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4814 { 4815 nxge_grp_set_t *set = &nxgep->rx_set; 4816 nxge_status_t status = NXGE_OK; 4817 int rdc; 4818 4819 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4820 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4821 "Recovering from RxPort error...")); 4822 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4823 4824 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4825 goto fail; 4826 4827 NXGE_DELAY(1000); 4828 4829 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4830 4831 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4832 if ((1 << rdc) & set->owned.map) { 4833 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4834 != NXGE_OK) { 4835 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4836 "Could not recover channel %d", rdc)); 4837 } 4838 } 4839 } 4840 4841 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4842 4843 /* Reset IPP */ 4844 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4845 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4846 "nxge_rx_port_fatal_err_recover: " 4847 "Failed to reset IPP")); 4848 goto fail; 4849 } 4850 4851 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4852 4853 /* Reset RxMAC */ 4854 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4855 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4856 "nxge_rx_port_fatal_err_recover: " 4857 "Failed to reset RxMAC")); 4858 goto fail; 4859 } 4860 4861 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4862 4863 /* Re-Initialize IPP */ 4864 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4865 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4866 "nxge_rx_port_fatal_err_recover: " 4867 "Failed to init IPP")); 4868 goto fail; 4869 } 4870 4871 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4872 4873 /* Re-Initialize RxMAC */ 4874 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4875 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4876 "nxge_rx_port_fatal_err_recover: " 4877 "Failed to reset RxMAC")); 4878 goto fail; 4879 } 4880 4881 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4882 4883 /* 
Re-enable RxMAC */ 4884 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4885 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4886 "nxge_rx_port_fatal_err_recover: " 4887 "Failed to enable RxMAC")); 4888 goto fail; 4889 } 4890 4891 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4892 "Recovery Successful, RxPort Restored")); 4893 4894 return (NXGE_OK); 4895 fail: 4896 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4897 return (status); 4898 } 4899 4900 void 4901 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4902 { 4903 rx_dma_ctl_stat_t cs; 4904 rx_ctl_dat_fifo_stat_t cdfs; 4905 4906 switch (err_id) { 4907 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4908 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4909 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4910 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4911 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4912 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4913 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4914 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4915 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4916 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4917 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4918 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4919 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4920 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4921 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4922 chan, &cs.value); 4923 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4924 cs.bits.hdw.rcr_ack_err = 1; 4925 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4926 cs.bits.hdw.dc_fifo_err = 1; 4927 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4928 cs.bits.hdw.rcr_sha_par = 1; 4929 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4930 cs.bits.hdw.rbr_pre_par = 1; 4931 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4932 cs.bits.hdw.rbr_tmout = 1; 4933 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4934 cs.bits.hdw.rsp_cnt_err = 1; 4935 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4936 cs.bits.hdw.byte_en_bus = 1; 4937 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4938 cs.bits.hdw.rsp_dat_err = 1; 4939 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4940 cs.bits.hdw.config_err = 1; 4941 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4942 cs.bits.hdw.rcrincon = 1; 4943 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4944 cs.bits.hdw.rcrfull = 1; 4945 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4946 cs.bits.hdw.rbrfull = 1; 4947 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4948 cs.bits.hdw.rbrlogpage = 1; 4949 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4950 cs.bits.hdw.cfiglogpage = 1; 4951 #if defined(__i386) 4952 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4953 cs.value); 4954 #else 4955 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4956 cs.value); 4957 #endif 4958 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4959 chan, cs.value); 4960 break; 4961 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4962 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4963 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4964 cdfs.value = 0; 4965 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4966 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4967 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4968 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4969 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4970 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4971 #if defined(__i386) 4972 cmn_err(CE_NOTE, 4973 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4974 cdfs.value); 4975 #else 4976 cmn_err(CE_NOTE, 4977 "!Write 
0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4978 cdfs.value); 4979 #endif 4980 NXGE_REG_WR64(nxgep->npi_handle, 4981 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4982 break; 4983 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4984 break; 4985 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4986 break; 4987 } 4988 } 4989 4990 static void 4991 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4992 { 4993 rxring_info_t *ring_info; 4994 int index; 4995 uint32_t chunk_size; 4996 uint64_t kaddr; 4997 uint_t num_blocks; 4998 4999 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 5000 5001 if (rbr_p == NULL) { 5002 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5003 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 5004 return; 5005 } 5006 5007 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 5008 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5009 "<== nxge_rxdma_databuf_free: DDI")); 5010 return; 5011 } 5012 5013 ring_info = rbr_p->ring_info; 5014 if (ring_info == NULL) { 5015 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5016 "==> nxge_rxdma_databuf_free: NULL ring info")); 5017 return; 5018 } 5019 num_blocks = rbr_p->num_blocks; 5020 for (index = 0; index < num_blocks; index++) { 5021 kaddr = ring_info->buffer[index].kaddr; 5022 chunk_size = ring_info->buffer[index].buf_size; 5023 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5024 "==> nxge_rxdma_databuf_free: free chunk %d " 5025 "kaddrp $%p chunk size %d", 5026 index, kaddr, chunk_size)); 5027 if (kaddr == NULL) continue; 5028 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 5029 ring_info->buffer[index].kaddr = NULL; 5030 } 5031 5032 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 5033 } 5034 5035 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5036 extern void contig_mem_free(void *, size_t); 5037 #endif 5038 5039 void 5040 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 5041 { 5042 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 5043 5044 if (kaddr == NULL || !buf_size) { 5045 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5046 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 5047 kaddr, buf_size)); 5048 return; 5049 } 5050 5051 switch (alloc_type) { 5052 case KMEM_ALLOC: 5053 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5054 "==> nxge_free_buf: freeing kmem $%p size %d", 5055 kaddr, buf_size)); 5056 #if defined(__i386) 5057 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 5058 #else 5059 KMEM_FREE((void *)kaddr, buf_size); 5060 #endif 5061 break; 5062 5063 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5064 case CONTIG_MEM_ALLOC: 5065 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5066 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 5067 kaddr, buf_size)); 5068 contig_mem_free((void *)kaddr, buf_size); 5069 break; 5070 #endif 5071 5072 default: 5073 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5074 "<== nxge_free_buf: unsupported alloc type %d", 5075 alloc_type)); 5076 return; 5077 } 5078 5079 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5080 } 5081