1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <sys/nxge/nxge_impl.h> 28 #include <sys/nxge/nxge_rxdma.h> 29 #include <sys/nxge/nxge_hio.h> 30 31 #if !defined(_BIG_ENDIAN) 32 #include <npi_rx_rd32.h> 33 #endif 34 #include <npi_rx_rd64.h> 35 #include <npi_rx_wr64.h> 36 37 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 38 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 39 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 40 (rdc + nxgep->pt_config.hw_config.start_rdc) 41 42 /* 43 * Globals: tunable parameters (/etc/system or adb) 44 * 45 */ 46 extern uint32_t nxge_rbr_size; 47 extern uint32_t nxge_rcr_size; 48 extern uint32_t nxge_rbr_spare_size; 49 extern uint16_t nxge_rdc_buf_offset; 50 51 extern uint32_t nxge_mblks_pending; 52 53 /* 54 * Tunable to reduce the amount of time spent in the 55 * ISR doing Rx Processing. 56 */ 57 extern uint32_t nxge_max_rx_pkts; 58 59 /* 60 * Tunables to manage the receive buffer blocks. 61 * 62 * nxge_rx_threshold_hi: copy all buffers. 63 * nxge_rx_bcopy_size_type: receive buffer block size type. 64 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
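 *
 * Illustrative only (values are placeholders, not recommendations):
 * because these are plain module globals, a service domain could
 * override the integer tunables declared above from /etc/system,
 * for example:
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_max_rx_pkts = 128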
65 */ 66 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 67 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 68 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 69 70 extern uint32_t nxge_cksum_offload; 71 72 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 73 static void nxge_unmap_rxdma(p_nxge_t, int); 74 75 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 76 77 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 78 static void nxge_rxdma_hw_stop(p_nxge_t, int); 79 80 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 81 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 82 uint32_t, 83 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 84 p_rx_mbox_t *); 85 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 86 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 87 88 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 89 uint16_t, 90 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 91 p_rx_rcr_ring_t *, p_rx_mbox_t *); 92 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 93 p_rx_rcr_ring_t, p_rx_mbox_t); 94 95 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 96 uint16_t, 97 p_nxge_dma_common_t *, 98 p_rx_rbr_ring_t *, uint32_t); 99 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 100 p_rx_rbr_ring_t); 101 102 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 103 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 104 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 105 106 static mblk_t * 107 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 108 109 static void nxge_receive_packet(p_nxge_t, 110 p_rx_rcr_ring_t, 111 p_rcr_entry_t, 112 boolean_t *, 113 mblk_t **, mblk_t **); 114 115 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 116 117 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 118 static void nxge_freeb(p_rx_msg_t); 119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 120 121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 122 uint32_t, uint32_t); 123 124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 125 p_rx_rbr_ring_t); 126 127 128 static nxge_status_t 129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 130 131 nxge_status_t 132 nxge_rx_port_fatal_err_recover(p_nxge_t); 133 134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 135 136 nxge_status_t 137 nxge_init_rxdma_channels(p_nxge_t nxgep) 138 { 139 nxge_grp_set_t *set = &nxgep->rx_set; 140 int i, count, channel; 141 nxge_grp_t *group; 142 dc_map_t map; 143 int dev_gindex; 144 145 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 146 147 if (!isLDOMguest(nxgep)) { 148 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 149 cmn_err(CE_NOTE, "hw_start_common"); 150 return (NXGE_ERROR); 151 } 152 } 153 154 /* 155 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 156 * We only have 8 hardware RDC tables, but we may have 157 * up to 16 logical (software-defined) groups of RDCS, 158 * if we make use of layer 3 & 4 hardware classification. 
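	 *
	 * Worked example (hypothetical numbers): if def_mac_rxdma_grpid
	 * is 8 and logical group 2 is set in set->lg.map, the loop below
	 * computes dev_gindex = 8 + 2 = 10, reads rdc_grps[10].map, and
	 * calls nxge_grp_dc_add() once for each RDC whose bit is set in
	 * that map.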
159 */ 160 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 161 if ((1 << i) & set->lg.map) { 162 group = set->group[i]; 163 dev_gindex = 164 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 165 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 166 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 167 if ((1 << channel) & map) { 168 if ((nxge_grp_dc_add(nxgep, 169 group, VP_BOUND_RX, channel))) 170 goto init_rxdma_channels_exit; 171 } 172 } 173 } 174 if (++count == set->lg.count) 175 break; 176 } 177 178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 179 return (NXGE_OK); 180 181 init_rxdma_channels_exit: 182 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 183 if ((1 << i) & set->lg.map) { 184 group = set->group[i]; 185 dev_gindex = 186 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 187 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 188 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 189 if ((1 << channel) & map) { 190 nxge_grp_dc_remove(nxgep, 191 VP_BOUND_RX, channel); 192 } 193 } 194 } 195 if (++count == set->lg.count) 196 break; 197 } 198 199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 200 return (NXGE_ERROR); 201 } 202 203 nxge_status_t 204 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 205 { 206 nxge_status_t status; 207 208 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 209 210 status = nxge_map_rxdma(nxge, channel); 211 if (status != NXGE_OK) { 212 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 213 "<== nxge_init_rxdma: status 0x%x", status)); 214 return (status); 215 } 216 217 #if defined(sun4v) 218 if (isLDOMguest(nxge)) { 219 /* set rcr_ring */ 220 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 221 222 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 223 if (status != NXGE_OK) { 224 nxge_unmap_rxdma(nxge, channel); 225 return (status); 226 } 227 } 228 #endif 229 230 status = nxge_rxdma_hw_start(nxge, channel); 231 if (status != NXGE_OK) { 232 nxge_unmap_rxdma(nxge, channel); 233 } 234 235 if (!nxge->statsp->rdc_ksp[channel]) 236 nxge_setup_rdc_kstats(nxge, channel); 237 238 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 239 "<== nxge_init_rxdma_channel: status 0x%x", status)); 240 241 return (status); 242 } 243 244 void 245 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 246 { 247 nxge_grp_set_t *set = &nxgep->rx_set; 248 int rdc; 249 250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 251 252 if (set->owned.map == 0) { 253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 254 "nxge_uninit_rxdma_channels: no channels")); 255 return; 256 } 257 258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 259 if ((1 << rdc) & set->owned.map) { 260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 261 } 262 } 263 264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 265 } 266 267 void 268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 269 { 270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 271 272 if (nxgep->statsp->rdc_ksp[channel]) { 273 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 274 nxgep->statsp->rdc_ksp[channel] = 0; 275 } 276 277 nxge_rxdma_hw_stop(nxgep, channel); 278 nxge_unmap_rxdma(nxgep, channel); 279 280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 281 } 282 283 nxge_status_t 284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 285 { 286 npi_handle_t handle; 287 npi_status_t rs = NPI_SUCCESS; 288 nxge_status_t status = NXGE_OK; 289 290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 
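	/*
	 * npi_rxdma_cfg_rdc_reset() below issues the RDC reset through
	 * the NPI layer; as elsewhere in this file, a non-successful NPI
	 * code is folded into the returned status as (NXGE_ERROR | rs).
	 */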
291 292 handle = NXGE_DEV_NPI_HANDLE(nxgep); 293 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 294 295 if (rs != NPI_SUCCESS) { 296 status = NXGE_ERROR | rs; 297 } 298 299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 300 301 return (status); 302 } 303 304 void 305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 306 { 307 nxge_grp_set_t *set = &nxgep->rx_set; 308 int rdc; 309 310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 311 312 if (!isLDOMguest(nxgep)) { 313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 314 (void) npi_rxdma_dump_fzc_regs(handle); 315 } 316 317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 318 NXGE_DEBUG_MSG((nxgep, TX_CTL, 319 "nxge_rxdma_regs_dump_channels: " 320 "NULL ring pointer(s)")); 321 return; 322 } 323 324 if (set->owned.map == 0) { 325 NXGE_DEBUG_MSG((nxgep, RX_CTL, 326 "nxge_rxdma_regs_dump_channels: no channels")); 327 return; 328 } 329 330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 331 if ((1 << rdc) & set->owned.map) { 332 rx_rbr_ring_t *ring = 333 nxgep->rx_rbr_rings->rbr_rings[rdc]; 334 if (ring) { 335 (void) nxge_dump_rxdma_channel(nxgep, rdc); 336 } 337 } 338 } 339 340 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 341 } 342 343 nxge_status_t 344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 345 { 346 npi_handle_t handle; 347 npi_status_t rs = NPI_SUCCESS; 348 nxge_status_t status = NXGE_OK; 349 350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 351 352 handle = NXGE_DEV_NPI_HANDLE(nxgep); 353 rs = npi_rxdma_dump_rdc_regs(handle, channel); 354 355 if (rs != NPI_SUCCESS) { 356 status = NXGE_ERROR | rs; 357 } 358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 359 return (status); 360 } 361 362 nxge_status_t 363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 364 p_rx_dma_ent_msk_t mask_p) 365 { 366 npi_handle_t handle; 367 npi_status_t rs = NPI_SUCCESS; 368 nxge_status_t status = NXGE_OK; 369 370 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 371 "<== nxge_init_rxdma_channel_event_mask")); 372 373 handle = NXGE_DEV_NPI_HANDLE(nxgep); 374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 375 if (rs != NPI_SUCCESS) { 376 status = NXGE_ERROR | rs; 377 } 378 379 return (status); 380 } 381 382 nxge_status_t 383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 384 p_rx_dma_ctl_stat_t cs_p) 385 { 386 npi_handle_t handle; 387 npi_status_t rs = NPI_SUCCESS; 388 nxge_status_t status = NXGE_OK; 389 390 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 391 "<== nxge_init_rxdma_channel_cntl_stat")); 392 393 handle = NXGE_DEV_NPI_HANDLE(nxgep); 394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 395 396 if (rs != NPI_SUCCESS) { 397 status = NXGE_ERROR | rs; 398 } 399 400 return (status); 401 } 402 403 /* 404 * nxge_rxdma_cfg_rdcgrp_default_rdc 405 * 406 * Set the default RDC for an RDC Group (Table) 407 * 408 * Arguments: 409 * nxgep 410 * rdcgrp The group to modify 411 * rdc The new default RDC. 
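 *
 *	Both rdcgrp and rdc are logical (partition-relative) indices;
 *	the NXGE_ACTUAL_RDCGRP()/NXGE_ACTUAL_RDC() macros defined at the
 *	top of this file add def_mac_rxdma_grpid and start_rdc to derive
 *	the hardware group and channel numbers used in the NPI call.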
412 * 413 * Notes: 414 * 415 * NPI/NXGE function calls: 416 * npi_rxdma_cfg_rdc_table_default_rdc() 417 * 418 * Registers accessed: 419 * RDC_TBL_REG: FZC_ZCP + 0x10000 420 * 421 * Context: 422 * Service domain 423 */ 424 nxge_status_t 425 nxge_rxdma_cfg_rdcgrp_default_rdc( 426 p_nxge_t nxgep, 427 uint8_t rdcgrp, 428 uint8_t rdc) 429 { 430 npi_handle_t handle; 431 npi_status_t rs = NPI_SUCCESS; 432 p_nxge_dma_pt_cfg_t p_dma_cfgp; 433 p_nxge_rdc_grp_t rdc_grp_p; 434 uint8_t actual_rdcgrp, actual_rdc; 435 436 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 439 440 handle = NXGE_DEV_NPI_HANDLE(nxgep); 441 442 /* 443 * This has to be rewritten. Do we even allow this anymore? 444 */ 445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 446 RDC_MAP_IN(rdc_grp_p->map, rdc); 447 rdc_grp_p->def_rdc = rdc; 448 449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 451 452 rs = npi_rxdma_cfg_rdc_table_default_rdc( 453 handle, actual_rdcgrp, actual_rdc); 454 455 if (rs != NPI_SUCCESS) { 456 return (NXGE_ERROR | rs); 457 } 458 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 460 return (NXGE_OK); 461 } 462 463 nxge_status_t 464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 465 { 466 npi_handle_t handle; 467 468 uint8_t actual_rdc; 469 npi_status_t rs = NPI_SUCCESS; 470 471 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 472 " ==> nxge_rxdma_cfg_port_default_rdc")); 473 474 handle = NXGE_DEV_NPI_HANDLE(nxgep); 475 actual_rdc = rdc; /* XXX Hack! */ 476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 477 478 479 if (rs != NPI_SUCCESS) { 480 return (NXGE_ERROR | rs); 481 } 482 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 483 " <== nxge_rxdma_cfg_port_default_rdc")); 484 485 return (NXGE_OK); 486 } 487 488 nxge_status_t 489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 490 uint16_t pkts) 491 { 492 npi_status_t rs = NPI_SUCCESS; 493 npi_handle_t handle; 494 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 495 " ==> nxge_rxdma_cfg_rcr_threshold")); 496 handle = NXGE_DEV_NPI_HANDLE(nxgep); 497 498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 499 500 if (rs != NPI_SUCCESS) { 501 return (NXGE_ERROR | rs); 502 } 503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 504 return (NXGE_OK); 505 } 506 507 nxge_status_t 508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 509 uint16_t tout, uint8_t enable) 510 { 511 npi_status_t rs = NPI_SUCCESS; 512 npi_handle_t handle; 513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 514 handle = NXGE_DEV_NPI_HANDLE(nxgep); 515 if (enable == 0) { 516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 517 } else { 518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 519 tout); 520 } 521 522 if (rs != NPI_SUCCESS) { 523 return (NXGE_ERROR | rs); 524 } 525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 526 return (NXGE_OK); 527 } 528 529 nxge_status_t 530 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 531 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 532 { 533 npi_handle_t handle; 534 rdc_desc_cfg_t rdc_desc; 535 p_rcrcfig_b_t cfgb_p; 536 npi_status_t rs = NPI_SUCCESS; 537 538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 539 handle = NXGE_DEV_NPI_HANDLE(nxgep); 540 /* 541 * Use configuration data composed at init time. 
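	 * The rdc_desc_cfg_t filled in below gathers the mailbox address,
	 * the RBR/RCR ring addresses and lengths, the three packet buffer
	 * size codes and the RCR threshold/timeout settings into a single
	 * descriptor.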
542 * Write to hardware the receive ring configurations. 543 */ 544 rdc_desc.mbox_enable = 1; 545 rdc_desc.mbox_addr = mbox_p->mbox_addr; 546 NXGE_DEBUG_MSG((nxgep, RX_CTL, 547 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 548 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 549 550 rdc_desc.rbr_len = rbr_p->rbb_max; 551 rdc_desc.rbr_addr = rbr_p->rbr_addr; 552 553 switch (nxgep->rx_bksize_code) { 554 case RBR_BKSIZE_4K: 555 rdc_desc.page_size = SIZE_4KB; 556 break; 557 case RBR_BKSIZE_8K: 558 rdc_desc.page_size = SIZE_8KB; 559 break; 560 case RBR_BKSIZE_16K: 561 rdc_desc.page_size = SIZE_16KB; 562 break; 563 case RBR_BKSIZE_32K: 564 rdc_desc.page_size = SIZE_32KB; 565 break; 566 } 567 568 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 569 rdc_desc.valid0 = 1; 570 571 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 572 rdc_desc.valid1 = 1; 573 574 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 575 rdc_desc.valid2 = 1; 576 577 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 578 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 579 580 rdc_desc.rcr_len = rcr_p->comp_size; 581 rdc_desc.rcr_addr = rcr_p->rcr_addr; 582 583 cfgb_p = &(rcr_p->rcr_cfgb); 584 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 585 /* For now, disable this timeout in a guest domain. */ 586 if (isLDOMguest(nxgep)) { 587 rdc_desc.rcr_timeout = 0; 588 rdc_desc.rcr_timeout_enable = 0; 589 } else { 590 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 591 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 592 } 593 594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 595 "rbr_len qlen %d pagesize code %d rcr_len %d", 596 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 598 "size 0 %d size 1 %d size 2 %d", 599 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 600 rbr_p->npi_pkt_buf_size2)); 601 602 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) 603 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, 604 &rdc_desc, B_TRUE); 605 else 606 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, 607 &rdc_desc, B_FALSE); 608 if (rs != NPI_SUCCESS) { 609 return (NXGE_ERROR | rs); 610 } 611 612 /* 613 * Enable the timeout and threshold. 614 */ 615 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 616 rdc_desc.rcr_threshold); 617 if (rs != NPI_SUCCESS) { 618 return (NXGE_ERROR | rs); 619 } 620 621 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 622 rdc_desc.rcr_timeout); 623 if (rs != NPI_SUCCESS) { 624 return (NXGE_ERROR | rs); 625 } 626 627 if (!isLDOMguest(nxgep)) { 628 /* Enable the DMA */ 629 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 630 if (rs != NPI_SUCCESS) { 631 return (NXGE_ERROR | rs); 632 } 633 } 634 635 /* Kick the DMA engine. 
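	 * The kick below posts rbb_max buffer pointers at once, handing
	 * the entire RBR to the hardware.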
*/ 636 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 637 638 if (!isLDOMguest(nxgep)) { 639 /* Clear the rbr empty bit */ 640 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 641 } 642 643 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 644 645 return (NXGE_OK); 646 } 647 648 nxge_status_t 649 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 650 { 651 npi_handle_t handle; 652 npi_status_t rs = NPI_SUCCESS; 653 654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 655 handle = NXGE_DEV_NPI_HANDLE(nxgep); 656 657 /* disable the DMA */ 658 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 659 if (rs != NPI_SUCCESS) { 660 NXGE_DEBUG_MSG((nxgep, RX_CTL, 661 "<== nxge_disable_rxdma_channel:failed (0x%x)", 662 rs)); 663 return (NXGE_ERROR | rs); 664 } 665 666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 667 return (NXGE_OK); 668 } 669 670 nxge_status_t 671 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 672 { 673 npi_handle_t handle; 674 nxge_status_t status = NXGE_OK; 675 676 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 677 "<== nxge_init_rxdma_channel_rcrflush")); 678 679 handle = NXGE_DEV_NPI_HANDLE(nxgep); 680 npi_rxdma_rdc_rcr_flush(handle, channel); 681 682 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 683 "<== nxge_init_rxdma_channel_rcrflsh")); 684 return (status); 685 686 } 687 688 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 689 690 #define TO_LEFT -1 691 #define TO_RIGHT 1 692 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 693 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 694 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 695 #define NO_HINT 0xffffffff 696 697 /*ARGSUSED*/ 698 nxge_status_t 699 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 700 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 701 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 702 { 703 int bufsize; 704 uint64_t pktbuf_pp; 705 uint64_t dvma_addr; 706 rxring_info_t *ring_info; 707 int base_side, end_side; 708 int r_index, l_index, anchor_index; 709 int found, search_done; 710 uint32_t offset, chunk_size, block_size, page_size_mask; 711 uint32_t chunk_index, block_index, total_index; 712 int max_iterations, iteration; 713 rxbuf_index_info_t *bufinfo; 714 715 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 716 717 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 718 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 719 pkt_buf_addr_pp, 720 pktbufsz_type)); 721 #if defined(__i386) 722 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 723 #else 724 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 725 #endif 726 727 switch (pktbufsz_type) { 728 case 0: 729 bufsize = rbr_p->pkt_buf_size0; 730 break; 731 case 1: 732 bufsize = rbr_p->pkt_buf_size1; 733 break; 734 case 2: 735 bufsize = rbr_p->pkt_buf_size2; 736 break; 737 case RCR_SINGLE_BLOCK: 738 bufsize = 0; 739 anchor_index = 0; 740 break; 741 default: 742 return (NXGE_ERROR); 743 } 744 745 if (rbr_p->num_blocks == 1) { 746 anchor_index = 0; 747 ring_info = rbr_p->ring_info; 748 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 749 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 750 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 751 "buf_pp $%p btype %d anchor_index %d " 752 "bufinfo $%p", 753 pkt_buf_addr_pp, 754 pktbufsz_type, 755 anchor_index, 756 bufinfo)); 757 758 goto found_index; 759 } 760 761 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 762 "==> nxge_rxbuf_pp_to_vp: " 763 "buf_pp $%p btype %d anchor_index %d", 764 pkt_buf_addr_pp, 765 pktbufsz_type, 766 anchor_index)); 767 768 ring_info = rbr_p->ring_info; 769 found = B_FALSE; 
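	/*
	 * Lookup overview (illustrative numbers): the code below first
	 * consults the per-buffer-size hint and, on a miss, binary
	 * searches bufinfo[], which nxge_rxbuf_index_info_init() sorted
	 * by DVMA address.  With num_blocks = 8, the first probe is
	 * MID_INDEX(7, 0) = 4 and the search is bounded by
	 * max_iterations (5 in that case).
	 */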
770 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 771 iteration = 0; 772 max_iterations = ring_info->max_iterations; 773 /* 774 * First check if this block has been seen 775 * recently. This is indicated by a hint which 776 * is initialized when the first buffer of the block 777 * is seen. The hint is reset when the last buffer of 778 * the block has been processed. 779 * As three block sizes are supported, three hints 780 * are kept. The idea behind the hints is that once 781 * the hardware uses a block for a buffer of that 782 * size, it will use it exclusively for that size 783 * and will use it until it is exhausted. It is assumed 784 * that there would a single block being used for the same 785 * buffer sizes at any given time. 786 */ 787 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 788 anchor_index = ring_info->hint[pktbufsz_type]; 789 dvma_addr = bufinfo[anchor_index].dvma_addr; 790 chunk_size = bufinfo[anchor_index].buf_size; 791 if ((pktbuf_pp >= dvma_addr) && 792 (pktbuf_pp < (dvma_addr + chunk_size))) { 793 found = B_TRUE; 794 /* 795 * check if this is the last buffer in the block 796 * If so, then reset the hint for the size; 797 */ 798 799 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 800 ring_info->hint[pktbufsz_type] = NO_HINT; 801 } 802 } 803 804 if (found == B_FALSE) { 805 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 806 "==> nxge_rxbuf_pp_to_vp: (!found)" 807 "buf_pp $%p btype %d anchor_index %d", 808 pkt_buf_addr_pp, 809 pktbufsz_type, 810 anchor_index)); 811 812 /* 813 * This is the first buffer of the block of this 814 * size. Need to search the whole information 815 * array. 816 * the search algorithm uses a binary tree search 817 * algorithm. It assumes that the information is 818 * already sorted with increasing order 819 * info[0] < info[1] < info[2] .... 
< info[n-1] 820 * where n is the size of the information array 821 */ 822 r_index = rbr_p->num_blocks - 1; 823 l_index = 0; 824 search_done = B_FALSE; 825 anchor_index = MID_INDEX(r_index, l_index); 826 while (search_done == B_FALSE) { 827 if ((r_index == l_index) || 828 (iteration >= max_iterations)) 829 search_done = B_TRUE; 830 end_side = TO_RIGHT; /* to the right */ 831 base_side = TO_LEFT; /* to the left */ 832 /* read the DVMA address information and sort it */ 833 dvma_addr = bufinfo[anchor_index].dvma_addr; 834 chunk_size = bufinfo[anchor_index].buf_size; 835 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 836 "==> nxge_rxbuf_pp_to_vp: (searching)" 837 "buf_pp $%p btype %d " 838 "anchor_index %d chunk_size %d dvmaaddr $%p", 839 pkt_buf_addr_pp, 840 pktbufsz_type, 841 anchor_index, 842 chunk_size, 843 dvma_addr)); 844 845 if (pktbuf_pp >= dvma_addr) 846 base_side = TO_RIGHT; /* to the right */ 847 if (pktbuf_pp < (dvma_addr + chunk_size)) 848 end_side = TO_LEFT; /* to the left */ 849 850 switch (base_side + end_side) { 851 case IN_MIDDLE: 852 /* found */ 853 found = B_TRUE; 854 search_done = B_TRUE; 855 if ((pktbuf_pp + bufsize) < 856 (dvma_addr + chunk_size)) 857 ring_info->hint[pktbufsz_type] = 858 bufinfo[anchor_index].buf_index; 859 break; 860 case BOTH_RIGHT: 861 /* not found: go to the right */ 862 l_index = anchor_index + 1; 863 anchor_index = MID_INDEX(r_index, l_index); 864 break; 865 866 case BOTH_LEFT: 867 /* not found: go to the left */ 868 r_index = anchor_index - 1; 869 anchor_index = MID_INDEX(r_index, l_index); 870 break; 871 default: /* should not come here */ 872 return (NXGE_ERROR); 873 } 874 iteration++; 875 } 876 877 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 878 "==> nxge_rxbuf_pp_to_vp: (search done)" 879 "buf_pp $%p btype %d anchor_index %d", 880 pkt_buf_addr_pp, 881 pktbufsz_type, 882 anchor_index)); 883 } 884 885 if (found == B_FALSE) { 886 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 887 "==> nxge_rxbuf_pp_to_vp: (search failed)" 888 "buf_pp $%p btype %d anchor_index %d", 889 pkt_buf_addr_pp, 890 pktbufsz_type, 891 anchor_index)); 892 return (NXGE_ERROR); 893 } 894 895 found_index: 896 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 897 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 898 "buf_pp $%p btype %d bufsize %d anchor_index %d", 899 pkt_buf_addr_pp, 900 pktbufsz_type, 901 bufsize, 902 anchor_index)); 903 904 /* index of the first block in this chunk */ 905 chunk_index = bufinfo[anchor_index].start_index; 906 dvma_addr = bufinfo[anchor_index].dvma_addr; 907 page_size_mask = ring_info->block_size_mask; 908 909 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 910 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 911 "buf_pp $%p btype %d bufsize %d " 912 "anchor_index %d chunk_index %d dvma $%p", 913 pkt_buf_addr_pp, 914 pktbufsz_type, 915 bufsize, 916 anchor_index, 917 chunk_index, 918 dvma_addr)); 919 920 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 921 block_size = rbr_p->block_size; /* System block(page) size */ 922 923 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 924 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 925 "buf_pp $%p btype %d bufsize %d " 926 "anchor_index %d chunk_index %d dvma $%p " 927 "offset %d block_size %d", 928 pkt_buf_addr_pp, 929 pktbufsz_type, 930 bufsize, 931 anchor_index, 932 chunk_index, 933 dvma_addr, 934 offset, 935 block_size)); 936 937 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 938 939 block_index = (offset / block_size); /* index within chunk */ 940 total_index = chunk_index + block_index; 941 942 943 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 944 "==> nxge_rxbuf_pp_to_vp: " 945 
"total_index %d dvma_addr $%p " 946 "offset %d block_size %d " 947 "block_index %d ", 948 total_index, dvma_addr, 949 offset, block_size, 950 block_index)); 951 #if defined(__i386) 952 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 953 (uint32_t)offset); 954 #else 955 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 956 (uint64_t)offset); 957 #endif 958 959 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 960 "==> nxge_rxbuf_pp_to_vp: " 961 "total_index %d dvma_addr $%p " 962 "offset %d block_size %d " 963 "block_index %d " 964 "*pkt_buf_addr_p $%p", 965 total_index, dvma_addr, 966 offset, block_size, 967 block_index, 968 *pkt_buf_addr_p)); 969 970 971 *msg_index = total_index; 972 *bufoffset = (offset & page_size_mask); 973 974 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 975 "==> nxge_rxbuf_pp_to_vp: get msg index: " 976 "msg_index %d bufoffset_index %d", 977 *msg_index, 978 *bufoffset)); 979 980 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 981 982 return (NXGE_OK); 983 } 984 985 /* 986 * used by quick sort (qsort) function 987 * to perform comparison 988 */ 989 static int 990 nxge_sort_compare(const void *p1, const void *p2) 991 { 992 993 rxbuf_index_info_t *a, *b; 994 995 a = (rxbuf_index_info_t *)p1; 996 b = (rxbuf_index_info_t *)p2; 997 998 if (a->dvma_addr > b->dvma_addr) 999 return (1); 1000 if (a->dvma_addr < b->dvma_addr) 1001 return (-1); 1002 return (0); 1003 } 1004 1005 1006 1007 /* 1008 * grabbed this sort implementation from common/syscall/avl.c 1009 * 1010 */ 1011 /* 1012 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 1013 * v = Ptr to array/vector of objs 1014 * n = # objs in the array 1015 * s = size of each obj (must be multiples of a word size) 1016 * f = ptr to function to compare two objs 1017 * returns (-1 = less than, 0 = equal, 1 = greater than 1018 */ 1019 void 1020 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1021 { 1022 int g, i, j, ii; 1023 unsigned int *p1, *p2; 1024 unsigned int tmp; 1025 1026 /* No work to do */ 1027 if (v == NULL || n <= 1) 1028 return; 1029 /* Sanity check on arguments */ 1030 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1031 ASSERT(s > 0); 1032 1033 for (g = n / 2; g > 0; g /= 2) { 1034 for (i = g; i < n; i++) { 1035 for (j = i - g; j >= 0 && 1036 (*f)(v + j * s, v + (j + g) * s) == 1; 1037 j -= g) { 1038 p1 = (unsigned *)(v + j * s); 1039 p2 = (unsigned *)(v + (j + g) * s); 1040 for (ii = 0; ii < s / 4; ii++) { 1041 tmp = *p1; 1042 *p1++ = *p2; 1043 *p2++ = tmp; 1044 } 1045 } 1046 } 1047 } 1048 } 1049 1050 /* 1051 * Initialize data structures required for rxdma 1052 * buffer dvma->vmem address lookup 1053 */ 1054 /*ARGSUSED*/ 1055 static nxge_status_t 1056 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1057 { 1058 1059 int index; 1060 rxring_info_t *ring_info; 1061 int max_iteration = 0, max_index = 0; 1062 1063 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1064 1065 ring_info = rbrp->ring_info; 1066 ring_info->hint[0] = NO_HINT; 1067 ring_info->hint[1] = NO_HINT; 1068 ring_info->hint[2] = NO_HINT; 1069 max_index = rbrp->num_blocks; 1070 1071 /* read the DVMA address information and sort it */ 1072 /* do init of the information array */ 1073 1074 1075 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1076 " nxge_rxbuf_index_info_init Sort ptrs")); 1077 1078 /* sort the array */ 1079 nxge_ksort((void *)ring_info->buffer, max_index, 1080 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1081 1082 1083 1084 for (index = 0; index < max_index; index++) { 1085 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1086 " nxge_rxbuf_index_info_init: sorted chunk %d " 1087 " ioaddr $%p kaddr $%p size %x", 1088 index, ring_info->buffer[index].dvma_addr, 1089 ring_info->buffer[index].kaddr, 1090 ring_info->buffer[index].buf_size)); 1091 } 1092 1093 max_iteration = 0; 1094 while (max_index >= (1ULL << max_iteration)) 1095 max_iteration++; 1096 ring_info->max_iterations = max_iteration + 1; 1097 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1098 " nxge_rxbuf_index_info_init Find max iter %d", 1099 ring_info->max_iterations)); 1100 1101 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1102 return (NXGE_OK); 1103 } 1104 1105 /* ARGSUSED */ 1106 void 1107 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1108 { 1109 #ifdef NXGE_DEBUG 1110 1111 uint32_t bptr; 1112 uint64_t pp; 1113 1114 bptr = entry_p->bits.hdw.pkt_buf_addr; 1115 1116 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1117 "\trcr entry $%p " 1118 "\trcr entry 0x%0llx " 1119 "\trcr entry 0x%08x " 1120 "\trcr entry 0x%08x " 1121 "\tvalue 0x%0llx\n" 1122 "\tmulti = %d\n" 1123 "\tpkt_type = 0x%x\n" 1124 "\tzero_copy = %d\n" 1125 "\tnoport = %d\n" 1126 "\tpromis = %d\n" 1127 "\terror = 0x%04x\n" 1128 "\tdcf_err = 0x%01x\n" 1129 "\tl2_len = %d\n" 1130 "\tpktbufsize = %d\n" 1131 "\tpkt_buf_addr = $%p\n" 1132 "\tpkt_buf_addr (<< 6) = $%p\n", 1133 entry_p, 1134 *(int64_t *)entry_p, 1135 *(int32_t *)entry_p, 1136 *(int32_t *)((char *)entry_p + 32), 1137 entry_p->value, 1138 entry_p->bits.hdw.multi, 1139 entry_p->bits.hdw.pkt_type, 1140 entry_p->bits.hdw.zero_copy, 1141 entry_p->bits.hdw.noport, 1142 entry_p->bits.hdw.promis, 1143 entry_p->bits.hdw.error, 1144 entry_p->bits.hdw.dcf_err, 1145 entry_p->bits.hdw.l2_len, 1146 entry_p->bits.hdw.pktbufsz, 1147 bptr, 1148 entry_p->bits.ldw.pkt_buf_addr)); 1149 1150 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1151 RCR_PKT_BUF_ADDR_SHIFT; 1152 1153 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1154 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1155 #endif 1156 } 1157 1158 void 1159 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1160 { 1161 npi_handle_t handle; 1162 rbr_stat_t rbr_stat; 1163 addr44_t hd_addr; 1164 addr44_t tail_addr; 1165 uint16_t qlen; 1166 1167 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1168 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1169 1170 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1171 1172 /* RBR head */ 1173 hd_addr.addr = 0; 1174 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1175 #if defined(__i386) 1176 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1177 (void *)(uint32_t)hd_addr.addr); 1178 #else 1179 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1180 (void *)hd_addr.addr); 1181 #endif 1182 1183 /* RBR stats */ 1184 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1185 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1186 1187 /* RCR tail */ 1188 tail_addr.addr = 0; 1189 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1190 #if defined(__i386) 1191 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1192 (void *)(uint32_t)tail_addr.addr); 1193 #else 1194 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1195 (void *)tail_addr.addr); 1196 #endif 1197 1198 /* RCR qlen */ 1199 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1200 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1201 1202 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1203 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1204 } 1205 1206 nxge_status_t 1207 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1208 { 1209 nxge_grp_set_t 
*set = &nxgep->rx_set; 1210 nxge_status_t status; 1211 npi_status_t rs; 1212 int rdc; 1213 1214 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1215 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1216 1217 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1218 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1219 "<== nxge_rxdma_mode: not initialized")); 1220 return (NXGE_ERROR); 1221 } 1222 1223 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1224 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1225 "<== nxge_tx_port_fatal_err_recover: " 1226 "NULL ring pointer(s)")); 1227 return (NXGE_ERROR); 1228 } 1229 1230 if (set->owned.map == 0) { 1231 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1232 "nxge_rxdma_regs_dump_channels: no channels")); 1233 return (0); 1234 } 1235 1236 rs = 0; 1237 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1238 if ((1 << rdc) & set->owned.map) { 1239 rx_rbr_ring_t *ring = 1240 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1241 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1242 if (ring) { 1243 if (enable) { 1244 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1245 "==> nxge_rxdma_hw_mode: " 1246 "channel %d (enable)", rdc)); 1247 rs = npi_rxdma_cfg_rdc_enable 1248 (handle, rdc); 1249 } else { 1250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1251 "==> nxge_rxdma_hw_mode: " 1252 "channel %d disable)", rdc)); 1253 rs = npi_rxdma_cfg_rdc_disable 1254 (handle, rdc); 1255 } 1256 } 1257 } 1258 } 1259 1260 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1261 1262 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1263 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1264 1265 return (status); 1266 } 1267 1268 void 1269 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1270 { 1271 npi_handle_t handle; 1272 1273 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1274 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1275 1276 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1277 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1278 1279 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1280 } 1281 1282 void 1283 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1284 { 1285 npi_handle_t handle; 1286 1287 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1288 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1289 1290 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1291 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1292 1293 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1294 } 1295 1296 void 1297 nxge_hw_start_rx(p_nxge_t nxgep) 1298 { 1299 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1300 1301 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1302 (void) nxge_rx_mac_enable(nxgep); 1303 1304 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1305 } 1306 1307 /*ARGSUSED*/ 1308 void 1309 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1310 { 1311 nxge_grp_set_t *set = &nxgep->rx_set; 1312 int rdc; 1313 1314 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1315 1316 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1317 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1318 "<== nxge_tx_port_fatal_err_recover: " 1319 "NULL ring pointer(s)")); 1320 return; 1321 } 1322 1323 if (set->owned.map == 0) { 1324 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1325 "nxge_rxdma_regs_dump_channels: no channels")); 1326 return; 1327 } 1328 1329 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1330 if ((1 << rdc) & set->owned.map) { 1331 rx_rbr_ring_t *ring = 1332 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1333 if (ring) { 1334 nxge_rxdma_hw_stop(nxgep, rdc); 1335 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1336 "==> nxge_fixup_rxdma_rings: " 1337 "channel %d ring 
$%px", 1338 rdc, ring)); 1339 (void) nxge_rxdma_fix_channel(nxgep, rdc); 1340 } 1341 } 1342 } 1343 1344 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1345 } 1346 1347 void 1348 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1349 { 1350 int ndmas; 1351 p_rx_rbr_rings_t rx_rbr_rings; 1352 p_rx_rbr_ring_t *rbr_rings; 1353 p_rx_rcr_rings_t rx_rcr_rings; 1354 p_rx_rcr_ring_t *rcr_rings; 1355 p_rx_mbox_areas_t rx_mbox_areas_p; 1356 p_rx_mbox_t *rx_mbox_p; 1357 p_nxge_dma_pool_t dma_buf_poolp; 1358 p_nxge_dma_pool_t dma_cntl_poolp; 1359 p_rx_rbr_ring_t rbrp; 1360 p_rx_rcr_ring_t rcrp; 1361 p_rx_mbox_t mboxp; 1362 p_nxge_dma_common_t dmap; 1363 nxge_status_t status = NXGE_OK; 1364 1365 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1366 1367 (void) nxge_rxdma_stop_channel(nxgep, channel); 1368 1369 dma_buf_poolp = nxgep->rx_buf_pool_p; 1370 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1371 1372 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1373 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1374 "<== nxge_rxdma_fix_channel: buf not allocated")); 1375 return; 1376 } 1377 1378 ndmas = dma_buf_poolp->ndmas; 1379 if (!ndmas) { 1380 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1381 "<== nxge_rxdma_fix_channel: no dma allocated")); 1382 return; 1383 } 1384 1385 rx_rbr_rings = nxgep->rx_rbr_rings; 1386 rx_rcr_rings = nxgep->rx_rcr_rings; 1387 rbr_rings = rx_rbr_rings->rbr_rings; 1388 rcr_rings = rx_rcr_rings->rcr_rings; 1389 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1390 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1391 1392 /* Reinitialize the receive block and completion rings */ 1393 rbrp = (p_rx_rbr_ring_t)rbr_rings[channel], 1394 rcrp = (p_rx_rcr_ring_t)rcr_rings[channel], 1395 mboxp = (p_rx_mbox_t)rx_mbox_p[channel]; 1396 1397 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1398 rbrp->rbr_rd_index = 0; 1399 rcrp->comp_rd_index = 0; 1400 rcrp->comp_wt_index = 0; 1401 1402 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1403 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1404 1405 status = nxge_rxdma_start_channel(nxgep, channel, 1406 rbrp, rcrp, mboxp); 1407 if (status != NXGE_OK) { 1408 goto nxge_rxdma_fix_channel_fail; 1409 } 1410 1411 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1412 "<== nxge_rxdma_fix_channel: success (0x%08x)", status)); 1413 return; 1414 1415 nxge_rxdma_fix_channel_fail: 1416 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1417 "<== nxge_rxdma_fix_channel: failed (0x%08x)", status)); 1418 } 1419 1420 p_rx_rbr_ring_t 1421 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1422 { 1423 nxge_grp_set_t *set = &nxgep->rx_set; 1424 nxge_channel_t rdc; 1425 1426 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1427 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1428 1429 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1430 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1431 "<== nxge_rxdma_get_rbr_ring: " 1432 "NULL ring pointer(s)")); 1433 return (NULL); 1434 } 1435 1436 if (set->owned.map == 0) { 1437 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1438 "<== nxge_rxdma_get_rbr_ring: no channels")); 1439 return (NULL); 1440 } 1441 1442 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1443 if ((1 << rdc) & set->owned.map) { 1444 rx_rbr_ring_t *ring = 1445 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1446 if (ring) { 1447 if (channel == ring->rdc) { 1448 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1449 "==> nxge_rxdma_get_rbr_ring: " 1450 "channel %d ring $%p", rdc, ring)); 1451 return (ring); 1452 } 1453 } 1454 } 1455 } 1456 1457 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1458 "<== nxge_rxdma_get_rbr_ring: not found")); 1459 1460 return 
(NULL); 1461 } 1462 1463 p_rx_rcr_ring_t 1464 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1465 { 1466 nxge_grp_set_t *set = &nxgep->rx_set; 1467 nxge_channel_t rdc; 1468 1469 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1470 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1471 1472 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1473 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1474 "<== nxge_rxdma_get_rcr_ring: " 1475 "NULL ring pointer(s)")); 1476 return (NULL); 1477 } 1478 1479 if (set->owned.map == 0) { 1480 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1481 "<== nxge_rxdma_get_rbr_ring: no channels")); 1482 return (NULL); 1483 } 1484 1485 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1486 if ((1 << rdc) & set->owned.map) { 1487 rx_rcr_ring_t *ring = 1488 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1489 if (ring) { 1490 if (channel == ring->rdc) { 1491 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1492 "==> nxge_rxdma_get_rcr_ring: " 1493 "channel %d ring $%p", rdc, ring)); 1494 return (ring); 1495 } 1496 } 1497 } 1498 } 1499 1500 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1501 "<== nxge_rxdma_get_rcr_ring: not found")); 1502 1503 return (NULL); 1504 } 1505 1506 /* 1507 * Static functions start here. 1508 */ 1509 static p_rx_msg_t 1510 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1511 { 1512 p_rx_msg_t nxge_mp = NULL; 1513 p_nxge_dma_common_t dmamsg_p; 1514 uchar_t *buffer; 1515 1516 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1517 if (nxge_mp == NULL) { 1518 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1519 "Allocation of a rx msg failed.")); 1520 goto nxge_allocb_exit; 1521 } 1522 1523 nxge_mp->use_buf_pool = B_FALSE; 1524 if (dmabuf_p) { 1525 nxge_mp->use_buf_pool = B_TRUE; 1526 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1527 *dmamsg_p = *dmabuf_p; 1528 dmamsg_p->nblocks = 1; 1529 dmamsg_p->block_size = size; 1530 dmamsg_p->alength = size; 1531 buffer = (uchar_t *)dmabuf_p->kaddrp; 1532 1533 dmabuf_p->kaddrp = (void *) 1534 ((char *)dmabuf_p->kaddrp + size); 1535 dmabuf_p->ioaddr_pp = (void *) 1536 ((char *)dmabuf_p->ioaddr_pp + size); 1537 dmabuf_p->alength -= size; 1538 dmabuf_p->offset += size; 1539 dmabuf_p->dma_cookie.dmac_laddress += size; 1540 dmabuf_p->dma_cookie.dmac_size -= size; 1541 1542 } else { 1543 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1544 if (buffer == NULL) { 1545 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1546 "Allocation of a receive page failed.")); 1547 goto nxge_allocb_fail1; 1548 } 1549 } 1550 1551 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1552 if (nxge_mp->rx_mblk_p == NULL) { 1553 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1554 goto nxge_allocb_fail2; 1555 } 1556 1557 nxge_mp->buffer = buffer; 1558 nxge_mp->block_size = size; 1559 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1560 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1561 nxge_mp->ref_cnt = 1; 1562 nxge_mp->free = B_TRUE; 1563 nxge_mp->rx_use_bcopy = B_FALSE; 1564 1565 atomic_inc_32(&nxge_mblks_pending); 1566 1567 goto nxge_allocb_exit; 1568 1569 nxge_allocb_fail2: 1570 if (!nxge_mp->use_buf_pool) { 1571 KMEM_FREE(buffer, size); 1572 } 1573 1574 nxge_allocb_fail1: 1575 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1576 nxge_mp = NULL; 1577 1578 nxge_allocb_exit: 1579 return (nxge_mp); 1580 } 1581 1582 p_mblk_t 1583 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1584 { 1585 p_mblk_t mp; 1586 1587 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1588 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1589 "offset = 0x%08X " 1590 "size = 0x%08X", 1591 nxge_mp, 
offset, size)); 1592 1593 mp = desballoc(&nxge_mp->buffer[offset], size, 1594 0, &nxge_mp->freeb); 1595 if (mp == NULL) { 1596 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1597 goto nxge_dupb_exit; 1598 } 1599 atomic_inc_32(&nxge_mp->ref_cnt); 1600 1601 1602 nxge_dupb_exit: 1603 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1604 nxge_mp)); 1605 return (mp); 1606 } 1607 1608 p_mblk_t 1609 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1610 { 1611 p_mblk_t mp; 1612 uchar_t *dp; 1613 1614 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1615 if (mp == NULL) { 1616 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1617 goto nxge_dupb_bcopy_exit; 1618 } 1619 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1620 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1621 mp->b_wptr = dp + size; 1622 1623 nxge_dupb_bcopy_exit: 1624 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1625 nxge_mp)); 1626 return (mp); 1627 } 1628 1629 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1630 p_rx_msg_t rx_msg_p); 1631 1632 void 1633 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1634 { 1635 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1636 1637 /* Reuse this buffer */ 1638 rx_msg_p->free = B_FALSE; 1639 rx_msg_p->cur_usage_cnt = 0; 1640 rx_msg_p->max_usage_cnt = 0; 1641 rx_msg_p->pkt_buf_size = 0; 1642 1643 if (rx_rbr_p->rbr_use_bcopy) { 1644 rx_msg_p->rx_use_bcopy = B_FALSE; 1645 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1646 } 1647 1648 /* 1649 * Get the rbr header pointer and its offset index. 1650 */ 1651 MUTEX_ENTER(&rx_rbr_p->post_lock); 1652 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1653 rx_rbr_p->rbr_wrap_mask); 1654 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1655 MUTEX_EXIT(&rx_rbr_p->post_lock); 1656 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1657 rx_rbr_p->rdc, 1); 1658 1659 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1660 "<== nxge_post_page (channel %d post_next_index %d)", 1661 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1662 1663 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1664 } 1665 1666 void 1667 nxge_freeb(p_rx_msg_t rx_msg_p) 1668 { 1669 size_t size; 1670 uchar_t *buffer = NULL; 1671 int ref_cnt; 1672 boolean_t free_state = B_FALSE; 1673 1674 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1675 1676 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1677 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1678 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1679 rx_msg_p, nxge_mblks_pending)); 1680 1681 /* 1682 * First we need to get the free state, then 1683 * atomic decrement the reference count to prevent 1684 * the race condition with the interrupt thread that 1685 * is processing a loaned up buffer block. 1686 */ 1687 free_state = rx_msg_p->free; 1688 ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt); 1689 if (!ref_cnt) { 1690 atomic_dec_32(&nxge_mblks_pending); 1691 buffer = rx_msg_p->buffer; 1692 size = rx_msg_p->block_size; 1693 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1694 "will free: rx_msg_p = $%p (block pending %d)", 1695 rx_msg_p, nxge_mblks_pending)); 1696 1697 if (!rx_msg_p->use_buf_pool) { 1698 KMEM_FREE(buffer, size); 1699 } 1700 1701 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1702 1703 if (ring) { 1704 /* 1705 * Decrement the receive buffer ring's reference 1706 * count, too. 1707 */ 1708 atomic_dec_32(&ring->rbr_ref_cnt); 1709 1710 /* 1711 * Free the receive buffer ring, if 1712 * 1. all the receive buffers have been freed 1713 * 2. 
and we are in the proper state (that is, 1714 * we are not UNMAPPING). 1715 */ 1716 if (ring->rbr_ref_cnt == 0 && 1717 ring->rbr_state == RBR_UNMAPPED) { 1718 /* 1719 * Free receive data buffers, 1720 * buffer index information 1721 * (rxring_info) and 1722 * the message block ring. 1723 */ 1724 NXGE_DEBUG_MSG((NULL, RX_CTL, 1725 "nxge_freeb:rx_msg_p = $%p " 1726 "(block pending %d) free buffers", 1727 rx_msg_p, nxge_mblks_pending)); 1728 nxge_rxdma_databuf_free(ring); 1729 if (ring->ring_info) { 1730 KMEM_FREE(ring->ring_info, 1731 sizeof (rxring_info_t)); 1732 } 1733 1734 if (ring->rx_msg_ring) { 1735 KMEM_FREE(ring->rx_msg_ring, 1736 ring->tnblocks * 1737 sizeof (p_rx_msg_t)); 1738 } 1739 KMEM_FREE(ring, sizeof (*ring)); 1740 } 1741 } 1742 return; 1743 } 1744 1745 /* 1746 * Repost buffer. 1747 */ 1748 if (free_state && (ref_cnt == 1) && ring) { 1749 NXGE_DEBUG_MSG((NULL, RX_CTL, 1750 "nxge_freeb: post page $%p:", rx_msg_p)); 1751 if (ring->rbr_state == RBR_POSTING) 1752 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1753 } 1754 1755 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1756 } 1757 1758 uint_t 1759 nxge_rx_intr(char *arg1, char *arg2) 1760 { 1761 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1762 p_nxge_t nxgep = (p_nxge_t)arg2; 1763 p_nxge_ldg_t ldgp; 1764 uint8_t channel; 1765 npi_handle_t handle; 1766 rx_dma_ctl_stat_t cs; 1767 p_rx_rcr_ring_t rcrp; 1768 mblk_t *mp = NULL; 1769 1770 if (ldvp == NULL) { 1771 NXGE_DEBUG_MSG((NULL, INT_CTL, 1772 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1773 nxgep, ldvp)); 1774 return (DDI_INTR_CLAIMED); 1775 } 1776 1777 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1778 nxgep = ldvp->nxgep; 1779 } 1780 1781 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1782 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1783 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1784 "<== nxge_rx_intr: interface not started or intialized")); 1785 return (DDI_INTR_CLAIMED); 1786 } 1787 1788 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1789 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1790 nxgep, ldvp)); 1791 1792 /* 1793 * Get the PIO handle. 1794 */ 1795 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1796 1797 /* 1798 * Get the ring to enable us to process packets. 1799 */ 1800 rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1801 1802 /* 1803 * The RCR ring lock must be held when packets 1804 * are being processed and the hardware registers are 1805 * being read or written to prevent race condition 1806 * among the interrupt thread, the polling thread 1807 * (will cause fatal errors such as rcrincon bit set) 1808 * and the setting of the poll_flag. 1809 */ 1810 MUTEX_ENTER(&rcrp->lock); 1811 1812 /* 1813 * Get the control and status for this channel. 1814 */ 1815 channel = ldvp->channel; 1816 ldgp = ldvp->ldgp; 1817 1818 if (!isLDOMguest(nxgep) && (!rcrp->started)) { 1819 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1820 "<== nxge_rx_intr: channel is not started")); 1821 1822 /* 1823 * We received an interrupt before the ring is started. 1824 */ 1825 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, 1826 &cs.value); 1827 cs.value &= RX_DMA_CTL_STAT_WR1C; 1828 cs.bits.hdw.mex = 1; 1829 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1830 cs.value); 1831 1832 /* 1833 * Rearm this logical group if this is a single device 1834 * group. 
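	 * (Re-arming writes this LDG's LDGIMGN register with the arm bit
	 * and the group timer so the group can interrupt again; a guest
	 * domain goes through nxge_hio_ldgimgn() instead.)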
1835 */ 1836 if (ldgp->nldvs == 1) { 1837 if (isLDOMguest(nxgep)) { 1838 nxge_hio_ldgimgn(nxgep, ldgp); 1839 } else { 1840 ldgimgm_t mgm; 1841 1842 mgm.value = 0; 1843 mgm.bits.ldw.arm = 1; 1844 mgm.bits.ldw.timer = ldgp->ldg_timer; 1845 1846 NXGE_REG_WR64(handle, 1847 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1848 mgm.value); 1849 } 1850 } 1851 MUTEX_EXIT(&rcrp->lock); 1852 return (DDI_INTR_CLAIMED); 1853 } 1854 1855 ASSERT(rcrp->ldgp == ldgp); 1856 ASSERT(rcrp->ldvp == ldvp); 1857 1858 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1859 1860 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1861 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1862 channel, 1863 cs.value, 1864 cs.bits.hdw.rcrto, 1865 cs.bits.hdw.rcrthres)); 1866 1867 if (!rcrp->poll_flag) { 1868 mp = nxge_rx_pkts(nxgep, rcrp, cs, -1); 1869 } 1870 1871 /* error events. */ 1872 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1873 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1874 } 1875 1876 /* 1877 * Enable the mailbox update interrupt if we want 1878 * to use mailbox. We probably don't need to use 1879 * mailbox as it only saves us one pio read. 1880 * Also write 1 to rcrthres and rcrto to clear 1881 * these two edge triggered bits. 1882 */ 1883 cs.value &= RX_DMA_CTL_STAT_WR1C; 1884 cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1; 1885 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1886 cs.value); 1887 1888 /* 1889 * If the polling mode is enabled, disable the interrupt. 1890 */ 1891 if (rcrp->poll_flag) { 1892 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1893 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1894 "(disabling interrupts)", channel, ldgp, ldvp)); 1895 1896 /* 1897 * Disarm this logical group if this is a single device 1898 * group. 1899 */ 1900 if (ldgp->nldvs == 1) { 1901 if (isLDOMguest(nxgep)) { 1902 ldgp->arm = B_FALSE; 1903 nxge_hio_ldgimgn(nxgep, ldgp); 1904 } else { 1905 ldgimgm_t mgm; 1906 mgm.value = 0; 1907 mgm.bits.ldw.arm = 0; 1908 NXGE_REG_WR64(handle, 1909 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1910 mgm.value); 1911 } 1912 } 1913 } else { 1914 /* 1915 * Rearm this logical group if this is a single device 1916 * group. 1917 */ 1918 if (ldgp->nldvs == 1) { 1919 if (isLDOMguest(nxgep)) { 1920 nxge_hio_ldgimgn(nxgep, ldgp); 1921 } else { 1922 ldgimgm_t mgm; 1923 1924 mgm.value = 0; 1925 mgm.bits.ldw.arm = 1; 1926 mgm.bits.ldw.timer = ldgp->ldg_timer; 1927 1928 NXGE_REG_WR64(handle, 1929 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1930 mgm.value); 1931 } 1932 } 1933 1934 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1935 "==> nxge_rx_intr: rdc %d ldgp $%p " 1936 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1937 } 1938 MUTEX_EXIT(&rcrp->lock); 1939 1940 if (mp != NULL) { 1941 mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp, 1942 rcrp->rcr_gen_num); 1943 } 1944 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 1945 return (DDI_INTR_CLAIMED); 1946 } 1947 1948 /* 1949 * This routine is the main packet receive processing function. 1950 * It gets the packet type, error code, and buffer related 1951 * information from the receive completion entry. 1952 * How many completion entries to process is based on the number of packets 1953 * queued by the hardware, a hardware maintained tail pointer 1954 * and a configurable receive packet count. 1955 * 1956 * A chain of message blocks will be created as result of processing 1957 * the completion entries. 
This chain of message blocks will be returned and 1958 * a hardware control status register will be updated with the number of 1959 * packets were removed from the hardware queue. 1960 * 1961 * The RCR ring lock is held when entering this function. 1962 */ 1963 static mblk_t * 1964 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 1965 int bytes_to_pickup) 1966 { 1967 npi_handle_t handle; 1968 uint8_t channel; 1969 uint32_t comp_rd_index; 1970 p_rcr_entry_t rcr_desc_rd_head_p; 1971 p_rcr_entry_t rcr_desc_rd_head_pp; 1972 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1973 uint16_t qlen, nrcr_read, npkt_read; 1974 uint32_t qlen_hw; 1975 boolean_t multi; 1976 rcrcfig_b_t rcr_cfg_b; 1977 int totallen = 0; 1978 #if defined(_BIG_ENDIAN) 1979 npi_status_t rs = NPI_SUCCESS; 1980 #endif 1981 1982 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 1983 "channel %d", rcr_p->rdc)); 1984 1985 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1986 return (NULL); 1987 } 1988 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1989 channel = rcr_p->rdc; 1990 1991 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1992 "==> nxge_rx_pkts: START: rcr channel %d " 1993 "head_p $%p head_pp $%p index %d ", 1994 channel, rcr_p->rcr_desc_rd_head_p, 1995 rcr_p->rcr_desc_rd_head_pp, 1996 rcr_p->comp_rd_index)); 1997 1998 1999 #if !defined(_BIG_ENDIAN) 2000 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2001 #else 2002 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2003 if (rs != NPI_SUCCESS) { 2004 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2005 "channel %d, get qlen failed 0x%08x", 2006 channel, rs)); 2007 return (NULL); 2008 } 2009 #endif 2010 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2011 "qlen %d", channel, qlen)); 2012 2013 2014 2015 if (!qlen) { 2016 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2017 "==> nxge_rx_pkts:rcr channel %d " 2018 "qlen %d (no pkts)", channel, qlen)); 2019 2020 return (NULL); 2021 } 2022 2023 comp_rd_index = rcr_p->comp_rd_index; 2024 2025 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2026 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2027 nrcr_read = npkt_read = 0; 2028 2029 /* 2030 * Number of packets queued 2031 * (The jumbo or multi packet will be counted as only one 2032 * packets and it may take up more than one completion entry). 2033 */ 2034 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2035 qlen : nxge_max_rx_pkts; 2036 head_mp = NULL; 2037 tail_mp = &head_mp; 2038 nmp = mp_cont = NULL; 2039 multi = B_FALSE; 2040 2041 while (qlen_hw) { 2042 2043 #ifdef NXGE_DEBUG 2044 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2045 #endif 2046 /* 2047 * Process one completion ring entry. 
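		 * nxge_receive_packet() sets *multi when more RCR entries
		 * belong to the same frame and returns any continuation
		 * block in mp_cont; the chaining cases below stitch those
		 * pieces into a single mblk chain.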
2048 */ 2049 nxge_receive_packet(nxgep, 2050 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2051 2052 /* 2053 * message chaining modes 2054 */ 2055 if (nmp) { 2056 nmp->b_next = NULL; 2057 if (!multi && !mp_cont) { /* frame fits a partition */ 2058 *tail_mp = nmp; 2059 tail_mp = &nmp->b_next; 2060 totallen += MBLKL(nmp); 2061 nmp = NULL; 2062 } else if (multi && !mp_cont) { /* first segment */ 2063 *tail_mp = nmp; 2064 tail_mp = &nmp->b_cont; 2065 totallen += MBLKL(nmp); 2066 } else if (multi && mp_cont) { /* mid of multi segs */ 2067 *tail_mp = mp_cont; 2068 tail_mp = &mp_cont->b_cont; 2069 totallen += MBLKL(mp_cont); 2070 } else if (!multi && mp_cont) { /* last segment */ 2071 *tail_mp = mp_cont; 2072 tail_mp = &nmp->b_next; 2073 totallen += MBLKL(mp_cont); 2074 nmp = NULL; 2075 } 2076 } 2077 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2078 "==> nxge_rx_pkts: loop: rcr channel %d " 2079 "before updating: multi %d " 2080 "nrcr_read %d " 2081 "npk read %d " 2082 "head_pp $%p index %d ", 2083 channel, 2084 multi, 2085 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2086 comp_rd_index)); 2087 2088 if (!multi) { 2089 qlen_hw--; 2090 npkt_read++; 2091 } 2092 2093 /* 2094 * Update the next read entry. 2095 */ 2096 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2097 rcr_p->comp_wrap_mask); 2098 2099 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2100 rcr_p->rcr_desc_first_p, 2101 rcr_p->rcr_desc_last_p); 2102 2103 nrcr_read++; 2104 2105 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2106 "<== nxge_rx_pkts: (SAM, process one packet) " 2107 "nrcr_read %d", 2108 nrcr_read)); 2109 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2110 "==> nxge_rx_pkts: loop: rcr channel %d " 2111 "multi %d " 2112 "nrcr_read %d " 2113 "npk read %d " 2114 "head_pp $%p index %d ", 2115 channel, 2116 multi, 2117 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2118 comp_rd_index)); 2119 2120 if ((bytes_to_pickup != -1) && 2121 (totallen >= bytes_to_pickup)) { 2122 break; 2123 } 2124 } 2125 2126 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2127 rcr_p->comp_rd_index = comp_rd_index; 2128 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2129 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2130 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2131 2132 rcr_p->intr_timeout = (nxgep->intr_timeout < 2133 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2134 nxgep->intr_timeout; 2135 2136 rcr_p->intr_threshold = (nxgep->intr_threshold < 2137 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2138 nxgep->intr_threshold; 2139 2140 rcr_cfg_b.value = 0x0ULL; 2141 rcr_cfg_b.bits.ldw.entout = 1; 2142 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2143 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2144 2145 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2146 channel, rcr_cfg_b.value); 2147 } 2148 2149 cs.bits.ldw.pktread = npkt_read; 2150 cs.bits.ldw.ptrread = nrcr_read; 2151 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2152 channel, cs.value); 2153 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2154 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2155 "head_pp $%p index %016llx ", 2156 channel, 2157 rcr_p->rcr_desc_rd_head_pp, 2158 rcr_p->comp_rd_index)); 2159 /* 2160 * Update RCR buffer pointer read and number of packets 2161 * read. 
2162 */ 2163 2164 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2165 "channel %d", rcr_p->rdc)); 2166 2167 return (head_mp); 2168 } 2169 2170 void 2171 nxge_receive_packet(p_nxge_t nxgep, 2172 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2173 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2174 { 2175 p_mblk_t nmp = NULL; 2176 uint64_t multi; 2177 uint64_t dcf_err; 2178 uint8_t channel; 2179 2180 boolean_t first_entry = B_TRUE; 2181 boolean_t is_tcp_udp = B_FALSE; 2182 boolean_t buffer_free = B_FALSE; 2183 boolean_t error_send_up = B_FALSE; 2184 uint8_t error_type; 2185 uint16_t l2_len; 2186 uint16_t skip_len; 2187 uint8_t pktbufsz_type; 2188 uint64_t rcr_entry; 2189 uint64_t *pkt_buf_addr_pp; 2190 uint64_t *pkt_buf_addr_p; 2191 uint32_t buf_offset; 2192 uint32_t bsize; 2193 uint32_t error_disp_cnt; 2194 uint32_t msg_index; 2195 p_rx_rbr_ring_t rx_rbr_p; 2196 p_rx_msg_t *rx_msg_ring_p; 2197 p_rx_msg_t rx_msg_p; 2198 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2199 nxge_status_t status = NXGE_OK; 2200 boolean_t is_valid = B_FALSE; 2201 p_nxge_rx_ring_stats_t rdc_stats; 2202 uint32_t bytes_read; 2203 uint64_t pkt_type; 2204 uint64_t frag; 2205 boolean_t pkt_too_long_err = B_FALSE; 2206 #ifdef NXGE_DEBUG 2207 int dump_len; 2208 #endif 2209 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2210 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2211 2212 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2213 2214 multi = (rcr_entry & RCR_MULTI_MASK); 2215 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2216 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2217 2218 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2219 frag = (rcr_entry & RCR_FRAG_MASK); 2220 2221 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2222 2223 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2224 RCR_PKTBUFSZ_SHIFT); 2225 #if defined(__i386) 2226 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2227 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2228 #else 2229 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2230 RCR_PKT_BUF_ADDR_SHIFT); 2231 #endif 2232 2233 channel = rcr_p->rdc; 2234 2235 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2236 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2237 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2238 "error_type 0x%x pkt_type 0x%x " 2239 "pktbufsz_type %d ", 2240 rcr_desc_rd_head_p, 2241 rcr_entry, pkt_buf_addr_pp, l2_len, 2242 multi, 2243 error_type, 2244 pkt_type, 2245 pktbufsz_type)); 2246 2247 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2248 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2249 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2250 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2251 rcr_entry, pkt_buf_addr_pp, l2_len, 2252 multi, 2253 error_type, 2254 pkt_type)); 2255 2256 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2257 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2258 "full pkt_buf_addr_pp $%p l2_len %d", 2259 rcr_entry, pkt_buf_addr_pp, l2_len)); 2260 2261 /* get the stats ptr */ 2262 rdc_stats = rcr_p->rdc_stats; 2263 2264 if (!l2_len) { 2265 2266 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2267 "<== nxge_receive_packet: failed: l2 length is 0.")); 2268 return; 2269 } 2270 2271 /* 2272 * Software workaround for BMAC hardware limitation that allows 2273 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2274 * instead of 0x2400 for jumbo. 
2275 */ 2276 if (l2_len > nxgep->mac.maxframesize) { 2277 pkt_too_long_err = B_TRUE; 2278 } 2279 2280 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2281 l2_len -= ETHERFCSL; 2282 2283 /* shift 6 bits to get the full io address */ 2284 #if defined(__i386) 2285 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2286 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2287 #else 2288 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2289 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2290 #endif 2291 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2292 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2293 "full pkt_buf_addr_pp $%p l2_len %d", 2294 rcr_entry, pkt_buf_addr_pp, l2_len)); 2295 2296 rx_rbr_p = rcr_p->rx_rbr_p; 2297 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2298 2299 if (first_entry) { 2300 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2301 RXDMA_HDR_SIZE_DEFAULT); 2302 2303 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2304 "==> nxge_receive_packet: first entry 0x%016llx " 2305 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2306 rcr_entry, pkt_buf_addr_pp, l2_len, 2307 hdr_size)); 2308 } 2309 2310 MUTEX_ENTER(&rx_rbr_p->lock); 2311 2312 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2313 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2314 "full pkt_buf_addr_pp $%p l2_len %d", 2315 rcr_entry, pkt_buf_addr_pp, l2_len)); 2316 2317 /* 2318 * Packet buffer address in the completion entry points 2319 * to the starting buffer address (offset 0). 2320 * Use the starting buffer address to locate the corresponding 2321 * kernel address. 2322 */ 2323 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2324 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2325 &buf_offset, 2326 &msg_index); 2327 2328 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2329 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2330 "full pkt_buf_addr_pp $%p l2_len %d", 2331 rcr_entry, pkt_buf_addr_pp, l2_len)); 2332 2333 if (status != NXGE_OK) { 2334 MUTEX_EXIT(&rx_rbr_p->lock); 2335 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2336 "<== nxge_receive_packet: found vaddr failed %d", 2337 status)); 2338 return; 2339 } 2340 2341 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2342 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2343 "full pkt_buf_addr_pp $%p l2_len %d", 2344 rcr_entry, pkt_buf_addr_pp, l2_len)); 2345 2346 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2347 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2348 "full pkt_buf_addr_pp $%p l2_len %d", 2349 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2350 2351 rx_msg_p = rx_msg_ring_p[msg_index]; 2352 2353 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2354 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2355 "full pkt_buf_addr_pp $%p l2_len %d", 2356 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2357 2358 switch (pktbufsz_type) { 2359 case RCR_PKTBUFSZ_0: 2360 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2361 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2362 "==> nxge_receive_packet: 0 buf %d", bsize)); 2363 break; 2364 case RCR_PKTBUFSZ_1: 2365 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2366 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2367 "==> nxge_receive_packet: 1 buf %d", bsize)); 2368 break; 2369 case RCR_PKTBUFSZ_2: 2370 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2371 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2372 "==> nxge_receive_packet: 2 buf %d", bsize)); 2373 break; 2374 case RCR_SINGLE_BLOCK: 2375 bsize = rx_msg_p->block_size; 2376 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2377 "==> nxge_receive_packet: single %d", bsize)); 2378 2379 break; 2380 default: 2381 MUTEX_EXIT(&rx_rbr_p->lock); 2382 return; 2383 } 2384 2385 switch (nxge_rdc_buf_offset) { 2386 case SW_OFFSET_NO_OFFSET: 2387 
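	/*
	 * This switch maps the nxge_rdc_buf_offset tunable to a
	 * software offset in 64-byte steps (0 through 448 bytes),
	 * falling back to no offset for unrecognized values.  A
	 * hypothetical table-driven equivalent (illustration only,
	 * and it assumes the SW_OFFSET_* constants are the
	 * consecutive values 0 through 7) would be:
	 *
	 *	static const uint16_t sw_off_tab[] = {
	 *		0, 64, 128, 192, 256, 320, 384, 448
	 *	};
	 *	sw_offset_bytes = (nxge_rdc_buf_offset <
	 *	    sizeof (sw_off_tab) / sizeof (sw_off_tab[0])) ?
	 *	    sw_off_tab[nxge_rdc_buf_offset] : 0;
	 */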
sw_offset_bytes = 0; 2388 break; 2389 case SW_OFFSET_64: 2390 sw_offset_bytes = 64; 2391 break; 2392 case SW_OFFSET_128: 2393 sw_offset_bytes = 128; 2394 break; 2395 case SW_OFFSET_192: 2396 sw_offset_bytes = 192; 2397 break; 2398 case SW_OFFSET_256: 2399 sw_offset_bytes = 256; 2400 break; 2401 case SW_OFFSET_320: 2402 sw_offset_bytes = 320; 2403 break; 2404 case SW_OFFSET_384: 2405 sw_offset_bytes = 384; 2406 break; 2407 case SW_OFFSET_448: 2408 sw_offset_bytes = 448; 2409 break; 2410 default: 2411 sw_offset_bytes = 0; 2412 break; 2413 } 2414 2415 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2416 (buf_offset + sw_offset_bytes), 2417 (hdr_size + l2_len), 2418 DDI_DMA_SYNC_FORCPU); 2419 2420 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2421 "==> nxge_receive_packet: after first dump:usage count")); 2422 2423 if (rx_msg_p->cur_usage_cnt == 0) { 2424 if (rx_rbr_p->rbr_use_bcopy) { 2425 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2426 if (rx_rbr_p->rbr_consumed < 2427 rx_rbr_p->rbr_threshold_hi) { 2428 if (rx_rbr_p->rbr_threshold_lo == 0 || 2429 ((rx_rbr_p->rbr_consumed >= 2430 rx_rbr_p->rbr_threshold_lo) && 2431 (rx_rbr_p->rbr_bufsize_type >= 2432 pktbufsz_type))) { 2433 rx_msg_p->rx_use_bcopy = B_TRUE; 2434 } 2435 } else { 2436 rx_msg_p->rx_use_bcopy = B_TRUE; 2437 } 2438 } 2439 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2440 "==> nxge_receive_packet: buf %d (new block) ", 2441 bsize)); 2442 2443 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2444 rx_msg_p->pkt_buf_size = bsize; 2445 rx_msg_p->cur_usage_cnt = 1; 2446 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2447 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2448 "==> nxge_receive_packet: buf %d " 2449 "(single block) ", 2450 bsize)); 2451 /* 2452 * Buffer can be reused once the free function 2453 * is called. 2454 */ 2455 rx_msg_p->max_usage_cnt = 1; 2456 buffer_free = B_TRUE; 2457 } else { 2458 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2459 if (rx_msg_p->max_usage_cnt == 1) { 2460 buffer_free = B_TRUE; 2461 } 2462 } 2463 } else { 2464 rx_msg_p->cur_usage_cnt++; 2465 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2466 buffer_free = B_TRUE; 2467 } 2468 } 2469 2470 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2471 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2472 msg_index, l2_len, 2473 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2474 2475 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2476 rdc_stats->ierrors++; 2477 if (dcf_err) { 2478 rdc_stats->dcf_err++; 2479 #ifdef NXGE_DEBUG 2480 if (!rdc_stats->dcf_err) { 2481 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2482 "nxge_receive_packet: channel %d dcf_err rcr" 2483 " 0x%llx", channel, rcr_entry)); 2484 } 2485 #endif 2486 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0, 2487 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2488 } else if (pkt_too_long_err) { 2489 rdc_stats->pkt_too_long_err++; 2490 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2491 " channel %d packet length [%d] > " 2492 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2493 nxgep->mac.maxframesize)); 2494 } else { 2495 /* Update error stats */ 2496 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2497 rdc_stats->errlog.compl_err_type = error_type; 2498 2499 switch (error_type) { 2500 /* 2501 * Do not send FMA ereport for RCR_L2_ERROR and 2502 * RCR_L4_CSUM_ERROR because most likely they indicate 2503 * back pressure rather than HW failures. 
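 *
 * Each of the cases below follows the same pattern: bump the
 * per-channel kstat, then log at most NXGE_ERROR_SHOW_MAX
 * (error_disp_cnt) messages so a storm of identical completion
 * errors cannot flood the console.  In sketch form:
 *
 *	rdc_stats->some_err++;
 *	if (rdc_stats->some_err < error_disp_cnt)
 *		NXGE_ERROR_MSG((...));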
2504 */ 2505 case RCR_L2_ERROR: 2506 rdc_stats->l2_err++; 2507 if (rdc_stats->l2_err < 2508 error_disp_cnt) { 2509 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2510 " nxge_receive_packet:" 2511 " channel %d RCR L2_ERROR", 2512 channel)); 2513 } 2514 break; 2515 case RCR_L4_CSUM_ERROR: 2516 error_send_up = B_TRUE; 2517 rdc_stats->l4_cksum_err++; 2518 if (rdc_stats->l4_cksum_err < 2519 error_disp_cnt) { 2520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2521 " nxge_receive_packet:" 2522 " channel %d" 2523 " RCR L4_CSUM_ERROR", channel)); 2524 } 2525 break; 2526 /* 2527 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2528 * RCR_ZCP_SOFT_ERROR because they reflect the same 2529 * FFLP and ZCP errors that have been reported by 2530 * nxge_fflp.c and nxge_zcp.c. 2531 */ 2532 case RCR_FFLP_SOFT_ERROR: 2533 error_send_up = B_TRUE; 2534 rdc_stats->fflp_soft_err++; 2535 if (rdc_stats->fflp_soft_err < 2536 error_disp_cnt) { 2537 NXGE_ERROR_MSG((nxgep, 2538 NXGE_ERR_CTL, 2539 " nxge_receive_packet:" 2540 " channel %d" 2541 " RCR FFLP_SOFT_ERROR", channel)); 2542 } 2543 break; 2544 case RCR_ZCP_SOFT_ERROR: 2545 error_send_up = B_TRUE; 2546 rdc_stats->fflp_soft_err++; 2547 if (rdc_stats->zcp_soft_err < 2548 error_disp_cnt) 2549 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2550 " nxge_receive_packet: Channel %d" 2551 " RCR ZCP_SOFT_ERROR", channel)); 2552 break; 2553 default: 2554 rdc_stats->rcr_unknown_err++; 2555 if (rdc_stats->rcr_unknown_err 2556 < error_disp_cnt) { 2557 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2558 " nxge_receive_packet: Channel %d" 2559 " RCR entry 0x%llx error 0x%x", 2560 rcr_entry, channel, error_type)); 2561 } 2562 break; 2563 } 2564 } 2565 2566 /* 2567 * Update and repost buffer block if max usage 2568 * count is reached. 2569 */ 2570 if (error_send_up == B_FALSE) { 2571 atomic_inc_32(&rx_msg_p->ref_cnt); 2572 if (buffer_free == B_TRUE) { 2573 rx_msg_p->free = B_TRUE; 2574 } 2575 2576 MUTEX_EXIT(&rx_rbr_p->lock); 2577 nxge_freeb(rx_msg_p); 2578 return; 2579 } 2580 } 2581 2582 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2583 "==> nxge_receive_packet: DMA sync second ")); 2584 2585 bytes_read = rcr_p->rcvd_pkt_bytes; 2586 skip_len = sw_offset_bytes + hdr_size; 2587 if (!rx_msg_p->rx_use_bcopy) { 2588 /* 2589 * For loaned up buffers, the driver reference count 2590 * will be incremented first and then the free state. 2591 */ 2592 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2593 if (first_entry) { 2594 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2595 if (l2_len < bsize - skip_len) { 2596 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2597 } else { 2598 nmp->b_wptr = &nmp->b_rptr[bsize 2599 - skip_len]; 2600 } 2601 } else { 2602 if (l2_len - bytes_read < bsize) { 2603 nmp->b_wptr = 2604 &nmp->b_rptr[l2_len - bytes_read]; 2605 } else { 2606 nmp->b_wptr = &nmp->b_rptr[bsize]; 2607 } 2608 } 2609 } 2610 } else { 2611 if (first_entry) { 2612 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2613 l2_len < bsize - skip_len ? 2614 l2_len : bsize - skip_len); 2615 } else { 2616 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2617 l2_len - bytes_read < bsize ? 2618 l2_len - bytes_read : bsize); 2619 } 2620 } 2621 if (nmp != NULL) { 2622 if (first_entry) { 2623 /* 2624 * Jumbo packets may be received with more than one 2625 * buffer, increment ipackets for the first entry only. 2626 */ 2627 rdc_stats->ipackets++; 2628 2629 /* Update ibytes for kstat. */ 2630 rdc_stats->ibytes += skip_len 2631 + l2_len < bsize ? l2_len : bsize; 2632 /* 2633 * Update the number of bytes read so far for the 2634 * current frame. 
2635 */ 2636 bytes_read = nmp->b_wptr - nmp->b_rptr; 2637 } else { 2638 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 2639 l2_len - bytes_read : bsize; 2640 bytes_read += nmp->b_wptr - nmp->b_rptr; 2641 } 2642 2643 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2644 "==> nxge_receive_packet after dupb: " 2645 "rbr consumed %d " 2646 "pktbufsz_type %d " 2647 "nmp $%p rptr $%p wptr $%p " 2648 "buf_offset %d bzise %d l2_len %d skip_len %d", 2649 rx_rbr_p->rbr_consumed, 2650 pktbufsz_type, 2651 nmp, nmp->b_rptr, nmp->b_wptr, 2652 buf_offset, bsize, l2_len, skip_len)); 2653 } else { 2654 cmn_err(CE_WARN, "!nxge_receive_packet: " 2655 "update stats (error)"); 2656 atomic_inc_32(&rx_msg_p->ref_cnt); 2657 if (buffer_free == B_TRUE) { 2658 rx_msg_p->free = B_TRUE; 2659 } 2660 MUTEX_EXIT(&rx_rbr_p->lock); 2661 nxge_freeb(rx_msg_p); 2662 return; 2663 } 2664 2665 if (buffer_free == B_TRUE) { 2666 rx_msg_p->free = B_TRUE; 2667 } 2668 2669 is_valid = (nmp != NULL); 2670 2671 rcr_p->rcvd_pkt_bytes = bytes_read; 2672 2673 MUTEX_EXIT(&rx_rbr_p->lock); 2674 2675 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2676 atomic_inc_32(&rx_msg_p->ref_cnt); 2677 nxge_freeb(rx_msg_p); 2678 } 2679 2680 if (is_valid) { 2681 nmp->b_cont = NULL; 2682 if (first_entry) { 2683 *mp = nmp; 2684 *mp_cont = NULL; 2685 } else { 2686 *mp_cont = nmp; 2687 } 2688 } 2689 2690 /* 2691 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2692 * If a packet is not fragmented and no error bit is set, then 2693 * L4 checksum is OK. 2694 */ 2695 2696 if (is_valid && !multi) { 2697 /* 2698 * If the checksum flag nxge_chksum_offload 2699 * is 1, TCP and UDP packets can be sent 2700 * up with good checksum. If the checksum flag 2701 * is set to 0, checksum reporting will apply to 2702 * TCP packets only (workaround for a hardware bug). 2703 * If the checksum flag nxge_cksum_offload is 2704 * greater than 1, both TCP and UDP packets 2705 * will not be reported its hardware checksum results. 2706 */ 2707 if (nxge_cksum_offload == 1) { 2708 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2709 pkt_type == RCR_PKT_IS_UDP) ? 2710 B_TRUE: B_FALSE); 2711 } else if (!nxge_cksum_offload) { 2712 /* TCP checksum only. */ 2713 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2714 B_TRUE: B_FALSE); 2715 } 2716 2717 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2718 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2719 is_valid, multi, is_tcp_udp, frag, error_type)); 2720 2721 if (is_tcp_udp && !frag && !error_type) { 2722 mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK); 2723 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2724 "==> nxge_receive_packet: Full tcp/udp cksum " 2725 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2726 "error %d", 2727 is_valid, multi, is_tcp_udp, frag, error_type)); 2728 } 2729 } 2730 2731 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2732 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2733 2734 *multi_p = (multi == RCR_MULTI_MASK); 2735 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2736 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2737 *multi_p, nmp, *mp, *mp_cont)); 2738 } 2739 2740 /* 2741 * Enable polling for a ring. Interrupt for the ring is disabled when 2742 * the nxge interrupt comes (see nxge_rx_intr). 
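 *
 * Together with nxge_disable_poll() and nxge_rx_poll() below, this
 * implements the poll/interrupt handoff for an RCR ring.  A rough
 * sketch of the sequence (it is driven by the MAC layer through the
 * ring handle; the exact framework entry points are not shown here):
 *
 *	nxge_enable_poll(rhp);		sets poll_flag
 *	  nxge_rx_intr()		sees poll_flag, disarms the LDG
 *	  nxge_rx_poll(rhp, nbytes);	drains up to nbytes from the RCR
 *	nxge_disable_poll(rhp);		clears poll_flag, re-arms the LDG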
2743 */ 2744 int 2745 nxge_enable_poll(void *arg) 2746 { 2747 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2748 p_rx_rcr_ring_t ringp; 2749 p_nxge_t nxgep; 2750 p_nxge_ldg_t ldgp; 2751 uint32_t channel; 2752 2753 if (ring_handle == NULL) { 2754 ASSERT(ring_handle != NULL); 2755 return (0); 2756 } 2757 2758 nxgep = ring_handle->nxgep; 2759 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2760 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2761 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2762 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2763 ldgp = ringp->ldgp; 2764 if (ldgp == NULL) { 2765 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2766 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2767 ringp->rdc)); 2768 return (0); 2769 } 2770 2771 MUTEX_ENTER(&ringp->lock); 2772 /* enable polling */ 2773 if (ringp->poll_flag == 0) { 2774 ringp->poll_flag = 1; 2775 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2776 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2777 ringp->rdc)); 2778 } 2779 2780 MUTEX_EXIT(&ringp->lock); 2781 return (0); 2782 } 2783 /* 2784 * Disable polling for a ring and enable its interrupt. 2785 */ 2786 int 2787 nxge_disable_poll(void *arg) 2788 { 2789 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2790 p_rx_rcr_ring_t ringp; 2791 p_nxge_t nxgep; 2792 uint32_t channel; 2793 2794 if (ring_handle == NULL) { 2795 ASSERT(ring_handle != NULL); 2796 return (0); 2797 } 2798 2799 nxgep = ring_handle->nxgep; 2800 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2801 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2802 2803 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2804 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2805 2806 MUTEX_ENTER(&ringp->lock); 2807 2808 /* disable polling: enable interrupt */ 2809 if (ringp->poll_flag) { 2810 npi_handle_t handle; 2811 rx_dma_ctl_stat_t cs; 2812 uint8_t channel; 2813 p_nxge_ldg_t ldgp; 2814 2815 /* 2816 * Get the control and status for this channel. 2817 */ 2818 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2819 channel = ringp->rdc; 2820 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2821 channel, &cs.value); 2822 2823 /* 2824 * Enable mailbox update 2825 * Since packets were not read and the hardware uses 2826 * bits pktread and ptrread to update the queue 2827 * length, we need to set both bits to 0. 2828 */ 2829 cs.bits.ldw.pktread = 0; 2830 cs.bits.ldw.ptrread = 0; 2831 cs.bits.hdw.mex = 1; 2832 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2833 cs.value); 2834 2835 /* 2836 * Rearm this logical group if this is a single device 2837 * group. 2838 */ 2839 ldgp = ringp->ldgp; 2840 if (ldgp == NULL) { 2841 ringp->poll_flag = 0; 2842 MUTEX_EXIT(&ringp->lock); 2843 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2844 "==> nxge_disable_poll: no ldgp rdc %d " 2845 "(still set poll to 0", ringp->rdc)); 2846 return (0); 2847 } 2848 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2849 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2850 ringp->rdc, ldgp)); 2851 if (ldgp->nldvs == 1) { 2852 if (isLDOMguest(nxgep)) { 2853 ldgp->arm = B_TRUE; 2854 nxge_hio_ldgimgn(nxgep, ldgp); 2855 } else { 2856 ldgimgm_t mgm; 2857 mgm.value = 0; 2858 mgm.bits.ldw.arm = 1; 2859 mgm.bits.ldw.timer = ldgp->ldg_timer; 2860 NXGE_REG_WR64(handle, 2861 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 2862 mgm.value); 2863 } 2864 } 2865 ringp->poll_flag = 0; 2866 } 2867 2868 MUTEX_EXIT(&ringp->lock); 2869 return (0); 2870 } 2871 2872 /* 2873 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
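 *
 * The ring must already be in polling mode (poll_flag set by
 * nxge_enable_poll()); the ASSERT below enforces that.  A minimal,
 * hypothetical caller-side sketch (illustration only, not a real
 * consumer in this file):
 *
 *	mblk_t *chain;
 *
 *	(void) nxge_enable_poll(ring_handle);
 *	while ((chain = nxge_rx_poll(ring_handle, 128 * 1024)) != NULL)
 *		consume(chain);		(hypothetical consumer)
 *	(void) nxge_disable_poll(ring_handle);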
2874 */ 2875 mblk_t * 2876 nxge_rx_poll(void *arg, int bytes_to_pickup) 2877 { 2878 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2879 p_rx_rcr_ring_t rcr_p; 2880 p_nxge_t nxgep; 2881 npi_handle_t handle; 2882 rx_dma_ctl_stat_t cs; 2883 mblk_t *mblk; 2884 p_nxge_ldv_t ldvp; 2885 uint32_t channel; 2886 2887 nxgep = ring_handle->nxgep; 2888 2889 /* 2890 * Get the control and status for this channel. 2891 */ 2892 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2893 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2894 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2895 MUTEX_ENTER(&rcr_p->lock); 2896 ASSERT(rcr_p->poll_flag == 1); 2897 2898 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2899 2900 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2901 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2902 rcr_p->rdc, rcr_p->poll_flag)); 2903 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2904 2905 ldvp = rcr_p->ldvp; 2906 /* error events. */ 2907 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2908 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2909 } 2910 2911 MUTEX_EXIT(&rcr_p->lock); 2912 2913 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2914 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 2915 return (mblk); 2916 } 2917 2918 2919 /*ARGSUSED*/ 2920 static nxge_status_t 2921 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2922 { 2923 p_nxge_rx_ring_stats_t rdc_stats; 2924 npi_handle_t handle; 2925 npi_status_t rs; 2926 boolean_t rxchan_fatal = B_FALSE; 2927 boolean_t rxport_fatal = B_FALSE; 2928 uint8_t portn; 2929 nxge_status_t status = NXGE_OK; 2930 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2931 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2932 2933 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2934 portn = nxgep->mac.portnum; 2935 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2936 2937 if (cs.bits.hdw.rbr_tmout) { 2938 rdc_stats->rx_rbr_tmout++; 2939 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2940 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2941 rxchan_fatal = B_TRUE; 2942 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2943 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2944 } 2945 if (cs.bits.hdw.rsp_cnt_err) { 2946 rdc_stats->rsp_cnt_err++; 2947 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2948 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2949 rxchan_fatal = B_TRUE; 2950 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2951 "==> nxge_rx_err_evnts(channel %d): " 2952 "rsp_cnt_err", channel)); 2953 } 2954 if (cs.bits.hdw.byte_en_bus) { 2955 rdc_stats->byte_en_bus++; 2956 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2957 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2958 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2959 "==> nxge_rx_err_evnts(channel %d): " 2960 "fatal error: byte_en_bus", channel)); 2961 rxchan_fatal = B_TRUE; 2962 } 2963 if (cs.bits.hdw.rsp_dat_err) { 2964 rdc_stats->rsp_dat_err++; 2965 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2966 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2967 rxchan_fatal = B_TRUE; 2968 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2969 "==> nxge_rx_err_evnts(channel %d): " 2970 "fatal error: rsp_dat_err", channel)); 2971 } 2972 if (cs.bits.hdw.rcr_ack_err) { 2973 rdc_stats->rcr_ack_err++; 2974 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2975 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2976 rxchan_fatal = B_TRUE; 2977 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2978 "==> nxge_rx_err_evnts(channel %d): " 2979 "fatal error: rcr_ack_err", channel)); 2980 } 2981 if (cs.bits.hdw.dc_fifo_err) { 2982 rdc_stats->dc_fifo_err++; 2983 
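		/*
		 * Unlike most conditions in this routine, a DC FIFO
		 * error is handled as a port-level problem rather than
		 * a channel-level one: rxport_fatal (not rxchan_fatal)
		 * is set just below, which later triggers
		 * nxge_ipp_fatal_err_recover() for the whole port.
		 * The existing "not a fatal error" note presumably
		 * refers to the individual channel.
		 */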
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2984 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2985 /* This is not a fatal error! */ 2986 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2987 "==> nxge_rx_err_evnts(channel %d): " 2988 "dc_fifo_err", channel)); 2989 rxport_fatal = B_TRUE; 2990 } 2991 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2992 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2993 &rdc_stats->errlog.pre_par, 2994 &rdc_stats->errlog.sha_par)) 2995 != NPI_SUCCESS) { 2996 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2997 "==> nxge_rx_err_evnts(channel %d): " 2998 "rcr_sha_par: get perr", channel)); 2999 return (NXGE_ERROR | rs); 3000 } 3001 if (cs.bits.hdw.rcr_sha_par) { 3002 rdc_stats->rcr_sha_par++; 3003 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3004 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 3005 rxchan_fatal = B_TRUE; 3006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3007 "==> nxge_rx_err_evnts(channel %d): " 3008 "fatal error: rcr_sha_par", channel)); 3009 } 3010 if (cs.bits.hdw.rbr_pre_par) { 3011 rdc_stats->rbr_pre_par++; 3012 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3013 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 3014 rxchan_fatal = B_TRUE; 3015 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3016 "==> nxge_rx_err_evnts(channel %d): " 3017 "fatal error: rbr_pre_par", channel)); 3018 } 3019 } 3020 /* 3021 * The Following 4 status bits are for information, the system 3022 * is running fine. There is no need to send FMA ereports or 3023 * log messages. 3024 */ 3025 if (cs.bits.hdw.port_drop_pkt) { 3026 rdc_stats->port_drop_pkt++; 3027 } 3028 if (cs.bits.hdw.wred_drop) { 3029 rdc_stats->wred_drop++; 3030 } 3031 if (cs.bits.hdw.rbr_pre_empty) { 3032 rdc_stats->rbr_pre_empty++; 3033 } 3034 if (cs.bits.hdw.rcr_shadow_full) { 3035 rdc_stats->rcr_shadow_full++; 3036 } 3037 if (cs.bits.hdw.config_err) { 3038 rdc_stats->config_err++; 3039 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3040 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3041 rxchan_fatal = B_TRUE; 3042 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3043 "==> nxge_rx_err_evnts(channel %d): " 3044 "config error", channel)); 3045 } 3046 if (cs.bits.hdw.rcrincon) { 3047 rdc_stats->rcrincon++; 3048 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3049 NXGE_FM_EREPORT_RDMC_RCRINCON); 3050 rxchan_fatal = B_TRUE; 3051 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3052 "==> nxge_rx_err_evnts(channel %d): " 3053 "fatal error: rcrincon error", channel)); 3054 } 3055 if (cs.bits.hdw.rcrfull) { 3056 rdc_stats->rcrfull++; 3057 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3058 NXGE_FM_EREPORT_RDMC_RCRFULL); 3059 rxchan_fatal = B_TRUE; 3060 if (rdc_stats->rcrfull < error_disp_cnt) { 3061 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3062 "==> nxge_rx_err_evnts(channel %d): " 3063 "fatal error: rcrfull error", channel)); 3064 } 3065 } 3066 if (cs.bits.hdw.rbr_empty) { 3067 /* 3068 * This bit is for information, there is no need 3069 * send FMA ereport or log a message. 
3070 */ 3071 rdc_stats->rbr_empty++; 3072 } 3073 if (cs.bits.hdw.rbrfull) { 3074 rdc_stats->rbrfull++; 3075 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3076 NXGE_FM_EREPORT_RDMC_RBRFULL); 3077 rxchan_fatal = B_TRUE; 3078 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3079 "==> nxge_rx_err_evnts(channel %d): " 3080 "fatal error: rbr_full error", channel)); 3081 } 3082 if (cs.bits.hdw.rbrlogpage) { 3083 rdc_stats->rbrlogpage++; 3084 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3085 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3086 rxchan_fatal = B_TRUE; 3087 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3088 "==> nxge_rx_err_evnts(channel %d): " 3089 "fatal error: rbr logical page error", channel)); 3090 } 3091 if (cs.bits.hdw.cfiglogpage) { 3092 rdc_stats->cfiglogpage++; 3093 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3094 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3095 rxchan_fatal = B_TRUE; 3096 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3097 "==> nxge_rx_err_evnts(channel %d): " 3098 "fatal error: cfig logical page error", channel)); 3099 } 3100 3101 if (rxport_fatal) { 3102 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3103 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3104 portn)); 3105 if (isLDOMguest(nxgep)) { 3106 status = NXGE_ERROR; 3107 } else { 3108 status = nxge_ipp_fatal_err_recover(nxgep); 3109 if (status == NXGE_OK) { 3110 FM_SERVICE_RESTORED(nxgep); 3111 } 3112 } 3113 } 3114 3115 if (rxchan_fatal) { 3116 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3117 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3118 channel)); 3119 if (isLDOMguest(nxgep)) { 3120 status = NXGE_ERROR; 3121 } else { 3122 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3123 if (status == NXGE_OK) { 3124 FM_SERVICE_RESTORED(nxgep); 3125 } 3126 } 3127 } 3128 3129 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3130 3131 return (status); 3132 } 3133 3134 /* 3135 * nxge_rdc_hvio_setup 3136 * 3137 * This code appears to setup some Hypervisor variables. 3138 * 3139 * Arguments: 3140 * nxgep 3141 * channel 3142 * 3143 * Notes: 3144 * What does NIU_LP_WORKAROUND mean? 
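 *	(An educated guess, not a statement of the original intent:
 *	"LP" most likely refers to the NIU "logical page" mechanism.
 *	The routine records the original IO addresses and sizes of the
 *	channel's data-buffer and control areas so that, on sun4v, the
 *	RDC's logical page layout can later be programmed through the
 *	hypervisor.)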
3145 * 3146 * NPI/NXGE function calls: 3147 * na 3148 * 3149 * Context: 3150 * Any domain 3151 */ 3152 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3153 static void 3154 nxge_rdc_hvio_setup( 3155 nxge_t *nxgep, int channel) 3156 { 3157 nxge_dma_common_t *dma_common; 3158 nxge_dma_common_t *dma_control; 3159 rx_rbr_ring_t *ring; 3160 3161 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3162 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3163 3164 ring->hv_set = B_FALSE; 3165 3166 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3167 dma_common->orig_ioaddr_pp; 3168 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3169 dma_common->orig_alength; 3170 3171 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3172 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3173 channel, ring->hv_rx_buf_base_ioaddr_pp, 3174 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3175 dma_common->orig_alength, dma_common->orig_alength)); 3176 3177 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3178 3179 ring->hv_rx_cntl_base_ioaddr_pp = 3180 (uint64_t)dma_control->orig_ioaddr_pp; 3181 ring->hv_rx_cntl_ioaddr_size = 3182 (uint64_t)dma_control->orig_alength; 3183 3184 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3185 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3186 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3187 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3188 dma_control->orig_alength, dma_control->orig_alength)); 3189 } 3190 #endif 3191 3192 /* 3193 * nxge_map_rxdma 3194 * 3195 * Map an RDC into our kernel space. 3196 * 3197 * Arguments: 3198 * nxgep 3199 * channel The channel to map. 3200 * 3201 * Notes: 3202 * 1. Allocate & initialise a memory pool, if necessary. 3203 * 2. Allocate however many receive buffers are required. 3204 * 3. Setup buffers, descriptors, and mailbox. 3205 * 3206 * NPI/NXGE function calls: 3207 * nxge_alloc_rx_mem_pool() 3208 * nxge_alloc_rbb() 3209 * nxge_map_rxdma_channel() 3210 * 3211 * Registers accessed: 3212 * 3213 * Context: 3214 * Any domain 3215 */ 3216 static nxge_status_t 3217 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3218 { 3219 nxge_dma_common_t **data; 3220 nxge_dma_common_t **control; 3221 rx_rbr_ring_t **rbr_ring; 3222 rx_rcr_ring_t **rcr_ring; 3223 rx_mbox_t **mailbox; 3224 uint32_t chunks; 3225 3226 nxge_status_t status; 3227 3228 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3229 3230 if (!nxgep->rx_buf_pool_p) { 3231 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3232 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3233 "<== nxge_map_rxdma: buf not allocated")); 3234 return (NXGE_ERROR); 3235 } 3236 } 3237 3238 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3239 return (NXGE_ERROR); 3240 3241 /* 3242 * Map descriptors from the buffer polls for each dma channel. 3243 */ 3244 3245 /* 3246 * Set up and prepare buffer blocks, descriptors 3247 * and mailbox. 
3248 */ 3249 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3250 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3251 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3252 3253 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3254 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3255 3256 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3257 3258 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3259 chunks, control, rcr_ring, mailbox); 3260 if (status != NXGE_OK) { 3261 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3262 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3263 "returned 0x%x", 3264 channel, status)); 3265 return (status); 3266 } 3267 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3268 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3269 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3270 &nxgep->statsp->rdc_stats[channel]; 3271 3272 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3273 if (!isLDOMguest(nxgep)) 3274 nxge_rdc_hvio_setup(nxgep, channel); 3275 #endif 3276 3277 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3278 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3279 3280 return (status); 3281 } 3282 3283 static void 3284 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3285 { 3286 rx_rbr_ring_t *rbr_ring; 3287 rx_rcr_ring_t *rcr_ring; 3288 rx_mbox_t *mailbox; 3289 3290 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3291 3292 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3293 !nxgep->rx_mbox_areas_p) 3294 return; 3295 3296 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3297 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3298 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3299 3300 if (!rbr_ring || !rcr_ring || !mailbox) 3301 return; 3302 3303 (void) nxge_unmap_rxdma_channel( 3304 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3305 3306 nxge_free_rxb(nxgep, channel); 3307 3308 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3309 } 3310 3311 nxge_status_t 3312 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3313 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3314 uint32_t num_chunks, 3315 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3316 p_rx_mbox_t *rx_mbox_p) 3317 { 3318 int status = NXGE_OK; 3319 3320 /* 3321 * Set up and prepare buffer blocks, descriptors 3322 * and mailbox. 3323 */ 3324 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3325 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3326 /* 3327 * Receive buffer blocks 3328 */ 3329 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3330 dma_buf_p, rbr_p, num_chunks); 3331 if (status != NXGE_OK) { 3332 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3333 "==> nxge_map_rxdma_channel (channel %d): " 3334 "map buffer failed 0x%x", channel, status)); 3335 goto nxge_map_rxdma_channel_exit; 3336 } 3337 3338 /* 3339 * Receive block ring, completion ring and mailbox. 
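 *
 * On failure, the labels further down unwind in reverse order of the
 * setup above: _fail2 releases the buffer ring mapped in the first
 * step, while _fail3 would additionally release the RCR/mailbox
 * state.  As the function reads today nothing jumps to _fail3; it
 * only becomes reachable if another mapping step is added after
 * this one.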
3340 */ 3341 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3342 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3343 if (status != NXGE_OK) { 3344 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3345 "==> nxge_map_rxdma_channel (channel %d): " 3346 "map config failed 0x%x", channel, status)); 3347 goto nxge_map_rxdma_channel_fail2; 3348 } 3349 3350 goto nxge_map_rxdma_channel_exit; 3351 3352 nxge_map_rxdma_channel_fail3: 3353 /* Free rbr, rcr */ 3354 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3355 "==> nxge_map_rxdma_channel: free rbr/rcr " 3356 "(status 0x%x channel %d)", 3357 status, channel)); 3358 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3359 *rcr_p, *rx_mbox_p); 3360 3361 nxge_map_rxdma_channel_fail2: 3362 /* Free buffer blocks */ 3363 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3364 "==> nxge_map_rxdma_channel: free rx buffers" 3365 "(nxgep 0x%x status 0x%x channel %d)", 3366 nxgep, status, channel)); 3367 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3368 3369 status = NXGE_ERROR; 3370 3371 nxge_map_rxdma_channel_exit: 3372 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3373 "<== nxge_map_rxdma_channel: " 3374 "(nxgep 0x%x status 0x%x channel %d)", 3375 nxgep, status, channel)); 3376 3377 return (status); 3378 } 3379 3380 /*ARGSUSED*/ 3381 static void 3382 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3383 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3384 { 3385 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3386 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3387 3388 /* 3389 * unmap receive block ring, completion ring and mailbox. 3390 */ 3391 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3392 rcr_p, rx_mbox_p); 3393 3394 /* unmap buffer blocks */ 3395 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3396 3397 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3398 } 3399 3400 /*ARGSUSED*/ 3401 static nxge_status_t 3402 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3403 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3404 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3405 { 3406 p_rx_rbr_ring_t rbrp; 3407 p_rx_rcr_ring_t rcrp; 3408 p_rx_mbox_t mboxp; 3409 p_nxge_dma_common_t cntl_dmap; 3410 p_nxge_dma_common_t dmap; 3411 p_rx_msg_t *rx_msg_ring; 3412 p_rx_msg_t rx_msg_p; 3413 p_rbr_cfig_a_t rcfga_p; 3414 p_rbr_cfig_b_t rcfgb_p; 3415 p_rcrcfig_a_t cfga_p; 3416 p_rcrcfig_b_t cfgb_p; 3417 p_rxdma_cfig1_t cfig1_p; 3418 p_rxdma_cfig2_t cfig2_p; 3419 p_rbr_kick_t kick_p; 3420 uint32_t dmaaddrp; 3421 uint32_t *rbr_vaddrp; 3422 uint32_t bkaddr; 3423 nxge_status_t status = NXGE_OK; 3424 int i; 3425 uint32_t nxge_port_rcr_size; 3426 3427 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3428 "==> nxge_map_rxdma_channel_cfg_ring")); 3429 3430 cntl_dmap = *dma_cntl_p; 3431 3432 /* Map in the receive block ring */ 3433 rbrp = *rbr_p; 3434 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3435 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3436 /* 3437 * Zero out buffer block ring descriptors. 
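 *
 * Each RBR descriptor is a single 32-bit word (hence the element
 * size of 4 passed to nxge_setup_dma_common() above) holding the
 * buffer block's DMA address shifted right by RBR_BKADDR_SHIFT.
 * The loop further down writes one such word per buffer block,
 * essentially:
 *
 *	bkaddr = (uint32_t)(dmac_laddress >> RBR_BKADDR_SHIFT);
 *	*rbr_vaddrp++ = bkaddr;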
3438 */ 3439 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3440 3441 rcfga_p = &(rbrp->rbr_cfga); 3442 rcfgb_p = &(rbrp->rbr_cfgb); 3443 kick_p = &(rbrp->rbr_kick); 3444 rcfga_p->value = 0; 3445 rcfgb_p->value = 0; 3446 kick_p->value = 0; 3447 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3448 rcfga_p->value = (rbrp->rbr_addr & 3449 (RBR_CFIG_A_STDADDR_MASK | 3450 RBR_CFIG_A_STDADDR_BASE_MASK)); 3451 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3452 3453 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3454 rcfgb_p->bits.ldw.vld0 = 1; 3455 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3456 rcfgb_p->bits.ldw.vld1 = 1; 3457 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3458 rcfgb_p->bits.ldw.vld2 = 1; 3459 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3460 3461 /* 3462 * For each buffer block, enter receive block address to the ring. 3463 */ 3464 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3465 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3466 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3467 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3468 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3469 3470 rx_msg_ring = rbrp->rx_msg_ring; 3471 for (i = 0; i < rbrp->tnblocks; i++) { 3472 rx_msg_p = rx_msg_ring[i]; 3473 rx_msg_p->nxgep = nxgep; 3474 rx_msg_p->rx_rbr_p = rbrp; 3475 bkaddr = (uint32_t) 3476 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3477 >> RBR_BKADDR_SHIFT)); 3478 rx_msg_p->free = B_FALSE; 3479 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3480 3481 *rbr_vaddrp++ = bkaddr; 3482 } 3483 3484 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3485 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3486 3487 rbrp->rbr_rd_index = 0; 3488 3489 rbrp->rbr_consumed = 0; 3490 rbrp->rbr_use_bcopy = B_TRUE; 3491 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3492 /* 3493 * Do bcopy on packets greater than bcopy size once 3494 * the lo threshold is reached. 3495 * This lo threshold should be less than the hi threshold. 3496 * 3497 * Do bcopy on every packet once the hi threshold is reached. 
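 *
 * The thresholds themselves are computed as a fraction of rbb_max:
 *
 *	rbr_threshold = rbb_max * NXGE_RX_COPY_n / NXGE_RX_BCOPY_SCALE
 *
 * As a worked example, assuming NXGE_RX_BCOPY_SCALE is 8 and an RBR
 * of 4096 blocks, NXGE_RX_COPY_6 yields a threshold of 3072 blocks.
 * The exact scale and ring size depend on build-time constants and
 * /etc/system tunables, so treat these numbers as illustrative only.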
3498 */ 3499 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3500 /* default it to use hi */ 3501 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3502 } 3503 3504 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3505 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3506 } 3507 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3508 3509 switch (nxge_rx_threshold_hi) { 3510 default: 3511 case NXGE_RX_COPY_NONE: 3512 /* Do not do bcopy at all */ 3513 rbrp->rbr_use_bcopy = B_FALSE; 3514 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3515 break; 3516 3517 case NXGE_RX_COPY_1: 3518 case NXGE_RX_COPY_2: 3519 case NXGE_RX_COPY_3: 3520 case NXGE_RX_COPY_4: 3521 case NXGE_RX_COPY_5: 3522 case NXGE_RX_COPY_6: 3523 case NXGE_RX_COPY_7: 3524 rbrp->rbr_threshold_hi = 3525 rbrp->rbb_max * 3526 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3527 break; 3528 3529 case NXGE_RX_COPY_ALL: 3530 rbrp->rbr_threshold_hi = 0; 3531 break; 3532 } 3533 3534 switch (nxge_rx_threshold_lo) { 3535 default: 3536 case NXGE_RX_COPY_NONE: 3537 /* Do not do bcopy at all */ 3538 if (rbrp->rbr_use_bcopy) { 3539 rbrp->rbr_use_bcopy = B_FALSE; 3540 } 3541 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3542 break; 3543 3544 case NXGE_RX_COPY_1: 3545 case NXGE_RX_COPY_2: 3546 case NXGE_RX_COPY_3: 3547 case NXGE_RX_COPY_4: 3548 case NXGE_RX_COPY_5: 3549 case NXGE_RX_COPY_6: 3550 case NXGE_RX_COPY_7: 3551 rbrp->rbr_threshold_lo = 3552 rbrp->rbb_max * 3553 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3554 break; 3555 3556 case NXGE_RX_COPY_ALL: 3557 rbrp->rbr_threshold_lo = 0; 3558 break; 3559 } 3560 3561 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3562 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3563 "rbb_max %d " 3564 "rbrp->rbr_bufsize_type %d " 3565 "rbb_threshold_hi %d " 3566 "rbb_threshold_lo %d", 3567 dma_channel, 3568 rbrp->rbb_max, 3569 rbrp->rbr_bufsize_type, 3570 rbrp->rbr_threshold_hi, 3571 rbrp->rbr_threshold_lo)); 3572 3573 rbrp->page_valid.value = 0; 3574 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3575 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3576 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3577 rbrp->page_hdl.value = 0; 3578 3579 rbrp->page_valid.bits.ldw.page0 = 1; 3580 rbrp->page_valid.bits.ldw.page1 = 1; 3581 3582 /* Map in the receive completion ring */ 3583 rcrp = (p_rx_rcr_ring_t) 3584 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3585 rcrp->rdc = dma_channel; 3586 3587 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3588 rcrp->comp_size = nxge_port_rcr_size; 3589 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3590 3591 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3592 3593 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3594 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3595 sizeof (rcr_entry_t)); 3596 rcrp->comp_rd_index = 0; 3597 rcrp->comp_wt_index = 0; 3598 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3599 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3600 #if defined(__i386) 3601 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3602 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3603 #else 3604 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3605 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3606 #endif 3607 3608 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3609 (nxge_port_rcr_size - 1); 3610 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3611 (nxge_port_rcr_size - 1); 3612 3613 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3614 "==> nxge_map_rxdma_channel_cfg_ring: " 3615 "channel %d " 3616 "rbr_vaddrp $%p " 3617 "rcr_desc_rd_head_p $%p " 3618 "rcr_desc_rd_head_pp 
$%p " 3619 "rcr_desc_rd_last_p $%p " 3620 "rcr_desc_rd_last_pp $%p ", 3621 dma_channel, 3622 rbr_vaddrp, 3623 rcrp->rcr_desc_rd_head_p, 3624 rcrp->rcr_desc_rd_head_pp, 3625 rcrp->rcr_desc_last_p, 3626 rcrp->rcr_desc_last_pp)); 3627 3628 /* 3629 * Zero out buffer block ring descriptors. 3630 */ 3631 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3632 3633 rcrp->intr_timeout = (nxgep->intr_timeout < 3634 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3635 nxgep->intr_timeout; 3636 3637 rcrp->intr_threshold = (nxgep->intr_threshold < 3638 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3639 nxgep->intr_threshold; 3640 3641 rcrp->full_hdr_flag = B_FALSE; 3642 3643 rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset; 3644 3645 3646 cfga_p = &(rcrp->rcr_cfga); 3647 cfgb_p = &(rcrp->rcr_cfgb); 3648 cfga_p->value = 0; 3649 cfgb_p->value = 0; 3650 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3651 cfga_p->value = (rcrp->rcr_addr & 3652 (RCRCFIG_A_STADDR_MASK | 3653 RCRCFIG_A_STADDR_BASE_MASK)); 3654 3655 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3656 RCRCFIG_A_LEN_SHIF); 3657 3658 /* 3659 * Timeout should be set based on the system clock divider. 3660 * A timeout value of 1 assumes that the 3661 * granularity (1000) is 3 microseconds running at 300MHz. 3662 */ 3663 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3664 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3665 cfgb_p->bits.ldw.entout = 1; 3666 3667 /* Map in the mailbox */ 3668 mboxp = (p_rx_mbox_t) 3669 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3670 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3671 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3672 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3673 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3674 cfig1_p->value = cfig2_p->value = 0; 3675 3676 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3677 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3678 "==> nxge_map_rxdma_channel_cfg_ring: " 3679 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3680 dma_channel, cfig1_p->value, cfig2_p->value, 3681 mboxp->mbox_addr)); 3682 3683 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3684 & 0xfff); 3685 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3686 3687 3688 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3689 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3690 RXDMA_CFIG2_MBADDR_L_MASK); 3691 3692 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3693 3694 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3695 "==> nxge_map_rxdma_channel_cfg_ring: " 3696 "channel %d damaddrp $%p " 3697 "cfg1 0x%016llx cfig2 0x%016llx", 3698 dma_channel, dmaaddrp, 3699 cfig1_p->value, cfig2_p->value)); 3700 3701 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3702 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) { 3703 switch (rcrp->sw_priv_hdr_len) { 3704 case SW_OFFSET_NO_OFFSET: 3705 case SW_OFFSET_64: 3706 case SW_OFFSET_128: 3707 case SW_OFFSET_192: 3708 cfig2_p->bits.ldw.offset = 3709 rcrp->sw_priv_hdr_len; 3710 cfig2_p->bits.ldw.offset256 = 0; 3711 break; 3712 case SW_OFFSET_256: 3713 case SW_OFFSET_320: 3714 case SW_OFFSET_384: 3715 case SW_OFFSET_448: 3716 cfig2_p->bits.ldw.offset = 3717 rcrp->sw_priv_hdr_len & 0x3; 3718 cfig2_p->bits.ldw.offset256 = 1; 3719 break; 3720 default: 3721 cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET; 3722 cfig2_p->bits.ldw.offset256 = 0; 3723 } 3724 } else { 3725 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3726 } 3727 3728 rbrp->rx_rcr_p = rcrp; 3729 rcrp->rx_rbr_p = rbrp; 3730 *rcr_p = rcrp; 3731 
*rx_mbox_p = mboxp; 3732 3733 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3734 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3735 3736 return (status); 3737 } 3738 3739 /*ARGSUSED*/ 3740 static void 3741 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3742 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3743 { 3744 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3745 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3746 rcr_p->rdc)); 3747 3748 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3749 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3750 3751 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3752 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3753 } 3754 3755 static nxge_status_t 3756 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3757 p_nxge_dma_common_t *dma_buf_p, 3758 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3759 { 3760 p_rx_rbr_ring_t rbrp; 3761 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3762 p_rx_msg_t *rx_msg_ring; 3763 p_rx_msg_t rx_msg_p; 3764 p_mblk_t mblk_p; 3765 3766 rxring_info_t *ring_info; 3767 nxge_status_t status = NXGE_OK; 3768 int i, j, index; 3769 uint32_t size, bsize, nblocks, nmsgs; 3770 3771 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3772 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3773 channel)); 3774 3775 dma_bufp = tmp_bufp = *dma_buf_p; 3776 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3777 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3778 "chunks bufp 0x%016llx", 3779 channel, num_chunks, dma_bufp)); 3780 3781 nmsgs = 0; 3782 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3783 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3784 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3785 "bufp 0x%016llx nblocks %d nmsgs %d", 3786 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3787 nmsgs += tmp_bufp->nblocks; 3788 } 3789 if (!nmsgs) { 3790 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3791 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3792 "no msg blocks", 3793 channel)); 3794 status = NXGE_ERROR; 3795 goto nxge_map_rxdma_channel_buf_ring_exit; 3796 } 3797 3798 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3799 3800 size = nmsgs * sizeof (p_rx_msg_t); 3801 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3802 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3803 KM_SLEEP); 3804 3805 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3806 (void *)nxgep->interrupt_cookie); 3807 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3808 (void *)nxgep->interrupt_cookie); 3809 rbrp->rdc = channel; 3810 rbrp->num_blocks = num_chunks; 3811 rbrp->tnblocks = nmsgs; 3812 rbrp->rbb_max = nmsgs; 3813 rbrp->rbr_max_size = nmsgs; 3814 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3815 3816 /* 3817 * Buffer sizes suggested by NIU architect. 3818 * 256, 512 and 2K. 
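 *
 * (The code below actually programs 256 bytes for size 0 and 1K for
 * size 1; size 2 is 2K for non-jumbo configurations, and 4K or 8K
 * when jumbo frames are enabled, depending on the default receive
 * block size.)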
3819 */ 3820 3821 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3822 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3823 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3824 3825 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3826 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3827 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3828 3829 rbrp->block_size = nxgep->rx_default_block_size; 3830 3831 if (!nxgep->mac.is_jumbo) { 3832 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3833 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3834 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3835 } else { 3836 if (rbrp->block_size >= 0x2000) { 3837 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3838 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3839 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3840 } else { 3841 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3842 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3843 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3844 } 3845 } 3846 3847 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3848 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3849 "actual rbr max %d rbb_max %d nmsgs %d " 3850 "rbrp->block_size %d default_block_size %d " 3851 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3852 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3853 rbrp->block_size, nxgep->rx_default_block_size, 3854 nxge_rbr_size, nxge_rbr_spare_size)); 3855 3856 /* Map in buffers from the buffer pool. */ 3857 index = 0; 3858 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3859 bsize = dma_bufp->block_size; 3860 nblocks = dma_bufp->nblocks; 3861 #if defined(__i386) 3862 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3863 #else 3864 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3865 #endif 3866 ring_info->buffer[i].buf_index = i; 3867 ring_info->buffer[i].buf_size = dma_bufp->alength; 3868 ring_info->buffer[i].start_index = index; 3869 #if defined(__i386) 3870 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3871 #else 3872 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3873 #endif 3874 3875 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3876 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3877 "chunk %d" 3878 " nblocks %d chunk_size %x block_size 0x%x " 3879 "dma_bufp $%p", channel, i, 3880 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3881 dma_bufp)); 3882 3883 for (j = 0; j < nblocks; j++) { 3884 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3885 dma_bufp)) == NULL) { 3886 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3887 "allocb failed (index %d i %d j %d)", 3888 index, i, j)); 3889 goto nxge_map_rxdma_channel_buf_ring_fail1; 3890 } 3891 rx_msg_ring[index] = rx_msg_p; 3892 rx_msg_p->block_index = index; 3893 rx_msg_p->shifted_addr = (uint32_t) 3894 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3895 RBR_BKADDR_SHIFT)); 3896 3897 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3898 "index %d j %d rx_msg_p $%p mblk %p", 3899 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3900 3901 mblk_p = rx_msg_p->rx_mblk_p; 3902 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3903 3904 rbrp->rbr_ref_cnt++; 3905 index++; 3906 rx_msg_p->buf_dma.dma_channel = channel; 3907 } 3908 3909 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3910 if (dma_bufp->contig_alloc_type) { 3911 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3912 } 3913 3914 if (dma_bufp->kmem_alloc_type) { 3915 rbrp->rbr_alloc_type = KMEM_ALLOC; 3916 } 3917 3918 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3919 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3920 "chunk %d" 3921 " nblocks %d chunk_size %x block_size 0x%x " 3922 "dma_bufp $%p", 3923 channel, i, 3924 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3925 
dma_bufp)); 3926 } 3927 if (i < rbrp->num_blocks) { 3928 goto nxge_map_rxdma_channel_buf_ring_fail1; 3929 } 3930 3931 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3932 "nxge_map_rxdma_channel_buf_ring: done buf init " 3933 "channel %d msg block entries %d", 3934 channel, index)); 3935 ring_info->block_size_mask = bsize - 1; 3936 rbrp->rx_msg_ring = rx_msg_ring; 3937 rbrp->dma_bufp = dma_buf_p; 3938 rbrp->ring_info = ring_info; 3939 3940 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3941 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3942 " nxge_map_rxdma_channel_buf_ring: " 3943 "channel %d done buf info init", channel)); 3944 3945 /* 3946 * Finally, permit nxge_freeb() to call nxge_post_page(). 3947 */ 3948 rbrp->rbr_state = RBR_POSTING; 3949 3950 *rbr_p = rbrp; 3951 goto nxge_map_rxdma_channel_buf_ring_exit; 3952 3953 nxge_map_rxdma_channel_buf_ring_fail1: 3954 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3955 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3956 channel, status)); 3957 3958 index--; 3959 for (; index >= 0; index--) { 3960 rx_msg_p = rx_msg_ring[index]; 3961 if (rx_msg_p != NULL) { 3962 freeb(rx_msg_p->rx_mblk_p); 3963 rx_msg_ring[index] = NULL; 3964 } 3965 } 3966 nxge_map_rxdma_channel_buf_ring_fail: 3967 MUTEX_DESTROY(&rbrp->post_lock); 3968 MUTEX_DESTROY(&rbrp->lock); 3969 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3970 KMEM_FREE(rx_msg_ring, size); 3971 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3972 3973 status = NXGE_ERROR; 3974 3975 nxge_map_rxdma_channel_buf_ring_exit: 3976 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3977 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3978 3979 return (status); 3980 } 3981 3982 /*ARGSUSED*/ 3983 static void 3984 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3985 p_rx_rbr_ring_t rbr_p) 3986 { 3987 p_rx_msg_t *rx_msg_ring; 3988 p_rx_msg_t rx_msg_p; 3989 rxring_info_t *ring_info; 3990 int i; 3991 uint32_t size; 3992 #ifdef NXGE_DEBUG 3993 int num_chunks; 3994 #endif 3995 3996 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3997 "==> nxge_unmap_rxdma_channel_buf_ring")); 3998 if (rbr_p == NULL) { 3999 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4000 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 4001 return; 4002 } 4003 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4004 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 4005 rbr_p->rdc)); 4006 4007 rx_msg_ring = rbr_p->rx_msg_ring; 4008 ring_info = rbr_p->ring_info; 4009 4010 if (rx_msg_ring == NULL || ring_info == NULL) { 4011 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4012 "<== nxge_unmap_rxdma_channel_buf_ring: " 4013 "rx_msg_ring $%p ring_info $%p", 4014 rx_msg_p, ring_info)); 4015 return; 4016 } 4017 4018 #ifdef NXGE_DEBUG 4019 num_chunks = rbr_p->num_blocks; 4020 #endif 4021 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 4022 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4023 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 4024 "tnblocks %d (max %d) size ptrs %d ", 4025 rbr_p->rdc, num_chunks, 4026 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 4027 4028 for (i = 0; i < rbr_p->tnblocks; i++) { 4029 rx_msg_p = rx_msg_ring[i]; 4030 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4031 " nxge_unmap_rxdma_channel_buf_ring: " 4032 "rx_msg_p $%p", 4033 rx_msg_p)); 4034 if (rx_msg_p != NULL) { 4035 freeb(rx_msg_p->rx_mblk_p); 4036 rx_msg_ring[i] = NULL; 4037 } 4038 } 4039 4040 /* 4041 * We no longer may use the mutex <post_lock>. By setting 4042 * <rbr_state> to anything but POSTING, we prevent 4043 * nxge_post_page() from accessing a dead mutex. 
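 *
 * The ring therefore moves through three states: RBR_POSTING while
 * it is live, RBR_UNMAPPING while this routine tears it down, and
 * RBR_UNMAPPED if buffers are still loaned out, in which case the
 * final nxge_freeb() of the last outstanding buffer is responsible
 * for freeing <rbr_p> itself.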
4044 */ 4045 rbr_p->rbr_state = RBR_UNMAPPING; 4046 MUTEX_DESTROY(&rbr_p->post_lock); 4047 4048 MUTEX_DESTROY(&rbr_p->lock); 4049 4050 if (rbr_p->rbr_ref_cnt == 0) { 4051 /* 4052 * This is the normal state of affairs. 4053 * Need to free the following buffers: 4054 * - data buffers 4055 * - rx_msg ring 4056 * - ring_info 4057 * - rbr ring 4058 */ 4059 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4060 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4061 nxge_rxdma_databuf_free(rbr_p); 4062 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4063 KMEM_FREE(rx_msg_ring, size); 4064 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4065 } else { 4066 /* 4067 * Some of our buffers are still being used. 4068 * Therefore, tell nxge_freeb() this ring is 4069 * unmapped, so it may free <rbr_p> for us. 4070 */ 4071 rbr_p->rbr_state = RBR_UNMAPPED; 4072 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4073 "unmap_rxdma_buf_ring: %d %s outstanding.", 4074 rbr_p->rbr_ref_cnt, 4075 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 4076 } 4077 4078 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4079 "<== nxge_unmap_rxdma_channel_buf_ring")); 4080 } 4081 4082 /* 4083 * nxge_rxdma_hw_start_common 4084 * 4085 * Arguments: 4086 * nxgep 4087 * 4088 * Notes: 4089 * 4090 * NPI/NXGE function calls: 4091 * nxge_init_fzc_rx_common(); 4092 * nxge_init_fzc_rxdma_port(); 4093 * 4094 * Registers accessed: 4095 * 4096 * Context: 4097 * Service domain 4098 */ 4099 static nxge_status_t 4100 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 4101 { 4102 nxge_status_t status = NXGE_OK; 4103 4104 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4105 4106 /* 4107 * Load the sharable parameters by writing to the 4108 * function zero control registers. These FZC registers 4109 * should be initialized only once for the entire chip. 4110 */ 4111 (void) nxge_init_fzc_rx_common(nxgep); 4112 4113 /* 4114 * Initialize the RXDMA port specific FZC control configurations. 4115 * These FZC registers are pertaining to each port. 
4116 */
4117 (void) nxge_init_fzc_rxdma_port(nxgep);
4118
4119 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
4120
4121 return (status);
4122 }
4123
4124 static nxge_status_t
4125 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
4126 {
4127 int i, ndmas;
4128 p_rx_rbr_rings_t rx_rbr_rings;
4129 p_rx_rbr_ring_t *rbr_rings;
4130 p_rx_rcr_rings_t rx_rcr_rings;
4131 p_rx_rcr_ring_t *rcr_rings;
4132 p_rx_mbox_areas_t rx_mbox_areas_p;
4133 p_rx_mbox_t *rx_mbox_p;
4134 nxge_status_t status = NXGE_OK;
4135
4136 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
4137
4138 rx_rbr_rings = nxgep->rx_rbr_rings;
4139 rx_rcr_rings = nxgep->rx_rcr_rings;
4140 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4141 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4142 "<== nxge_rxdma_hw_start: NULL ring pointers"));
4143 return (NXGE_ERROR);
4144 }
4145 ndmas = rx_rbr_rings->ndmas;
4146 if (ndmas == 0) {
4147 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4148 "<== nxge_rxdma_hw_start: no dma channel allocated"));
4149 return (NXGE_ERROR);
4150 }
4151
4152 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4153 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
4154
4155 rbr_rings = rx_rbr_rings->rbr_rings;
4156 rcr_rings = rx_rcr_rings->rcr_rings;
4157 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
4158 if (rx_mbox_areas_p) {
4159 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
4160 } else {
4161 rx_mbox_p = NULL;
4162 }
4163
4164 i = channel;
4165 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4166 "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
4167 ndmas, channel));
4168 status = nxge_rxdma_start_channel(nxgep, channel,
4169 (p_rx_rbr_ring_t)rbr_rings[i],
4170 (p_rx_rcr_ring_t)rcr_rings[i],
4171 (p_rx_mbox_t)rx_mbox_p[i]);
4172 if (status != NXGE_OK) {
4173 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4174 "==> nxge_rxdma_hw_start: disable "
4175 "(status 0x%x channel %d)", status, channel));
4176 return (status);
4177 }
4178
4179 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
4180 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4181 rx_rbr_rings, rx_rcr_rings));
4182
4183 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4184 "<== nxge_rxdma_hw_start: (status 0x%x)", status));
4185
4186 return (status);
4187 }
4188
4189 static void
4190 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
4191 {
4192 p_rx_rbr_rings_t rx_rbr_rings;
4193 p_rx_rcr_rings_t rx_rcr_rings;
4194
4195 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
4196
4197 rx_rbr_rings = nxgep->rx_rbr_rings;
4198 rx_rcr_rings = nxgep->rx_rcr_rings;
4199 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
4200 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4201 "<== nxge_rxdma_hw_stop: NULL ring pointers"));
4202 return;
4203 }
4204
4205 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4206 "==> nxge_rxdma_hw_stop(channel %d)",
4207 channel));
4208 (void) nxge_rxdma_stop_channel(nxgep, channel);
4209
4210 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
4211 "rx_rbr_rings 0x%016llx rings 0x%016llx",
4212 rx_rbr_rings, rx_rcr_rings));
4213
4214 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
4215 }
4216
4217
4218 static nxge_status_t
4219 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
4220 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
4221
4222 {
4223 npi_handle_t handle;
4224 npi_status_t rs = NPI_SUCCESS;
4225 rx_dma_ctl_stat_t cs;
4226 rx_dma_ent_msk_t ent_mask;
4227 nxge_status_t status = NXGE_OK;
4228
4229 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
4230
4231 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4232
4233 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
4234 "npi handle addr $%p acc $%p",
4235 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4236
4237 /* Reset RXDMA channel, but not if you're a guest. */
4238 if (!isLDOMguest(nxgep)) {
4239 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4240 if (rs != NPI_SUCCESS) {
4241 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4242 "==> nxge_rxdma_start_channel: "
4243 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
4244 channel, rs));
4245 return (NXGE_ERROR | rs);
4246 }
4247
4248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4249 "==> nxge_rxdma_start_channel: reset done: channel %d",
4250 channel));
4251 }
4252
4253 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
4254 if (isLDOMguest(nxgep))
4255 (void) nxge_rdc_lp_conf(nxgep, channel);
4256 #endif
4257
4258 /*
4259 * Initialize the RXDMA channel specific FZC control
4260 * configurations. These FZC registers pertain
4261 * to each RX channel (logical pages).
4262 */
4263 if (!isLDOMguest(nxgep)) {
4264 status = nxge_init_fzc_rxdma_channel(nxgep, channel);
4265 if (status != NXGE_OK) {
4266 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4267 "==> nxge_rxdma_start_channel: "
4268 "init fzc rxdma failed (0x%08x channel %d)",
4269 status, channel));
4270 return (status);
4271 }
4272
4273 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4274 "==> nxge_rxdma_start_channel: fzc done"));
4275 }
4276
4277 /* Set up the interrupt event masks. */
4278 ent_mask.value = 0;
4279 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
4280 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4281 &ent_mask);
4282 if (rs != NPI_SUCCESS) {
4283 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4284 "==> nxge_rxdma_start_channel: "
4285 "init rxdma event masks failed "
4286 "(0x%08x channel %d)",
4287 rs, channel));
4288 return (NXGE_ERROR | rs);
4289 }
4290
4291 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4292 "==> nxge_rxdma_start_channel: "
4293 "event done: channel %d (mask 0x%016llx)",
4294 channel, ent_mask.value));
4295
4296 /* Initialize the receive DMA control and status register */
4297 cs.value = 0;
4298 cs.bits.hdw.mex = 1;
4299 cs.bits.hdw.rcrthres = 1;
4300 cs.bits.hdw.rcrto = 1;
4301 cs.bits.hdw.rbr_empty = 1;
4302 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4303 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4304 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
4305 if (status != NXGE_OK) {
4306 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4307 "==> nxge_rxdma_start_channel: "
4308 "init rxdma control register failed (0x%08x channel %d)",
4309 status, channel));
4310 return (status);
4311 }
4312
4313 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4314 "control done - channel %d cs 0x%016llx", channel, cs.value));
4315
4316 /*
4317 * Load RXDMA descriptors, buffers, mailbox,
4318 * initialize the receive DMA channels and
4319 * enable each DMA channel.
4320 */
4321 status = nxge_enable_rxdma_channel(nxgep,
4322 channel, rbr_p, rcr_p, mbox_p);
4323
4324 if (status != NXGE_OK) {
4325 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4326 " nxge_rxdma_start_channel: "
4327 " enable rxdma failed (0x%08x channel %d)",
4328 status, channel));
4329 return (status);
4330 }
4331
4332 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4333 "==> nxge_rxdma_start_channel: enabled channel %d", channel));
4334
4335 if (isLDOMguest(nxgep)) {
4336 /* Add interrupt handler for this channel.
*/ 4337 status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel); 4338 if (status != NXGE_OK) { 4339 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4340 " nxge_rxdma_start_channel: " 4341 " nxge_hio_intr_add failed (0x%08x channel %d)", 4342 status, channel)); 4343 return (status); 4344 } 4345 } 4346 4347 ent_mask.value = 0; 4348 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4349 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4350 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4351 &ent_mask); 4352 if (rs != NPI_SUCCESS) { 4353 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4354 "==> nxge_rxdma_start_channel: " 4355 "init rxdma event masks failed (0x%08x channel %d)", 4356 status, channel)); 4357 return (NXGE_ERROR | rs); 4358 } 4359 4360 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4361 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4362 4363 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4364 4365 return (NXGE_OK); 4366 } 4367 4368 static nxge_status_t 4369 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4370 { 4371 npi_handle_t handle; 4372 npi_status_t rs = NPI_SUCCESS; 4373 rx_dma_ctl_stat_t cs; 4374 rx_dma_ent_msk_t ent_mask; 4375 nxge_status_t status = NXGE_OK; 4376 4377 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4378 4379 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4380 4381 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4382 "npi handle addr $%p acc $%p", 4383 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4384 4385 if (!isLDOMguest(nxgep)) { 4386 /* 4387 * Stop RxMAC = A.9.2.6 4388 */ 4389 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4390 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4391 "nxge_rxdma_stop_channel: " 4392 "Failed to disable RxMAC")); 4393 } 4394 4395 /* 4396 * Drain IPP Port = A.9.3.6 4397 */ 4398 (void) nxge_ipp_drain(nxgep); 4399 } 4400 4401 /* Reset RXDMA channel */ 4402 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4403 if (rs != NPI_SUCCESS) { 4404 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4405 " nxge_rxdma_stop_channel: " 4406 " reset rxdma failed (0x%08x channel %d)", 4407 rs, channel)); 4408 return (NXGE_ERROR | rs); 4409 } 4410 4411 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4412 "==> nxge_rxdma_stop_channel: reset done")); 4413 4414 /* Set up the interrupt event masks. */ 4415 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4416 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4417 &ent_mask); 4418 if (rs != NPI_SUCCESS) { 4419 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4420 "==> nxge_rxdma_stop_channel: " 4421 "set rxdma event masks failed (0x%08x channel %d)", 4422 rs, channel)); 4423 return (NXGE_ERROR | rs); 4424 } 4425 4426 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4427 "==> nxge_rxdma_stop_channel: event done")); 4428 4429 /* 4430 * Initialize the receive DMA control and status register 4431 */ 4432 cs.value = 0; 4433 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4434 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4435 " to default (all 0s) 0x%08x", cs.value)); 4436 if (status != NXGE_OK) { 4437 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4438 " nxge_rxdma_stop_channel: init rxdma" 4439 " control register failed (0x%08x channel %d", 4440 status, channel)); 4441 return (status); 4442 } 4443 4444 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4445 "==> nxge_rxdma_stop_channel: control done")); 4446 4447 /* 4448 * Make sure channel is disabled. 
4449 */ 4450 status = nxge_disable_rxdma_channel(nxgep, channel); 4451 4452 if (status != NXGE_OK) { 4453 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4454 " nxge_rxdma_stop_channel: " 4455 " init enable rxdma failed (0x%08x channel %d)", 4456 status, channel)); 4457 return (status); 4458 } 4459 4460 if (!isLDOMguest(nxgep)) { 4461 /* 4462 * Enable RxMAC = A.9.2.10 4463 */ 4464 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4465 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4466 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4467 } 4468 } 4469 4470 NXGE_DEBUG_MSG((nxgep, 4471 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4472 4473 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4474 4475 return (NXGE_OK); 4476 } 4477 4478 nxge_status_t 4479 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4480 { 4481 npi_handle_t handle; 4482 p_nxge_rdc_sys_stats_t statsp; 4483 rx_ctl_dat_fifo_stat_t stat; 4484 uint32_t zcp_err_status; 4485 uint32_t ipp_err_status; 4486 nxge_status_t status = NXGE_OK; 4487 npi_status_t rs = NPI_SUCCESS; 4488 boolean_t my_err = B_FALSE; 4489 4490 handle = nxgep->npi_handle; 4491 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4492 4493 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4494 4495 if (rs != NPI_SUCCESS) 4496 return (NXGE_ERROR | rs); 4497 4498 if (stat.bits.ldw.id_mismatch) { 4499 statsp->id_mismatch++; 4500 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0, 4501 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4502 /* Global fatal error encountered */ 4503 } 4504 4505 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4506 switch (nxgep->mac.portnum) { 4507 case 0: 4508 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4509 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4510 my_err = B_TRUE; 4511 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4512 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4513 } 4514 break; 4515 case 1: 4516 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4517 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4518 my_err = B_TRUE; 4519 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4520 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4521 } 4522 break; 4523 case 2: 4524 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4525 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4526 my_err = B_TRUE; 4527 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4528 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4529 } 4530 break; 4531 case 3: 4532 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4533 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4534 my_err = B_TRUE; 4535 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4536 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4537 } 4538 break; 4539 default: 4540 return (NXGE_ERROR); 4541 } 4542 } 4543 4544 if (my_err) { 4545 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4546 zcp_err_status); 4547 if (status != NXGE_OK) 4548 return (status); 4549 } 4550 4551 return (NXGE_OK); 4552 } 4553 4554 static nxge_status_t 4555 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4556 uint32_t zcp_status) 4557 { 4558 boolean_t rxport_fatal = B_FALSE; 4559 p_nxge_rdc_sys_stats_t statsp; 4560 nxge_status_t status = NXGE_OK; 4561 uint8_t portn; 4562 4563 portn = nxgep->mac.portnum; 4564 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4565 4566 if (ipp_status & (0x1 << portn)) { 4567 statsp->ipp_eop_err++; 4568 NXGE_FM_REPORT_ERROR(nxgep, portn, 0, 4569 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4570 rxport_fatal = B_TRUE; 4571 } 4572 4573 if (zcp_status & (0x1 << 
portn)) { 4574 statsp->zcp_eop_err++; 4575 NXGE_FM_REPORT_ERROR(nxgep, portn, 0, 4576 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4577 rxport_fatal = B_TRUE; 4578 } 4579 4580 if (rxport_fatal) { 4581 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4582 " nxge_rxdma_handle_port_error: " 4583 " fatal error on Port #%d\n", 4584 portn)); 4585 status = nxge_rx_port_fatal_err_recover(nxgep); 4586 if (status == NXGE_OK) { 4587 FM_SERVICE_RESTORED(nxgep); 4588 } 4589 } 4590 4591 return (status); 4592 } 4593 4594 static nxge_status_t 4595 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4596 { 4597 npi_handle_t handle; 4598 npi_status_t rs = NPI_SUCCESS; 4599 nxge_status_t status = NXGE_OK; 4600 p_rx_rbr_ring_t rbrp; 4601 p_rx_rcr_ring_t rcrp; 4602 p_rx_mbox_t mboxp; 4603 rx_dma_ent_msk_t ent_mask; 4604 p_nxge_dma_common_t dmap; 4605 uint32_t ref_cnt; 4606 p_rx_msg_t rx_msg_p; 4607 int i; 4608 uint32_t nxge_port_rcr_size; 4609 4610 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4612 "Recovering from RxDMAChannel#%d error...", channel)); 4613 4614 /* 4615 * Stop the dma channel waits for the stop done. 4616 * If the stop done bit is not set, then create 4617 * an error. 4618 */ 4619 4620 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4621 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4622 4623 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel]; 4624 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel]; 4625 4626 MUTEX_ENTER(&rbrp->lock); 4627 MUTEX_ENTER(&rbrp->post_lock); 4628 4629 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4630 4631 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4632 if (rs != NPI_SUCCESS) { 4633 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4634 "nxge_disable_rxdma_channel:failed")); 4635 goto fail; 4636 } 4637 4638 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4639 4640 /* Disable interrupt */ 4641 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4642 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4643 if (rs != NPI_SUCCESS) { 4644 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4645 "nxge_rxdma_stop_channel: " 4646 "set rxdma event masks failed (channel %d)", 4647 channel)); 4648 } 4649 4650 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4651 4652 /* Reset RXDMA channel */ 4653 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4654 if (rs != NPI_SUCCESS) { 4655 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4656 "nxge_rxdma_fatal_err_recover: " 4657 " reset rxdma failed (channel %d)", channel)); 4658 goto fail; 4659 } 4660 4661 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4662 4663 mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 4664 4665 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4666 rbrp->rbr_rd_index = 0; 4667 4668 rcrp->comp_rd_index = 0; 4669 rcrp->comp_wt_index = 0; 4670 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4671 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4672 #if defined(__i386) 4673 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4674 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4675 #else 4676 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4677 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4678 #endif 4679 4680 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4681 (nxge_port_rcr_size - 1); 4682 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4683 (nxge_port_rcr_size - 1); 4684 4685 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4686 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4687 4688 
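	/*
	 * Walk the buffer ring and decide, buffer by buffer, what can be
	 * handed back to the hardware.  In the loop below, a buffer whose
	 * ref_cnt is back to 1 is owned solely by the ring and is left
	 * alone; a buffer that is still referenced but fully consumed
	 * (cur_usage_cnt == max_usage_cnt) is marked free and has its
	 * usage counters reset so it can be re-posted, while a partially
	 * consumed buffer is only reported.
	 */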
cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4689 4690 for (i = 0; i < rbrp->rbr_max_size; i++) { 4691 rx_msg_p = rbrp->rx_msg_ring[i]; 4692 ref_cnt = rx_msg_p->ref_cnt; 4693 if (ref_cnt != 1) { 4694 if (rx_msg_p->cur_usage_cnt != 4695 rx_msg_p->max_usage_cnt) { 4696 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4697 "buf[%d]: cur_usage_cnt = %d " 4698 "max_usage_cnt = %d\n", i, 4699 rx_msg_p->cur_usage_cnt, 4700 rx_msg_p->max_usage_cnt)); 4701 } else { 4702 /* Buffer can be re-posted */ 4703 rx_msg_p->free = B_TRUE; 4704 rx_msg_p->cur_usage_cnt = 0; 4705 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4706 rx_msg_p->pkt_buf_size = 0; 4707 } 4708 } 4709 } 4710 4711 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4712 4713 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4714 if (status != NXGE_OK) { 4715 goto fail; 4716 } 4717 4718 MUTEX_EXIT(&rbrp->post_lock); 4719 MUTEX_EXIT(&rbrp->lock); 4720 4721 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4722 "Recovery Successful, RxDMAChannel#%d Restored", 4723 channel)); 4724 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4725 return (NXGE_OK); 4726 4727 fail: 4728 MUTEX_EXIT(&rbrp->post_lock); 4729 MUTEX_EXIT(&rbrp->lock); 4730 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4731 return (NXGE_ERROR | rs); 4732 } 4733 4734 nxge_status_t 4735 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4736 { 4737 nxge_grp_set_t *set = &nxgep->rx_set; 4738 nxge_status_t status = NXGE_OK; 4739 p_rx_rcr_ring_t rcrp; 4740 int rdc; 4741 4742 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4743 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4744 "Recovering from RxPort error...")); 4745 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4746 4747 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4748 goto fail; 4749 4750 NXGE_DELAY(1000); 4751 4752 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4753 4754 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4755 if ((1 << rdc) & set->owned.map) { 4756 rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc]; 4757 if (rcrp != NULL) { 4758 MUTEX_ENTER(&rcrp->lock); 4759 if (nxge_rxdma_fatal_err_recover(nxgep, 4760 rdc) != NXGE_OK) { 4761 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4762 "Could not recover " 4763 "channel %d", rdc)); 4764 } 4765 MUTEX_EXIT(&rcrp->lock); 4766 } 4767 } 4768 } 4769 4770 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4771 4772 /* Reset IPP */ 4773 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4774 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4775 "nxge_rx_port_fatal_err_recover: " 4776 "Failed to reset IPP")); 4777 goto fail; 4778 } 4779 4780 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4781 4782 /* Reset RxMAC */ 4783 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4784 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4785 "nxge_rx_port_fatal_err_recover: " 4786 "Failed to reset RxMAC")); 4787 goto fail; 4788 } 4789 4790 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4791 4792 /* Re-Initialize IPP */ 4793 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4794 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4795 "nxge_rx_port_fatal_err_recover: " 4796 "Failed to init IPP")); 4797 goto fail; 4798 } 4799 4800 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4801 4802 /* Re-Initialize RxMAC */ 4803 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4804 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4805 "nxge_rx_port_fatal_err_recover: " 4806 "Failed to reset RxMAC")); 4807 goto fail; 4808 } 4809 4810 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4811 4812 
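	/*
	 * At this point each RDC owned by the port has been run through
	 * the per-channel recovery above, and the IPP and RxMAC have been
	 * reset and re-initialized.  Re-enabling the RxMAC below mirrors
	 * the disable that began the recovery.
	 */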
/* Re-enable RxMAC */ 4813 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4814 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4815 "nxge_rx_port_fatal_err_recover: " 4816 "Failed to enable RxMAC")); 4817 goto fail; 4818 } 4819 4820 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4821 "Recovery Successful, RxPort Restored")); 4822 4823 return (NXGE_OK); 4824 fail: 4825 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4826 return (status); 4827 } 4828 4829 void 4830 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4831 { 4832 rx_dma_ctl_stat_t cs; 4833 rx_ctl_dat_fifo_stat_t cdfs; 4834 4835 switch (err_id) { 4836 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4837 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4838 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4839 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4840 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4841 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4842 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4843 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4844 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4845 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4846 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4847 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4848 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4849 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4850 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4851 chan, &cs.value); 4852 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4853 cs.bits.hdw.rcr_ack_err = 1; 4854 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4855 cs.bits.hdw.dc_fifo_err = 1; 4856 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4857 cs.bits.hdw.rcr_sha_par = 1; 4858 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4859 cs.bits.hdw.rbr_pre_par = 1; 4860 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4861 cs.bits.hdw.rbr_tmout = 1; 4862 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4863 cs.bits.hdw.rsp_cnt_err = 1; 4864 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4865 cs.bits.hdw.byte_en_bus = 1; 4866 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4867 cs.bits.hdw.rsp_dat_err = 1; 4868 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4869 cs.bits.hdw.config_err = 1; 4870 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4871 cs.bits.hdw.rcrincon = 1; 4872 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4873 cs.bits.hdw.rcrfull = 1; 4874 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4875 cs.bits.hdw.rbrfull = 1; 4876 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4877 cs.bits.hdw.rbrlogpage = 1; 4878 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4879 cs.bits.hdw.cfiglogpage = 1; 4880 #if defined(__i386) 4881 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4882 cs.value); 4883 #else 4884 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4885 cs.value); 4886 #endif 4887 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4888 chan, cs.value); 4889 break; 4890 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4891 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4892 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4893 cdfs.value = 0; 4894 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4895 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4896 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4897 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4898 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4899 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4900 #if defined(__i386) 4901 cmn_err(CE_NOTE, 4902 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4903 cdfs.value); 4904 #else 4905 cmn_err(CE_NOTE, 4906 
"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4907 cdfs.value); 4908 #endif 4909 NXGE_REG_WR64(nxgep->npi_handle, 4910 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4911 break; 4912 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4913 break; 4914 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4915 break; 4916 } 4917 } 4918 4919 static void 4920 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4921 { 4922 rxring_info_t *ring_info; 4923 int index; 4924 uint32_t chunk_size; 4925 uint64_t kaddr; 4926 uint_t num_blocks; 4927 4928 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4929 4930 if (rbr_p == NULL) { 4931 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4932 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4933 return; 4934 } 4935 4936 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4937 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4938 "<== nxge_rxdma_databuf_free: DDI")); 4939 return; 4940 } 4941 4942 ring_info = rbr_p->ring_info; 4943 if (ring_info == NULL) { 4944 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4945 "==> nxge_rxdma_databuf_free: NULL ring info")); 4946 return; 4947 } 4948 num_blocks = rbr_p->num_blocks; 4949 for (index = 0; index < num_blocks; index++) { 4950 kaddr = ring_info->buffer[index].kaddr; 4951 chunk_size = ring_info->buffer[index].buf_size; 4952 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4953 "==> nxge_rxdma_databuf_free: free chunk %d " 4954 "kaddrp $%p chunk size %d", 4955 index, kaddr, chunk_size)); 4956 if (kaddr == 0) 4957 continue; 4958 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4959 ring_info->buffer[index].kaddr = 0; 4960 } 4961 4962 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4963 } 4964 4965 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4966 extern void contig_mem_free(void *, size_t); 4967 #endif 4968 4969 void 4970 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4971 { 4972 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4973 4974 if (kaddr == 0 || !buf_size) { 4975 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4976 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4977 kaddr, buf_size)); 4978 return; 4979 } 4980 4981 switch (alloc_type) { 4982 case KMEM_ALLOC: 4983 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4984 "==> nxge_free_buf: freeing kmem $%p size %d", 4985 kaddr, buf_size)); 4986 #if defined(__i386) 4987 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4988 #else 4989 KMEM_FREE((void *)kaddr, buf_size); 4990 #endif 4991 break; 4992 4993 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4994 case CONTIG_MEM_ALLOC: 4995 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4996 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4997 kaddr, buf_size)); 4998 contig_mem_free((void *)kaddr, buf_size); 4999 break; 5000 #endif 5001 5002 default: 5003 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5004 "<== nxge_free_buf: unsupported alloc type %d", 5005 alloc_type)); 5006 return; 5007 } 5008 5009 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5010 } 5011