/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
63 */ 64 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 65 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 66 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 67 68 extern uint32_t nxge_cksum_offload; 69 70 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 71 static void nxge_unmap_rxdma(p_nxge_t, int); 72 73 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 74 75 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 76 static void nxge_rxdma_hw_stop(p_nxge_t, int); 77 78 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 79 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 80 uint32_t, 81 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 82 p_rx_mbox_t *); 83 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 84 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 85 86 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 87 uint16_t, 88 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 89 p_rx_rcr_ring_t *, p_rx_mbox_t *); 90 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 91 p_rx_rcr_ring_t, p_rx_mbox_t); 92 93 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 94 uint16_t, 95 p_nxge_dma_common_t *, 96 p_rx_rbr_ring_t *, uint32_t); 97 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 98 p_rx_rbr_ring_t); 99 100 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 101 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 102 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 103 104 static mblk_t * 105 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 106 107 static void nxge_receive_packet(p_nxge_t, 108 p_rx_rcr_ring_t, 109 p_rcr_entry_t, 110 boolean_t *, 111 mblk_t **, mblk_t **); 112 113 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 114 115 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 116 static void nxge_freeb(p_rx_msg_t); 117 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 118 119 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 120 uint32_t, uint32_t); 121 122 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 123 p_rx_rbr_ring_t); 124 125 126 static nxge_status_t 127 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 128 129 nxge_status_t 130 nxge_rx_port_fatal_err_recover(p_nxge_t); 131 132 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 133 134 nxge_status_t 135 nxge_init_rxdma_channels(p_nxge_t nxgep) 136 { 137 nxge_grp_set_t *set = &nxgep->rx_set; 138 int i, count, channel; 139 nxge_grp_t *group; 140 dc_map_t map; 141 int dev_gindex; 142 143 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 144 145 if (!isLDOMguest(nxgep)) { 146 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 147 cmn_err(CE_NOTE, "hw_start_common"); 148 return (NXGE_ERROR); 149 } 150 } 151 152 /* 153 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 154 * We only have 8 hardware RDC tables, but we may have 155 * up to 16 logical (software-defined) groups of RDCS, 156 * if we make use of layer 3 & 4 hardware classification. 
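	 * Each logical group i maps to hardware RDC table
	 * (def_mac_rxdma_grpid + i); that table's rdc_grps[].map bitmap
	 * selects which RDC channels belong to the group (for example,
	 * with def_mac_rxdma_grpid == 2, logical group 3 uses table 5).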
157 */ 158 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 159 if ((1 << i) & set->lg.map) { 160 group = set->group[i]; 161 dev_gindex = 162 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 163 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 164 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 165 if ((1 << channel) & map) { 166 if ((nxge_grp_dc_add(nxgep, 167 group, VP_BOUND_RX, channel))) 168 goto init_rxdma_channels_exit; 169 } 170 } 171 } 172 if (++count == set->lg.count) 173 break; 174 } 175 176 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 177 return (NXGE_OK); 178 179 init_rxdma_channels_exit: 180 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 181 if ((1 << i) & set->lg.map) { 182 group = set->group[i]; 183 dev_gindex = 184 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 185 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 186 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 187 if ((1 << channel) & map) { 188 nxge_grp_dc_remove(nxgep, 189 VP_BOUND_RX, channel); 190 } 191 } 192 } 193 if (++count == set->lg.count) 194 break; 195 } 196 197 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 198 return (NXGE_ERROR); 199 } 200 201 nxge_status_t 202 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 203 { 204 nxge_status_t status; 205 206 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 207 208 status = nxge_map_rxdma(nxge, channel); 209 if (status != NXGE_OK) { 210 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 211 "<== nxge_init_rxdma: status 0x%x", status)); 212 return (status); 213 } 214 215 #if defined(sun4v) 216 if (isLDOMguest(nxge)) { 217 /* set rcr_ring */ 218 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 219 220 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 221 if (status != NXGE_OK) { 222 nxge_unmap_rxdma(nxge, channel); 223 return (status); 224 } 225 } 226 #endif 227 228 status = nxge_rxdma_hw_start(nxge, channel); 229 if (status != NXGE_OK) { 230 nxge_unmap_rxdma(nxge, channel); 231 } 232 233 if (!nxge->statsp->rdc_ksp[channel]) 234 nxge_setup_rdc_kstats(nxge, channel); 235 236 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 237 "<== nxge_init_rxdma_channel: status 0x%x", status)); 238 239 return (status); 240 } 241 242 void 243 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 244 { 245 nxge_grp_set_t *set = &nxgep->rx_set; 246 int rdc; 247 248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 249 250 if (set->owned.map == 0) { 251 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 252 "nxge_uninit_rxdma_channels: no channels")); 253 return; 254 } 255 256 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 257 if ((1 << rdc) & set->owned.map) { 258 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 259 } 260 } 261 262 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 263 } 264 265 void 266 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 267 { 268 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 269 270 if (nxgep->statsp->rdc_ksp[channel]) { 271 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 272 nxgep->statsp->rdc_ksp[channel] = 0; 273 } 274 275 nxge_rxdma_hw_stop(nxgep, channel); 276 nxge_unmap_rxdma(nxgep, channel); 277 278 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 279 } 280 281 nxge_status_t 282 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 283 { 284 npi_handle_t handle; 285 npi_status_t rs = NPI_SUCCESS; 286 nxge_status_t status = NXGE_OK; 287 288 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 
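	/*
	 * Reset this RDC through the NPI layer; any NPI error code is
	 * folded into the returned NXGE status word below.
	 */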
289 290 handle = NXGE_DEV_NPI_HANDLE(nxgep); 291 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 292 293 if (rs != NPI_SUCCESS) { 294 status = NXGE_ERROR | rs; 295 } 296 297 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 298 299 return (status); 300 } 301 302 void 303 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 304 { 305 nxge_grp_set_t *set = &nxgep->rx_set; 306 int rdc; 307 308 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 309 310 if (!isLDOMguest(nxgep)) { 311 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 312 (void) npi_rxdma_dump_fzc_regs(handle); 313 } 314 315 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 316 NXGE_DEBUG_MSG((nxgep, TX_CTL, 317 "nxge_rxdma_regs_dump_channels: " 318 "NULL ring pointer(s)")); 319 return; 320 } 321 322 if (set->owned.map == 0) { 323 NXGE_DEBUG_MSG((nxgep, RX_CTL, 324 "nxge_rxdma_regs_dump_channels: no channels")); 325 return; 326 } 327 328 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 329 if ((1 << rdc) & set->owned.map) { 330 rx_rbr_ring_t *ring = 331 nxgep->rx_rbr_rings->rbr_rings[rdc]; 332 if (ring) { 333 (void) nxge_dump_rxdma_channel(nxgep, rdc); 334 } 335 } 336 } 337 338 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 339 } 340 341 nxge_status_t 342 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 343 { 344 npi_handle_t handle; 345 npi_status_t rs = NPI_SUCCESS; 346 nxge_status_t status = NXGE_OK; 347 348 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 349 350 handle = NXGE_DEV_NPI_HANDLE(nxgep); 351 rs = npi_rxdma_dump_rdc_regs(handle, channel); 352 353 if (rs != NPI_SUCCESS) { 354 status = NXGE_ERROR | rs; 355 } 356 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 357 return (status); 358 } 359 360 nxge_status_t 361 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 362 p_rx_dma_ent_msk_t mask_p) 363 { 364 npi_handle_t handle; 365 npi_status_t rs = NPI_SUCCESS; 366 nxge_status_t status = NXGE_OK; 367 368 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 369 "<== nxge_init_rxdma_channel_event_mask")); 370 371 handle = NXGE_DEV_NPI_HANDLE(nxgep); 372 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 373 if (rs != NPI_SUCCESS) { 374 status = NXGE_ERROR | rs; 375 } 376 377 return (status); 378 } 379 380 nxge_status_t 381 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 382 p_rx_dma_ctl_stat_t cs_p) 383 { 384 npi_handle_t handle; 385 npi_status_t rs = NPI_SUCCESS; 386 nxge_status_t status = NXGE_OK; 387 388 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 389 "<== nxge_init_rxdma_channel_cntl_stat")); 390 391 handle = NXGE_DEV_NPI_HANDLE(nxgep); 392 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 393 394 if (rs != NPI_SUCCESS) { 395 status = NXGE_ERROR | rs; 396 } 397 398 return (status); 399 } 400 401 /* 402 * nxge_rxdma_cfg_rdcgrp_default_rdc 403 * 404 * Set the default RDC for an RDC Group (Table) 405 * 406 * Arguments: 407 * nxgep 408 * rdcgrp The group to modify 409 * rdc The new default RDC. 
410 * 411 * Notes: 412 * 413 * NPI/NXGE function calls: 414 * npi_rxdma_cfg_rdc_table_default_rdc() 415 * 416 * Registers accessed: 417 * RDC_TBL_REG: FZC_ZCP + 0x10000 418 * 419 * Context: 420 * Service domain 421 */ 422 nxge_status_t 423 nxge_rxdma_cfg_rdcgrp_default_rdc( 424 p_nxge_t nxgep, 425 uint8_t rdcgrp, 426 uint8_t rdc) 427 { 428 npi_handle_t handle; 429 npi_status_t rs = NPI_SUCCESS; 430 p_nxge_dma_pt_cfg_t p_dma_cfgp; 431 p_nxge_rdc_grp_t rdc_grp_p; 432 uint8_t actual_rdcgrp, actual_rdc; 433 434 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 435 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 436 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 437 438 handle = NXGE_DEV_NPI_HANDLE(nxgep); 439 440 /* 441 * This has to be rewritten. Do we even allow this anymore? 442 */ 443 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 444 RDC_MAP_IN(rdc_grp_p->map, rdc); 445 rdc_grp_p->def_rdc = rdc; 446 447 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 448 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 449 450 rs = npi_rxdma_cfg_rdc_table_default_rdc( 451 handle, actual_rdcgrp, actual_rdc); 452 453 if (rs != NPI_SUCCESS) { 454 return (NXGE_ERROR | rs); 455 } 456 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 457 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 458 return (NXGE_OK); 459 } 460 461 nxge_status_t 462 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 463 { 464 npi_handle_t handle; 465 466 uint8_t actual_rdc; 467 npi_status_t rs = NPI_SUCCESS; 468 469 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 470 " ==> nxge_rxdma_cfg_port_default_rdc")); 471 472 handle = NXGE_DEV_NPI_HANDLE(nxgep); 473 actual_rdc = rdc; /* XXX Hack! */ 474 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 475 476 477 if (rs != NPI_SUCCESS) { 478 return (NXGE_ERROR | rs); 479 } 480 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 481 " <== nxge_rxdma_cfg_port_default_rdc")); 482 483 return (NXGE_OK); 484 } 485 486 nxge_status_t 487 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 488 uint16_t pkts) 489 { 490 npi_status_t rs = NPI_SUCCESS; 491 npi_handle_t handle; 492 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 493 " ==> nxge_rxdma_cfg_rcr_threshold")); 494 handle = NXGE_DEV_NPI_HANDLE(nxgep); 495 496 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 497 498 if (rs != NPI_SUCCESS) { 499 return (NXGE_ERROR | rs); 500 } 501 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 502 return (NXGE_OK); 503 } 504 505 nxge_status_t 506 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 507 uint16_t tout, uint8_t enable) 508 { 509 npi_status_t rs = NPI_SUCCESS; 510 npi_handle_t handle; 511 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 512 handle = NXGE_DEV_NPI_HANDLE(nxgep); 513 if (enable == 0) { 514 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 515 } else { 516 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 517 tout); 518 } 519 520 if (rs != NPI_SUCCESS) { 521 return (NXGE_ERROR | rs); 522 } 523 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 524 return (NXGE_OK); 525 } 526 527 nxge_status_t 528 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 529 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 530 { 531 npi_handle_t handle; 532 rdc_desc_cfg_t rdc_desc; 533 p_rcrcfig_b_t cfgb_p; 534 npi_status_t rs = NPI_SUCCESS; 535 536 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 537 handle = NXGE_DEV_NPI_HANDLE(nxgep); 538 /* 539 * Use configuration data composed at init time. 
540 * Write to hardware the receive ring configurations. 541 */ 542 rdc_desc.mbox_enable = 1; 543 rdc_desc.mbox_addr = mbox_p->mbox_addr; 544 NXGE_DEBUG_MSG((nxgep, RX_CTL, 545 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 546 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 547 548 rdc_desc.rbr_len = rbr_p->rbb_max; 549 rdc_desc.rbr_addr = rbr_p->rbr_addr; 550 551 switch (nxgep->rx_bksize_code) { 552 case RBR_BKSIZE_4K: 553 rdc_desc.page_size = SIZE_4KB; 554 break; 555 case RBR_BKSIZE_8K: 556 rdc_desc.page_size = SIZE_8KB; 557 break; 558 case RBR_BKSIZE_16K: 559 rdc_desc.page_size = SIZE_16KB; 560 break; 561 case RBR_BKSIZE_32K: 562 rdc_desc.page_size = SIZE_32KB; 563 break; 564 } 565 566 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 567 rdc_desc.valid0 = 1; 568 569 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 570 rdc_desc.valid1 = 1; 571 572 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 573 rdc_desc.valid2 = 1; 574 575 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 576 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 577 578 rdc_desc.rcr_len = rcr_p->comp_size; 579 rdc_desc.rcr_addr = rcr_p->rcr_addr; 580 581 cfgb_p = &(rcr_p->rcr_cfgb); 582 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 583 /* For now, disable this timeout in a guest domain. */ 584 if (isLDOMguest(nxgep)) { 585 rdc_desc.rcr_timeout = 0; 586 rdc_desc.rcr_timeout_enable = 0; 587 } else { 588 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 589 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 590 } 591 592 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 593 "rbr_len qlen %d pagesize code %d rcr_len %d", 594 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 595 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 596 "size 0 %d size 1 %d size 2 %d", 597 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 598 rbr_p->npi_pkt_buf_size2)); 599 600 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 601 if (rs != NPI_SUCCESS) { 602 return (NXGE_ERROR | rs); 603 } 604 605 /* 606 * Enable the timeout and threshold. 607 */ 608 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 609 rdc_desc.rcr_threshold); 610 if (rs != NPI_SUCCESS) { 611 return (NXGE_ERROR | rs); 612 } 613 614 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 615 rdc_desc.rcr_timeout); 616 if (rs != NPI_SUCCESS) { 617 return (NXGE_ERROR | rs); 618 } 619 620 if (!isLDOMguest(nxgep)) { 621 /* Enable the DMA */ 622 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 623 if (rs != NPI_SUCCESS) { 624 return (NXGE_ERROR | rs); 625 } 626 } 627 628 /* Kick the DMA engine. 
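	 * Post the initial count of RBR descriptors (rbb_max) so the
	 * DMA engine starts with a fully stocked buffer ring.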
*/ 629 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 630 631 if (!isLDOMguest(nxgep)) { 632 /* Clear the rbr empty bit */ 633 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 634 } 635 636 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 637 638 return (NXGE_OK); 639 } 640 641 nxge_status_t 642 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 643 { 644 npi_handle_t handle; 645 npi_status_t rs = NPI_SUCCESS; 646 647 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 648 handle = NXGE_DEV_NPI_HANDLE(nxgep); 649 650 /* disable the DMA */ 651 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 652 if (rs != NPI_SUCCESS) { 653 NXGE_DEBUG_MSG((nxgep, RX_CTL, 654 "<== nxge_disable_rxdma_channel:failed (0x%x)", 655 rs)); 656 return (NXGE_ERROR | rs); 657 } 658 659 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 660 return (NXGE_OK); 661 } 662 663 nxge_status_t 664 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 665 { 666 npi_handle_t handle; 667 nxge_status_t status = NXGE_OK; 668 669 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 670 "<== nxge_init_rxdma_channel_rcrflush")); 671 672 handle = NXGE_DEV_NPI_HANDLE(nxgep); 673 npi_rxdma_rdc_rcr_flush(handle, channel); 674 675 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 676 "<== nxge_init_rxdma_channel_rcrflsh")); 677 return (status); 678 679 } 680 681 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 682 683 #define TO_LEFT -1 684 #define TO_RIGHT 1 685 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 686 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 687 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 688 #define NO_HINT 0xffffffff 689 690 /*ARGSUSED*/ 691 nxge_status_t 692 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 693 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 694 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 695 { 696 int bufsize; 697 uint64_t pktbuf_pp; 698 uint64_t dvma_addr; 699 rxring_info_t *ring_info; 700 int base_side, end_side; 701 int r_index, l_index, anchor_index; 702 int found, search_done; 703 uint32_t offset, chunk_size, block_size, page_size_mask; 704 uint32_t chunk_index, block_index, total_index; 705 int max_iterations, iteration; 706 rxbuf_index_info_t *bufinfo; 707 708 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 709 710 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 711 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 712 pkt_buf_addr_pp, 713 pktbufsz_type)); 714 #if defined(__i386) 715 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 716 #else 717 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 718 #endif 719 720 switch (pktbufsz_type) { 721 case 0: 722 bufsize = rbr_p->pkt_buf_size0; 723 break; 724 case 1: 725 bufsize = rbr_p->pkt_buf_size1; 726 break; 727 case 2: 728 bufsize = rbr_p->pkt_buf_size2; 729 break; 730 case RCR_SINGLE_BLOCK: 731 bufsize = 0; 732 anchor_index = 0; 733 break; 734 default: 735 return (NXGE_ERROR); 736 } 737 738 if (rbr_p->num_blocks == 1) { 739 anchor_index = 0; 740 ring_info = rbr_p->ring_info; 741 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 742 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 743 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 744 "buf_pp $%p btype %d anchor_index %d " 745 "bufinfo $%p", 746 pkt_buf_addr_pp, 747 pktbufsz_type, 748 anchor_index, 749 bufinfo)); 750 751 goto found_index; 752 } 753 754 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 755 "==> nxge_rxbuf_pp_to_vp: " 756 "buf_pp $%p btype %d anchor_index %d", 757 pkt_buf_addr_pp, 758 pktbufsz_type, 759 anchor_index)); 760 761 ring_info = rbr_p->ring_info; 762 found = B_FALSE; 
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would be a single block being used for the
	 * same buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, then reset the hint for this size.
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * The search uses a binary search algorithm.
		 * It assumes that the information is
		 * already sorted in increasing order:
		 * info[0] < info[1] < info[2] ....
< info[n-1] 813 * where n is the size of the information array 814 */ 815 r_index = rbr_p->num_blocks - 1; 816 l_index = 0; 817 search_done = B_FALSE; 818 anchor_index = MID_INDEX(r_index, l_index); 819 while (search_done == B_FALSE) { 820 if ((r_index == l_index) || 821 (iteration >= max_iterations)) 822 search_done = B_TRUE; 823 end_side = TO_RIGHT; /* to the right */ 824 base_side = TO_LEFT; /* to the left */ 825 /* read the DVMA address information and sort it */ 826 dvma_addr = bufinfo[anchor_index].dvma_addr; 827 chunk_size = bufinfo[anchor_index].buf_size; 828 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 829 "==> nxge_rxbuf_pp_to_vp: (searching)" 830 "buf_pp $%p btype %d " 831 "anchor_index %d chunk_size %d dvmaaddr $%p", 832 pkt_buf_addr_pp, 833 pktbufsz_type, 834 anchor_index, 835 chunk_size, 836 dvma_addr)); 837 838 if (pktbuf_pp >= dvma_addr) 839 base_side = TO_RIGHT; /* to the right */ 840 if (pktbuf_pp < (dvma_addr + chunk_size)) 841 end_side = TO_LEFT; /* to the left */ 842 843 switch (base_side + end_side) { 844 case IN_MIDDLE: 845 /* found */ 846 found = B_TRUE; 847 search_done = B_TRUE; 848 if ((pktbuf_pp + bufsize) < 849 (dvma_addr + chunk_size)) 850 ring_info->hint[pktbufsz_type] = 851 bufinfo[anchor_index].buf_index; 852 break; 853 case BOTH_RIGHT: 854 /* not found: go to the right */ 855 l_index = anchor_index + 1; 856 anchor_index = MID_INDEX(r_index, l_index); 857 break; 858 859 case BOTH_LEFT: 860 /* not found: go to the left */ 861 r_index = anchor_index - 1; 862 anchor_index = MID_INDEX(r_index, l_index); 863 break; 864 default: /* should not come here */ 865 return (NXGE_ERROR); 866 } 867 iteration++; 868 } 869 870 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 871 "==> nxge_rxbuf_pp_to_vp: (search done)" 872 "buf_pp $%p btype %d anchor_index %d", 873 pkt_buf_addr_pp, 874 pktbufsz_type, 875 anchor_index)); 876 } 877 878 if (found == B_FALSE) { 879 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 880 "==> nxge_rxbuf_pp_to_vp: (search failed)" 881 "buf_pp $%p btype %d anchor_index %d", 882 pkt_buf_addr_pp, 883 pktbufsz_type, 884 anchor_index)); 885 return (NXGE_ERROR); 886 } 887 888 found_index: 889 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 890 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 891 "buf_pp $%p btype %d bufsize %d anchor_index %d", 892 pkt_buf_addr_pp, 893 pktbufsz_type, 894 bufsize, 895 anchor_index)); 896 897 /* index of the first block in this chunk */ 898 chunk_index = bufinfo[anchor_index].start_index; 899 dvma_addr = bufinfo[anchor_index].dvma_addr; 900 page_size_mask = ring_info->block_size_mask; 901 902 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 903 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 904 "buf_pp $%p btype %d bufsize %d " 905 "anchor_index %d chunk_index %d dvma $%p", 906 pkt_buf_addr_pp, 907 pktbufsz_type, 908 bufsize, 909 anchor_index, 910 chunk_index, 911 dvma_addr)); 912 913 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 914 block_size = rbr_p->block_size; /* System block(page) size */ 915 916 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 917 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 918 "buf_pp $%p btype %d bufsize %d " 919 "anchor_index %d chunk_index %d dvma $%p " 920 "offset %d block_size %d", 921 pkt_buf_addr_pp, 922 pktbufsz_type, 923 bufsize, 924 anchor_index, 925 chunk_index, 926 dvma_addr, 927 offset, 928 block_size)); 929 930 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 931 932 block_index = (offset / block_size); /* index within chunk */ 933 total_index = chunk_index + block_index; 934 935 936 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 937 "==> nxge_rxbuf_pp_to_vp: " 938 
"total_index %d dvma_addr $%p " 939 "offset %d block_size %d " 940 "block_index %d ", 941 total_index, dvma_addr, 942 offset, block_size, 943 block_index)); 944 #if defined(__i386) 945 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 946 (uint32_t)offset); 947 #else 948 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 949 (uint64_t)offset); 950 #endif 951 952 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 953 "==> nxge_rxbuf_pp_to_vp: " 954 "total_index %d dvma_addr $%p " 955 "offset %d block_size %d " 956 "block_index %d " 957 "*pkt_buf_addr_p $%p", 958 total_index, dvma_addr, 959 offset, block_size, 960 block_index, 961 *pkt_buf_addr_p)); 962 963 964 *msg_index = total_index; 965 *bufoffset = (offset & page_size_mask); 966 967 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 968 "==> nxge_rxbuf_pp_to_vp: get msg index: " 969 "msg_index %d bufoffset_index %d", 970 *msg_index, 971 *bufoffset)); 972 973 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 974 975 return (NXGE_OK); 976 } 977 978 /* 979 * used by quick sort (qsort) function 980 * to perform comparison 981 */ 982 static int 983 nxge_sort_compare(const void *p1, const void *p2) 984 { 985 986 rxbuf_index_info_t *a, *b; 987 988 a = (rxbuf_index_info_t *)p1; 989 b = (rxbuf_index_info_t *)p2; 990 991 if (a->dvma_addr > b->dvma_addr) 992 return (1); 993 if (a->dvma_addr < b->dvma_addr) 994 return (-1); 995 return (0); 996 } 997 998 999 1000 /* 1001 * grabbed this sort implementation from common/syscall/avl.c 1002 * 1003 */ 1004 /* 1005 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 1006 * v = Ptr to array/vector of objs 1007 * n = # objs in the array 1008 * s = size of each obj (must be multiples of a word size) 1009 * f = ptr to function to compare two objs 1010 * returns (-1 = less than, 0 = equal, 1 = greater than 1011 */ 1012 void 1013 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1014 { 1015 int g, i, j, ii; 1016 unsigned int *p1, *p2; 1017 unsigned int tmp; 1018 1019 /* No work to do */ 1020 if (v == NULL || n <= 1) 1021 return; 1022 /* Sanity check on arguments */ 1023 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1024 ASSERT(s > 0); 1025 1026 for (g = n / 2; g > 0; g /= 2) { 1027 for (i = g; i < n; i++) { 1028 for (j = i - g; j >= 0 && 1029 (*f)(v + j * s, v + (j + g) * s) == 1; 1030 j -= g) { 1031 p1 = (unsigned *)(v + j * s); 1032 p2 = (unsigned *)(v + (j + g) * s); 1033 for (ii = 0; ii < s / 4; ii++) { 1034 tmp = *p1; 1035 *p1++ = *p2; 1036 *p2++ = tmp; 1037 } 1038 } 1039 } 1040 } 1041 } 1042 1043 /* 1044 * Initialize data structures required for rxdma 1045 * buffer dvma->vmem address lookup 1046 */ 1047 /*ARGSUSED*/ 1048 static nxge_status_t 1049 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1050 { 1051 1052 int index; 1053 rxring_info_t *ring_info; 1054 int max_iteration = 0, max_index = 0; 1055 1056 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1057 1058 ring_info = rbrp->ring_info; 1059 ring_info->hint[0] = NO_HINT; 1060 ring_info->hint[1] = NO_HINT; 1061 ring_info->hint[2] = NO_HINT; 1062 max_index = rbrp->num_blocks; 1063 1064 /* read the DVMA address information and sort it */ 1065 /* do init of the information array */ 1066 1067 1068 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1069 " nxge_rxbuf_index_info_init Sort ptrs")); 1070 1071 /* sort the array */ 1072 nxge_ksort((void *)ring_info->buffer, max_index, 1073 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1074 1075 1076 1077 for (index = 0; index < max_index; index++) { 1078 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1079 " nxge_rxbuf_index_info_init: sorted chunk %d " 1080 " ioaddr $%p kaddr $%p size %x", 1081 index, ring_info->buffer[index].dvma_addr, 1082 ring_info->buffer[index].kaddr, 1083 ring_info->buffer[index].buf_size)); 1084 } 1085 1086 max_iteration = 0; 1087 while (max_index >= (1ULL << max_iteration)) 1088 max_iteration++; 1089 ring_info->max_iterations = max_iteration + 1; 1090 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1091 " nxge_rxbuf_index_info_init Find max iter %d", 1092 ring_info->max_iterations)); 1093 1094 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1095 return (NXGE_OK); 1096 } 1097 1098 /* ARGSUSED */ 1099 void 1100 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1101 { 1102 #ifdef NXGE_DEBUG 1103 1104 uint32_t bptr; 1105 uint64_t pp; 1106 1107 bptr = entry_p->bits.hdw.pkt_buf_addr; 1108 1109 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1110 "\trcr entry $%p " 1111 "\trcr entry 0x%0llx " 1112 "\trcr entry 0x%08x " 1113 "\trcr entry 0x%08x " 1114 "\tvalue 0x%0llx\n" 1115 "\tmulti = %d\n" 1116 "\tpkt_type = 0x%x\n" 1117 "\tzero_copy = %d\n" 1118 "\tnoport = %d\n" 1119 "\tpromis = %d\n" 1120 "\terror = 0x%04x\n" 1121 "\tdcf_err = 0x%01x\n" 1122 "\tl2_len = %d\n" 1123 "\tpktbufsize = %d\n" 1124 "\tpkt_buf_addr = $%p\n" 1125 "\tpkt_buf_addr (<< 6) = $%p\n", 1126 entry_p, 1127 *(int64_t *)entry_p, 1128 *(int32_t *)entry_p, 1129 *(int32_t *)((char *)entry_p + 32), 1130 entry_p->value, 1131 entry_p->bits.hdw.multi, 1132 entry_p->bits.hdw.pkt_type, 1133 entry_p->bits.hdw.zero_copy, 1134 entry_p->bits.hdw.noport, 1135 entry_p->bits.hdw.promis, 1136 entry_p->bits.hdw.error, 1137 entry_p->bits.hdw.dcf_err, 1138 entry_p->bits.hdw.l2_len, 1139 entry_p->bits.hdw.pktbufsz, 1140 bptr, 1141 entry_p->bits.ldw.pkt_buf_addr)); 1142 1143 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1144 RCR_PKT_BUF_ADDR_SHIFT; 1145 1146 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1147 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1148 #endif 1149 } 1150 1151 void 1152 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1153 { 1154 npi_handle_t handle; 1155 rbr_stat_t rbr_stat; 1156 addr44_t hd_addr; 1157 addr44_t tail_addr; 1158 uint16_t qlen; 1159 1160 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1161 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1162 1163 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1164 1165 /* RBR head */ 1166 hd_addr.addr = 0; 1167 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1168 #if defined(__i386) 1169 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1170 (void *)(uint32_t)hd_addr.addr); 1171 #else 1172 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1173 (void *)hd_addr.addr); 1174 #endif 1175 1176 /* RBR stats */ 1177 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1178 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1179 1180 /* RCR tail */ 1181 tail_addr.addr = 0; 1182 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1183 #if defined(__i386) 1184 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1185 (void *)(uint32_t)tail_addr.addr); 1186 #else 1187 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1188 (void *)tail_addr.addr); 1189 #endif 1190 1191 /* RCR qlen */ 1192 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1193 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1194 1195 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1196 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1197 } 1198 1199 nxge_status_t 1200 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1201 { 1202 nxge_grp_set_t 
*set = &nxgep->rx_set; 1203 nxge_status_t status; 1204 npi_status_t rs; 1205 int rdc; 1206 1207 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1208 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1209 1210 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1211 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1212 "<== nxge_rxdma_mode: not initialized")); 1213 return (NXGE_ERROR); 1214 } 1215 1216 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1217 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1218 "<== nxge_tx_port_fatal_err_recover: " 1219 "NULL ring pointer(s)")); 1220 return (NXGE_ERROR); 1221 } 1222 1223 if (set->owned.map == 0) { 1224 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1225 "nxge_rxdma_regs_dump_channels: no channels")); 1226 return (NULL); 1227 } 1228 1229 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1230 if ((1 << rdc) & set->owned.map) { 1231 rx_rbr_ring_t *ring = 1232 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1233 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1234 if (ring) { 1235 if (enable) { 1236 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1237 "==> nxge_rxdma_hw_mode: " 1238 "channel %d (enable)", rdc)); 1239 rs = npi_rxdma_cfg_rdc_enable 1240 (handle, rdc); 1241 } else { 1242 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1243 "==> nxge_rxdma_hw_mode: " 1244 "channel %d disable)", rdc)); 1245 rs = npi_rxdma_cfg_rdc_disable 1246 (handle, rdc); 1247 } 1248 } 1249 } 1250 } 1251 1252 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1253 1254 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1255 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1256 1257 return (status); 1258 } 1259 1260 void 1261 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1262 { 1263 npi_handle_t handle; 1264 1265 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1266 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1267 1268 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1269 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1270 1271 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1272 } 1273 1274 void 1275 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1276 { 1277 npi_handle_t handle; 1278 1279 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1280 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1281 1282 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1283 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1284 1285 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1286 } 1287 1288 void 1289 nxge_hw_start_rx(p_nxge_t nxgep) 1290 { 1291 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1292 1293 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1294 (void) nxge_rx_mac_enable(nxgep); 1295 1296 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1297 } 1298 1299 /*ARGSUSED*/ 1300 void 1301 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1302 { 1303 nxge_grp_set_t *set = &nxgep->rx_set; 1304 int rdc; 1305 1306 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1307 1308 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1309 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1310 "<== nxge_tx_port_fatal_err_recover: " 1311 "NULL ring pointer(s)")); 1312 return; 1313 } 1314 1315 if (set->owned.map == 0) { 1316 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1317 "nxge_rxdma_regs_dump_channels: no channels")); 1318 return; 1319 } 1320 1321 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1322 if ((1 << rdc) & set->owned.map) { 1323 rx_rbr_ring_t *ring = 1324 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1325 if (ring) { 1326 nxge_rxdma_hw_stop(nxgep, rdc); 1327 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1328 "==> nxge_fixup_rxdma_rings: " 1329 "channel %d ring $%px", 1330 
				    rdc, ring));
				(void) nxge_rxdma_fixup_channel
				    (nxgep, rdc, rdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}

void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	int		i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
	i = nxge_rxdma_get_ring_index(nxgep, channel);
	if (i < 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_fix_channel: no entry found"));
		return;
	}

	nxge_rxdma_fixup_channel(nxgep, channel, i);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
}

void
nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
{
	int			ndmas;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p;
	p_nxge_dma_pool_t	dma_buf_poolp;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	p_nxge_dma_common_t	dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

	(void) nxge_rxdma_stop_channel(nxgep, channel);

	dma_buf_poolp = nxgep->rx_buf_pool_p;
	dma_cntl_poolp = nxgep->rx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: buf not allocated"));
		return;
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: no dma allocated"));
		return;
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

	/* Reinitialize the receive block and completion rings */
	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	status = nxge_rxdma_start_channel(nxgep, channel,
	    rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto nxge_rxdma_fixup_channel_fail;
	}

nxge_rxdma_fixup_channel_fail:
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

/*
 * Convert an absolute RDC number to a Receive Buffer Ring index. That is,
 * map <channel> to an index into nxgep->rx_rbr_rings.
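 * For example, if the port owns RDCs 4-7 in that order, channel 6
 * maps to ring index 2.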
1436 * (device ring index -> port ring index) 1437 */ 1438 int 1439 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1440 { 1441 int i, ndmas; 1442 uint16_t rdc; 1443 p_rx_rbr_rings_t rx_rbr_rings; 1444 p_rx_rbr_ring_t *rbr_rings; 1445 1446 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1447 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1448 1449 rx_rbr_rings = nxgep->rx_rbr_rings; 1450 if (rx_rbr_rings == NULL) { 1451 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1452 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1453 return (-1); 1454 } 1455 ndmas = rx_rbr_rings->ndmas; 1456 if (!ndmas) { 1457 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1458 "<== nxge_rxdma_get_ring_index: no channel")); 1459 return (-1); 1460 } 1461 1462 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1463 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1464 1465 rbr_rings = rx_rbr_rings->rbr_rings; 1466 for (i = 0; i < ndmas; i++) { 1467 rdc = rbr_rings[i]->rdc; 1468 if (channel == rdc) { 1469 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1470 "==> nxge_rxdma_get_rbr_ring: channel %d " 1471 "(index %d) ring %d", channel, i, rbr_rings[i])); 1472 return (i); 1473 } 1474 } 1475 1476 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1477 "<== nxge_rxdma_get_rbr_ring_index: not found")); 1478 1479 return (-1); 1480 } 1481 1482 p_rx_rbr_ring_t 1483 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1484 { 1485 nxge_grp_set_t *set = &nxgep->rx_set; 1486 nxge_channel_t rdc; 1487 1488 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1489 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1490 1491 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1492 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1493 "<== nxge_rxdma_get_rbr_ring: " 1494 "NULL ring pointer(s)")); 1495 return (NULL); 1496 } 1497 1498 if (set->owned.map == 0) { 1499 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1500 "<== nxge_rxdma_get_rbr_ring: no channels")); 1501 return (NULL); 1502 } 1503 1504 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1505 if ((1 << rdc) & set->owned.map) { 1506 rx_rbr_ring_t *ring = 1507 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1508 if (ring) { 1509 if (channel == ring->rdc) { 1510 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1511 "==> nxge_rxdma_get_rbr_ring: " 1512 "channel %d ring $%p", rdc, ring)); 1513 return (ring); 1514 } 1515 } 1516 } 1517 } 1518 1519 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1520 "<== nxge_rxdma_get_rbr_ring: not found")); 1521 1522 return (NULL); 1523 } 1524 1525 p_rx_rcr_ring_t 1526 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1527 { 1528 nxge_grp_set_t *set = &nxgep->rx_set; 1529 nxge_channel_t rdc; 1530 1531 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1532 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1533 1534 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1535 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1536 "<== nxge_rxdma_get_rcr_ring: " 1537 "NULL ring pointer(s)")); 1538 return (NULL); 1539 } 1540 1541 if (set->owned.map == 0) { 1542 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1543 "<== nxge_rxdma_get_rbr_ring: no channels")); 1544 return (NULL); 1545 } 1546 1547 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1548 if ((1 << rdc) & set->owned.map) { 1549 rx_rcr_ring_t *ring = 1550 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1551 if (ring) { 1552 if (channel == ring->rdc) { 1553 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1554 "==> nxge_rxdma_get_rcr_ring: " 1555 "channel %d ring $%p", rdc, ring)); 1556 return (ring); 1557 } 1558 } 1559 } 1560 } 1561 1562 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1563 "<== nxge_rxdma_get_rcr_ring: not found")); 1564 1565 return (NULL); 1566 } 1567 1568 /* 1569 * Static functions start here. 
1570 */ 1571 static p_rx_msg_t 1572 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1573 { 1574 p_rx_msg_t nxge_mp = NULL; 1575 p_nxge_dma_common_t dmamsg_p; 1576 uchar_t *buffer; 1577 1578 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1579 if (nxge_mp == NULL) { 1580 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1581 "Allocation of a rx msg failed.")); 1582 goto nxge_allocb_exit; 1583 } 1584 1585 nxge_mp->use_buf_pool = B_FALSE; 1586 if (dmabuf_p) { 1587 nxge_mp->use_buf_pool = B_TRUE; 1588 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1589 *dmamsg_p = *dmabuf_p; 1590 dmamsg_p->nblocks = 1; 1591 dmamsg_p->block_size = size; 1592 dmamsg_p->alength = size; 1593 buffer = (uchar_t *)dmabuf_p->kaddrp; 1594 1595 dmabuf_p->kaddrp = (void *) 1596 ((char *)dmabuf_p->kaddrp + size); 1597 dmabuf_p->ioaddr_pp = (void *) 1598 ((char *)dmabuf_p->ioaddr_pp + size); 1599 dmabuf_p->alength -= size; 1600 dmabuf_p->offset += size; 1601 dmabuf_p->dma_cookie.dmac_laddress += size; 1602 dmabuf_p->dma_cookie.dmac_size -= size; 1603 1604 } else { 1605 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1606 if (buffer == NULL) { 1607 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1608 "Allocation of a receive page failed.")); 1609 goto nxge_allocb_fail1; 1610 } 1611 } 1612 1613 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1614 if (nxge_mp->rx_mblk_p == NULL) { 1615 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1616 goto nxge_allocb_fail2; 1617 } 1618 1619 nxge_mp->buffer = buffer; 1620 nxge_mp->block_size = size; 1621 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1622 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1623 nxge_mp->ref_cnt = 1; 1624 nxge_mp->free = B_TRUE; 1625 nxge_mp->rx_use_bcopy = B_FALSE; 1626 1627 atomic_inc_32(&nxge_mblks_pending); 1628 1629 goto nxge_allocb_exit; 1630 1631 nxge_allocb_fail2: 1632 if (!nxge_mp->use_buf_pool) { 1633 KMEM_FREE(buffer, size); 1634 } 1635 1636 nxge_allocb_fail1: 1637 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1638 nxge_mp = NULL; 1639 1640 nxge_allocb_exit: 1641 return (nxge_mp); 1642 } 1643 1644 p_mblk_t 1645 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1646 { 1647 p_mblk_t mp; 1648 1649 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1650 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1651 "offset = 0x%08X " 1652 "size = 0x%08X", 1653 nxge_mp, offset, size)); 1654 1655 mp = desballoc(&nxge_mp->buffer[offset], size, 1656 0, &nxge_mp->freeb); 1657 if (mp == NULL) { 1658 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1659 goto nxge_dupb_exit; 1660 } 1661 atomic_inc_32(&nxge_mp->ref_cnt); 1662 1663 1664 nxge_dupb_exit: 1665 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1666 nxge_mp)); 1667 return (mp); 1668 } 1669 1670 p_mblk_t 1671 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1672 { 1673 p_mblk_t mp; 1674 uchar_t *dp; 1675 1676 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1677 if (mp == NULL) { 1678 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1679 goto nxge_dupb_bcopy_exit; 1680 } 1681 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1682 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1683 mp->b_wptr = dp + size; 1684 1685 nxge_dupb_bcopy_exit: 1686 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1687 nxge_mp)); 1688 return (mp); 1689 } 1690 1691 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1692 p_rx_msg_t rx_msg_p); 1693 1694 void 1695 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1696 { 1697 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, "==> nxge_post_page")); 1698 1699 /* Reuse this buffer */ 1700 rx_msg_p->free = B_FALSE; 1701 rx_msg_p->cur_usage_cnt = 0; 1702 rx_msg_p->max_usage_cnt = 0; 1703 rx_msg_p->pkt_buf_size = 0; 1704 1705 if (rx_rbr_p->rbr_use_bcopy) { 1706 rx_msg_p->rx_use_bcopy = B_FALSE; 1707 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1708 } 1709 1710 /* 1711 * Get the rbr header pointer and its offset index. 1712 */ 1713 MUTEX_ENTER(&rx_rbr_p->post_lock); 1714 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1715 rx_rbr_p->rbr_wrap_mask); 1716 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1717 MUTEX_EXIT(&rx_rbr_p->post_lock); 1718 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1719 rx_rbr_p->rdc, 1); 1720 1721 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1722 "<== nxge_post_page (channel %d post_next_index %d)", 1723 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1724 1725 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1726 } 1727 1728 void 1729 nxge_freeb(p_rx_msg_t rx_msg_p) 1730 { 1731 size_t size; 1732 uchar_t *buffer = NULL; 1733 int ref_cnt; 1734 boolean_t free_state = B_FALSE; 1735 1736 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1737 1738 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1739 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1740 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1741 rx_msg_p, nxge_mblks_pending)); 1742 1743 /* 1744 * First we need to get the free state, then 1745 * atomic decrement the reference count to prevent 1746 * the race condition with the interrupt thread that 1747 * is processing a loaned up buffer block. 1748 */ 1749 free_state = rx_msg_p->free; 1750 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1751 if (!ref_cnt) { 1752 atomic_dec_32(&nxge_mblks_pending); 1753 buffer = rx_msg_p->buffer; 1754 size = rx_msg_p->block_size; 1755 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1756 "will free: rx_msg_p = $%p (block pending %d)", 1757 rx_msg_p, nxge_mblks_pending)); 1758 1759 if (!rx_msg_p->use_buf_pool) { 1760 KMEM_FREE(buffer, size); 1761 } 1762 1763 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1764 1765 if (ring) { 1766 /* 1767 * Decrement the receive buffer ring's reference 1768 * count, too. 1769 */ 1770 atomic_dec_32(&ring->rbr_ref_cnt); 1771 1772 /* 1773 * Free the receive buffer ring, if 1774 * 1. all the receive buffers have been freed 1775 * 2. and we are in the proper state (that is, 1776 * we are not UNMAPPING). 1777 */ 1778 if (ring->rbr_ref_cnt == 0 && 1779 ring->rbr_state == RBR_UNMAPPED) { 1780 /* 1781 * Free receive data buffers, 1782 * buffer index information 1783 * (rxring_info) and 1784 * the message block ring. 1785 */ 1786 NXGE_DEBUG_MSG((NULL, RX_CTL, 1787 "nxge_freeb:rx_msg_p = $%p " 1788 "(block pending %d) free buffers", 1789 rx_msg_p, nxge_mblks_pending)); 1790 nxge_rxdma_databuf_free(ring); 1791 if (ring->ring_info) { 1792 KMEM_FREE(ring->ring_info, 1793 sizeof (rxring_info_t)); 1794 } 1795 1796 if (ring->rx_msg_ring) { 1797 KMEM_FREE(ring->rx_msg_ring, 1798 ring->tnblocks * 1799 sizeof (p_rx_msg_t)); 1800 } 1801 KMEM_FREE(ring, sizeof (*ring)); 1802 } 1803 } 1804 return; 1805 } 1806 1807 /* 1808 * Repost buffer. 
1809 */ 1810 if (free_state && (ref_cnt == 1) && ring) { 1811 NXGE_DEBUG_MSG((NULL, RX_CTL, 1812 "nxge_freeb: post page $%p:", rx_msg_p)); 1813 if (ring->rbr_state == RBR_POSTING) 1814 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1815 } 1816 1817 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1818 } 1819 1820 uint_t 1821 nxge_rx_intr(void *arg1, void *arg2) 1822 { 1823 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1824 p_nxge_t nxgep = (p_nxge_t)arg2; 1825 p_nxge_ldg_t ldgp; 1826 uint8_t channel; 1827 npi_handle_t handle; 1828 rx_dma_ctl_stat_t cs; 1829 p_rx_rcr_ring_t rcr_ring; 1830 mblk_t *mp = NULL; 1831 1832 if (ldvp == NULL) { 1833 NXGE_DEBUG_MSG((NULL, INT_CTL, 1834 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1835 nxgep, ldvp)); 1836 return (DDI_INTR_CLAIMED); 1837 } 1838 1839 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1840 nxgep = ldvp->nxgep; 1841 } 1842 1843 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1844 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1845 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1846 "<== nxge_rx_intr: interface not started or intialized")); 1847 return (DDI_INTR_CLAIMED); 1848 } 1849 1850 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1851 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1852 nxgep, ldvp)); 1853 1854 /* 1855 * Get the PIO handle. 1856 */ 1857 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1858 1859 /* 1860 * Get the ring to enable us to process packets. 1861 */ 1862 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1863 1864 /* 1865 * The RCR ring lock must be held when packets 1866 * are being processed and the hardware registers are 1867 * being read or written to prevent race condition 1868 * among the interrupt thread, the polling thread 1869 * (will cause fatal errors such as rcrincon bit set) 1870 * and the setting of the poll_flag. 1871 */ 1872 MUTEX_ENTER(&rcr_ring->lock); 1873 1874 /* 1875 * Get the control and status for this channel. 1876 */ 1877 channel = ldvp->channel; 1878 ldgp = ldvp->ldgp; 1879 1880 if (!isLDOMguest(nxgep) && (!nxgep->rx_channel_started[channel])) { 1881 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1882 "<== nxge_rx_intr: channel is not started")); 1883 1884 /* 1885 * We received an interrupt before the ring is started. 1886 */ 1887 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, 1888 &cs.value); 1889 cs.value &= RX_DMA_CTL_STAT_WR1C; 1890 cs.bits.hdw.mex = 1; 1891 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1892 cs.value); 1893 1894 /* 1895 * Rearm this logical group if this is a single device 1896 * group. 1897 */ 1898 if (ldgp->nldvs == 1) { 1899 if (isLDOMguest(nxgep)) { 1900 nxge_hio_ldgimgn(nxgep, ldgp); 1901 } else { 1902 ldgimgm_t mgm; 1903 1904 mgm.value = 0; 1905 mgm.bits.ldw.arm = 1; 1906 mgm.bits.ldw.timer = ldgp->ldg_timer; 1907 1908 NXGE_REG_WR64(handle, 1909 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1910 mgm.value); 1911 } 1912 } 1913 MUTEX_EXIT(&rcr_ring->lock); 1914 return (DDI_INTR_CLAIMED); 1915 } 1916 1917 ASSERT(rcr_ring->ldgp == ldgp); 1918 ASSERT(rcr_ring->ldvp == ldvp); 1919 1920 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1921 1922 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1923 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1924 channel, 1925 cs.value, 1926 cs.bits.hdw.rcrto, 1927 cs.bits.hdw.rcrthres)); 1928 1929 if (rcr_ring->poll_flag == 0) { 1930 mp = nxge_rx_pkts(nxgep, rcr_ring, cs, -1); 1931 } 1932 1933 /* error events. 
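	 * Any error bits latched in the control/status word are
	 * handled before the write-1-to-clear bits are written back
	 * and cleared below.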
*/ 1934 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1935 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1936 } 1937 1938 /* 1939 * Enable the mailbox update interrupt if we want 1940 * to use mailbox. We probably don't need to use 1941 * mailbox as it only saves us one pio read. 1942 * Also write 1 to rcrthres and rcrto to clear 1943 * these two edge triggered bits. 1944 */ 1945 cs.value &= RX_DMA_CTL_STAT_WR1C; 1946 cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 1947 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1948 cs.value); 1949 1950 /* 1951 * If the polling mode is enabled, disable the interrupt. 1952 */ 1953 if (rcr_ring->poll_flag) { 1954 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1955 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1956 "(disabling interrupts)", channel, ldgp, ldvp)); 1957 /* 1958 * Disarm this logical group if this is a single device 1959 * group. 1960 */ 1961 if (ldgp->nldvs == 1) { 1962 ldgimgm_t mgm; 1963 mgm.value = 0; 1964 mgm.bits.ldw.arm = 0; 1965 NXGE_REG_WR64(handle, 1966 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 1967 } 1968 } else { 1969 /* 1970 * Rearm this logical group if this is a single device 1971 * group. 1972 */ 1973 if (ldgp->nldvs == 1) { 1974 if (isLDOMguest(nxgep)) { 1975 nxge_hio_ldgimgn(nxgep, ldgp); 1976 } else { 1977 ldgimgm_t mgm; 1978 1979 mgm.value = 0; 1980 mgm.bits.ldw.arm = 1; 1981 mgm.bits.ldw.timer = ldgp->ldg_timer; 1982 1983 NXGE_REG_WR64(handle, 1984 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1985 mgm.value); 1986 } 1987 } 1988 1989 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1990 "==> nxge_rx_intr: rdc %d ldgp $%p " 1991 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1992 } 1993 MUTEX_EXIT(&rcr_ring->lock); 1994 1995 if (mp != NULL) { 1996 if (!isLDOMguest(nxgep)) 1997 mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 1998 rcr_ring->rcr_gen_num); 1999 #if defined(sun4v) 2000 else { /* isLDOMguest(nxgep) */ 2001 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 2002 nxgep->nxge_hw_p->hio; 2003 nx_vio_fp_t *vio = &nhd->hio.vio; 2004 2005 if (vio->cb.vio_net_rx_cb) { 2006 (*vio->cb.vio_net_rx_cb) 2007 (nxgep->hio_vr->vhp, mp); 2008 } 2009 } 2010 #endif 2011 } 2012 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 2013 return (DDI_INTR_CLAIMED); 2014 } 2015 2016 /* 2017 * This routine is the main packet receive processing function. 2018 * It gets the packet type, error code, and buffer related 2019 * information from the receive completion entry. 2020 * How many completion entries to process is based on the number of packets 2021 * queued by the hardware, a hardware maintained tail pointer 2022 * and a configurable receive packet count. 2023 * 2024 * A chain of message blocks will be created as result of processing 2025 * the completion entries. This chain of message blocks will be returned and 2026 * a hardware control status register will be updated with the number of 2027 * packets were removed from the hardware queue. 2028 * 2029 * The RCR ring lock is held when entering this function. 
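 * A NULL return means there was nothing to pick up (empty RCR,
 * hardware not initialized, or the queue length could not be read).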
2030 */ 2031 static mblk_t * 2032 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 2033 int bytes_to_pickup) 2034 { 2035 npi_handle_t handle; 2036 uint8_t channel; 2037 uint32_t comp_rd_index; 2038 p_rcr_entry_t rcr_desc_rd_head_p; 2039 p_rcr_entry_t rcr_desc_rd_head_pp; 2040 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 2041 uint16_t qlen, nrcr_read, npkt_read; 2042 uint32_t qlen_hw; 2043 boolean_t multi; 2044 rcrcfig_b_t rcr_cfg_b; 2045 int totallen = 0; 2046 #if defined(_BIG_ENDIAN) 2047 npi_status_t rs = NPI_SUCCESS; 2048 #endif 2049 2050 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 2051 "channel %d", rcr_p->rdc)); 2052 2053 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 2054 return (NULL); 2055 } 2056 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2057 channel = rcr_p->rdc; 2058 2059 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2060 "==> nxge_rx_pkts: START: rcr channel %d " 2061 "head_p $%p head_pp $%p index %d ", 2062 channel, rcr_p->rcr_desc_rd_head_p, 2063 rcr_p->rcr_desc_rd_head_pp, 2064 rcr_p->comp_rd_index)); 2065 2066 2067 #if !defined(_BIG_ENDIAN) 2068 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2069 #else 2070 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2071 if (rs != NPI_SUCCESS) { 2072 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2073 "channel %d, get qlen failed 0x%08x", 2074 channel, rs)); 2075 return (NULL); 2076 } 2077 #endif 2078 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2079 "qlen %d", channel, qlen)); 2080 2081 2082 2083 if (!qlen) { 2084 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2085 "==> nxge_rx_pkts:rcr channel %d " 2086 "qlen %d (no pkts)", channel, qlen)); 2087 2088 return (NULL); 2089 } 2090 2091 comp_rd_index = rcr_p->comp_rd_index; 2092 2093 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2094 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2095 nrcr_read = npkt_read = 0; 2096 2097 /* 2098 * Number of packets queued 2099 * (The jumbo or multi packet will be counted as only one 2100 * packets and it may take up more than one completion entry). 2101 */ 2102 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2103 qlen : nxge_max_rx_pkts; 2104 head_mp = NULL; 2105 tail_mp = &head_mp; 2106 nmp = mp_cont = NULL; 2107 multi = B_FALSE; 2108 2109 while (qlen_hw) { 2110 2111 #ifdef NXGE_DEBUG 2112 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2113 #endif 2114 /* 2115 * Process one completion ring entry. 
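		 *
		 * nxge_receive_packet() returns new data in either *nmp
		 * (first or only buffer of a frame) or *mp_cont
		 * (continuation buffer), and sets multi while more buffers
		 * of the same frame are still to come.  The chaining code
		 * below handles the four resulting cases:
		 *
		 *	!multi, !mp_cont	whole frame in one buffer
		 *	 multi, !mp_cont	first buffer of a multi-buffer frame
		 *	 multi,  mp_cont	middle buffer of a multi-buffer frame
		 *	!multi,  mp_cont	last buffer of a multi-buffer frame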
2116 */ 2117 nxge_receive_packet(nxgep, 2118 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2119 2120 /* 2121 * message chaining modes 2122 */ 2123 if (nmp) { 2124 nmp->b_next = NULL; 2125 if (!multi && !mp_cont) { /* frame fits a partition */ 2126 *tail_mp = nmp; 2127 tail_mp = &nmp->b_next; 2128 totallen += MBLKL(nmp); 2129 nmp = NULL; 2130 } else if (multi && !mp_cont) { /* first segment */ 2131 *tail_mp = nmp; 2132 tail_mp = &nmp->b_cont; 2133 totallen += MBLKL(nmp); 2134 } else if (multi && mp_cont) { /* mid of multi segs */ 2135 *tail_mp = mp_cont; 2136 tail_mp = &mp_cont->b_cont; 2137 totallen += MBLKL(mp_cont); 2138 } else if (!multi && mp_cont) { /* last segment */ 2139 *tail_mp = mp_cont; 2140 tail_mp = &nmp->b_next; 2141 totallen += MBLKL(mp_cont); 2142 nmp = NULL; 2143 } 2144 } 2145 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2146 "==> nxge_rx_pkts: loop: rcr channel %d " 2147 "before updating: multi %d " 2148 "nrcr_read %d " 2149 "npk read %d " 2150 "head_pp $%p index %d ", 2151 channel, 2152 multi, 2153 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2154 comp_rd_index)); 2155 2156 if (!multi) { 2157 qlen_hw--; 2158 npkt_read++; 2159 } 2160 2161 /* 2162 * Update the next read entry. 2163 */ 2164 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2165 rcr_p->comp_wrap_mask); 2166 2167 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2168 rcr_p->rcr_desc_first_p, 2169 rcr_p->rcr_desc_last_p); 2170 2171 nrcr_read++; 2172 2173 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2174 "<== nxge_rx_pkts: (SAM, process one packet) " 2175 "nrcr_read %d", 2176 nrcr_read)); 2177 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2178 "==> nxge_rx_pkts: loop: rcr channel %d " 2179 "multi %d " 2180 "nrcr_read %d " 2181 "npk read %d " 2182 "head_pp $%p index %d ", 2183 channel, 2184 multi, 2185 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2186 comp_rd_index)); 2187 2188 if ((bytes_to_pickup != -1) && 2189 (totallen >= bytes_to_pickup)) { 2190 break; 2191 } 2192 } 2193 2194 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2195 rcr_p->comp_rd_index = comp_rd_index; 2196 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2197 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2198 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2199 2200 rcr_p->intr_timeout = (nxgep->intr_timeout < 2201 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2202 nxgep->intr_timeout; 2203 2204 rcr_p->intr_threshold = (nxgep->intr_threshold < 2205 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2206 nxgep->intr_threshold; 2207 2208 rcr_cfg_b.value = 0x0ULL; 2209 rcr_cfg_b.bits.ldw.entout = 1; 2210 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2211 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2212 2213 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2214 channel, rcr_cfg_b.value); 2215 } 2216 2217 cs.bits.ldw.pktread = npkt_read; 2218 cs.bits.ldw.ptrread = nrcr_read; 2219 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2220 channel, cs.value); 2221 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2222 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2223 "head_pp $%p index %016llx ", 2224 channel, 2225 rcr_p->rcr_desc_rd_head_pp, 2226 rcr_p->comp_rd_index)); 2227 /* 2228 * Update RCR buffer pointer read and number of packets 2229 * read. 
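	 * Writing pktread and ptrread back to RX_DMA_CTL_STAT (done just
	 * above) tells the hardware how many packets and completion-ring
	 * entries software has consumed, so the hardware can adjust its
	 * queue length accordingly.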
2230 */ 2231 2232 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2233 "channel %d", rcr_p->rdc)); 2234 2235 return (head_mp); 2236 } 2237 2238 void 2239 nxge_receive_packet(p_nxge_t nxgep, 2240 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2241 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2242 { 2243 p_mblk_t nmp = NULL; 2244 uint64_t multi; 2245 uint64_t dcf_err; 2246 uint8_t channel; 2247 2248 boolean_t first_entry = B_TRUE; 2249 boolean_t is_tcp_udp = B_FALSE; 2250 boolean_t buffer_free = B_FALSE; 2251 boolean_t error_send_up = B_FALSE; 2252 uint8_t error_type; 2253 uint16_t l2_len; 2254 uint16_t skip_len; 2255 uint8_t pktbufsz_type; 2256 uint64_t rcr_entry; 2257 uint64_t *pkt_buf_addr_pp; 2258 uint64_t *pkt_buf_addr_p; 2259 uint32_t buf_offset; 2260 uint32_t bsize; 2261 uint32_t error_disp_cnt; 2262 uint32_t msg_index; 2263 p_rx_rbr_ring_t rx_rbr_p; 2264 p_rx_msg_t *rx_msg_ring_p; 2265 p_rx_msg_t rx_msg_p; 2266 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2267 nxge_status_t status = NXGE_OK; 2268 boolean_t is_valid = B_FALSE; 2269 p_nxge_rx_ring_stats_t rdc_stats; 2270 uint32_t bytes_read; 2271 uint64_t pkt_type; 2272 uint64_t frag; 2273 boolean_t pkt_too_long_err = B_FALSE; 2274 #ifdef NXGE_DEBUG 2275 int dump_len; 2276 #endif 2277 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2278 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2279 2280 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2281 2282 multi = (rcr_entry & RCR_MULTI_MASK); 2283 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2284 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2285 2286 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2287 frag = (rcr_entry & RCR_FRAG_MASK); 2288 2289 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2290 2291 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2292 RCR_PKTBUFSZ_SHIFT); 2293 #if defined(__i386) 2294 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2295 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2296 #else 2297 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2298 RCR_PKT_BUF_ADDR_SHIFT); 2299 #endif 2300 2301 channel = rcr_p->rdc; 2302 2303 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2304 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2305 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2306 "error_type 0x%x pkt_type 0x%x " 2307 "pktbufsz_type %d ", 2308 rcr_desc_rd_head_p, 2309 rcr_entry, pkt_buf_addr_pp, l2_len, 2310 multi, 2311 error_type, 2312 pkt_type, 2313 pktbufsz_type)); 2314 2315 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2316 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2317 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2318 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2319 rcr_entry, pkt_buf_addr_pp, l2_len, 2320 multi, 2321 error_type, 2322 pkt_type)); 2323 2324 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2325 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2326 "full pkt_buf_addr_pp $%p l2_len %d", 2327 rcr_entry, pkt_buf_addr_pp, l2_len)); 2328 2329 /* get the stats ptr */ 2330 rdc_stats = rcr_p->rdc_stats; 2331 2332 if (!l2_len) { 2333 2334 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2335 "<== nxge_receive_packet: failed: l2 length is 0.")); 2336 return; 2337 } 2338 2339 /* 2340 * Software workaround for BMAC hardware limitation that allows 2341 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2342 * instead of 0x2400 for jumbo. 
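	 * The check below only flags such a frame (pkt_too_long_err); the
	 * error accounting, and the decision to drop it, happen in the
	 * error path further down.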
2343 */ 2344 if (l2_len > nxgep->mac.maxframesize) { 2345 pkt_too_long_err = B_TRUE; 2346 } 2347 2348 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2349 l2_len -= ETHERFCSL; 2350 2351 /* shift 6 bits to get the full io address */ 2352 #if defined(__i386) 2353 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2354 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2355 #else 2356 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2357 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2358 #endif 2359 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2360 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2361 "full pkt_buf_addr_pp $%p l2_len %d", 2362 rcr_entry, pkt_buf_addr_pp, l2_len)); 2363 2364 rx_rbr_p = rcr_p->rx_rbr_p; 2365 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2366 2367 if (first_entry) { 2368 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2369 RXDMA_HDR_SIZE_DEFAULT); 2370 2371 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2372 "==> nxge_receive_packet: first entry 0x%016llx " 2373 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2374 rcr_entry, pkt_buf_addr_pp, l2_len, 2375 hdr_size)); 2376 } 2377 2378 MUTEX_ENTER(&rx_rbr_p->lock); 2379 2380 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2381 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2382 "full pkt_buf_addr_pp $%p l2_len %d", 2383 rcr_entry, pkt_buf_addr_pp, l2_len)); 2384 2385 /* 2386 * Packet buffer address in the completion entry points 2387 * to the starting buffer address (offset 0). 2388 * Use the starting buffer address to locate the corresponding 2389 * kernel address. 2390 */ 2391 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2392 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2393 &buf_offset, 2394 &msg_index); 2395 2396 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2397 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2398 "full pkt_buf_addr_pp $%p l2_len %d", 2399 rcr_entry, pkt_buf_addr_pp, l2_len)); 2400 2401 if (status != NXGE_OK) { 2402 MUTEX_EXIT(&rx_rbr_p->lock); 2403 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2404 "<== nxge_receive_packet: found vaddr failed %d", 2405 status)); 2406 return; 2407 } 2408 2409 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2410 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2411 "full pkt_buf_addr_pp $%p l2_len %d", 2412 rcr_entry, pkt_buf_addr_pp, l2_len)); 2413 2414 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2415 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2416 "full pkt_buf_addr_pp $%p l2_len %d", 2417 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2418 2419 rx_msg_p = rx_msg_ring_p[msg_index]; 2420 2421 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2422 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2423 "full pkt_buf_addr_pp $%p l2_len %d", 2424 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2425 2426 switch (pktbufsz_type) { 2427 case RCR_PKTBUFSZ_0: 2428 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2429 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2430 "==> nxge_receive_packet: 0 buf %d", bsize)); 2431 break; 2432 case RCR_PKTBUFSZ_1: 2433 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2434 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2435 "==> nxge_receive_packet: 1 buf %d", bsize)); 2436 break; 2437 case RCR_PKTBUFSZ_2: 2438 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2439 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2440 "==> nxge_receive_packet: 2 buf %d", bsize)); 2441 break; 2442 case RCR_SINGLE_BLOCK: 2443 bsize = rx_msg_p->block_size; 2444 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2445 "==> nxge_receive_packet: single %d", bsize)); 2446 2447 break; 2448 default: 2449 MUTEX_EXIT(&rx_rbr_p->lock); 2450 return; 2451 } 2452 2453 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2454 (buf_offset + sw_offset_bytes), 
2455 (hdr_size + l2_len), 2456 DDI_DMA_SYNC_FORCPU); 2457 2458 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2459 "==> nxge_receive_packet: after first dump:usage count")); 2460 2461 if (rx_msg_p->cur_usage_cnt == 0) { 2462 if (rx_rbr_p->rbr_use_bcopy) { 2463 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2464 if (rx_rbr_p->rbr_consumed < 2465 rx_rbr_p->rbr_threshold_hi) { 2466 if (rx_rbr_p->rbr_threshold_lo == 0 || 2467 ((rx_rbr_p->rbr_consumed >= 2468 rx_rbr_p->rbr_threshold_lo) && 2469 (rx_rbr_p->rbr_bufsize_type >= 2470 pktbufsz_type))) { 2471 rx_msg_p->rx_use_bcopy = B_TRUE; 2472 } 2473 } else { 2474 rx_msg_p->rx_use_bcopy = B_TRUE; 2475 } 2476 } 2477 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2478 "==> nxge_receive_packet: buf %d (new block) ", 2479 bsize)); 2480 2481 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2482 rx_msg_p->pkt_buf_size = bsize; 2483 rx_msg_p->cur_usage_cnt = 1; 2484 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2485 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2486 "==> nxge_receive_packet: buf %d " 2487 "(single block) ", 2488 bsize)); 2489 /* 2490 * Buffer can be reused once the free function 2491 * is called. 2492 */ 2493 rx_msg_p->max_usage_cnt = 1; 2494 buffer_free = B_TRUE; 2495 } else { 2496 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2497 if (rx_msg_p->max_usage_cnt == 1) { 2498 buffer_free = B_TRUE; 2499 } 2500 } 2501 } else { 2502 rx_msg_p->cur_usage_cnt++; 2503 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2504 buffer_free = B_TRUE; 2505 } 2506 } 2507 2508 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2509 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2510 msg_index, l2_len, 2511 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2512 2513 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2514 rdc_stats->ierrors++; 2515 if (dcf_err) { 2516 rdc_stats->dcf_err++; 2517 #ifdef NXGE_DEBUG 2518 if (!rdc_stats->dcf_err) { 2519 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2520 "nxge_receive_packet: channel %d dcf_err rcr" 2521 " 0x%llx", channel, rcr_entry)); 2522 } 2523 #endif 2524 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2525 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2526 } else if (pkt_too_long_err) { 2527 rdc_stats->pkt_too_long_err++; 2528 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2529 " channel %d packet length [%d] > " 2530 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2531 nxgep->mac.maxframesize)); 2532 } else { 2533 /* Update error stats */ 2534 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2535 rdc_stats->errlog.compl_err_type = error_type; 2536 2537 switch (error_type) { 2538 /* 2539 * Do not send FMA ereport for RCR_L2_ERROR and 2540 * RCR_L4_CSUM_ERROR because most likely they indicate 2541 * back pressure rather than HW failures. 2542 */ 2543 case RCR_L2_ERROR: 2544 rdc_stats->l2_err++; 2545 if (rdc_stats->l2_err < 2546 error_disp_cnt) { 2547 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2548 " nxge_receive_packet:" 2549 " channel %d RCR L2_ERROR", 2550 channel)); 2551 } 2552 break; 2553 case RCR_L4_CSUM_ERROR: 2554 error_send_up = B_TRUE; 2555 rdc_stats->l4_cksum_err++; 2556 if (rdc_stats->l4_cksum_err < 2557 error_disp_cnt) { 2558 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2559 " nxge_receive_packet:" 2560 " channel %d" 2561 " RCR L4_CSUM_ERROR", channel)); 2562 } 2563 break; 2564 /* 2565 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2566 * RCR_ZCP_SOFT_ERROR because they reflect the same 2567 * FFLP and ZCP errors that have been reported by 2568 * nxge_fflp.c and nxge_zcp.c. 
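			 *
			 * Frames with these two error types are still
			 * passed up to the stack (error_send_up is set);
			 * only the per-channel statistics are updated here.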
			 */
			case RCR_FFLP_SOFT_ERROR:
				error_send_up = B_TRUE;
				rdc_stats->fflp_soft_err++;
				if (rdc_stats->fflp_soft_err <
				    error_disp_cnt) {
					NXGE_ERROR_MSG((nxgep,
					    NXGE_ERR_CTL,
					    " nxge_receive_packet:"
					    " channel %d"
					    " RCR FFLP_SOFT_ERROR", channel));
				}
				break;
			case RCR_ZCP_SOFT_ERROR:
				error_send_up = B_TRUE;
				rdc_stats->zcp_soft_err++;
				if (rdc_stats->zcp_soft_err <
				    error_disp_cnt)
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet: Channel %d"
					    " RCR ZCP_SOFT_ERROR", channel));
				break;
			default:
				rdc_stats->rcr_unknown_err++;
				if (rdc_stats->rcr_unknown_err
				    < error_disp_cnt) {
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet: Channel %d"
					    " RCR entry 0x%llx error 0x%x",
					    channel, rcr_entry, error_type));
				}
				break;
			}
		}

		/*
		 * Update and repost buffer block if max usage
		 * count is reached.
		 */
		if (error_send_up == B_FALSE) {
			atomic_inc_32(&rx_msg_p->ref_cnt);
			if (buffer_free == B_TRUE) {
				rx_msg_p->free = B_TRUE;
			}

			MUTEX_EXIT(&rx_rbr_p->lock);
			nxge_freeb(rx_msg_p);
			return;
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_receive_packet: DMA sync second "));

	bytes_read = rcr_p->rcvd_pkt_bytes;
	skip_len = sw_offset_bytes + hdr_size;
	if (!rx_msg_p->rx_use_bcopy) {
		/*
		 * For loaned-up buffers, the driver reference count is
		 * incremented first, and then the free state is set.
		 */
		if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
			if (first_entry) {
				nmp->b_rptr = &nmp->b_rptr[skip_len];
				if (l2_len < bsize - skip_len) {
					nmp->b_wptr = &nmp->b_rptr[l2_len];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize
					    - skip_len];
				}
			} else {
				if (l2_len - bytes_read < bsize) {
					nmp->b_wptr =
					    &nmp->b_rptr[l2_len - bytes_read];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize];
				}
			}
		}
	} else {
		if (first_entry) {
			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
			    l2_len < bsize - skip_len ?
			    l2_len : bsize - skip_len);
		} else {
			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
			    l2_len - bytes_read < bsize ?
			    l2_len - bytes_read : bsize);
		}
	}
	if (nmp != NULL) {
		if (first_entry) {
			/*
			 * Jumbo packets may be received with more than one
			 * buffer, increment ipackets for the first entry only.
			 */
			rdc_stats->ipackets++;

			/* Update ibytes for kstat. */
			rdc_stats->ibytes += skip_len
			    + l2_len < bsize ? l2_len : bsize;
			/*
			 * Update the number of bytes read so far for the
			 * current frame.
			 */
			bytes_read = nmp->b_wptr - nmp->b_rptr;
		} else {
			rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2677 l2_len - bytes_read : bsize; 2678 bytes_read += nmp->b_wptr - nmp->b_rptr; 2679 } 2680 2681 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2682 "==> nxge_receive_packet after dupb: " 2683 "rbr consumed %d " 2684 "pktbufsz_type %d " 2685 "nmp $%p rptr $%p wptr $%p " 2686 "buf_offset %d bzise %d l2_len %d skip_len %d", 2687 rx_rbr_p->rbr_consumed, 2688 pktbufsz_type, 2689 nmp, nmp->b_rptr, nmp->b_wptr, 2690 buf_offset, bsize, l2_len, skip_len)); 2691 } else { 2692 cmn_err(CE_WARN, "!nxge_receive_packet: " 2693 "update stats (error)"); 2694 atomic_inc_32(&rx_msg_p->ref_cnt); 2695 if (buffer_free == B_TRUE) { 2696 rx_msg_p->free = B_TRUE; 2697 } 2698 MUTEX_EXIT(&rx_rbr_p->lock); 2699 nxge_freeb(rx_msg_p); 2700 return; 2701 } 2702 2703 if (buffer_free == B_TRUE) { 2704 rx_msg_p->free = B_TRUE; 2705 } 2706 2707 is_valid = (nmp != NULL); 2708 2709 rcr_p->rcvd_pkt_bytes = bytes_read; 2710 2711 MUTEX_EXIT(&rx_rbr_p->lock); 2712 2713 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2714 atomic_inc_32(&rx_msg_p->ref_cnt); 2715 nxge_freeb(rx_msg_p); 2716 } 2717 2718 if (is_valid) { 2719 nmp->b_cont = NULL; 2720 if (first_entry) { 2721 *mp = nmp; 2722 *mp_cont = NULL; 2723 } else { 2724 *mp_cont = nmp; 2725 } 2726 } 2727 2728 /* 2729 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2730 * If a packet is not fragmented and no error bit is set, then 2731 * L4 checksum is OK. 2732 */ 2733 2734 if (is_valid && !multi) { 2735 /* 2736 * If the checksum flag nxge_chksum_offload 2737 * is 1, TCP and UDP packets can be sent 2738 * up with good checksum. If the checksum flag 2739 * is set to 0, checksum reporting will apply to 2740 * TCP packets only (workaround for a hardware bug). 2741 * If the checksum flag nxge_cksum_offload is 2742 * greater than 1, both TCP and UDP packets 2743 * will not be reported its hardware checksum results. 2744 */ 2745 if (nxge_cksum_offload == 1) { 2746 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2747 pkt_type == RCR_PKT_IS_UDP) ? 2748 B_TRUE: B_FALSE); 2749 } else if (!nxge_cksum_offload) { 2750 /* TCP checksum only. */ 2751 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2752 B_TRUE: B_FALSE); 2753 } 2754 2755 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2756 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2757 is_valid, multi, is_tcp_udp, frag, error_type)); 2758 2759 if (is_tcp_udp && !frag && !error_type) { 2760 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2761 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2762 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2763 "==> nxge_receive_packet: Full tcp/udp cksum " 2764 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2765 "error %d", 2766 is_valid, multi, is_tcp_udp, frag, error_type)); 2767 } 2768 } 2769 2770 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2771 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2772 2773 *multi_p = (multi == RCR_MULTI_MASK); 2774 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2775 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2776 *multi_p, nmp, *mp, *mp_cont)); 2777 } 2778 2779 /* 2780 * Enable polling for a ring. Interrupt for the ring is disabled when 2781 * the nxge interrupt comes (see nxge_rx_intr). 
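 *
 * Rough life cycle, as driven by the mac layer (these entry points are
 * registered with the mac layer elsewhere in the driver):
 *
 *	nxge_enable_poll(ring)		set poll_flag; the next
 *					nxge_rx_intr() leaves the logical
 *					group disarmed
 *	nxge_rx_poll(ring, nbytes)	pull up to roughly nbytes of mblks
 *	nxge_disable_poll(ring)		clear poll_flag and re-arm the
 *					logical group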
2782 */ 2783 int 2784 nxge_enable_poll(void *arg) 2785 { 2786 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2787 p_rx_rcr_ring_t ringp; 2788 p_nxge_t nxgep; 2789 p_nxge_ldg_t ldgp; 2790 uint32_t channel; 2791 2792 if (ring_handle == NULL) { 2793 return (0); 2794 } 2795 2796 nxgep = ring_handle->nxgep; 2797 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2798 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2799 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2800 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2801 ldgp = ringp->ldgp; 2802 if (ldgp == NULL) { 2803 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2804 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2805 ringp->rdc)); 2806 return (0); 2807 } 2808 2809 MUTEX_ENTER(&ringp->lock); 2810 /* enable polling */ 2811 if (ringp->poll_flag == 0) { 2812 ringp->poll_flag = 1; 2813 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2814 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2815 ringp->rdc)); 2816 } 2817 2818 MUTEX_EXIT(&ringp->lock); 2819 return (0); 2820 } 2821 /* 2822 * Disable polling for a ring and enable its interrupt. 2823 */ 2824 int 2825 nxge_disable_poll(void *arg) 2826 { 2827 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2828 p_rx_rcr_ring_t ringp; 2829 p_nxge_t nxgep; 2830 uint32_t channel; 2831 2832 if (ring_handle == NULL) { 2833 return (0); 2834 } 2835 2836 nxgep = ring_handle->nxgep; 2837 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2838 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2839 2840 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2841 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2842 2843 MUTEX_ENTER(&ringp->lock); 2844 2845 /* disable polling: enable interrupt */ 2846 if (ringp->poll_flag) { 2847 npi_handle_t handle; 2848 rx_dma_ctl_stat_t cs; 2849 uint8_t channel; 2850 p_nxge_ldg_t ldgp; 2851 2852 /* 2853 * Get the control and status for this channel. 2854 */ 2855 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2856 channel = ringp->rdc; 2857 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2858 channel, &cs.value); 2859 2860 /* 2861 * Enable mailbox update 2862 * Since packets were not read and the hardware uses 2863 * bits pktread and ptrread to update the queue 2864 * length, we need to set both bits to 0. 2865 */ 2866 cs.bits.ldw.pktread = 0; 2867 cs.bits.ldw.ptrread = 0; 2868 cs.bits.hdw.mex = 1; 2869 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2870 cs.value); 2871 2872 /* 2873 * Rearm this logical group if this is a single device 2874 * group. 2875 */ 2876 ldgp = ringp->ldgp; 2877 if (ldgp == NULL) { 2878 ringp->poll_flag = 0; 2879 MUTEX_EXIT(&ringp->lock); 2880 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2881 "==> nxge_disable_poll: no ldgp rdc %d " 2882 "(still set poll to 0", ringp->rdc)); 2883 return (0); 2884 } 2885 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2886 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2887 ringp->rdc, ldgp)); 2888 if (ldgp->nldvs == 1) { 2889 ldgimgm_t mgm; 2890 mgm.value = 0; 2891 mgm.bits.ldw.arm = 1; 2892 mgm.bits.ldw.timer = ldgp->ldg_timer; 2893 NXGE_REG_WR64(handle, 2894 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 2895 } 2896 ringp->poll_flag = 0; 2897 } 2898 2899 MUTEX_EXIT(&ringp->lock); 2900 return (0); 2901 } 2902 2903 /* 2904 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
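 *
 * Returns a chain of mblks whose total length is capped near
 * bytes_to_pickup (the final frame may push the total slightly past the
 * limit), or NULL if no completed packets are waiting.  The ring must
 * already be in polling mode; the ASSERT below enforces poll_flag == 1.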
2905 */ 2906 mblk_t * 2907 nxge_rx_poll(void *arg, int bytes_to_pickup) 2908 { 2909 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2910 p_rx_rcr_ring_t rcr_p; 2911 p_nxge_t nxgep; 2912 npi_handle_t handle; 2913 rx_dma_ctl_stat_t cs; 2914 mblk_t *mblk; 2915 p_nxge_ldv_t ldvp; 2916 uint32_t channel; 2917 2918 nxgep = ring_handle->nxgep; 2919 2920 /* 2921 * Get the control and status for this channel. 2922 */ 2923 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2924 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2925 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2926 MUTEX_ENTER(&rcr_p->lock); 2927 ASSERT(rcr_p->poll_flag == 1); 2928 2929 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2930 2931 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2932 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2933 rcr_p->rdc, rcr_p->poll_flag)); 2934 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2935 2936 ldvp = rcr_p->ldvp; 2937 /* error events. */ 2938 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2939 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2940 } 2941 2942 MUTEX_EXIT(&rcr_p->lock); 2943 2944 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2945 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 2946 return (mblk); 2947 } 2948 2949 2950 /*ARGSUSED*/ 2951 static nxge_status_t 2952 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2953 { 2954 p_nxge_rx_ring_stats_t rdc_stats; 2955 npi_handle_t handle; 2956 npi_status_t rs; 2957 boolean_t rxchan_fatal = B_FALSE; 2958 boolean_t rxport_fatal = B_FALSE; 2959 uint8_t portn; 2960 nxge_status_t status = NXGE_OK; 2961 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2962 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2963 2964 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2965 portn = nxgep->mac.portnum; 2966 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2967 2968 if (cs.bits.hdw.rbr_tmout) { 2969 rdc_stats->rx_rbr_tmout++; 2970 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2971 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2972 rxchan_fatal = B_TRUE; 2973 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2974 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2975 } 2976 if (cs.bits.hdw.rsp_cnt_err) { 2977 rdc_stats->rsp_cnt_err++; 2978 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2979 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2980 rxchan_fatal = B_TRUE; 2981 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2982 "==> nxge_rx_err_evnts(channel %d): " 2983 "rsp_cnt_err", channel)); 2984 } 2985 if (cs.bits.hdw.byte_en_bus) { 2986 rdc_stats->byte_en_bus++; 2987 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2988 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2989 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2990 "==> nxge_rx_err_evnts(channel %d): " 2991 "fatal error: byte_en_bus", channel)); 2992 rxchan_fatal = B_TRUE; 2993 } 2994 if (cs.bits.hdw.rsp_dat_err) { 2995 rdc_stats->rsp_dat_err++; 2996 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2997 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2998 rxchan_fatal = B_TRUE; 2999 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3000 "==> nxge_rx_err_evnts(channel %d): " 3001 "fatal error: rsp_dat_err", channel)); 3002 } 3003 if (cs.bits.hdw.rcr_ack_err) { 3004 rdc_stats->rcr_ack_err++; 3005 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3006 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 3007 rxchan_fatal = B_TRUE; 3008 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3009 "==> nxge_rx_err_evnts(channel %d): " 3010 "fatal error: rcr_ack_err", channel)); 3011 } 3012 if (cs.bits.hdw.dc_fifo_err) { 3013 rdc_stats->dc_fifo_err++; 3014 
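		/*
		 * dc_fifo_err is treated as a port-level fatal error:
		 * rxport_fatal is set below, and the IPP recovery path
		 * runs at the end of this routine.
		 */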
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3015 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 3016 /* This is not a fatal error! */ 3017 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3018 "==> nxge_rx_err_evnts(channel %d): " 3019 "dc_fifo_err", channel)); 3020 rxport_fatal = B_TRUE; 3021 } 3022 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 3023 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 3024 &rdc_stats->errlog.pre_par, 3025 &rdc_stats->errlog.sha_par)) 3026 != NPI_SUCCESS) { 3027 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3028 "==> nxge_rx_err_evnts(channel %d): " 3029 "rcr_sha_par: get perr", channel)); 3030 return (NXGE_ERROR | rs); 3031 } 3032 if (cs.bits.hdw.rcr_sha_par) { 3033 rdc_stats->rcr_sha_par++; 3034 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3035 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 3036 rxchan_fatal = B_TRUE; 3037 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3038 "==> nxge_rx_err_evnts(channel %d): " 3039 "fatal error: rcr_sha_par", channel)); 3040 } 3041 if (cs.bits.hdw.rbr_pre_par) { 3042 rdc_stats->rbr_pre_par++; 3043 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3044 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 3045 rxchan_fatal = B_TRUE; 3046 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3047 "==> nxge_rx_err_evnts(channel %d): " 3048 "fatal error: rbr_pre_par", channel)); 3049 } 3050 } 3051 /* 3052 * The Following 4 status bits are for information, the system 3053 * is running fine. There is no need to send FMA ereports or 3054 * log messages. 3055 */ 3056 if (cs.bits.hdw.port_drop_pkt) { 3057 rdc_stats->port_drop_pkt++; 3058 } 3059 if (cs.bits.hdw.wred_drop) { 3060 rdc_stats->wred_drop++; 3061 } 3062 if (cs.bits.hdw.rbr_pre_empty) { 3063 rdc_stats->rbr_pre_empty++; 3064 } 3065 if (cs.bits.hdw.rcr_shadow_full) { 3066 rdc_stats->rcr_shadow_full++; 3067 } 3068 if (cs.bits.hdw.config_err) { 3069 rdc_stats->config_err++; 3070 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3071 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3072 rxchan_fatal = B_TRUE; 3073 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3074 "==> nxge_rx_err_evnts(channel %d): " 3075 "config error", channel)); 3076 } 3077 if (cs.bits.hdw.rcrincon) { 3078 rdc_stats->rcrincon++; 3079 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3080 NXGE_FM_EREPORT_RDMC_RCRINCON); 3081 rxchan_fatal = B_TRUE; 3082 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3083 "==> nxge_rx_err_evnts(channel %d): " 3084 "fatal error: rcrincon error", channel)); 3085 } 3086 if (cs.bits.hdw.rcrfull) { 3087 rdc_stats->rcrfull++; 3088 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3089 NXGE_FM_EREPORT_RDMC_RCRFULL); 3090 rxchan_fatal = B_TRUE; 3091 if (rdc_stats->rcrfull < error_disp_cnt) 3092 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3093 "==> nxge_rx_err_evnts(channel %d): " 3094 "fatal error: rcrfull error", channel)); 3095 } 3096 if (cs.bits.hdw.rbr_empty) { 3097 /* 3098 * This bit is for information, there is no need 3099 * send FMA ereport or log a message. 
3100 */ 3101 rdc_stats->rbr_empty++; 3102 } 3103 if (cs.bits.hdw.rbrfull) { 3104 rdc_stats->rbrfull++; 3105 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3106 NXGE_FM_EREPORT_RDMC_RBRFULL); 3107 rxchan_fatal = B_TRUE; 3108 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3109 "==> nxge_rx_err_evnts(channel %d): " 3110 "fatal error: rbr_full error", channel)); 3111 } 3112 if (cs.bits.hdw.rbrlogpage) { 3113 rdc_stats->rbrlogpage++; 3114 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3115 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3116 rxchan_fatal = B_TRUE; 3117 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3118 "==> nxge_rx_err_evnts(channel %d): " 3119 "fatal error: rbr logical page error", channel)); 3120 } 3121 if (cs.bits.hdw.cfiglogpage) { 3122 rdc_stats->cfiglogpage++; 3123 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3124 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3125 rxchan_fatal = B_TRUE; 3126 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3127 "==> nxge_rx_err_evnts(channel %d): " 3128 "fatal error: cfig logical page error", channel)); 3129 } 3130 3131 if (rxport_fatal) { 3132 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3133 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3134 portn)); 3135 if (isLDOMguest(nxgep)) { 3136 status = NXGE_ERROR; 3137 } else { 3138 status = nxge_ipp_fatal_err_recover(nxgep); 3139 if (status == NXGE_OK) { 3140 FM_SERVICE_RESTORED(nxgep); 3141 } 3142 } 3143 } 3144 3145 if (rxchan_fatal) { 3146 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3147 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3148 channel)); 3149 if (isLDOMguest(nxgep)) { 3150 status = NXGE_ERROR; 3151 } else { 3152 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3153 if (status == NXGE_OK) { 3154 FM_SERVICE_RESTORED(nxgep); 3155 } 3156 } 3157 } 3158 3159 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3160 3161 return (status); 3162 } 3163 3164 /* 3165 * nxge_rdc_hvio_setup 3166 * 3167 * This code appears to setup some Hypervisor variables. 3168 * 3169 * Arguments: 3170 * nxgep 3171 * channel 3172 * 3173 * Notes: 3174 * What does NIU_LP_WORKAROUND mean? 
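 *	(Best guess, not a confirmed answer: it appears to gate the
 *	sun4v logical-page setup, since the base address/size values
 *	recorded here look like inputs for the hypervisor configuration
 *	done when the channel is started.)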
3175 * 3176 * NPI/NXGE function calls: 3177 * na 3178 * 3179 * Context: 3180 * Any domain 3181 */ 3182 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3183 static void 3184 nxge_rdc_hvio_setup( 3185 nxge_t *nxgep, int channel) 3186 { 3187 nxge_dma_common_t *dma_common; 3188 nxge_dma_common_t *dma_control; 3189 rx_rbr_ring_t *ring; 3190 3191 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3192 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3193 3194 ring->hv_set = B_FALSE; 3195 3196 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3197 dma_common->orig_ioaddr_pp; 3198 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3199 dma_common->orig_alength; 3200 3201 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3202 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3203 channel, ring->hv_rx_buf_base_ioaddr_pp, 3204 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3205 dma_common->orig_alength, dma_common->orig_alength)); 3206 3207 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3208 3209 ring->hv_rx_cntl_base_ioaddr_pp = 3210 (uint64_t)dma_control->orig_ioaddr_pp; 3211 ring->hv_rx_cntl_ioaddr_size = 3212 (uint64_t)dma_control->orig_alength; 3213 3214 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3215 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3216 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3217 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3218 dma_control->orig_alength, dma_control->orig_alength)); 3219 } 3220 #endif 3221 3222 /* 3223 * nxge_map_rxdma 3224 * 3225 * Map an RDC into our kernel space. 3226 * 3227 * Arguments: 3228 * nxgep 3229 * channel The channel to map. 3230 * 3231 * Notes: 3232 * 1. Allocate & initialise a memory pool, if necessary. 3233 * 2. Allocate however many receive buffers are required. 3234 * 3. Setup buffers, descriptors, and mailbox. 3235 * 3236 * NPI/NXGE function calls: 3237 * nxge_alloc_rx_mem_pool() 3238 * nxge_alloc_rbb() 3239 * nxge_map_rxdma_channel() 3240 * 3241 * Registers accessed: 3242 * 3243 * Context: 3244 * Any domain 3245 */ 3246 static nxge_status_t 3247 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3248 { 3249 nxge_dma_common_t **data; 3250 nxge_dma_common_t **control; 3251 rx_rbr_ring_t **rbr_ring; 3252 rx_rcr_ring_t **rcr_ring; 3253 rx_mbox_t **mailbox; 3254 uint32_t chunks; 3255 3256 nxge_status_t status; 3257 3258 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3259 3260 if (!nxgep->rx_buf_pool_p) { 3261 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3262 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3263 "<== nxge_map_rxdma: buf not allocated")); 3264 return (NXGE_ERROR); 3265 } 3266 } 3267 3268 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3269 return (NXGE_ERROR); 3270 3271 /* 3272 * Map descriptors from the buffer polls for each dma channel. 3273 */ 3274 3275 /* 3276 * Set up and prepare buffer blocks, descriptors 3277 * and mailbox. 
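	 *
	 * nxge_map_rxdma_channel() below splits this into two steps:
	 * nxge_map_rxdma_channel_buf_ring() maps the receive data buffers,
	 * and nxge_map_rxdma_channel_cfg_ring() maps the RBR/RCR
	 * descriptor rings and the mailbox.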
3278 */ 3279 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3280 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3281 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3282 3283 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3284 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3285 3286 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3287 3288 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3289 chunks, control, rcr_ring, mailbox); 3290 if (status != NXGE_OK) { 3291 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3292 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3293 "returned 0x%x", 3294 channel, status)); 3295 return (status); 3296 } 3297 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3298 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3299 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3300 &nxgep->statsp->rdc_stats[channel]; 3301 3302 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3303 if (!isLDOMguest(nxgep)) 3304 nxge_rdc_hvio_setup(nxgep, channel); 3305 #endif 3306 3307 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3308 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3309 3310 return (status); 3311 } 3312 3313 static void 3314 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3315 { 3316 rx_rbr_ring_t *rbr_ring; 3317 rx_rcr_ring_t *rcr_ring; 3318 rx_mbox_t *mailbox; 3319 3320 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3321 3322 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3323 !nxgep->rx_mbox_areas_p) 3324 return; 3325 3326 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3327 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3328 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3329 3330 if (!rbr_ring || !rcr_ring || !mailbox) 3331 return; 3332 3333 (void) nxge_unmap_rxdma_channel( 3334 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3335 3336 nxge_free_rxb(nxgep, channel); 3337 3338 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3339 } 3340 3341 nxge_status_t 3342 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3343 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3344 uint32_t num_chunks, 3345 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3346 p_rx_mbox_t *rx_mbox_p) 3347 { 3348 int status = NXGE_OK; 3349 3350 /* 3351 * Set up and prepare buffer blocks, descriptors 3352 * and mailbox. 3353 */ 3354 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3355 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3356 /* 3357 * Receive buffer blocks 3358 */ 3359 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3360 dma_buf_p, rbr_p, num_chunks); 3361 if (status != NXGE_OK) { 3362 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3363 "==> nxge_map_rxdma_channel (channel %d): " 3364 "map buffer failed 0x%x", channel, status)); 3365 goto nxge_map_rxdma_channel_exit; 3366 } 3367 3368 /* 3369 * Receive block ring, completion ring and mailbox. 
3370 */ 3371 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3372 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3373 if (status != NXGE_OK) { 3374 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3375 "==> nxge_map_rxdma_channel (channel %d): " 3376 "map config failed 0x%x", channel, status)); 3377 goto nxge_map_rxdma_channel_fail2; 3378 } 3379 3380 goto nxge_map_rxdma_channel_exit; 3381 3382 nxge_map_rxdma_channel_fail3: 3383 /* Free rbr, rcr */ 3384 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3385 "==> nxge_map_rxdma_channel: free rbr/rcr " 3386 "(status 0x%x channel %d)", 3387 status, channel)); 3388 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3389 *rcr_p, *rx_mbox_p); 3390 3391 nxge_map_rxdma_channel_fail2: 3392 /* Free buffer blocks */ 3393 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3394 "==> nxge_map_rxdma_channel: free rx buffers" 3395 "(nxgep 0x%x status 0x%x channel %d)", 3396 nxgep, status, channel)); 3397 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3398 3399 status = NXGE_ERROR; 3400 3401 nxge_map_rxdma_channel_exit: 3402 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3403 "<== nxge_map_rxdma_channel: " 3404 "(nxgep 0x%x status 0x%x channel %d)", 3405 nxgep, status, channel)); 3406 3407 return (status); 3408 } 3409 3410 /*ARGSUSED*/ 3411 static void 3412 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3413 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3414 { 3415 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3416 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3417 3418 /* 3419 * unmap receive block ring, completion ring and mailbox. 3420 */ 3421 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3422 rcr_p, rx_mbox_p); 3423 3424 /* unmap buffer blocks */ 3425 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3426 3427 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3428 } 3429 3430 /*ARGSUSED*/ 3431 static nxge_status_t 3432 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3433 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3434 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3435 { 3436 p_rx_rbr_ring_t rbrp; 3437 p_rx_rcr_ring_t rcrp; 3438 p_rx_mbox_t mboxp; 3439 p_nxge_dma_common_t cntl_dmap; 3440 p_nxge_dma_common_t dmap; 3441 p_rx_msg_t *rx_msg_ring; 3442 p_rx_msg_t rx_msg_p; 3443 p_rbr_cfig_a_t rcfga_p; 3444 p_rbr_cfig_b_t rcfgb_p; 3445 p_rcrcfig_a_t cfga_p; 3446 p_rcrcfig_b_t cfgb_p; 3447 p_rxdma_cfig1_t cfig1_p; 3448 p_rxdma_cfig2_t cfig2_p; 3449 p_rbr_kick_t kick_p; 3450 uint32_t dmaaddrp; 3451 uint32_t *rbr_vaddrp; 3452 uint32_t bkaddr; 3453 nxge_status_t status = NXGE_OK; 3454 int i; 3455 uint32_t nxge_port_rcr_size; 3456 3457 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3458 "==> nxge_map_rxdma_channel_cfg_ring")); 3459 3460 cntl_dmap = *dma_cntl_p; 3461 3462 /* Map in the receive block ring */ 3463 rbrp = *rbr_p; 3464 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3465 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3466 /* 3467 * Zero out buffer block ring descriptors. 
3468 */ 3469 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3470 3471 rcfga_p = &(rbrp->rbr_cfga); 3472 rcfgb_p = &(rbrp->rbr_cfgb); 3473 kick_p = &(rbrp->rbr_kick); 3474 rcfga_p->value = 0; 3475 rcfgb_p->value = 0; 3476 kick_p->value = 0; 3477 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3478 rcfga_p->value = (rbrp->rbr_addr & 3479 (RBR_CFIG_A_STDADDR_MASK | 3480 RBR_CFIG_A_STDADDR_BASE_MASK)); 3481 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3482 3483 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3484 rcfgb_p->bits.ldw.vld0 = 1; 3485 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3486 rcfgb_p->bits.ldw.vld1 = 1; 3487 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3488 rcfgb_p->bits.ldw.vld2 = 1; 3489 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3490 3491 /* 3492 * For each buffer block, enter receive block address to the ring. 3493 */ 3494 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3495 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3496 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3497 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3498 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3499 3500 rx_msg_ring = rbrp->rx_msg_ring; 3501 for (i = 0; i < rbrp->tnblocks; i++) { 3502 rx_msg_p = rx_msg_ring[i]; 3503 rx_msg_p->nxgep = nxgep; 3504 rx_msg_p->rx_rbr_p = rbrp; 3505 bkaddr = (uint32_t) 3506 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3507 >> RBR_BKADDR_SHIFT)); 3508 rx_msg_p->free = B_FALSE; 3509 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3510 3511 *rbr_vaddrp++ = bkaddr; 3512 } 3513 3514 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3515 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3516 3517 rbrp->rbr_rd_index = 0; 3518 3519 rbrp->rbr_consumed = 0; 3520 rbrp->rbr_use_bcopy = B_TRUE; 3521 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3522 /* 3523 * Do bcopy on packets greater than bcopy size once 3524 * the lo threshold is reached. 3525 * This lo threshold should be less than the hi threshold. 3526 * 3527 * Do bcopy on every packet once the hi threshold is reached. 
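	 *
	 * Worked example (illustrative values only, assuming
	 * NXGE_RX_COPY_4 has the numeric value 4 and NXGE_RX_BCOPY_SCALE
	 * is 8): with rbb_max = 4096 and a threshold setting of
	 * NXGE_RX_COPY_4, the switch below yields
	 *
	 *	rbr_threshold = 4096 * 4 / 8 = 2048
	 *
	 * i.e. bcopy kicks in once 2048 of the 4096 buffer blocks have
	 * been consumed.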
3528 */ 3529 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3530 /* default it to use hi */ 3531 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3532 } 3533 3534 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3535 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3536 } 3537 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3538 3539 switch (nxge_rx_threshold_hi) { 3540 default: 3541 case NXGE_RX_COPY_NONE: 3542 /* Do not do bcopy at all */ 3543 rbrp->rbr_use_bcopy = B_FALSE; 3544 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3545 break; 3546 3547 case NXGE_RX_COPY_1: 3548 case NXGE_RX_COPY_2: 3549 case NXGE_RX_COPY_3: 3550 case NXGE_RX_COPY_4: 3551 case NXGE_RX_COPY_5: 3552 case NXGE_RX_COPY_6: 3553 case NXGE_RX_COPY_7: 3554 rbrp->rbr_threshold_hi = 3555 rbrp->rbb_max * 3556 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3557 break; 3558 3559 case NXGE_RX_COPY_ALL: 3560 rbrp->rbr_threshold_hi = 0; 3561 break; 3562 } 3563 3564 switch (nxge_rx_threshold_lo) { 3565 default: 3566 case NXGE_RX_COPY_NONE: 3567 /* Do not do bcopy at all */ 3568 if (rbrp->rbr_use_bcopy) { 3569 rbrp->rbr_use_bcopy = B_FALSE; 3570 } 3571 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3572 break; 3573 3574 case NXGE_RX_COPY_1: 3575 case NXGE_RX_COPY_2: 3576 case NXGE_RX_COPY_3: 3577 case NXGE_RX_COPY_4: 3578 case NXGE_RX_COPY_5: 3579 case NXGE_RX_COPY_6: 3580 case NXGE_RX_COPY_7: 3581 rbrp->rbr_threshold_lo = 3582 rbrp->rbb_max * 3583 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3584 break; 3585 3586 case NXGE_RX_COPY_ALL: 3587 rbrp->rbr_threshold_lo = 0; 3588 break; 3589 } 3590 3591 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3592 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3593 "rbb_max %d " 3594 "rbrp->rbr_bufsize_type %d " 3595 "rbb_threshold_hi %d " 3596 "rbb_threshold_lo %d", 3597 dma_channel, 3598 rbrp->rbb_max, 3599 rbrp->rbr_bufsize_type, 3600 rbrp->rbr_threshold_hi, 3601 rbrp->rbr_threshold_lo)); 3602 3603 rbrp->page_valid.value = 0; 3604 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3605 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3606 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3607 rbrp->page_hdl.value = 0; 3608 3609 rbrp->page_valid.bits.ldw.page0 = 1; 3610 rbrp->page_valid.bits.ldw.page1 = 1; 3611 3612 /* Map in the receive completion ring */ 3613 rcrp = (p_rx_rcr_ring_t) 3614 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3615 rcrp->rdc = dma_channel; 3616 3617 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3618 rcrp->comp_size = nxge_port_rcr_size; 3619 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3620 3621 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3622 3623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3624 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3625 sizeof (rcr_entry_t)); 3626 rcrp->comp_rd_index = 0; 3627 rcrp->comp_wt_index = 0; 3628 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3629 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3630 #if defined(__i386) 3631 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3632 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3633 #else 3634 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3635 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3636 #endif 3637 3638 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3639 (nxge_port_rcr_size - 1); 3640 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3641 (nxge_port_rcr_size - 1); 3642 3643 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3644 "==> nxge_map_rxdma_channel_cfg_ring: " 3645 "channel %d " 3646 "rbr_vaddrp $%p " 3647 "rcr_desc_rd_head_p $%p " 3648 "rcr_desc_rd_head_pp 
$%p " 3649 "rcr_desc_rd_last_p $%p " 3650 "rcr_desc_rd_last_pp $%p ", 3651 dma_channel, 3652 rbr_vaddrp, 3653 rcrp->rcr_desc_rd_head_p, 3654 rcrp->rcr_desc_rd_head_pp, 3655 rcrp->rcr_desc_last_p, 3656 rcrp->rcr_desc_last_pp)); 3657 3658 /* 3659 * Zero out buffer block ring descriptors. 3660 */ 3661 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3662 3663 rcrp->intr_timeout = (nxgep->intr_timeout < 3664 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3665 nxgep->intr_timeout; 3666 3667 rcrp->intr_threshold = (nxgep->intr_threshold < 3668 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3669 nxgep->intr_threshold; 3670 3671 rcrp->full_hdr_flag = B_FALSE; 3672 rcrp->sw_priv_hdr_len = 0; 3673 3674 cfga_p = &(rcrp->rcr_cfga); 3675 cfgb_p = &(rcrp->rcr_cfgb); 3676 cfga_p->value = 0; 3677 cfgb_p->value = 0; 3678 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3679 cfga_p->value = (rcrp->rcr_addr & 3680 (RCRCFIG_A_STADDR_MASK | 3681 RCRCFIG_A_STADDR_BASE_MASK)); 3682 3683 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3684 RCRCFIG_A_LEN_SHIF); 3685 3686 /* 3687 * Timeout should be set based on the system clock divider. 3688 * A timeout value of 1 assumes that the 3689 * granularity (1000) is 3 microseconds running at 300MHz. 3690 */ 3691 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3692 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3693 cfgb_p->bits.ldw.entout = 1; 3694 3695 /* Map in the mailbox */ 3696 mboxp = (p_rx_mbox_t) 3697 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3698 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3699 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3700 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3701 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3702 cfig1_p->value = cfig2_p->value = 0; 3703 3704 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3705 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3706 "==> nxge_map_rxdma_channel_cfg_ring: " 3707 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3708 dma_channel, cfig1_p->value, cfig2_p->value, 3709 mboxp->mbox_addr)); 3710 3711 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3712 & 0xfff); 3713 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3714 3715 3716 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3717 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3718 RXDMA_CFIG2_MBADDR_L_MASK); 3719 3720 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3721 3722 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3723 "==> nxge_map_rxdma_channel_cfg_ring: " 3724 "channel %d damaddrp $%p " 3725 "cfg1 0x%016llx cfig2 0x%016llx", 3726 dma_channel, dmaaddrp, 3727 cfig1_p->value, cfig2_p->value)); 3728 3729 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3730 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3731 3732 rbrp->rx_rcr_p = rcrp; 3733 rcrp->rx_rbr_p = rbrp; 3734 *rcr_p = rcrp; 3735 *rx_mbox_p = mboxp; 3736 3737 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3738 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3739 3740 return (status); 3741 } 3742 3743 /*ARGSUSED*/ 3744 static void 3745 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3746 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3747 { 3748 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3749 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3750 rcr_p->rdc)); 3751 3752 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3753 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3754 3755 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3756 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3757 } 3758 3759 static nxge_status_t 3760 
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3761 p_nxge_dma_common_t *dma_buf_p, 3762 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3763 { 3764 p_rx_rbr_ring_t rbrp; 3765 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3766 p_rx_msg_t *rx_msg_ring; 3767 p_rx_msg_t rx_msg_p; 3768 p_mblk_t mblk_p; 3769 3770 rxring_info_t *ring_info; 3771 nxge_status_t status = NXGE_OK; 3772 int i, j, index; 3773 uint32_t size, bsize, nblocks, nmsgs; 3774 3775 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3776 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3777 channel)); 3778 3779 dma_bufp = tmp_bufp = *dma_buf_p; 3780 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3781 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3782 "chunks bufp 0x%016llx", 3783 channel, num_chunks, dma_bufp)); 3784 3785 nmsgs = 0; 3786 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3787 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3788 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3789 "bufp 0x%016llx nblocks %d nmsgs %d", 3790 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3791 nmsgs += tmp_bufp->nblocks; 3792 } 3793 if (!nmsgs) { 3794 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3795 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3796 "no msg blocks", 3797 channel)); 3798 status = NXGE_ERROR; 3799 goto nxge_map_rxdma_channel_buf_ring_exit; 3800 } 3801 3802 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3803 3804 size = nmsgs * sizeof (p_rx_msg_t); 3805 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3806 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3807 KM_SLEEP); 3808 3809 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3810 (void *)nxgep->interrupt_cookie); 3811 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3812 (void *)nxgep->interrupt_cookie); 3813 rbrp->rdc = channel; 3814 rbrp->num_blocks = num_chunks; 3815 rbrp->tnblocks = nmsgs; 3816 rbrp->rbb_max = nmsgs; 3817 rbrp->rbr_max_size = nmsgs; 3818 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3819 3820 /* 3821 * Buffer sizes suggested by NIU architect. 3822 * 256, 512 and 2K. 3823 */ 3824 3825 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3826 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3827 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3828 3829 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3830 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3831 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3832 3833 rbrp->block_size = nxgep->rx_default_block_size; 3834 3835 if (!nxgep->mac.is_jumbo) { 3836 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3837 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3838 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3839 } else { 3840 if (rbrp->block_size >= 0x2000) { 3841 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3842 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3843 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3844 } else { 3845 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3846 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3847 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3848 } 3849 } 3850 3851 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3852 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3853 "actual rbr max %d rbb_max %d nmsgs %d " 3854 "rbrp->block_size %d default_block_size %d " 3855 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3856 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3857 rbrp->block_size, nxgep->rx_default_block_size, 3858 nxge_rbr_size, nxge_rbr_spare_size)); 3859 3860 /* Map in buffers from the buffer pool. 
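	 * Each DMA chunk (dma_bufp) is carved into nblocks blocks of bsize
	 * bytes; ring_info->buffer[] records the chunk's DVMA and kernel
	 * addresses so that nxge_rxbuf_pp_to_vp() can later translate a
	 * hardware packet buffer address back to its rx_msg_t entry.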
*/ 3861 index = 0; 3862 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3863 bsize = dma_bufp->block_size; 3864 nblocks = dma_bufp->nblocks; 3865 #if defined(__i386) 3866 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3867 #else 3868 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3869 #endif 3870 ring_info->buffer[i].buf_index = i; 3871 ring_info->buffer[i].buf_size = dma_bufp->alength; 3872 ring_info->buffer[i].start_index = index; 3873 #if defined(__i386) 3874 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3875 #else 3876 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3877 #endif 3878 3879 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3880 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3881 "chunk %d" 3882 " nblocks %d chunk_size %x block_size 0x%x " 3883 "dma_bufp $%p", channel, i, 3884 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3885 dma_bufp)); 3886 3887 for (j = 0; j < nblocks; j++) { 3888 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3889 dma_bufp)) == NULL) { 3890 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3891 "allocb failed (index %d i %d j %d)", 3892 index, i, j)); 3893 goto nxge_map_rxdma_channel_buf_ring_fail1; 3894 } 3895 rx_msg_ring[index] = rx_msg_p; 3896 rx_msg_p->block_index = index; 3897 rx_msg_p->shifted_addr = (uint32_t) 3898 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3899 RBR_BKADDR_SHIFT)); 3900 3901 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3902 "index %d j %d rx_msg_p $%p mblk %p", 3903 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3904 3905 mblk_p = rx_msg_p->rx_mblk_p; 3906 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3907 3908 rbrp->rbr_ref_cnt++; 3909 index++; 3910 rx_msg_p->buf_dma.dma_channel = channel; 3911 } 3912 3913 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3914 if (dma_bufp->contig_alloc_type) { 3915 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3916 } 3917 3918 if (dma_bufp->kmem_alloc_type) { 3919 rbrp->rbr_alloc_type = KMEM_ALLOC; 3920 } 3921 3922 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3923 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3924 "chunk %d" 3925 " nblocks %d chunk_size %x block_size 0x%x " 3926 "dma_bufp $%p", 3927 channel, i, 3928 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3929 dma_bufp)); 3930 } 3931 if (i < rbrp->num_blocks) { 3932 goto nxge_map_rxdma_channel_buf_ring_fail1; 3933 } 3934 3935 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3936 "nxge_map_rxdma_channel_buf_ring: done buf init " 3937 "channel %d msg block entries %d", 3938 channel, index)); 3939 ring_info->block_size_mask = bsize - 1; 3940 rbrp->rx_msg_ring = rx_msg_ring; 3941 rbrp->dma_bufp = dma_buf_p; 3942 rbrp->ring_info = ring_info; 3943 3944 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3945 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3946 " nxge_map_rxdma_channel_buf_ring: " 3947 "channel %d done buf info init", channel)); 3948 3949 /* 3950 * Finally, permit nxge_freeb() to call nxge_post_page(). 
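	 * Once rbr_state is RBR_POSTING, a buffer released through
	 * nxge_freeb() with no other references outstanding is posted
	 * straight back onto the hardware RBR via nxge_post_page()
	 * instead of being torn down.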
3951 */ 3952 rbrp->rbr_state = RBR_POSTING; 3953 3954 *rbr_p = rbrp; 3955 goto nxge_map_rxdma_channel_buf_ring_exit; 3956 3957 nxge_map_rxdma_channel_buf_ring_fail1: 3958 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3959 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3960 channel, status)); 3961 3962 index--; 3963 for (; index >= 0; index--) { 3964 rx_msg_p = rx_msg_ring[index]; 3965 if (rx_msg_p != NULL) { 3966 freeb(rx_msg_p->rx_mblk_p); 3967 rx_msg_ring[index] = NULL; 3968 } 3969 } 3970 nxge_map_rxdma_channel_buf_ring_fail: 3971 MUTEX_DESTROY(&rbrp->post_lock); 3972 MUTEX_DESTROY(&rbrp->lock); 3973 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3974 KMEM_FREE(rx_msg_ring, size); 3975 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3976 3977 status = NXGE_ERROR; 3978 3979 nxge_map_rxdma_channel_buf_ring_exit: 3980 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3981 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3982 3983 return (status); 3984 } 3985 3986 /*ARGSUSED*/ 3987 static void 3988 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3989 p_rx_rbr_ring_t rbr_p) 3990 { 3991 p_rx_msg_t *rx_msg_ring; 3992 p_rx_msg_t rx_msg_p; 3993 rxring_info_t *ring_info; 3994 int i; 3995 uint32_t size; 3996 #ifdef NXGE_DEBUG 3997 int num_chunks; 3998 #endif 3999 4000 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4001 "==> nxge_unmap_rxdma_channel_buf_ring")); 4002 if (rbr_p == NULL) { 4003 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4004 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 4005 return; 4006 } 4007 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4008 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 4009 rbr_p->rdc)); 4010 4011 rx_msg_ring = rbr_p->rx_msg_ring; 4012 ring_info = rbr_p->ring_info; 4013 4014 if (rx_msg_ring == NULL || ring_info == NULL) { 4015 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4016 "<== nxge_unmap_rxdma_channel_buf_ring: " 4017 "rx_msg_ring $%p ring_info $%p", 4018 rx_msg_p, ring_info)); 4019 return; 4020 } 4021 4022 #ifdef NXGE_DEBUG 4023 num_chunks = rbr_p->num_blocks; 4024 #endif 4025 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 4026 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4027 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 4028 "tnblocks %d (max %d) size ptrs %d ", 4029 rbr_p->rdc, num_chunks, 4030 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 4031 4032 for (i = 0; i < rbr_p->tnblocks; i++) { 4033 rx_msg_p = rx_msg_ring[i]; 4034 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4035 " nxge_unmap_rxdma_channel_buf_ring: " 4036 "rx_msg_p $%p", 4037 rx_msg_p)); 4038 if (rx_msg_p != NULL) { 4039 freeb(rx_msg_p->rx_mblk_p); 4040 rx_msg_ring[i] = NULL; 4041 } 4042 } 4043 4044 /* 4045 * We no longer may use the mutex <post_lock>. By setting 4046 * <rbr_state> to anything but POSTING, we prevent 4047 * nxge_post_page() from accessing a dead mutex. 4048 */ 4049 rbr_p->rbr_state = RBR_UNMAPPING; 4050 MUTEX_DESTROY(&rbr_p->post_lock); 4051 4052 MUTEX_DESTROY(&rbr_p->lock); 4053 4054 if (rbr_p->rbr_ref_cnt == 0) { 4055 /* 4056 * This is the normal state of affairs. 4057 * Need to free the following buffers: 4058 * - data buffers 4059 * - rx_msg ring 4060 * - ring_info 4061 * - rbr ring 4062 */ 4063 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4064 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4065 nxge_rxdma_databuf_free(rbr_p); 4066 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4067 KMEM_FREE(rx_msg_ring, size); 4068 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4069 } else { 4070 /* 4071 * Some of our buffers are still being used. 4072 * Therefore, tell nxge_freeb() this ring is 4073 * unmapped, so it may free <rbr_p> for us. 
4074 */ 4075 rbr_p->rbr_state = RBR_UNMAPPED; 4076 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4077 "unmap_rxdma_buf_ring: %d %s outstanding.", 4078 rbr_p->rbr_ref_cnt, 4079 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 4080 } 4081 4082 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4083 "<== nxge_unmap_rxdma_channel_buf_ring")); 4084 } 4085 4086 /* 4087 * nxge_rxdma_hw_start_common 4088 * 4089 * Arguments: 4090 * nxgep 4091 * 4092 * Notes: 4093 * 4094 * NPI/NXGE function calls: 4095 * nxge_init_fzc_rx_common(); 4096 * nxge_init_fzc_rxdma_port(); 4097 * 4098 * Registers accessed: 4099 * 4100 * Context: 4101 * Service domain 4102 */ 4103 static nxge_status_t 4104 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 4105 { 4106 nxge_status_t status = NXGE_OK; 4107 4108 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4109 4110 /* 4111 * Load the sharable parameters by writing to the 4112 * function zero control registers. These FZC registers 4113 * should be initialized only once for the entire chip. 4114 */ 4115 (void) nxge_init_fzc_rx_common(nxgep); 4116 4117 /* 4118 * Initialize the RXDMA port specific FZC control configurations. 4119 * These FZC registers are pertaining to each port. 4120 */ 4121 (void) nxge_init_fzc_rxdma_port(nxgep); 4122 4123 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4124 4125 return (status); 4126 } 4127 4128 static nxge_status_t 4129 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 4130 { 4131 int i, ndmas; 4132 p_rx_rbr_rings_t rx_rbr_rings; 4133 p_rx_rbr_ring_t *rbr_rings; 4134 p_rx_rcr_rings_t rx_rcr_rings; 4135 p_rx_rcr_ring_t *rcr_rings; 4136 p_rx_mbox_areas_t rx_mbox_areas_p; 4137 p_rx_mbox_t *rx_mbox_p; 4138 nxge_status_t status = NXGE_OK; 4139 4140 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 4141 4142 rx_rbr_rings = nxgep->rx_rbr_rings; 4143 rx_rcr_rings = nxgep->rx_rcr_rings; 4144 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4145 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4146 "<== nxge_rxdma_hw_start: NULL ring pointers")); 4147 return (NXGE_ERROR); 4148 } 4149 ndmas = rx_rbr_rings->ndmas; 4150 if (ndmas == 0) { 4151 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4152 "<== nxge_rxdma_hw_start: no dma channel allocated")); 4153 return (NXGE_ERROR); 4154 } 4155 4156 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4157 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 4158 4159 rbr_rings = rx_rbr_rings->rbr_rings; 4160 rcr_rings = rx_rcr_rings->rcr_rings; 4161 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 4162 if (rx_mbox_areas_p) { 4163 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 4164 } 4165 4166 i = channel; 4167 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4168 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 4169 ndmas, channel)); 4170 status = nxge_rxdma_start_channel(nxgep, channel, 4171 (p_rx_rbr_ring_t)rbr_rings[i], 4172 (p_rx_rcr_ring_t)rcr_rings[i], 4173 (p_rx_mbox_t)rx_mbox_p[i]); 4174 if (status != NXGE_OK) { 4175 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4176 "==> nxge_rxdma_hw_start: disable " 4177 "(status 0x%x channel %d)", status, channel)); 4178 return (status); 4179 } 4180 4181 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 4182 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4183 rx_rbr_rings, rx_rcr_rings)); 4184 4185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4186 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 4187 4188 return (status); 4189 } 4190 4191 static void 4192 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 4193 { 4194 p_rx_rbr_rings_t rx_rbr_rings; 4195 p_rx_rcr_rings_t rx_rcr_rings; 4196 4197 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
nxge_rxdma_hw_stop")); 4198 4199 rx_rbr_rings = nxgep->rx_rbr_rings; 4200 rx_rcr_rings = nxgep->rx_rcr_rings; 4201 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4202 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4203 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 4204 return; 4205 } 4206 4207 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4208 "==> nxge_rxdma_hw_stop(channel %d)", 4209 channel)); 4210 (void) nxge_rxdma_stop_channel(nxgep, channel); 4211 4212 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 4213 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4214 rx_rbr_rings, rx_rcr_rings)); 4215 4216 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 4217 } 4218 4219 4220 static nxge_status_t 4221 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 4222 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 4223 4224 { 4225 npi_handle_t handle; 4226 npi_status_t rs = NPI_SUCCESS; 4227 rx_dma_ctl_stat_t cs; 4228 rx_dma_ent_msk_t ent_mask; 4229 nxge_status_t status = NXGE_OK; 4230 4231 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 4232 4233 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4234 4235 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 4236 "npi handle addr $%p acc $%p", 4237 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4238 4239 /* Reset RXDMA channel, but not if you're a guest. */ 4240 if (!isLDOMguest(nxgep)) { 4241 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4242 if (rs != NPI_SUCCESS) { 4243 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4244 "==> nxge_init_fzc_rdc: " 4245 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4246 channel, rs)); 4247 return (NXGE_ERROR | rs); 4248 } 4249 4250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4251 "==> nxge_rxdma_start_channel: reset done: channel %d", 4252 channel)); 4253 } 4254 4255 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4256 if (isLDOMguest(nxgep)) 4257 (void) nxge_rdc_lp_conf(nxgep, channel); 4258 #endif 4259 4260 /* 4261 * Initialize the RXDMA channel specific FZC control 4262 * configurations. These FZC registers are pertaining 4263 * to each RX channel (logical pages). 4264 */ 4265 if (!isLDOMguest(nxgep)) { 4266 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4267 if (status != NXGE_OK) { 4268 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4269 "==> nxge_rxdma_start_channel: " 4270 "init fzc rxdma failed (0x%08x channel %d)", 4271 status, channel)); 4272 return (status); 4273 } 4274 4275 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4276 "==> nxge_rxdma_start_channel: fzc done")); 4277 } 4278 4279 /* Set up the interrupt event masks. 
*/
4280 	ent_mask.value = 0;
4281 	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
4282 	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4283 	    &ent_mask);
4284 	if (rs != NPI_SUCCESS) {
4285 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4286 		    "==> nxge_rxdma_start_channel: "
4287 		    "init rxdma event masks failed "
4288 		    "(0x%08x channel %d)",
4289 		    rs, channel));
4290 		return (NXGE_ERROR | rs);
4291 	}
4292 
4293 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4294 	    "==> nxge_rxdma_start_channel: "
4295 	    "event done: channel %d (mask 0x%016llx)",
4296 	    channel, ent_mask.value));
4297 
4298 	/* Initialize the receive DMA control and status register */
4299 	cs.value = 0;
4300 	cs.bits.hdw.mex = 1;
4301 	cs.bits.hdw.rcrthres = 1;
4302 	cs.bits.hdw.rcrto = 1;
4303 	cs.bits.hdw.rbr_empty = 1;
4304 	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4305 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4306 	    "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
4307 	if (status != NXGE_OK) {
4308 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4309 		    "==> nxge_rxdma_start_channel: "
4310 		    "init rxdma control register failed (0x%08x channel %d)",
4311 		    status, channel));
4312 		return (status);
4313 	}
4314 
4315 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4316 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
4317 
4318 	/*
4319 	 * Load the RXDMA descriptors, buffers and mailbox,
4320 	 * then initialize this receive DMA channel and
4321 	 * enable it.
4322 	 */
4323 	status = nxge_enable_rxdma_channel(nxgep,
4324 	    channel, rbr_p, rcr_p, mbox_p);
4325 
4326 	if (status != NXGE_OK) {
4327 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4328 		    " nxge_rxdma_start_channel: "
4329 		    " enable rxdma failed (0x%08x channel %d)",
4330 		    status, channel));
4331 		return (status);
4332 	}
4333 
4334 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4335 	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));
4336 
4337 	if (isLDOMguest(nxgep)) {
4338 		/* Add interrupt handler for this channel.
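		 * Guest domains register the RX interrupt for this channel
		 * through the hybrid I/O (HIO) framework
		 * (nxge_hio_intr_add()).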
*/ 4339 if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4340 != NXGE_OK) { 4341 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4342 " nxge_rxdma_start_channel: " 4343 " nxge_hio_intr_add failed (0x%08x channel %d)", 4344 status, channel)); 4345 } 4346 } 4347 4348 ent_mask.value = 0; 4349 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4350 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4351 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4352 &ent_mask); 4353 if (rs != NPI_SUCCESS) { 4354 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4355 "==> nxge_rxdma_start_channel: " 4356 "init rxdma event masks failed (0x%08x channel %d)", 4357 status, channel)); 4358 return (NXGE_ERROR | rs); 4359 } 4360 4361 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4362 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4363 4364 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4365 4366 return (NXGE_OK); 4367 } 4368 4369 static nxge_status_t 4370 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4371 { 4372 npi_handle_t handle; 4373 npi_status_t rs = NPI_SUCCESS; 4374 rx_dma_ctl_stat_t cs; 4375 rx_dma_ent_msk_t ent_mask; 4376 nxge_status_t status = NXGE_OK; 4377 4378 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4379 4380 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4381 4382 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4383 "npi handle addr $%p acc $%p", 4384 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4385 4386 if (!isLDOMguest(nxgep)) { 4387 /* 4388 * Stop RxMAC = A.9.2.6 4389 */ 4390 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4391 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4392 "nxge_rxdma_stop_channel: " 4393 "Failed to disable RxMAC")); 4394 } 4395 4396 /* 4397 * Drain IPP Port = A.9.3.6 4398 */ 4399 (void) nxge_ipp_drain(nxgep); 4400 } 4401 4402 /* Reset RXDMA channel */ 4403 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4404 if (rs != NPI_SUCCESS) { 4405 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4406 " nxge_rxdma_stop_channel: " 4407 " reset rxdma failed (0x%08x channel %d)", 4408 rs, channel)); 4409 return (NXGE_ERROR | rs); 4410 } 4411 4412 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4413 "==> nxge_rxdma_stop_channel: reset done")); 4414 4415 /* Set up the interrupt event masks. */ 4416 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4417 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4418 &ent_mask); 4419 if (rs != NPI_SUCCESS) { 4420 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4421 "==> nxge_rxdma_stop_channel: " 4422 "set rxdma event masks failed (0x%08x channel %d)", 4423 rs, channel)); 4424 return (NXGE_ERROR | rs); 4425 } 4426 4427 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4428 "==> nxge_rxdma_stop_channel: event done")); 4429 4430 /* 4431 * Initialize the receive DMA control and status register 4432 */ 4433 cs.value = 0; 4434 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4435 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4436 " to default (all 0s) 0x%08x", cs.value)); 4437 if (status != NXGE_OK) { 4438 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4439 " nxge_rxdma_stop_channel: init rxdma" 4440 " control register failed (0x%08x channel %d", 4441 status, channel)); 4442 return (status); 4443 } 4444 4445 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4446 "==> nxge_rxdma_stop_channel: control done")); 4447 4448 /* 4449 * Make sure channel is disabled. 
4450 */ 4451 status = nxge_disable_rxdma_channel(nxgep, channel); 4452 4453 if (status != NXGE_OK) { 4454 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4455 " nxge_rxdma_stop_channel: " 4456 " init enable rxdma failed (0x%08x channel %d)", 4457 status, channel)); 4458 return (status); 4459 } 4460 4461 if (!isLDOMguest(nxgep)) { 4462 /* 4463 * Enable RxMAC = A.9.2.10 4464 */ 4465 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4466 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4467 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4468 } 4469 } 4470 4471 NXGE_DEBUG_MSG((nxgep, 4472 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4473 4474 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4475 4476 return (NXGE_OK); 4477 } 4478 4479 nxge_status_t 4480 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4481 { 4482 npi_handle_t handle; 4483 p_nxge_rdc_sys_stats_t statsp; 4484 rx_ctl_dat_fifo_stat_t stat; 4485 uint32_t zcp_err_status; 4486 uint32_t ipp_err_status; 4487 nxge_status_t status = NXGE_OK; 4488 npi_status_t rs = NPI_SUCCESS; 4489 boolean_t my_err = B_FALSE; 4490 4491 handle = nxgep->npi_handle; 4492 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4493 4494 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4495 4496 if (rs != NPI_SUCCESS) 4497 return (NXGE_ERROR | rs); 4498 4499 if (stat.bits.ldw.id_mismatch) { 4500 statsp->id_mismatch++; 4501 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4502 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4503 /* Global fatal error encountered */ 4504 } 4505 4506 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4507 switch (nxgep->mac.portnum) { 4508 case 0: 4509 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4510 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4511 my_err = B_TRUE; 4512 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4513 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4514 } 4515 break; 4516 case 1: 4517 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4518 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4519 my_err = B_TRUE; 4520 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4521 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4522 } 4523 break; 4524 case 2: 4525 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4526 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4527 my_err = B_TRUE; 4528 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4529 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4530 } 4531 break; 4532 case 3: 4533 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4534 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4535 my_err = B_TRUE; 4536 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4537 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4538 } 4539 break; 4540 default: 4541 return (NXGE_ERROR); 4542 } 4543 } 4544 4545 if (my_err) { 4546 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4547 zcp_err_status); 4548 if (status != NXGE_OK) 4549 return (status); 4550 } 4551 4552 return (NXGE_OK); 4553 } 4554 4555 static nxge_status_t 4556 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4557 uint32_t zcp_status) 4558 { 4559 boolean_t rxport_fatal = B_FALSE; 4560 p_nxge_rdc_sys_stats_t statsp; 4561 nxge_status_t status = NXGE_OK; 4562 uint8_t portn; 4563 4564 portn = nxgep->mac.portnum; 4565 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4566 4567 if (ipp_status & (0x1 << portn)) { 4568 statsp->ipp_eop_err++; 4569 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4570 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4571 rxport_fatal = B_TRUE; 4572 } 4573 4574 if (zcp_status & (0x1 << 
portn)) { 4575 statsp->zcp_eop_err++; 4576 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4577 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4578 rxport_fatal = B_TRUE; 4579 } 4580 4581 if (rxport_fatal) { 4582 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4583 " nxge_rxdma_handle_port_error: " 4584 " fatal error on Port #%d\n", 4585 portn)); 4586 status = nxge_rx_port_fatal_err_recover(nxgep); 4587 if (status == NXGE_OK) { 4588 FM_SERVICE_RESTORED(nxgep); 4589 } 4590 } 4591 4592 return (status); 4593 } 4594 4595 static nxge_status_t 4596 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4597 { 4598 npi_handle_t handle; 4599 npi_status_t rs = NPI_SUCCESS; 4600 nxge_status_t status = NXGE_OK; 4601 p_rx_rbr_ring_t rbrp; 4602 p_rx_rcr_ring_t rcrp; 4603 p_rx_mbox_t mboxp; 4604 rx_dma_ent_msk_t ent_mask; 4605 p_nxge_dma_common_t dmap; 4606 int ring_idx; 4607 uint32_t ref_cnt; 4608 p_rx_msg_t rx_msg_p; 4609 int i; 4610 uint32_t nxge_port_rcr_size; 4611 4612 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4613 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4614 "Recovering from RxDMAChannel#%d error...", channel)); 4615 4616 /* 4617 * Stop the dma channel waits for the stop done. 4618 * If the stop done bit is not set, then create 4619 * an error. 4620 */ 4621 4622 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4624 4625 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4626 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4627 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4628 4629 MUTEX_ENTER(&rcrp->lock); 4630 MUTEX_ENTER(&rbrp->lock); 4631 MUTEX_ENTER(&rbrp->post_lock); 4632 4633 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4634 4635 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4636 if (rs != NPI_SUCCESS) { 4637 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4638 "nxge_disable_rxdma_channel:failed")); 4639 goto fail; 4640 } 4641 4642 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4643 4644 /* Disable interrupt */ 4645 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4646 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4647 if (rs != NPI_SUCCESS) { 4648 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4649 "nxge_rxdma_stop_channel: " 4650 "set rxdma event masks failed (channel %d)", 4651 channel)); 4652 } 4653 4654 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4655 4656 /* Reset RXDMA channel */ 4657 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4658 if (rs != NPI_SUCCESS) { 4659 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4660 "nxge_rxdma_fatal_err_recover: " 4661 " reset rxdma failed (channel %d)", channel)); 4662 goto fail; 4663 } 4664 4665 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4666 4667 mboxp = 4668 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4669 4670 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4671 rbrp->rbr_rd_index = 0; 4672 4673 rcrp->comp_rd_index = 0; 4674 rcrp->comp_wt_index = 0; 4675 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4676 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4677 #if defined(__i386) 4678 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4679 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4680 #else 4681 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4682 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4683 #endif 4684 4685 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4686 (nxge_port_rcr_size - 1); 4687 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4688 (nxge_port_rcr_size - 1); 4689 
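	/*
	 * Zero the RCR descriptor area, then scan the RBR: buffers whose
	 * usage count has reached its maximum are marked free so they can
	 * be re-posted once the channel is restarted.
	 */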
4690 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4691 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4692 4693 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4694 4695 for (i = 0; i < rbrp->rbr_max_size; i++) { 4696 rx_msg_p = rbrp->rx_msg_ring[i]; 4697 ref_cnt = rx_msg_p->ref_cnt; 4698 if (ref_cnt != 1) { 4699 if (rx_msg_p->cur_usage_cnt != 4700 rx_msg_p->max_usage_cnt) { 4701 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4702 "buf[%d]: cur_usage_cnt = %d " 4703 "max_usage_cnt = %d\n", i, 4704 rx_msg_p->cur_usage_cnt, 4705 rx_msg_p->max_usage_cnt)); 4706 } else { 4707 /* Buffer can be re-posted */ 4708 rx_msg_p->free = B_TRUE; 4709 rx_msg_p->cur_usage_cnt = 0; 4710 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4711 rx_msg_p->pkt_buf_size = 0; 4712 } 4713 } 4714 } 4715 4716 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4717 4718 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4719 if (status != NXGE_OK) { 4720 goto fail; 4721 } 4722 4723 MUTEX_EXIT(&rbrp->post_lock); 4724 MUTEX_EXIT(&rbrp->lock); 4725 MUTEX_EXIT(&rcrp->lock); 4726 4727 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4728 "Recovery Successful, RxDMAChannel#%d Restored", 4729 channel)); 4730 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4731 4732 return (NXGE_OK); 4733 fail: 4734 MUTEX_EXIT(&rbrp->post_lock); 4735 MUTEX_EXIT(&rbrp->lock); 4736 MUTEX_EXIT(&rcrp->lock); 4737 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4738 4739 return (NXGE_ERROR | rs); 4740 } 4741 4742 nxge_status_t 4743 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4744 { 4745 nxge_grp_set_t *set = &nxgep->rx_set; 4746 nxge_status_t status = NXGE_OK; 4747 int rdc; 4748 4749 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4750 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4751 "Recovering from RxPort error...")); 4752 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4753 4754 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4755 goto fail; 4756 4757 NXGE_DELAY(1000); 4758 4759 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4760 4761 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4762 if ((1 << rdc) & set->owned.map) { 4763 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4764 != NXGE_OK) { 4765 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4766 "Could not recover channel %d", rdc)); 4767 } 4768 } 4769 } 4770 4771 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4772 4773 /* Reset IPP */ 4774 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4775 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4776 "nxge_rx_port_fatal_err_recover: " 4777 "Failed to reset IPP")); 4778 goto fail; 4779 } 4780 4781 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4782 4783 /* Reset RxMAC */ 4784 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4785 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4786 "nxge_rx_port_fatal_err_recover: " 4787 "Failed to reset RxMAC")); 4788 goto fail; 4789 } 4790 4791 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4792 4793 /* Re-Initialize IPP */ 4794 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4795 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4796 "nxge_rx_port_fatal_err_recover: " 4797 "Failed to init IPP")); 4798 goto fail; 4799 } 4800 4801 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4802 4803 /* Re-Initialize RxMAC */ 4804 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4805 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4806 "nxge_rx_port_fatal_err_recover: " 4807 "Failed to reset RxMAC")); 4808 goto fail; 4809 } 4810 4811 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4812 4813 /* 
Re-enable RxMAC */ 4814 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4815 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4816 "nxge_rx_port_fatal_err_recover: " 4817 "Failed to enable RxMAC")); 4818 goto fail; 4819 } 4820 4821 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4822 "Recovery Successful, RxPort Restored")); 4823 4824 return (NXGE_OK); 4825 fail: 4826 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4827 return (status); 4828 } 4829 4830 void 4831 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4832 { 4833 rx_dma_ctl_stat_t cs; 4834 rx_ctl_dat_fifo_stat_t cdfs; 4835 4836 switch (err_id) { 4837 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4838 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4839 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4840 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4841 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4842 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4843 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4844 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4845 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4846 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4847 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4848 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4849 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4850 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4851 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4852 chan, &cs.value); 4853 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4854 cs.bits.hdw.rcr_ack_err = 1; 4855 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4856 cs.bits.hdw.dc_fifo_err = 1; 4857 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4858 cs.bits.hdw.rcr_sha_par = 1; 4859 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4860 cs.bits.hdw.rbr_pre_par = 1; 4861 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4862 cs.bits.hdw.rbr_tmout = 1; 4863 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4864 cs.bits.hdw.rsp_cnt_err = 1; 4865 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4866 cs.bits.hdw.byte_en_bus = 1; 4867 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4868 cs.bits.hdw.rsp_dat_err = 1; 4869 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4870 cs.bits.hdw.config_err = 1; 4871 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4872 cs.bits.hdw.rcrincon = 1; 4873 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4874 cs.bits.hdw.rcrfull = 1; 4875 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4876 cs.bits.hdw.rbrfull = 1; 4877 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4878 cs.bits.hdw.rbrlogpage = 1; 4879 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4880 cs.bits.hdw.cfiglogpage = 1; 4881 #if defined(__i386) 4882 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4883 cs.value); 4884 #else 4885 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4886 cs.value); 4887 #endif 4888 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4889 chan, cs.value); 4890 break; 4891 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4892 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4893 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4894 cdfs.value = 0; 4895 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4896 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4897 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4898 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4899 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4900 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4901 #if defined(__i386) 4902 cmn_err(CE_NOTE, 4903 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4904 cdfs.value); 4905 #else 4906 cmn_err(CE_NOTE, 4907 "!Write 
0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4908 cdfs.value); 4909 #endif 4910 NXGE_REG_WR64(nxgep->npi_handle, 4911 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4912 break; 4913 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4914 break; 4915 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4916 break; 4917 } 4918 } 4919 4920 static void 4921 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4922 { 4923 rxring_info_t *ring_info; 4924 int index; 4925 uint32_t chunk_size; 4926 uint64_t kaddr; 4927 uint_t num_blocks; 4928 4929 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4930 4931 if (rbr_p == NULL) { 4932 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4933 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4934 return; 4935 } 4936 4937 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4938 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4939 "<== nxge_rxdma_databuf_free: DDI")); 4940 return; 4941 } 4942 4943 ring_info = rbr_p->ring_info; 4944 if (ring_info == NULL) { 4945 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4946 "==> nxge_rxdma_databuf_free: NULL ring info")); 4947 return; 4948 } 4949 num_blocks = rbr_p->num_blocks; 4950 for (index = 0; index < num_blocks; index++) { 4951 kaddr = ring_info->buffer[index].kaddr; 4952 chunk_size = ring_info->buffer[index].buf_size; 4953 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4954 "==> nxge_rxdma_databuf_free: free chunk %d " 4955 "kaddrp $%p chunk size %d", 4956 index, kaddr, chunk_size)); 4957 if (kaddr == NULL) continue; 4958 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4959 ring_info->buffer[index].kaddr = NULL; 4960 } 4961 4962 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4963 } 4964 4965 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4966 extern void contig_mem_free(void *, size_t); 4967 #endif 4968 4969 void 4970 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4971 { 4972 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4973 4974 if (kaddr == NULL || !buf_size) { 4975 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4976 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4977 kaddr, buf_size)); 4978 return; 4979 } 4980 4981 switch (alloc_type) { 4982 case KMEM_ALLOC: 4983 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4984 "==> nxge_free_buf: freeing kmem $%p size %d", 4985 kaddr, buf_size)); 4986 #if defined(__i386) 4987 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4988 #else 4989 KMEM_FREE((void *)kaddr, buf_size); 4990 #endif 4991 break; 4992 4993 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4994 case CONTIG_MEM_ALLOC: 4995 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4996 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4997 kaddr, buf_size)); 4998 contig_mem_free((void *)kaddr, buf_size); 4999 break; 5000 #endif 5001 5002 default: 5003 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5004 "<== nxge_free_buf: unsupported alloc type %d", 5005 alloc_type)); 5006 return; 5007 } 5008 5009 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5010 } 5011
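/*
 * Illustrative sketch only (not part of the driver build): a hypothetical,
 * debug-only helper showing how nxge_rxdma_inject_err() above might be used
 * to exercise the per-channel fatal-error recovery path
 * (nxge_rxdma_fatal_err_recover()).  The helper name and the idea of a
 * separate debug entry point are assumptions made for illustration.
 */
#if 0
static void
nxge_debug_inject_rcr_ack_err(p_nxge_t nxgep, uint8_t chan)
{
	/*
	 * Set the RCR ACK error bit in the RX_DMA_CTL_STAT debug register
	 * for the given channel; the resulting error event is expected to
	 * drive the recovery code implemented in this file.
	 */
	nxge_rxdma_inject_err(nxgep, NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR, chan);
}
#endif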