1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/nxge/nxge_impl.h> 27 #include <sys/nxge/nxge_rxdma.h> 28 #include <sys/nxge/nxge_hio.h> 29 30 #if !defined(_BIG_ENDIAN) 31 #include <npi_rx_rd32.h> 32 #endif 33 #include <npi_rx_rd64.h> 34 #include <npi_rx_wr64.h> 35 36 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 37 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 38 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 39 (rdc + nxgep->pt_config.hw_config.start_rdc) 40 41 /* 42 * Globals: tunable parameters (/etc/system or adb) 43 * 44 */ 45 extern uint32_t nxge_rbr_size; 46 extern uint32_t nxge_rcr_size; 47 extern uint32_t nxge_rbr_spare_size; 48 49 extern uint32_t nxge_mblks_pending; 50 51 /* 52 * Tunable to reduce the amount of time spent in the 53 * ISR doing Rx Processing. 54 */ 55 extern uint32_t nxge_max_rx_pkts; 56 57 /* 58 * Tunables to manage the receive buffer blocks. 59 * 60 * nxge_rx_threshold_hi: copy all buffers. 61 * nxge_rx_bcopy_size_type: receive buffer block size type. 62 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
63 */ 64 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 65 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 66 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 67 68 extern uint32_t nxge_cksum_offload; 69 70 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 71 static void nxge_unmap_rxdma(p_nxge_t, int); 72 73 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 74 75 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 76 static void nxge_rxdma_hw_stop(p_nxge_t, int); 77 78 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 79 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 80 uint32_t, 81 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 82 p_rx_mbox_t *); 83 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 84 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 85 86 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 87 uint16_t, 88 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 89 p_rx_rcr_ring_t *, p_rx_mbox_t *); 90 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 91 p_rx_rcr_ring_t, p_rx_mbox_t); 92 93 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 94 uint16_t, 95 p_nxge_dma_common_t *, 96 p_rx_rbr_ring_t *, uint32_t); 97 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 98 p_rx_rbr_ring_t); 99 100 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 101 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 102 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 103 104 static mblk_t * 105 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 106 107 static void nxge_receive_packet(p_nxge_t, 108 p_rx_rcr_ring_t, 109 p_rcr_entry_t, 110 boolean_t *, 111 mblk_t **, mblk_t **); 112 113 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 114 115 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 116 static void nxge_freeb(p_rx_msg_t); 117 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 118 119 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 120 uint32_t, uint32_t); 121 122 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 123 p_rx_rbr_ring_t); 124 125 126 static nxge_status_t 127 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 128 129 nxge_status_t 130 nxge_rx_port_fatal_err_recover(p_nxge_t); 131 132 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 133 134 nxge_status_t 135 nxge_init_rxdma_channels(p_nxge_t nxgep) 136 { 137 nxge_grp_set_t *set = &nxgep->rx_set; 138 int i, count, channel; 139 nxge_grp_t *group; 140 dc_map_t map; 141 int dev_gindex; 142 143 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 144 145 if (!isLDOMguest(nxgep)) { 146 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 147 cmn_err(CE_NOTE, "hw_start_common"); 148 return (NXGE_ERROR); 149 } 150 } 151 152 /* 153 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 154 * We only have 8 hardware RDC tables, but we may have 155 * up to 16 logical (software-defined) groups of RDCS, 156 * if we make use of layer 3 & 4 hardware classification. 
157 */ 158 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 159 if ((1 << i) & set->lg.map) { 160 group = set->group[i]; 161 dev_gindex = 162 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 163 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 164 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 165 if ((1 << channel) & map) { 166 if ((nxge_grp_dc_add(nxgep, 167 group, VP_BOUND_RX, channel))) 168 goto init_rxdma_channels_exit; 169 } 170 } 171 } 172 if (++count == set->lg.count) 173 break; 174 } 175 176 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 177 return (NXGE_OK); 178 179 init_rxdma_channels_exit: 180 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 181 if ((1 << i) & set->lg.map) { 182 group = set->group[i]; 183 dev_gindex = 184 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 185 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 186 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 187 if ((1 << channel) & map) { 188 nxge_grp_dc_remove(nxgep, 189 VP_BOUND_RX, channel); 190 } 191 } 192 } 193 if (++count == set->lg.count) 194 break; 195 } 196 197 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 198 return (NXGE_ERROR); 199 } 200 201 nxge_status_t 202 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 203 { 204 nxge_status_t status; 205 206 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 207 208 status = nxge_map_rxdma(nxge, channel); 209 if (status != NXGE_OK) { 210 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 211 "<== nxge_init_rxdma: status 0x%x", status)); 212 return (status); 213 } 214 215 #if defined(sun4v) 216 if (isLDOMguest(nxge)) { 217 /* set rcr_ring */ 218 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 219 220 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 221 if (status != NXGE_OK) { 222 nxge_unmap_rxdma(nxge, channel); 223 return (status); 224 } 225 } 226 #endif 227 228 status = nxge_rxdma_hw_start(nxge, channel); 229 if (status != NXGE_OK) { 230 nxge_unmap_rxdma(nxge, channel); 231 } 232 233 if (!nxge->statsp->rdc_ksp[channel]) 234 nxge_setup_rdc_kstats(nxge, channel); 235 236 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 237 "<== nxge_init_rxdma_channel: status 0x%x", status)); 238 239 return (status); 240 } 241 242 void 243 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 244 { 245 nxge_grp_set_t *set = &nxgep->rx_set; 246 int rdc; 247 248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 249 250 if (set->owned.map == 0) { 251 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 252 "nxge_uninit_rxdma_channels: no channels")); 253 return; 254 } 255 256 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 257 if ((1 << rdc) & set->owned.map) { 258 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 259 } 260 } 261 262 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 263 } 264 265 void 266 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 267 { 268 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 269 270 if (nxgep->statsp->rdc_ksp[channel]) { 271 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 272 nxgep->statsp->rdc_ksp[channel] = 0; 273 } 274 275 nxge_rxdma_hw_stop(nxgep, channel); 276 nxge_unmap_rxdma(nxgep, channel); 277 278 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 279 } 280 281 nxge_status_t 282 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 283 { 284 npi_handle_t handle; 285 npi_status_t rs = NPI_SUCCESS; 286 nxge_status_t status = NXGE_OK; 287 288 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 
289 290 handle = NXGE_DEV_NPI_HANDLE(nxgep); 291 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 292 293 if (rs != NPI_SUCCESS) { 294 status = NXGE_ERROR | rs; 295 } 296 297 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 298 299 return (status); 300 } 301 302 void 303 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 304 { 305 nxge_grp_set_t *set = &nxgep->rx_set; 306 int rdc; 307 308 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 309 310 if (!isLDOMguest(nxgep)) { 311 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 312 (void) npi_rxdma_dump_fzc_regs(handle); 313 } 314 315 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 316 NXGE_DEBUG_MSG((nxgep, TX_CTL, 317 "nxge_rxdma_regs_dump_channels: " 318 "NULL ring pointer(s)")); 319 return; 320 } 321 322 if (set->owned.map == 0) { 323 NXGE_DEBUG_MSG((nxgep, RX_CTL, 324 "nxge_rxdma_regs_dump_channels: no channels")); 325 return; 326 } 327 328 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 329 if ((1 << rdc) & set->owned.map) { 330 rx_rbr_ring_t *ring = 331 nxgep->rx_rbr_rings->rbr_rings[rdc]; 332 if (ring) { 333 (void) nxge_dump_rxdma_channel(nxgep, rdc); 334 } 335 } 336 } 337 338 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 339 } 340 341 nxge_status_t 342 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 343 { 344 npi_handle_t handle; 345 npi_status_t rs = NPI_SUCCESS; 346 nxge_status_t status = NXGE_OK; 347 348 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 349 350 handle = NXGE_DEV_NPI_HANDLE(nxgep); 351 rs = npi_rxdma_dump_rdc_regs(handle, channel); 352 353 if (rs != NPI_SUCCESS) { 354 status = NXGE_ERROR | rs; 355 } 356 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 357 return (status); 358 } 359 360 nxge_status_t 361 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 362 p_rx_dma_ent_msk_t mask_p) 363 { 364 npi_handle_t handle; 365 npi_status_t rs = NPI_SUCCESS; 366 nxge_status_t status = NXGE_OK; 367 368 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 369 "<== nxge_init_rxdma_channel_event_mask")); 370 371 handle = NXGE_DEV_NPI_HANDLE(nxgep); 372 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 373 if (rs != NPI_SUCCESS) { 374 status = NXGE_ERROR | rs; 375 } 376 377 return (status); 378 } 379 380 nxge_status_t 381 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 382 p_rx_dma_ctl_stat_t cs_p) 383 { 384 npi_handle_t handle; 385 npi_status_t rs = NPI_SUCCESS; 386 nxge_status_t status = NXGE_OK; 387 388 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 389 "<== nxge_init_rxdma_channel_cntl_stat")); 390 391 handle = NXGE_DEV_NPI_HANDLE(nxgep); 392 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 393 394 if (rs != NPI_SUCCESS) { 395 status = NXGE_ERROR | rs; 396 } 397 398 return (status); 399 } 400 401 /* 402 * nxge_rxdma_cfg_rdcgrp_default_rdc 403 * 404 * Set the default RDC for an RDC Group (Table) 405 * 406 * Arguments: 407 * nxgep 408 * rdcgrp The group to modify 409 * rdc The new default RDC. 
410 * 411 * Notes: 412 * 413 * NPI/NXGE function calls: 414 * npi_rxdma_cfg_rdc_table_default_rdc() 415 * 416 * Registers accessed: 417 * RDC_TBL_REG: FZC_ZCP + 0x10000 418 * 419 * Context: 420 * Service domain 421 */ 422 nxge_status_t 423 nxge_rxdma_cfg_rdcgrp_default_rdc( 424 p_nxge_t nxgep, 425 uint8_t rdcgrp, 426 uint8_t rdc) 427 { 428 npi_handle_t handle; 429 npi_status_t rs = NPI_SUCCESS; 430 p_nxge_dma_pt_cfg_t p_dma_cfgp; 431 p_nxge_rdc_grp_t rdc_grp_p; 432 uint8_t actual_rdcgrp, actual_rdc; 433 434 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 435 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 436 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 437 438 handle = NXGE_DEV_NPI_HANDLE(nxgep); 439 440 /* 441 * This has to be rewritten. Do we even allow this anymore? 442 */ 443 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 444 RDC_MAP_IN(rdc_grp_p->map, rdc); 445 rdc_grp_p->def_rdc = rdc; 446 447 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 448 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 449 450 rs = npi_rxdma_cfg_rdc_table_default_rdc( 451 handle, actual_rdcgrp, actual_rdc); 452 453 if (rs != NPI_SUCCESS) { 454 return (NXGE_ERROR | rs); 455 } 456 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 457 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 458 return (NXGE_OK); 459 } 460 461 nxge_status_t 462 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 463 { 464 npi_handle_t handle; 465 466 uint8_t actual_rdc; 467 npi_status_t rs = NPI_SUCCESS; 468 469 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 470 " ==> nxge_rxdma_cfg_port_default_rdc")); 471 472 handle = NXGE_DEV_NPI_HANDLE(nxgep); 473 actual_rdc = rdc; /* XXX Hack! */ 474 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 475 476 477 if (rs != NPI_SUCCESS) { 478 return (NXGE_ERROR | rs); 479 } 480 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 481 " <== nxge_rxdma_cfg_port_default_rdc")); 482 483 return (NXGE_OK); 484 } 485 486 nxge_status_t 487 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 488 uint16_t pkts) 489 { 490 npi_status_t rs = NPI_SUCCESS; 491 npi_handle_t handle; 492 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 493 " ==> nxge_rxdma_cfg_rcr_threshold")); 494 handle = NXGE_DEV_NPI_HANDLE(nxgep); 495 496 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 497 498 if (rs != NPI_SUCCESS) { 499 return (NXGE_ERROR | rs); 500 } 501 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 502 return (NXGE_OK); 503 } 504 505 nxge_status_t 506 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 507 uint16_t tout, uint8_t enable) 508 { 509 npi_status_t rs = NPI_SUCCESS; 510 npi_handle_t handle; 511 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 512 handle = NXGE_DEV_NPI_HANDLE(nxgep); 513 if (enable == 0) { 514 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 515 } else { 516 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 517 tout); 518 } 519 520 if (rs != NPI_SUCCESS) { 521 return (NXGE_ERROR | rs); 522 } 523 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 524 return (NXGE_OK); 525 } 526 527 nxge_status_t 528 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 529 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 530 { 531 npi_handle_t handle; 532 rdc_desc_cfg_t rdc_desc; 533 p_rcrcfig_b_t cfgb_p; 534 npi_status_t rs = NPI_SUCCESS; 535 536 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 537 handle = NXGE_DEV_NPI_HANDLE(nxgep); 538 /* 539 * Use configuration data composed at init time. 
540 * Write to hardware the receive ring configurations. 541 */ 542 rdc_desc.mbox_enable = 1; 543 rdc_desc.mbox_addr = mbox_p->mbox_addr; 544 NXGE_DEBUG_MSG((nxgep, RX_CTL, 545 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 546 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 547 548 rdc_desc.rbr_len = rbr_p->rbb_max; 549 rdc_desc.rbr_addr = rbr_p->rbr_addr; 550 551 switch (nxgep->rx_bksize_code) { 552 case RBR_BKSIZE_4K: 553 rdc_desc.page_size = SIZE_4KB; 554 break; 555 case RBR_BKSIZE_8K: 556 rdc_desc.page_size = SIZE_8KB; 557 break; 558 case RBR_BKSIZE_16K: 559 rdc_desc.page_size = SIZE_16KB; 560 break; 561 case RBR_BKSIZE_32K: 562 rdc_desc.page_size = SIZE_32KB; 563 break; 564 } 565 566 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 567 rdc_desc.valid0 = 1; 568 569 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 570 rdc_desc.valid1 = 1; 571 572 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 573 rdc_desc.valid2 = 1; 574 575 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 576 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 577 578 rdc_desc.rcr_len = rcr_p->comp_size; 579 rdc_desc.rcr_addr = rcr_p->rcr_addr; 580 581 cfgb_p = &(rcr_p->rcr_cfgb); 582 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 583 /* For now, disable this timeout in a guest domain. */ 584 if (isLDOMguest(nxgep)) { 585 rdc_desc.rcr_timeout = 0; 586 rdc_desc.rcr_timeout_enable = 0; 587 } else { 588 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 589 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 590 } 591 592 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 593 "rbr_len qlen %d pagesize code %d rcr_len %d", 594 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 595 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 596 "size 0 %d size 1 %d size 2 %d", 597 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 598 rbr_p->npi_pkt_buf_size2)); 599 600 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 601 if (rs != NPI_SUCCESS) { 602 return (NXGE_ERROR | rs); 603 } 604 605 /* 606 * Enable the timeout and threshold. 607 */ 608 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 609 rdc_desc.rcr_threshold); 610 if (rs != NPI_SUCCESS) { 611 return (NXGE_ERROR | rs); 612 } 613 614 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 615 rdc_desc.rcr_timeout); 616 if (rs != NPI_SUCCESS) { 617 return (NXGE_ERROR | rs); 618 } 619 620 if (!isLDOMguest(nxgep)) { 621 /* Enable the DMA */ 622 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 623 if (rs != NPI_SUCCESS) { 624 return (NXGE_ERROR | rs); 625 } 626 } 627 628 /* Kick the DMA engine. 
*/ 629 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 630 631 if (!isLDOMguest(nxgep)) { 632 /* Clear the rbr empty bit */ 633 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 634 } 635 636 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 637 638 return (NXGE_OK); 639 } 640 641 nxge_status_t 642 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 643 { 644 npi_handle_t handle; 645 npi_status_t rs = NPI_SUCCESS; 646 647 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 648 handle = NXGE_DEV_NPI_HANDLE(nxgep); 649 650 /* disable the DMA */ 651 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 652 if (rs != NPI_SUCCESS) { 653 NXGE_DEBUG_MSG((nxgep, RX_CTL, 654 "<== nxge_disable_rxdma_channel:failed (0x%x)", 655 rs)); 656 return (NXGE_ERROR | rs); 657 } 658 659 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 660 return (NXGE_OK); 661 } 662 663 nxge_status_t 664 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 665 { 666 npi_handle_t handle; 667 nxge_status_t status = NXGE_OK; 668 669 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 670 "<== nxge_init_rxdma_channel_rcrflush")); 671 672 handle = NXGE_DEV_NPI_HANDLE(nxgep); 673 npi_rxdma_rdc_rcr_flush(handle, channel); 674 675 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 676 "<== nxge_init_rxdma_channel_rcrflsh")); 677 return (status); 678 679 } 680 681 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 682 683 #define TO_LEFT -1 684 #define TO_RIGHT 1 685 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 686 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 687 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 688 #define NO_HINT 0xffffffff 689 690 /*ARGSUSED*/ 691 nxge_status_t 692 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 693 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 694 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 695 { 696 int bufsize; 697 uint64_t pktbuf_pp; 698 uint64_t dvma_addr; 699 rxring_info_t *ring_info; 700 int base_side, end_side; 701 int r_index, l_index, anchor_index; 702 int found, search_done; 703 uint32_t offset, chunk_size, block_size, page_size_mask; 704 uint32_t chunk_index, block_index, total_index; 705 int max_iterations, iteration; 706 rxbuf_index_info_t *bufinfo; 707 708 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 709 710 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 711 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 712 pkt_buf_addr_pp, 713 pktbufsz_type)); 714 #if defined(__i386) 715 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 716 #else 717 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 718 #endif 719 720 switch (pktbufsz_type) { 721 case 0: 722 bufsize = rbr_p->pkt_buf_size0; 723 break; 724 case 1: 725 bufsize = rbr_p->pkt_buf_size1; 726 break; 727 case 2: 728 bufsize = rbr_p->pkt_buf_size2; 729 break; 730 case RCR_SINGLE_BLOCK: 731 bufsize = 0; 732 anchor_index = 0; 733 break; 734 default: 735 return (NXGE_ERROR); 736 } 737 738 if (rbr_p->num_blocks == 1) { 739 anchor_index = 0; 740 ring_info = rbr_p->ring_info; 741 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 742 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 743 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 744 "buf_pp $%p btype %d anchor_index %d " 745 "bufinfo $%p", 746 pkt_buf_addr_pp, 747 pktbufsz_type, 748 anchor_index, 749 bufinfo)); 750 751 goto found_index; 752 } 753 754 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 755 "==> nxge_rxbuf_pp_to_vp: " 756 "buf_pp $%p btype %d anchor_index %d", 757 pkt_buf_addr_pp, 758 pktbufsz_type, 759 anchor_index)); 760 761 ring_info = rbr_p->ring_info; 762 found = B_FALSE; 
763 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 764 iteration = 0; 765 max_iterations = ring_info->max_iterations; 766 /* 767 * First check if this block has been seen 768 * recently. This is indicated by a hint which 769 * is initialized when the first buffer of the block 770 * is seen. The hint is reset when the last buffer of 771 * the block has been processed. 772 * As three block sizes are supported, three hints 773 * are kept. The idea behind the hints is that once 774 * the hardware uses a block for a buffer of that 775 * size, it will use it exclusively for that size 776 * and will use it until it is exhausted. It is assumed 777 * that there would a single block being used for the same 778 * buffer sizes at any given time. 779 */ 780 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 781 anchor_index = ring_info->hint[pktbufsz_type]; 782 dvma_addr = bufinfo[anchor_index].dvma_addr; 783 chunk_size = bufinfo[anchor_index].buf_size; 784 if ((pktbuf_pp >= dvma_addr) && 785 (pktbuf_pp < (dvma_addr + chunk_size))) { 786 found = B_TRUE; 787 /* 788 * check if this is the last buffer in the block 789 * If so, then reset the hint for the size; 790 */ 791 792 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 793 ring_info->hint[pktbufsz_type] = NO_HINT; 794 } 795 } 796 797 if (found == B_FALSE) { 798 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 799 "==> nxge_rxbuf_pp_to_vp: (!found)" 800 "buf_pp $%p btype %d anchor_index %d", 801 pkt_buf_addr_pp, 802 pktbufsz_type, 803 anchor_index)); 804 805 /* 806 * This is the first buffer of the block of this 807 * size. Need to search the whole information 808 * array. 809 * the search algorithm uses a binary tree search 810 * algorithm. It assumes that the information is 811 * already sorted with increasing order 812 * info[0] < info[1] < info[2] .... 
< info[n-1] 813 * where n is the size of the information array 814 */ 815 r_index = rbr_p->num_blocks - 1; 816 l_index = 0; 817 search_done = B_FALSE; 818 anchor_index = MID_INDEX(r_index, l_index); 819 while (search_done == B_FALSE) { 820 if ((r_index == l_index) || 821 (iteration >= max_iterations)) 822 search_done = B_TRUE; 823 end_side = TO_RIGHT; /* to the right */ 824 base_side = TO_LEFT; /* to the left */ 825 /* read the DVMA address information and sort it */ 826 dvma_addr = bufinfo[anchor_index].dvma_addr; 827 chunk_size = bufinfo[anchor_index].buf_size; 828 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 829 "==> nxge_rxbuf_pp_to_vp: (searching)" 830 "buf_pp $%p btype %d " 831 "anchor_index %d chunk_size %d dvmaaddr $%p", 832 pkt_buf_addr_pp, 833 pktbufsz_type, 834 anchor_index, 835 chunk_size, 836 dvma_addr)); 837 838 if (pktbuf_pp >= dvma_addr) 839 base_side = TO_RIGHT; /* to the right */ 840 if (pktbuf_pp < (dvma_addr + chunk_size)) 841 end_side = TO_LEFT; /* to the left */ 842 843 switch (base_side + end_side) { 844 case IN_MIDDLE: 845 /* found */ 846 found = B_TRUE; 847 search_done = B_TRUE; 848 if ((pktbuf_pp + bufsize) < 849 (dvma_addr + chunk_size)) 850 ring_info->hint[pktbufsz_type] = 851 bufinfo[anchor_index].buf_index; 852 break; 853 case BOTH_RIGHT: 854 /* not found: go to the right */ 855 l_index = anchor_index + 1; 856 anchor_index = MID_INDEX(r_index, l_index); 857 break; 858 859 case BOTH_LEFT: 860 /* not found: go to the left */ 861 r_index = anchor_index - 1; 862 anchor_index = MID_INDEX(r_index, l_index); 863 break; 864 default: /* should not come here */ 865 return (NXGE_ERROR); 866 } 867 iteration++; 868 } 869 870 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 871 "==> nxge_rxbuf_pp_to_vp: (search done)" 872 "buf_pp $%p btype %d anchor_index %d", 873 pkt_buf_addr_pp, 874 pktbufsz_type, 875 anchor_index)); 876 } 877 878 if (found == B_FALSE) { 879 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 880 "==> nxge_rxbuf_pp_to_vp: (search failed)" 881 "buf_pp $%p btype %d anchor_index %d", 882 pkt_buf_addr_pp, 883 pktbufsz_type, 884 anchor_index)); 885 return (NXGE_ERROR); 886 } 887 888 found_index: 889 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 890 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 891 "buf_pp $%p btype %d bufsize %d anchor_index %d", 892 pkt_buf_addr_pp, 893 pktbufsz_type, 894 bufsize, 895 anchor_index)); 896 897 /* index of the first block in this chunk */ 898 chunk_index = bufinfo[anchor_index].start_index; 899 dvma_addr = bufinfo[anchor_index].dvma_addr; 900 page_size_mask = ring_info->block_size_mask; 901 902 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 903 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 904 "buf_pp $%p btype %d bufsize %d " 905 "anchor_index %d chunk_index %d dvma $%p", 906 pkt_buf_addr_pp, 907 pktbufsz_type, 908 bufsize, 909 anchor_index, 910 chunk_index, 911 dvma_addr)); 912 913 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 914 block_size = rbr_p->block_size; /* System block(page) size */ 915 916 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 917 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 918 "buf_pp $%p btype %d bufsize %d " 919 "anchor_index %d chunk_index %d dvma $%p " 920 "offset %d block_size %d", 921 pkt_buf_addr_pp, 922 pktbufsz_type, 923 bufsize, 924 anchor_index, 925 chunk_index, 926 dvma_addr, 927 offset, 928 block_size)); 929 930 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 931 932 block_index = (offset / block_size); /* index within chunk */ 933 total_index = chunk_index + block_index; 934 935 936 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 937 "==> nxge_rxbuf_pp_to_vp: " 938 
"total_index %d dvma_addr $%p " 939 "offset %d block_size %d " 940 "block_index %d ", 941 total_index, dvma_addr, 942 offset, block_size, 943 block_index)); 944 #if defined(__i386) 945 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 946 (uint32_t)offset); 947 #else 948 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 949 (uint64_t)offset); 950 #endif 951 952 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 953 "==> nxge_rxbuf_pp_to_vp: " 954 "total_index %d dvma_addr $%p " 955 "offset %d block_size %d " 956 "block_index %d " 957 "*pkt_buf_addr_p $%p", 958 total_index, dvma_addr, 959 offset, block_size, 960 block_index, 961 *pkt_buf_addr_p)); 962 963 964 *msg_index = total_index; 965 *bufoffset = (offset & page_size_mask); 966 967 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 968 "==> nxge_rxbuf_pp_to_vp: get msg index: " 969 "msg_index %d bufoffset_index %d", 970 *msg_index, 971 *bufoffset)); 972 973 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 974 975 return (NXGE_OK); 976 } 977 978 /* 979 * used by quick sort (qsort) function 980 * to perform comparison 981 */ 982 static int 983 nxge_sort_compare(const void *p1, const void *p2) 984 { 985 986 rxbuf_index_info_t *a, *b; 987 988 a = (rxbuf_index_info_t *)p1; 989 b = (rxbuf_index_info_t *)p2; 990 991 if (a->dvma_addr > b->dvma_addr) 992 return (1); 993 if (a->dvma_addr < b->dvma_addr) 994 return (-1); 995 return (0); 996 } 997 998 999 1000 /* 1001 * grabbed this sort implementation from common/syscall/avl.c 1002 * 1003 */ 1004 /* 1005 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 1006 * v = Ptr to array/vector of objs 1007 * n = # objs in the array 1008 * s = size of each obj (must be multiples of a word size) 1009 * f = ptr to function to compare two objs 1010 * returns (-1 = less than, 0 = equal, 1 = greater than 1011 */ 1012 void 1013 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1014 { 1015 int g, i, j, ii; 1016 unsigned int *p1, *p2; 1017 unsigned int tmp; 1018 1019 /* No work to do */ 1020 if (v == NULL || n <= 1) 1021 return; 1022 /* Sanity check on arguments */ 1023 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1024 ASSERT(s > 0); 1025 1026 for (g = n / 2; g > 0; g /= 2) { 1027 for (i = g; i < n; i++) { 1028 for (j = i - g; j >= 0 && 1029 (*f)(v + j * s, v + (j + g) * s) == 1; 1030 j -= g) { 1031 p1 = (unsigned *)(v + j * s); 1032 p2 = (unsigned *)(v + (j + g) * s); 1033 for (ii = 0; ii < s / 4; ii++) { 1034 tmp = *p1; 1035 *p1++ = *p2; 1036 *p2++ = tmp; 1037 } 1038 } 1039 } 1040 } 1041 } 1042 1043 /* 1044 * Initialize data structures required for rxdma 1045 * buffer dvma->vmem address lookup 1046 */ 1047 /*ARGSUSED*/ 1048 static nxge_status_t 1049 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1050 { 1051 1052 int index; 1053 rxring_info_t *ring_info; 1054 int max_iteration = 0, max_index = 0; 1055 1056 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1057 1058 ring_info = rbrp->ring_info; 1059 ring_info->hint[0] = NO_HINT; 1060 ring_info->hint[1] = NO_HINT; 1061 ring_info->hint[2] = NO_HINT; 1062 max_index = rbrp->num_blocks; 1063 1064 /* read the DVMA address information and sort it */ 1065 /* do init of the information array */ 1066 1067 1068 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1069 " nxge_rxbuf_index_info_init Sort ptrs")); 1070 1071 /* sort the array */ 1072 nxge_ksort((void *)ring_info->buffer, max_index, 1073 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1074 1075 1076 1077 for (index = 0; index < max_index; index++) { 1078 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1079 " nxge_rxbuf_index_info_init: sorted chunk %d " 1080 " ioaddr $%p kaddr $%p size %x", 1081 index, ring_info->buffer[index].dvma_addr, 1082 ring_info->buffer[index].kaddr, 1083 ring_info->buffer[index].buf_size)); 1084 } 1085 1086 max_iteration = 0; 1087 while (max_index >= (1ULL << max_iteration)) 1088 max_iteration++; 1089 ring_info->max_iterations = max_iteration + 1; 1090 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1091 " nxge_rxbuf_index_info_init Find max iter %d", 1092 ring_info->max_iterations)); 1093 1094 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1095 return (NXGE_OK); 1096 } 1097 1098 /* ARGSUSED */ 1099 void 1100 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1101 { 1102 #ifdef NXGE_DEBUG 1103 1104 uint32_t bptr; 1105 uint64_t pp; 1106 1107 bptr = entry_p->bits.hdw.pkt_buf_addr; 1108 1109 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1110 "\trcr entry $%p " 1111 "\trcr entry 0x%0llx " 1112 "\trcr entry 0x%08x " 1113 "\trcr entry 0x%08x " 1114 "\tvalue 0x%0llx\n" 1115 "\tmulti = %d\n" 1116 "\tpkt_type = 0x%x\n" 1117 "\tzero_copy = %d\n" 1118 "\tnoport = %d\n" 1119 "\tpromis = %d\n" 1120 "\terror = 0x%04x\n" 1121 "\tdcf_err = 0x%01x\n" 1122 "\tl2_len = %d\n" 1123 "\tpktbufsize = %d\n" 1124 "\tpkt_buf_addr = $%p\n" 1125 "\tpkt_buf_addr (<< 6) = $%p\n", 1126 entry_p, 1127 *(int64_t *)entry_p, 1128 *(int32_t *)entry_p, 1129 *(int32_t *)((char *)entry_p + 32), 1130 entry_p->value, 1131 entry_p->bits.hdw.multi, 1132 entry_p->bits.hdw.pkt_type, 1133 entry_p->bits.hdw.zero_copy, 1134 entry_p->bits.hdw.noport, 1135 entry_p->bits.hdw.promis, 1136 entry_p->bits.hdw.error, 1137 entry_p->bits.hdw.dcf_err, 1138 entry_p->bits.hdw.l2_len, 1139 entry_p->bits.hdw.pktbufsz, 1140 bptr, 1141 entry_p->bits.ldw.pkt_buf_addr)); 1142 1143 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1144 RCR_PKT_BUF_ADDR_SHIFT; 1145 1146 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1147 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1148 #endif 1149 } 1150 1151 void 1152 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1153 { 1154 npi_handle_t handle; 1155 rbr_stat_t rbr_stat; 1156 addr44_t hd_addr; 1157 addr44_t tail_addr; 1158 uint16_t qlen; 1159 1160 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1161 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1162 1163 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1164 1165 /* RBR head */ 1166 hd_addr.addr = 0; 1167 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1168 #if defined(__i386) 1169 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1170 (void *)(uint32_t)hd_addr.addr); 1171 #else 1172 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1173 (void *)hd_addr.addr); 1174 #endif 1175 1176 /* RBR stats */ 1177 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1178 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1179 1180 /* RCR tail */ 1181 tail_addr.addr = 0; 1182 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1183 #if defined(__i386) 1184 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1185 (void *)(uint32_t)tail_addr.addr); 1186 #else 1187 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1188 (void *)tail_addr.addr); 1189 #endif 1190 1191 /* RCR qlen */ 1192 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1193 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1194 1195 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1196 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1197 } 1198 1199 nxge_status_t 1200 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1201 { 1202 nxge_grp_set_t 
*set = &nxgep->rx_set; 1203 nxge_status_t status; 1204 npi_status_t rs; 1205 int rdc; 1206 1207 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1208 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1209 1210 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1211 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1212 "<== nxge_rxdma_mode: not initialized")); 1213 return (NXGE_ERROR); 1214 } 1215 1216 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1217 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1218 "<== nxge_tx_port_fatal_err_recover: " 1219 "NULL ring pointer(s)")); 1220 return (NXGE_ERROR); 1221 } 1222 1223 if (set->owned.map == 0) { 1224 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1225 "nxge_rxdma_regs_dump_channels: no channels")); 1226 return (NULL); 1227 } 1228 1229 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1230 if ((1 << rdc) & set->owned.map) { 1231 rx_rbr_ring_t *ring = 1232 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1233 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1234 if (ring) { 1235 if (enable) { 1236 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1237 "==> nxge_rxdma_hw_mode: " 1238 "channel %d (enable)", rdc)); 1239 rs = npi_rxdma_cfg_rdc_enable 1240 (handle, rdc); 1241 } else { 1242 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1243 "==> nxge_rxdma_hw_mode: " 1244 "channel %d disable)", rdc)); 1245 rs = npi_rxdma_cfg_rdc_disable 1246 (handle, rdc); 1247 } 1248 } 1249 } 1250 } 1251 1252 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1253 1254 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1255 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1256 1257 return (status); 1258 } 1259 1260 void 1261 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1262 { 1263 npi_handle_t handle; 1264 1265 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1266 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1267 1268 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1269 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1270 1271 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1272 } 1273 1274 void 1275 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1276 { 1277 npi_handle_t handle; 1278 1279 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1280 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1281 1282 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1283 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1284 1285 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1286 } 1287 1288 void 1289 nxge_hw_start_rx(p_nxge_t nxgep) 1290 { 1291 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1292 1293 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1294 (void) nxge_rx_mac_enable(nxgep); 1295 1296 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1297 } 1298 1299 /*ARGSUSED*/ 1300 void 1301 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1302 { 1303 nxge_grp_set_t *set = &nxgep->rx_set; 1304 int rdc; 1305 1306 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1307 1308 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1309 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1310 "<== nxge_tx_port_fatal_err_recover: " 1311 "NULL ring pointer(s)")); 1312 return; 1313 } 1314 1315 if (set->owned.map == 0) { 1316 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1317 "nxge_rxdma_regs_dump_channels: no channels")); 1318 return; 1319 } 1320 1321 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1322 if ((1 << rdc) & set->owned.map) { 1323 rx_rbr_ring_t *ring = 1324 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1325 if (ring) { 1326 nxge_rxdma_hw_stop(nxgep, rdc); 1327 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1328 "==> nxge_fixup_rxdma_rings: " 1329 "channel %d ring $%px", 1330 
rdc, ring)); 1331 (void) nxge_rxdma_fix_channel(nxgep, rdc); 1332 } 1333 } 1334 } 1335 1336 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1337 } 1338 1339 void 1340 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1341 { 1342 int ndmas; 1343 p_rx_rbr_rings_t rx_rbr_rings; 1344 p_rx_rbr_ring_t *rbr_rings; 1345 p_rx_rcr_rings_t rx_rcr_rings; 1346 p_rx_rcr_ring_t *rcr_rings; 1347 p_rx_mbox_areas_t rx_mbox_areas_p; 1348 p_rx_mbox_t *rx_mbox_p; 1349 p_nxge_dma_pool_t dma_buf_poolp; 1350 p_nxge_dma_pool_t dma_cntl_poolp; 1351 p_rx_rbr_ring_t rbrp; 1352 p_rx_rcr_ring_t rcrp; 1353 p_rx_mbox_t mboxp; 1354 p_nxge_dma_common_t dmap; 1355 nxge_status_t status = NXGE_OK; 1356 1357 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1358 1359 (void) nxge_rxdma_stop_channel(nxgep, channel); 1360 1361 dma_buf_poolp = nxgep->rx_buf_pool_p; 1362 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1363 1364 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1365 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1366 "<== nxge_rxdma_fix_channel: buf not allocated")); 1367 return; 1368 } 1369 1370 ndmas = dma_buf_poolp->ndmas; 1371 if (!ndmas) { 1372 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1373 "<== nxge_rxdma_fix_channel: no dma allocated")); 1374 return; 1375 } 1376 1377 rx_rbr_rings = nxgep->rx_rbr_rings; 1378 rx_rcr_rings = nxgep->rx_rcr_rings; 1379 rbr_rings = rx_rbr_rings->rbr_rings; 1380 rcr_rings = rx_rcr_rings->rcr_rings; 1381 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1382 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1383 1384 /* Reinitialize the receive block and completion rings */ 1385 rbrp = (p_rx_rbr_ring_t)rbr_rings[channel], 1386 rcrp = (p_rx_rcr_ring_t)rcr_rings[channel], 1387 mboxp = (p_rx_mbox_t)rx_mbox_p[channel]; 1388 1389 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1390 rbrp->rbr_rd_index = 0; 1391 rcrp->comp_rd_index = 0; 1392 rcrp->comp_wt_index = 0; 1393 1394 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1395 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1396 1397 status = nxge_rxdma_start_channel(nxgep, channel, 1398 rbrp, rcrp, mboxp); 1399 if (status != NXGE_OK) { 1400 goto nxge_rxdma_fix_channel_fail; 1401 } 1402 1403 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1404 "<== nxge_rxdma_fix_channel: success (0x%08x)", status)); 1405 return; 1406 1407 nxge_rxdma_fix_channel_fail: 1408 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1409 "<== nxge_rxdma_fix_channel: failed (0x%08x)", status)); 1410 } 1411 1412 p_rx_rbr_ring_t 1413 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1414 { 1415 nxge_grp_set_t *set = &nxgep->rx_set; 1416 nxge_channel_t rdc; 1417 1418 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1419 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1420 1421 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1422 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1423 "<== nxge_rxdma_get_rbr_ring: " 1424 "NULL ring pointer(s)")); 1425 return (NULL); 1426 } 1427 1428 if (set->owned.map == 0) { 1429 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1430 "<== nxge_rxdma_get_rbr_ring: no channels")); 1431 return (NULL); 1432 } 1433 1434 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1435 if ((1 << rdc) & set->owned.map) { 1436 rx_rbr_ring_t *ring = 1437 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1438 if (ring) { 1439 if (channel == ring->rdc) { 1440 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1441 "==> nxge_rxdma_get_rbr_ring: " 1442 "channel %d ring $%p", rdc, ring)); 1443 return (ring); 1444 } 1445 } 1446 } 1447 } 1448 1449 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1450 "<== nxge_rxdma_get_rbr_ring: not found")); 1451 1452 return (NULL); 1453 } 
1454 1455 p_rx_rcr_ring_t 1456 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1457 { 1458 nxge_grp_set_t *set = &nxgep->rx_set; 1459 nxge_channel_t rdc; 1460 1461 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1462 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1463 1464 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1465 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1466 "<== nxge_rxdma_get_rcr_ring: " 1467 "NULL ring pointer(s)")); 1468 return (NULL); 1469 } 1470 1471 if (set->owned.map == 0) { 1472 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1473 "<== nxge_rxdma_get_rbr_ring: no channels")); 1474 return (NULL); 1475 } 1476 1477 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1478 if ((1 << rdc) & set->owned.map) { 1479 rx_rcr_ring_t *ring = 1480 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1481 if (ring) { 1482 if (channel == ring->rdc) { 1483 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1484 "==> nxge_rxdma_get_rcr_ring: " 1485 "channel %d ring $%p", rdc, ring)); 1486 return (ring); 1487 } 1488 } 1489 } 1490 } 1491 1492 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1493 "<== nxge_rxdma_get_rcr_ring: not found")); 1494 1495 return (NULL); 1496 } 1497 1498 /* 1499 * Static functions start here. 1500 */ 1501 static p_rx_msg_t 1502 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1503 { 1504 p_rx_msg_t nxge_mp = NULL; 1505 p_nxge_dma_common_t dmamsg_p; 1506 uchar_t *buffer; 1507 1508 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1509 if (nxge_mp == NULL) { 1510 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1511 "Allocation of a rx msg failed.")); 1512 goto nxge_allocb_exit; 1513 } 1514 1515 nxge_mp->use_buf_pool = B_FALSE; 1516 if (dmabuf_p) { 1517 nxge_mp->use_buf_pool = B_TRUE; 1518 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1519 *dmamsg_p = *dmabuf_p; 1520 dmamsg_p->nblocks = 1; 1521 dmamsg_p->block_size = size; 1522 dmamsg_p->alength = size; 1523 buffer = (uchar_t *)dmabuf_p->kaddrp; 1524 1525 dmabuf_p->kaddrp = (void *) 1526 ((char *)dmabuf_p->kaddrp + size); 1527 dmabuf_p->ioaddr_pp = (void *) 1528 ((char *)dmabuf_p->ioaddr_pp + size); 1529 dmabuf_p->alength -= size; 1530 dmabuf_p->offset += size; 1531 dmabuf_p->dma_cookie.dmac_laddress += size; 1532 dmabuf_p->dma_cookie.dmac_size -= size; 1533 1534 } else { 1535 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1536 if (buffer == NULL) { 1537 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1538 "Allocation of a receive page failed.")); 1539 goto nxge_allocb_fail1; 1540 } 1541 } 1542 1543 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1544 if (nxge_mp->rx_mblk_p == NULL) { 1545 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1546 goto nxge_allocb_fail2; 1547 } 1548 1549 nxge_mp->buffer = buffer; 1550 nxge_mp->block_size = size; 1551 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1552 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1553 nxge_mp->ref_cnt = 1; 1554 nxge_mp->free = B_TRUE; 1555 nxge_mp->rx_use_bcopy = B_FALSE; 1556 1557 atomic_inc_32(&nxge_mblks_pending); 1558 1559 goto nxge_allocb_exit; 1560 1561 nxge_allocb_fail2: 1562 if (!nxge_mp->use_buf_pool) { 1563 KMEM_FREE(buffer, size); 1564 } 1565 1566 nxge_allocb_fail1: 1567 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1568 nxge_mp = NULL; 1569 1570 nxge_allocb_exit: 1571 return (nxge_mp); 1572 } 1573 1574 p_mblk_t 1575 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1576 { 1577 p_mblk_t mp; 1578 1579 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1580 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1581 "offset = 0x%08X " 1582 "size = 0x%08X", 1583 nxge_mp, offset, size)); 
1584 1585 mp = desballoc(&nxge_mp->buffer[offset], size, 1586 0, &nxge_mp->freeb); 1587 if (mp == NULL) { 1588 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1589 goto nxge_dupb_exit; 1590 } 1591 atomic_inc_32(&nxge_mp->ref_cnt); 1592 1593 1594 nxge_dupb_exit: 1595 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1596 nxge_mp)); 1597 return (mp); 1598 } 1599 1600 p_mblk_t 1601 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1602 { 1603 p_mblk_t mp; 1604 uchar_t *dp; 1605 1606 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1607 if (mp == NULL) { 1608 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1609 goto nxge_dupb_bcopy_exit; 1610 } 1611 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1612 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1613 mp->b_wptr = dp + size; 1614 1615 nxge_dupb_bcopy_exit: 1616 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1617 nxge_mp)); 1618 return (mp); 1619 } 1620 1621 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1622 p_rx_msg_t rx_msg_p); 1623 1624 void 1625 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1626 { 1627 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1628 1629 /* Reuse this buffer */ 1630 rx_msg_p->free = B_FALSE; 1631 rx_msg_p->cur_usage_cnt = 0; 1632 rx_msg_p->max_usage_cnt = 0; 1633 rx_msg_p->pkt_buf_size = 0; 1634 1635 if (rx_rbr_p->rbr_use_bcopy) { 1636 rx_msg_p->rx_use_bcopy = B_FALSE; 1637 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1638 } 1639 1640 /* 1641 * Get the rbr header pointer and its offset index. 1642 */ 1643 MUTEX_ENTER(&rx_rbr_p->post_lock); 1644 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1645 rx_rbr_p->rbr_wrap_mask); 1646 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1647 MUTEX_EXIT(&rx_rbr_p->post_lock); 1648 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1649 rx_rbr_p->rdc, 1); 1650 1651 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1652 "<== nxge_post_page (channel %d post_next_index %d)", 1653 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1654 1655 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1656 } 1657 1658 void 1659 nxge_freeb(p_rx_msg_t rx_msg_p) 1660 { 1661 size_t size; 1662 uchar_t *buffer = NULL; 1663 int ref_cnt; 1664 boolean_t free_state = B_FALSE; 1665 1666 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1667 1668 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1669 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1670 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1671 rx_msg_p, nxge_mblks_pending)); 1672 1673 /* 1674 * First we need to get the free state, then 1675 * atomic decrement the reference count to prevent 1676 * the race condition with the interrupt thread that 1677 * is processing a loaned up buffer block. 1678 */ 1679 free_state = rx_msg_p->free; 1680 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1681 if (!ref_cnt) { 1682 atomic_dec_32(&nxge_mblks_pending); 1683 buffer = rx_msg_p->buffer; 1684 size = rx_msg_p->block_size; 1685 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1686 "will free: rx_msg_p = $%p (block pending %d)", 1687 rx_msg_p, nxge_mblks_pending)); 1688 1689 if (!rx_msg_p->use_buf_pool) { 1690 KMEM_FREE(buffer, size); 1691 } 1692 1693 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1694 1695 if (ring) { 1696 /* 1697 * Decrement the receive buffer ring's reference 1698 * count, too. 1699 */ 1700 atomic_dec_32(&ring->rbr_ref_cnt); 1701 1702 /* 1703 * Free the receive buffer ring, if 1704 * 1. all the receive buffers have been freed 1705 * 2. 
and we are in the proper state (that is, 1706 * we are not UNMAPPING). 1707 */ 1708 if (ring->rbr_ref_cnt == 0 && 1709 ring->rbr_state == RBR_UNMAPPED) { 1710 /* 1711 * Free receive data buffers, 1712 * buffer index information 1713 * (rxring_info) and 1714 * the message block ring. 1715 */ 1716 NXGE_DEBUG_MSG((NULL, RX_CTL, 1717 "nxge_freeb:rx_msg_p = $%p " 1718 "(block pending %d) free buffers", 1719 rx_msg_p, nxge_mblks_pending)); 1720 nxge_rxdma_databuf_free(ring); 1721 if (ring->ring_info) { 1722 KMEM_FREE(ring->ring_info, 1723 sizeof (rxring_info_t)); 1724 } 1725 1726 if (ring->rx_msg_ring) { 1727 KMEM_FREE(ring->rx_msg_ring, 1728 ring->tnblocks * 1729 sizeof (p_rx_msg_t)); 1730 } 1731 KMEM_FREE(ring, sizeof (*ring)); 1732 } 1733 } 1734 return; 1735 } 1736 1737 /* 1738 * Repost buffer. 1739 */ 1740 if (free_state && (ref_cnt == 1) && ring) { 1741 NXGE_DEBUG_MSG((NULL, RX_CTL, 1742 "nxge_freeb: post page $%p:", rx_msg_p)); 1743 if (ring->rbr_state == RBR_POSTING) 1744 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1745 } 1746 1747 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1748 } 1749 1750 uint_t 1751 nxge_rx_intr(void *arg1, void *arg2) 1752 { 1753 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1754 p_nxge_t nxgep = (p_nxge_t)arg2; 1755 p_nxge_ldg_t ldgp; 1756 uint8_t channel; 1757 npi_handle_t handle; 1758 rx_dma_ctl_stat_t cs; 1759 p_rx_rcr_ring_t rcr_ring; 1760 mblk_t *mp = NULL; 1761 1762 if (ldvp == NULL) { 1763 NXGE_DEBUG_MSG((NULL, INT_CTL, 1764 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1765 nxgep, ldvp)); 1766 return (DDI_INTR_CLAIMED); 1767 } 1768 1769 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1770 nxgep = ldvp->nxgep; 1771 } 1772 1773 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1774 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1775 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1776 "<== nxge_rx_intr: interface not started or intialized")); 1777 return (DDI_INTR_CLAIMED); 1778 } 1779 1780 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1781 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1782 nxgep, ldvp)); 1783 1784 /* 1785 * Get the PIO handle. 1786 */ 1787 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1788 1789 /* 1790 * Get the ring to enable us to process packets. 1791 */ 1792 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1793 1794 /* 1795 * The RCR ring lock must be held when packets 1796 * are being processed and the hardware registers are 1797 * being read or written to prevent race condition 1798 * among the interrupt thread, the polling thread 1799 * (will cause fatal errors such as rcrincon bit set) 1800 * and the setting of the poll_flag. 1801 */ 1802 MUTEX_ENTER(&rcr_ring->lock); 1803 1804 /* 1805 * Get the control and status for this channel. 1806 */ 1807 channel = ldvp->channel; 1808 ldgp = ldvp->ldgp; 1809 1810 if (!isLDOMguest(nxgep) && (!nxgep->rx_channel_started[channel])) { 1811 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1812 "<== nxge_rx_intr: channel is not started")); 1813 1814 /* 1815 * We received an interrupt before the ring is started. 1816 */ 1817 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, 1818 &cs.value); 1819 cs.value &= RX_DMA_CTL_STAT_WR1C; 1820 cs.bits.hdw.mex = 1; 1821 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1822 cs.value); 1823 1824 /* 1825 * Rearm this logical group if this is a single device 1826 * group. 
1827 */ 1828 if (ldgp->nldvs == 1) { 1829 if (isLDOMguest(nxgep)) { 1830 nxge_hio_ldgimgn(nxgep, ldgp); 1831 } else { 1832 ldgimgm_t mgm; 1833 1834 mgm.value = 0; 1835 mgm.bits.ldw.arm = 1; 1836 mgm.bits.ldw.timer = ldgp->ldg_timer; 1837 1838 NXGE_REG_WR64(handle, 1839 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1840 mgm.value); 1841 } 1842 } 1843 MUTEX_EXIT(&rcr_ring->lock); 1844 return (DDI_INTR_CLAIMED); 1845 } 1846 1847 ASSERT(rcr_ring->ldgp == ldgp); 1848 ASSERT(rcr_ring->ldvp == ldvp); 1849 1850 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1851 1852 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1853 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1854 channel, 1855 cs.value, 1856 cs.bits.hdw.rcrto, 1857 cs.bits.hdw.rcrthres)); 1858 1859 if (rcr_ring->poll_flag == 0) { 1860 mp = nxge_rx_pkts(nxgep, rcr_ring, cs, -1); 1861 } 1862 1863 /* error events. */ 1864 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1865 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1866 } 1867 1868 /* 1869 * Enable the mailbox update interrupt if we want 1870 * to use mailbox. We probably don't need to use 1871 * mailbox as it only saves us one pio read. 1872 * Also write 1 to rcrthres and rcrto to clear 1873 * these two edge triggered bits. 1874 */ 1875 cs.value &= RX_DMA_CTL_STAT_WR1C; 1876 cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 1877 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1878 cs.value); 1879 1880 /* 1881 * If the polling mode is enabled, disable the interrupt. 1882 */ 1883 if (rcr_ring->poll_flag) { 1884 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1885 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1886 "(disabling interrupts)", channel, ldgp, ldvp)); 1887 /* 1888 * Disarm this logical group if this is a single device 1889 * group. 1890 */ 1891 if (ldgp->nldvs == 1) { 1892 ldgimgm_t mgm; 1893 mgm.value = 0; 1894 mgm.bits.ldw.arm = 0; 1895 NXGE_REG_WR64(handle, 1896 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 1897 } 1898 } else { 1899 /* 1900 * Rearm this logical group if this is a single device 1901 * group. 1902 */ 1903 if (ldgp->nldvs == 1) { 1904 if (isLDOMguest(nxgep)) { 1905 nxge_hio_ldgimgn(nxgep, ldgp); 1906 } else { 1907 ldgimgm_t mgm; 1908 1909 mgm.value = 0; 1910 mgm.bits.ldw.arm = 1; 1911 mgm.bits.ldw.timer = ldgp->ldg_timer; 1912 1913 NXGE_REG_WR64(handle, 1914 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1915 mgm.value); 1916 } 1917 } 1918 1919 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1920 "==> nxge_rx_intr: rdc %d ldgp $%p " 1921 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1922 } 1923 MUTEX_EXIT(&rcr_ring->lock); 1924 1925 if (mp != NULL) { 1926 if (!isLDOMguest(nxgep)) 1927 mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 1928 rcr_ring->rcr_gen_num); 1929 #if defined(sun4v) 1930 else { /* isLDOMguest(nxgep) */ 1931 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1932 nxgep->nxge_hw_p->hio; 1933 nx_vio_fp_t *vio = &nhd->hio.vio; 1934 1935 if (vio->cb.vio_net_rx_cb) { 1936 (*vio->cb.vio_net_rx_cb) 1937 (nxgep->hio_vr->vhp, mp); 1938 } 1939 } 1940 #endif 1941 } 1942 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 1943 return (DDI_INTR_CLAIMED); 1944 } 1945 1946 /* 1947 * This routine is the main packet receive processing function. 1948 * It gets the packet type, error code, and buffer related 1949 * information from the receive completion entry. 1950 * How many completion entries to process is based on the number of packets 1951 * queued by the hardware, a hardware maintained tail pointer 1952 * and a configurable receive packet count. 
1953 * 1954 * A chain of message blocks will be created as result of processing 1955 * the completion entries. This chain of message blocks will be returned and 1956 * a hardware control status register will be updated with the number of 1957 * packets were removed from the hardware queue. 1958 * 1959 * The RCR ring lock is held when entering this function. 1960 */ 1961 static mblk_t * 1962 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 1963 int bytes_to_pickup) 1964 { 1965 npi_handle_t handle; 1966 uint8_t channel; 1967 uint32_t comp_rd_index; 1968 p_rcr_entry_t rcr_desc_rd_head_p; 1969 p_rcr_entry_t rcr_desc_rd_head_pp; 1970 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1971 uint16_t qlen, nrcr_read, npkt_read; 1972 uint32_t qlen_hw; 1973 boolean_t multi; 1974 rcrcfig_b_t rcr_cfg_b; 1975 int totallen = 0; 1976 #if defined(_BIG_ENDIAN) 1977 npi_status_t rs = NPI_SUCCESS; 1978 #endif 1979 1980 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 1981 "channel %d", rcr_p->rdc)); 1982 1983 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1984 return (NULL); 1985 } 1986 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1987 channel = rcr_p->rdc; 1988 1989 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1990 "==> nxge_rx_pkts: START: rcr channel %d " 1991 "head_p $%p head_pp $%p index %d ", 1992 channel, rcr_p->rcr_desc_rd_head_p, 1993 rcr_p->rcr_desc_rd_head_pp, 1994 rcr_p->comp_rd_index)); 1995 1996 1997 #if !defined(_BIG_ENDIAN) 1998 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 1999 #else 2000 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2001 if (rs != NPI_SUCCESS) { 2002 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2003 "channel %d, get qlen failed 0x%08x", 2004 channel, rs)); 2005 return (NULL); 2006 } 2007 #endif 2008 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2009 "qlen %d", channel, qlen)); 2010 2011 2012 2013 if (!qlen) { 2014 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2015 "==> nxge_rx_pkts:rcr channel %d " 2016 "qlen %d (no pkts)", channel, qlen)); 2017 2018 return (NULL); 2019 } 2020 2021 comp_rd_index = rcr_p->comp_rd_index; 2022 2023 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2024 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2025 nrcr_read = npkt_read = 0; 2026 2027 /* 2028 * Number of packets queued 2029 * (The jumbo or multi packet will be counted as only one 2030 * packets and it may take up more than one completion entry). 2031 */ 2032 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2033 qlen : nxge_max_rx_pkts; 2034 head_mp = NULL; 2035 tail_mp = &head_mp; 2036 nmp = mp_cont = NULL; 2037 multi = B_FALSE; 2038 2039 while (qlen_hw) { 2040 2041 #ifdef NXGE_DEBUG 2042 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2043 #endif 2044 /* 2045 * Process one completion ring entry. 
2046 */ 2047 nxge_receive_packet(nxgep, 2048 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2049 2050 /* 2051 * message chaining modes 2052 */ 2053 if (nmp) { 2054 nmp->b_next = NULL; 2055 if (!multi && !mp_cont) { /* frame fits a partition */ 2056 *tail_mp = nmp; 2057 tail_mp = &nmp->b_next; 2058 totallen += MBLKL(nmp); 2059 nmp = NULL; 2060 } else if (multi && !mp_cont) { /* first segment */ 2061 *tail_mp = nmp; 2062 tail_mp = &nmp->b_cont; 2063 totallen += MBLKL(nmp); 2064 } else if (multi && mp_cont) { /* mid of multi segs */ 2065 *tail_mp = mp_cont; 2066 tail_mp = &mp_cont->b_cont; 2067 totallen += MBLKL(mp_cont); 2068 } else if (!multi && mp_cont) { /* last segment */ 2069 *tail_mp = mp_cont; 2070 tail_mp = &nmp->b_next; 2071 totallen += MBLKL(mp_cont); 2072 nmp = NULL; 2073 } 2074 } 2075 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2076 "==> nxge_rx_pkts: loop: rcr channel %d " 2077 "before updating: multi %d " 2078 "nrcr_read %d " 2079 "npk read %d " 2080 "head_pp $%p index %d ", 2081 channel, 2082 multi, 2083 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2084 comp_rd_index)); 2085 2086 if (!multi) { 2087 qlen_hw--; 2088 npkt_read++; 2089 } 2090 2091 /* 2092 * Update the next read entry. 2093 */ 2094 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2095 rcr_p->comp_wrap_mask); 2096 2097 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2098 rcr_p->rcr_desc_first_p, 2099 rcr_p->rcr_desc_last_p); 2100 2101 nrcr_read++; 2102 2103 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2104 "<== nxge_rx_pkts: (SAM, process one packet) " 2105 "nrcr_read %d", 2106 nrcr_read)); 2107 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2108 "==> nxge_rx_pkts: loop: rcr channel %d " 2109 "multi %d " 2110 "nrcr_read %d " 2111 "npk read %d " 2112 "head_pp $%p index %d ", 2113 channel, 2114 multi, 2115 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2116 comp_rd_index)); 2117 2118 if ((bytes_to_pickup != -1) && 2119 (totallen >= bytes_to_pickup)) { 2120 break; 2121 } 2122 } 2123 2124 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2125 rcr_p->comp_rd_index = comp_rd_index; 2126 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2127 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2128 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2129 2130 rcr_p->intr_timeout = (nxgep->intr_timeout < 2131 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2132 nxgep->intr_timeout; 2133 2134 rcr_p->intr_threshold = (nxgep->intr_threshold < 2135 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2136 nxgep->intr_threshold; 2137 2138 rcr_cfg_b.value = 0x0ULL; 2139 rcr_cfg_b.bits.ldw.entout = 1; 2140 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2141 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2142 2143 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2144 channel, rcr_cfg_b.value); 2145 } 2146 2147 cs.bits.ldw.pktread = npkt_read; 2148 cs.bits.ldw.ptrread = nrcr_read; 2149 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2150 channel, cs.value); 2151 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2152 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2153 "head_pp $%p index %016llx ", 2154 channel, 2155 rcr_p->rcr_desc_rd_head_pp, 2156 rcr_p->comp_rd_index)); 2157 /* 2158 * Update RCR buffer pointer read and number of packets 2159 * read. 
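 * (Done just above: pktread tells the hardware how many packets and
 * ptrread how many RCR entries were consumed, so it can shrink its
 * queue length.  A frame spanning three buffers, for instance,
 * retires three entries but counts as one packet, so ptrread
 * advances by 3 while pktread advances by 1.)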
2160 */ 2161 2162 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2163 "channel %d", rcr_p->rdc)); 2164 2165 return (head_mp); 2166 } 2167 2168 void 2169 nxge_receive_packet(p_nxge_t nxgep, 2170 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2171 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2172 { 2173 p_mblk_t nmp = NULL; 2174 uint64_t multi; 2175 uint64_t dcf_err; 2176 uint8_t channel; 2177 2178 boolean_t first_entry = B_TRUE; 2179 boolean_t is_tcp_udp = B_FALSE; 2180 boolean_t buffer_free = B_FALSE; 2181 boolean_t error_send_up = B_FALSE; 2182 uint8_t error_type; 2183 uint16_t l2_len; 2184 uint16_t skip_len; 2185 uint8_t pktbufsz_type; 2186 uint64_t rcr_entry; 2187 uint64_t *pkt_buf_addr_pp; 2188 uint64_t *pkt_buf_addr_p; 2189 uint32_t buf_offset; 2190 uint32_t bsize; 2191 uint32_t error_disp_cnt; 2192 uint32_t msg_index; 2193 p_rx_rbr_ring_t rx_rbr_p; 2194 p_rx_msg_t *rx_msg_ring_p; 2195 p_rx_msg_t rx_msg_p; 2196 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2197 nxge_status_t status = NXGE_OK; 2198 boolean_t is_valid = B_FALSE; 2199 p_nxge_rx_ring_stats_t rdc_stats; 2200 uint32_t bytes_read; 2201 uint64_t pkt_type; 2202 uint64_t frag; 2203 boolean_t pkt_too_long_err = B_FALSE; 2204 #ifdef NXGE_DEBUG 2205 int dump_len; 2206 #endif 2207 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2208 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2209 2210 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2211 2212 multi = (rcr_entry & RCR_MULTI_MASK); 2213 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2214 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2215 2216 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2217 frag = (rcr_entry & RCR_FRAG_MASK); 2218 2219 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2220 2221 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2222 RCR_PKTBUFSZ_SHIFT); 2223 #if defined(__i386) 2224 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2225 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2226 #else 2227 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2228 RCR_PKT_BUF_ADDR_SHIFT); 2229 #endif 2230 2231 channel = rcr_p->rdc; 2232 2233 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2234 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2235 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2236 "error_type 0x%x pkt_type 0x%x " 2237 "pktbufsz_type %d ", 2238 rcr_desc_rd_head_p, 2239 rcr_entry, pkt_buf_addr_pp, l2_len, 2240 multi, 2241 error_type, 2242 pkt_type, 2243 pktbufsz_type)); 2244 2245 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2246 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2247 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2248 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2249 rcr_entry, pkt_buf_addr_pp, l2_len, 2250 multi, 2251 error_type, 2252 pkt_type)); 2253 2254 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2255 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2256 "full pkt_buf_addr_pp $%p l2_len %d", 2257 rcr_entry, pkt_buf_addr_pp, l2_len)); 2258 2259 /* get the stats ptr */ 2260 rdc_stats = rcr_p->rdc_stats; 2261 2262 if (!l2_len) { 2263 2264 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2265 "<== nxge_receive_packet: failed: l2 length is 0.")); 2266 return; 2267 } 2268 2269 /* 2270 * Software workaround for BMAC hardware limitation that allows 2271 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2272 * instead of 0x2400 for jumbo. 
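 * Frames that slip past the BMAC because of the relaxed limit are
 * flagged here instead.  For illustration (values assumed): with
 * maxframesize of 1522, an entry reporting l2_len of 1526 sets
 * pkt_too_long_err and is counted in rdc_stats->pkt_too_long_err
 * further below.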
2273 */ 2274 if (l2_len > nxgep->mac.maxframesize) { 2275 pkt_too_long_err = B_TRUE; 2276 } 2277 2278 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2279 l2_len -= ETHERFCSL; 2280 2281 /* shift 6 bits to get the full io address */ 2282 #if defined(__i386) 2283 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2284 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2285 #else 2286 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2287 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2288 #endif 2289 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2290 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2291 "full pkt_buf_addr_pp $%p l2_len %d", 2292 rcr_entry, pkt_buf_addr_pp, l2_len)); 2293 2294 rx_rbr_p = rcr_p->rx_rbr_p; 2295 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2296 2297 if (first_entry) { 2298 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2299 RXDMA_HDR_SIZE_DEFAULT); 2300 2301 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2302 "==> nxge_receive_packet: first entry 0x%016llx " 2303 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2304 rcr_entry, pkt_buf_addr_pp, l2_len, 2305 hdr_size)); 2306 } 2307 2308 MUTEX_ENTER(&rx_rbr_p->lock); 2309 2310 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2311 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2312 "full pkt_buf_addr_pp $%p l2_len %d", 2313 rcr_entry, pkt_buf_addr_pp, l2_len)); 2314 2315 /* 2316 * Packet buffer address in the completion entry points 2317 * to the starting buffer address (offset 0). 2318 * Use the starting buffer address to locate the corresponding 2319 * kernel address. 2320 */ 2321 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2322 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2323 &buf_offset, 2324 &msg_index); 2325 2326 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2327 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2328 "full pkt_buf_addr_pp $%p l2_len %d", 2329 rcr_entry, pkt_buf_addr_pp, l2_len)); 2330 2331 if (status != NXGE_OK) { 2332 MUTEX_EXIT(&rx_rbr_p->lock); 2333 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2334 "<== nxge_receive_packet: found vaddr failed %d", 2335 status)); 2336 return; 2337 } 2338 2339 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2340 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2341 "full pkt_buf_addr_pp $%p l2_len %d", 2342 rcr_entry, pkt_buf_addr_pp, l2_len)); 2343 2344 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2345 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2346 "full pkt_buf_addr_pp $%p l2_len %d", 2347 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2348 2349 rx_msg_p = rx_msg_ring_p[msg_index]; 2350 2351 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2352 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2353 "full pkt_buf_addr_pp $%p l2_len %d", 2354 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2355 2356 switch (pktbufsz_type) { 2357 case RCR_PKTBUFSZ_0: 2358 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2359 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2360 "==> nxge_receive_packet: 0 buf %d", bsize)); 2361 break; 2362 case RCR_PKTBUFSZ_1: 2363 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2364 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2365 "==> nxge_receive_packet: 1 buf %d", bsize)); 2366 break; 2367 case RCR_PKTBUFSZ_2: 2368 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2369 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2370 "==> nxge_receive_packet: 2 buf %d", bsize)); 2371 break; 2372 case RCR_SINGLE_BLOCK: 2373 bsize = rx_msg_p->block_size; 2374 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2375 "==> nxge_receive_packet: single %d", bsize)); 2376 2377 break; 2378 default: 2379 MUTEX_EXIT(&rx_rbr_p->lock); 2380 return; 2381 } 2382 2383 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2384 (buf_offset + sw_offset_bytes), 
2385 (hdr_size + l2_len), 2386 DDI_DMA_SYNC_FORCPU); 2387 2388 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2389 "==> nxge_receive_packet: after first dump:usage count")); 2390 2391 if (rx_msg_p->cur_usage_cnt == 0) { 2392 if (rx_rbr_p->rbr_use_bcopy) { 2393 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2394 if (rx_rbr_p->rbr_consumed < 2395 rx_rbr_p->rbr_threshold_hi) { 2396 if (rx_rbr_p->rbr_threshold_lo == 0 || 2397 ((rx_rbr_p->rbr_consumed >= 2398 rx_rbr_p->rbr_threshold_lo) && 2399 (rx_rbr_p->rbr_bufsize_type >= 2400 pktbufsz_type))) { 2401 rx_msg_p->rx_use_bcopy = B_TRUE; 2402 } 2403 } else { 2404 rx_msg_p->rx_use_bcopy = B_TRUE; 2405 } 2406 } 2407 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2408 "==> nxge_receive_packet: buf %d (new block) ", 2409 bsize)); 2410 2411 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2412 rx_msg_p->pkt_buf_size = bsize; 2413 rx_msg_p->cur_usage_cnt = 1; 2414 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2415 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2416 "==> nxge_receive_packet: buf %d " 2417 "(single block) ", 2418 bsize)); 2419 /* 2420 * Buffer can be reused once the free function 2421 * is called. 2422 */ 2423 rx_msg_p->max_usage_cnt = 1; 2424 buffer_free = B_TRUE; 2425 } else { 2426 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2427 if (rx_msg_p->max_usage_cnt == 1) { 2428 buffer_free = B_TRUE; 2429 } 2430 } 2431 } else { 2432 rx_msg_p->cur_usage_cnt++; 2433 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2434 buffer_free = B_TRUE; 2435 } 2436 } 2437 2438 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2439 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2440 msg_index, l2_len, 2441 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2442 2443 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2444 rdc_stats->ierrors++; 2445 if (dcf_err) { 2446 rdc_stats->dcf_err++; 2447 #ifdef NXGE_DEBUG 2448 if (!rdc_stats->dcf_err) { 2449 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2450 "nxge_receive_packet: channel %d dcf_err rcr" 2451 " 0x%llx", channel, rcr_entry)); 2452 } 2453 #endif 2454 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2455 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2456 } else if (pkt_too_long_err) { 2457 rdc_stats->pkt_too_long_err++; 2458 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2459 " channel %d packet length [%d] > " 2460 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2461 nxgep->mac.maxframesize)); 2462 } else { 2463 /* Update error stats */ 2464 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2465 rdc_stats->errlog.compl_err_type = error_type; 2466 2467 switch (error_type) { 2468 /* 2469 * Do not send FMA ereport for RCR_L2_ERROR and 2470 * RCR_L4_CSUM_ERROR because most likely they indicate 2471 * back pressure rather than HW failures. 2472 */ 2473 case RCR_L2_ERROR: 2474 rdc_stats->l2_err++; 2475 if (rdc_stats->l2_err < 2476 error_disp_cnt) { 2477 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2478 " nxge_receive_packet:" 2479 " channel %d RCR L2_ERROR", 2480 channel)); 2481 } 2482 break; 2483 case RCR_L4_CSUM_ERROR: 2484 error_send_up = B_TRUE; 2485 rdc_stats->l4_cksum_err++; 2486 if (rdc_stats->l4_cksum_err < 2487 error_disp_cnt) { 2488 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2489 " nxge_receive_packet:" 2490 " channel %d" 2491 " RCR L4_CSUM_ERROR", channel)); 2492 } 2493 break; 2494 /* 2495 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2496 * RCR_ZCP_SOFT_ERROR because they reflect the same 2497 * FFLP and ZCP errors that have been reported by 2498 * nxge_fflp.c and nxge_zcp.c. 
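 * These soft errors are still counted per channel, and error_send_up
 * is set so the frame is passed up the stack instead of taking the
 * drop-and-repost path at the end of this error block.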
2499 */ 2500 case RCR_FFLP_SOFT_ERROR: 2501 error_send_up = B_TRUE; 2502 rdc_stats->fflp_soft_err++; 2503 if (rdc_stats->fflp_soft_err < 2504 error_disp_cnt) { 2505 NXGE_ERROR_MSG((nxgep, 2506 NXGE_ERR_CTL, 2507 " nxge_receive_packet:" 2508 " channel %d" 2509 " RCR FFLP_SOFT_ERROR", channel)); 2510 } 2511 break; 2512 case RCR_ZCP_SOFT_ERROR: 2513 error_send_up = B_TRUE; 2514 rdc_stats->fflp_soft_err++; 2515 if (rdc_stats->zcp_soft_err < 2516 error_disp_cnt) 2517 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2518 " nxge_receive_packet: Channel %d" 2519 " RCR ZCP_SOFT_ERROR", channel)); 2520 break; 2521 default: 2522 rdc_stats->rcr_unknown_err++; 2523 if (rdc_stats->rcr_unknown_err 2524 < error_disp_cnt) { 2525 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2526 " nxge_receive_packet: Channel %d" 2527 " RCR entry 0x%llx error 0x%x", 2528 rcr_entry, channel, error_type)); 2529 } 2530 break; 2531 } 2532 } 2533 2534 /* 2535 * Update and repost buffer block if max usage 2536 * count is reached. 2537 */ 2538 if (error_send_up == B_FALSE) { 2539 atomic_inc_32(&rx_msg_p->ref_cnt); 2540 if (buffer_free == B_TRUE) { 2541 rx_msg_p->free = B_TRUE; 2542 } 2543 2544 MUTEX_EXIT(&rx_rbr_p->lock); 2545 nxge_freeb(rx_msg_p); 2546 return; 2547 } 2548 } 2549 2550 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2551 "==> nxge_receive_packet: DMA sync second ")); 2552 2553 bytes_read = rcr_p->rcvd_pkt_bytes; 2554 skip_len = sw_offset_bytes + hdr_size; 2555 if (!rx_msg_p->rx_use_bcopy) { 2556 /* 2557 * For loaned up buffers, the driver reference count 2558 * will be incremented first and then the free state. 2559 */ 2560 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2561 if (first_entry) { 2562 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2563 if (l2_len < bsize - skip_len) { 2564 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2565 } else { 2566 nmp->b_wptr = &nmp->b_rptr[bsize 2567 - skip_len]; 2568 } 2569 } else { 2570 if (l2_len - bytes_read < bsize) { 2571 nmp->b_wptr = 2572 &nmp->b_rptr[l2_len - bytes_read]; 2573 } else { 2574 nmp->b_wptr = &nmp->b_rptr[bsize]; 2575 } 2576 } 2577 } 2578 } else { 2579 if (first_entry) { 2580 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2581 l2_len < bsize - skip_len ? 2582 l2_len : bsize - skip_len); 2583 } else { 2584 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2585 l2_len - bytes_read < bsize ? 2586 l2_len - bytes_read : bsize); 2587 } 2588 } 2589 if (nmp != NULL) { 2590 if (first_entry) { 2591 /* 2592 * Jumbo packets may be received with more than one 2593 * buffer, increment ipackets for the first entry only. 2594 */ 2595 rdc_stats->ipackets++; 2596 2597 /* Update ibytes for kstat. */ 2598 rdc_stats->ibytes += skip_len 2599 + l2_len < bsize ? l2_len : bsize; 2600 /* 2601 * Update the number of bytes read so far for the 2602 * current frame. 2603 */ 2604 bytes_read = nmp->b_wptr - nmp->b_rptr; 2605 } else { 2606 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 
2607 l2_len - bytes_read : bsize; 2608 bytes_read += nmp->b_wptr - nmp->b_rptr; 2609 } 2610 2611 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2612 "==> nxge_receive_packet after dupb: " 2613 "rbr consumed %d " 2614 "pktbufsz_type %d " 2615 "nmp $%p rptr $%p wptr $%p " 2616 "buf_offset %d bzise %d l2_len %d skip_len %d", 2617 rx_rbr_p->rbr_consumed, 2618 pktbufsz_type, 2619 nmp, nmp->b_rptr, nmp->b_wptr, 2620 buf_offset, bsize, l2_len, skip_len)); 2621 } else { 2622 cmn_err(CE_WARN, "!nxge_receive_packet: " 2623 "update stats (error)"); 2624 atomic_inc_32(&rx_msg_p->ref_cnt); 2625 if (buffer_free == B_TRUE) { 2626 rx_msg_p->free = B_TRUE; 2627 } 2628 MUTEX_EXIT(&rx_rbr_p->lock); 2629 nxge_freeb(rx_msg_p); 2630 return; 2631 } 2632 2633 if (buffer_free == B_TRUE) { 2634 rx_msg_p->free = B_TRUE; 2635 } 2636 2637 is_valid = (nmp != NULL); 2638 2639 rcr_p->rcvd_pkt_bytes = bytes_read; 2640 2641 MUTEX_EXIT(&rx_rbr_p->lock); 2642 2643 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2644 atomic_inc_32(&rx_msg_p->ref_cnt); 2645 nxge_freeb(rx_msg_p); 2646 } 2647 2648 if (is_valid) { 2649 nmp->b_cont = NULL; 2650 if (first_entry) { 2651 *mp = nmp; 2652 *mp_cont = NULL; 2653 } else { 2654 *mp_cont = nmp; 2655 } 2656 } 2657 2658 /* 2659 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2660 * If a packet is not fragmented and no error bit is set, then 2661 * L4 checksum is OK. 2662 */ 2663 2664 if (is_valid && !multi) { 2665 /* 2666 * If the checksum flag nxge_chksum_offload 2667 * is 1, TCP and UDP packets can be sent 2668 * up with good checksum. If the checksum flag 2669 * is set to 0, checksum reporting will apply to 2670 * TCP packets only (workaround for a hardware bug). 2671 * If the checksum flag nxge_cksum_offload is 2672 * greater than 1, both TCP and UDP packets 2673 * will not be reported its hardware checksum results. 2674 */ 2675 if (nxge_cksum_offload == 1) { 2676 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2677 pkt_type == RCR_PKT_IS_UDP) ? 2678 B_TRUE: B_FALSE); 2679 } else if (!nxge_cksum_offload) { 2680 /* TCP checksum only. */ 2681 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2682 B_TRUE: B_FALSE); 2683 } 2684 2685 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2686 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2687 is_valid, multi, is_tcp_udp, frag, error_type)); 2688 2689 if (is_tcp_udp && !frag && !error_type) { 2690 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2691 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2692 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2693 "==> nxge_receive_packet: Full tcp/udp cksum " 2694 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2695 "error %d", 2696 is_valid, multi, is_tcp_udp, frag, error_type)); 2697 } 2698 } 2699 2700 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2701 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2702 2703 *multi_p = (multi == RCR_MULTI_MASK); 2704 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2705 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2706 *multi_p, nmp, *mp, *mp_cont)); 2707 } 2708 2709 /* 2710 * Enable polling for a ring. Interrupt for the ring is disabled when 2711 * the nxge interrupt comes (see nxge_rx_intr). 
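 *
 * Rough sequence of the poll-mode handshake (a sketch of how these
 * entry points interact, not additional driver code):
 *
 *	nxge_enable_poll(ring)       poll_flag = 1
 *	nxge_rx_intr()               sees poll_flag, leaves mex at 0 and
 *	                             disarms the group: interrupt off
 *	nxge_rx_poll(ring, budget)   mac layer drains up to 'budget'
 *	                             bytes via nxge_rx_pkts()
 *	nxge_disable_poll(ring)      poll_flag = 0, mex = 1, group
 *	                             re-armed: back to interrupt mode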
2712 */ 2713 int 2714 nxge_enable_poll(void *arg) 2715 { 2716 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2717 p_rx_rcr_ring_t ringp; 2718 p_nxge_t nxgep; 2719 p_nxge_ldg_t ldgp; 2720 uint32_t channel; 2721 2722 if (ring_handle == NULL) { 2723 return (0); 2724 } 2725 2726 nxgep = ring_handle->nxgep; 2727 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2728 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2729 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2730 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2731 ldgp = ringp->ldgp; 2732 if (ldgp == NULL) { 2733 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2734 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2735 ringp->rdc)); 2736 return (0); 2737 } 2738 2739 MUTEX_ENTER(&ringp->lock); 2740 /* enable polling */ 2741 if (ringp->poll_flag == 0) { 2742 ringp->poll_flag = 1; 2743 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2744 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2745 ringp->rdc)); 2746 } 2747 2748 MUTEX_EXIT(&ringp->lock); 2749 return (0); 2750 } 2751 /* 2752 * Disable polling for a ring and enable its interrupt. 2753 */ 2754 int 2755 nxge_disable_poll(void *arg) 2756 { 2757 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2758 p_rx_rcr_ring_t ringp; 2759 p_nxge_t nxgep; 2760 uint32_t channel; 2761 2762 if (ring_handle == NULL) { 2763 return (0); 2764 } 2765 2766 nxgep = ring_handle->nxgep; 2767 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2768 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2769 2770 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2771 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2772 2773 MUTEX_ENTER(&ringp->lock); 2774 2775 /* disable polling: enable interrupt */ 2776 if (ringp->poll_flag) { 2777 npi_handle_t handle; 2778 rx_dma_ctl_stat_t cs; 2779 uint8_t channel; 2780 p_nxge_ldg_t ldgp; 2781 2782 /* 2783 * Get the control and status for this channel. 2784 */ 2785 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2786 channel = ringp->rdc; 2787 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2788 channel, &cs.value); 2789 2790 /* 2791 * Enable mailbox update 2792 * Since packets were not read and the hardware uses 2793 * bits pktread and ptrread to update the queue 2794 * length, we need to set both bits to 0. 2795 */ 2796 cs.bits.ldw.pktread = 0; 2797 cs.bits.ldw.ptrread = 0; 2798 cs.bits.hdw.mex = 1; 2799 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2800 cs.value); 2801 2802 /* 2803 * Rearm this logical group if this is a single device 2804 * group. 2805 */ 2806 ldgp = ringp->ldgp; 2807 if (ldgp == NULL) { 2808 ringp->poll_flag = 0; 2809 MUTEX_EXIT(&ringp->lock); 2810 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2811 "==> nxge_disable_poll: no ldgp rdc %d " 2812 "(still set poll to 0", ringp->rdc)); 2813 return (0); 2814 } 2815 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2816 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2817 ringp->rdc, ldgp)); 2818 if (ldgp->nldvs == 1) { 2819 ldgimgm_t mgm; 2820 mgm.value = 0; 2821 mgm.bits.ldw.arm = 1; 2822 mgm.bits.ldw.timer = ldgp->ldg_timer; 2823 NXGE_REG_WR64(handle, 2824 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 2825 } 2826 ringp->poll_flag = 0; 2827 } 2828 2829 MUTEX_EXIT(&ringp->lock); 2830 return (0); 2831 } 2832 2833 /* 2834 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
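 * Called by the mac layer only while the ring is in poll mode
 * (poll_flag == 1, asserted below).  Returns a chain of roughly
 * bytes_to_pickup bytes, or NULL if the RCR is empty; the budget is
 * a soft limit because nxge_rx_pkts() checks it only after finishing
 * a frame.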
2835 */ 2836 mblk_t * 2837 nxge_rx_poll(void *arg, int bytes_to_pickup) 2838 { 2839 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2840 p_rx_rcr_ring_t rcr_p; 2841 p_nxge_t nxgep; 2842 npi_handle_t handle; 2843 rx_dma_ctl_stat_t cs; 2844 mblk_t *mblk; 2845 p_nxge_ldv_t ldvp; 2846 uint32_t channel; 2847 2848 nxgep = ring_handle->nxgep; 2849 2850 /* 2851 * Get the control and status for this channel. 2852 */ 2853 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2854 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2855 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2856 MUTEX_ENTER(&rcr_p->lock); 2857 ASSERT(rcr_p->poll_flag == 1); 2858 2859 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2860 2861 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2862 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2863 rcr_p->rdc, rcr_p->poll_flag)); 2864 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2865 2866 ldvp = rcr_p->ldvp; 2867 /* error events. */ 2868 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2869 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2870 } 2871 2872 MUTEX_EXIT(&rcr_p->lock); 2873 2874 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2875 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 2876 return (mblk); 2877 } 2878 2879 2880 /*ARGSUSED*/ 2881 static nxge_status_t 2882 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2883 { 2884 p_nxge_rx_ring_stats_t rdc_stats; 2885 npi_handle_t handle; 2886 npi_status_t rs; 2887 boolean_t rxchan_fatal = B_FALSE; 2888 boolean_t rxport_fatal = B_FALSE; 2889 uint8_t portn; 2890 nxge_status_t status = NXGE_OK; 2891 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2892 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2893 2894 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2895 portn = nxgep->mac.portnum; 2896 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2897 2898 if (cs.bits.hdw.rbr_tmout) { 2899 rdc_stats->rx_rbr_tmout++; 2900 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2901 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2902 rxchan_fatal = B_TRUE; 2903 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2904 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2905 } 2906 if (cs.bits.hdw.rsp_cnt_err) { 2907 rdc_stats->rsp_cnt_err++; 2908 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2909 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2910 rxchan_fatal = B_TRUE; 2911 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2912 "==> nxge_rx_err_evnts(channel %d): " 2913 "rsp_cnt_err", channel)); 2914 } 2915 if (cs.bits.hdw.byte_en_bus) { 2916 rdc_stats->byte_en_bus++; 2917 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2918 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2919 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2920 "==> nxge_rx_err_evnts(channel %d): " 2921 "fatal error: byte_en_bus", channel)); 2922 rxchan_fatal = B_TRUE; 2923 } 2924 if (cs.bits.hdw.rsp_dat_err) { 2925 rdc_stats->rsp_dat_err++; 2926 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2927 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2928 rxchan_fatal = B_TRUE; 2929 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2930 "==> nxge_rx_err_evnts(channel %d): " 2931 "fatal error: rsp_dat_err", channel)); 2932 } 2933 if (cs.bits.hdw.rcr_ack_err) { 2934 rdc_stats->rcr_ack_err++; 2935 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2936 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2937 rxchan_fatal = B_TRUE; 2938 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2939 "==> nxge_rx_err_evnts(channel %d): " 2940 "fatal error: rcr_ack_err", channel)); 2941 } 2942 if (cs.bits.hdw.dc_fifo_err) { 2943 rdc_stats->dc_fifo_err++; 2944 
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2945 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2946 /* This is not a fatal error! */ 2947 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2948 "==> nxge_rx_err_evnts(channel %d): " 2949 "dc_fifo_err", channel)); 2950 rxport_fatal = B_TRUE; 2951 } 2952 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2953 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2954 &rdc_stats->errlog.pre_par, 2955 &rdc_stats->errlog.sha_par)) 2956 != NPI_SUCCESS) { 2957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2958 "==> nxge_rx_err_evnts(channel %d): " 2959 "rcr_sha_par: get perr", channel)); 2960 return (NXGE_ERROR | rs); 2961 } 2962 if (cs.bits.hdw.rcr_sha_par) { 2963 rdc_stats->rcr_sha_par++; 2964 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2965 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2966 rxchan_fatal = B_TRUE; 2967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2968 "==> nxge_rx_err_evnts(channel %d): " 2969 "fatal error: rcr_sha_par", channel)); 2970 } 2971 if (cs.bits.hdw.rbr_pre_par) { 2972 rdc_stats->rbr_pre_par++; 2973 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2974 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2975 rxchan_fatal = B_TRUE; 2976 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2977 "==> nxge_rx_err_evnts(channel %d): " 2978 "fatal error: rbr_pre_par", channel)); 2979 } 2980 } 2981 /* 2982 * The Following 4 status bits are for information, the system 2983 * is running fine. There is no need to send FMA ereports or 2984 * log messages. 2985 */ 2986 if (cs.bits.hdw.port_drop_pkt) { 2987 rdc_stats->port_drop_pkt++; 2988 } 2989 if (cs.bits.hdw.wred_drop) { 2990 rdc_stats->wred_drop++; 2991 } 2992 if (cs.bits.hdw.rbr_pre_empty) { 2993 rdc_stats->rbr_pre_empty++; 2994 } 2995 if (cs.bits.hdw.rcr_shadow_full) { 2996 rdc_stats->rcr_shadow_full++; 2997 } 2998 if (cs.bits.hdw.config_err) { 2999 rdc_stats->config_err++; 3000 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3001 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3002 rxchan_fatal = B_TRUE; 3003 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3004 "==> nxge_rx_err_evnts(channel %d): " 3005 "config error", channel)); 3006 } 3007 if (cs.bits.hdw.rcrincon) { 3008 rdc_stats->rcrincon++; 3009 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3010 NXGE_FM_EREPORT_RDMC_RCRINCON); 3011 rxchan_fatal = B_TRUE; 3012 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3013 "==> nxge_rx_err_evnts(channel %d): " 3014 "fatal error: rcrincon error", channel)); 3015 } 3016 if (cs.bits.hdw.rcrfull) { 3017 rdc_stats->rcrfull++; 3018 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3019 NXGE_FM_EREPORT_RDMC_RCRFULL); 3020 rxchan_fatal = B_TRUE; 3021 if (rdc_stats->rcrfull < error_disp_cnt) 3022 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3023 "==> nxge_rx_err_evnts(channel %d): " 3024 "fatal error: rcrfull error", channel)); 3025 } 3026 if (cs.bits.hdw.rbr_empty) { 3027 /* 3028 * This bit is for information, there is no need 3029 * send FMA ereport or log a message. 
3030 */ 3031 rdc_stats->rbr_empty++; 3032 } 3033 if (cs.bits.hdw.rbrfull) { 3034 rdc_stats->rbrfull++; 3035 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3036 NXGE_FM_EREPORT_RDMC_RBRFULL); 3037 rxchan_fatal = B_TRUE; 3038 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3039 "==> nxge_rx_err_evnts(channel %d): " 3040 "fatal error: rbr_full error", channel)); 3041 } 3042 if (cs.bits.hdw.rbrlogpage) { 3043 rdc_stats->rbrlogpage++; 3044 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3045 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3046 rxchan_fatal = B_TRUE; 3047 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3048 "==> nxge_rx_err_evnts(channel %d): " 3049 "fatal error: rbr logical page error", channel)); 3050 } 3051 if (cs.bits.hdw.cfiglogpage) { 3052 rdc_stats->cfiglogpage++; 3053 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3054 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3055 rxchan_fatal = B_TRUE; 3056 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3057 "==> nxge_rx_err_evnts(channel %d): " 3058 "fatal error: cfig logical page error", channel)); 3059 } 3060 3061 if (rxport_fatal) { 3062 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3063 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3064 portn)); 3065 if (isLDOMguest(nxgep)) { 3066 status = NXGE_ERROR; 3067 } else { 3068 status = nxge_ipp_fatal_err_recover(nxgep); 3069 if (status == NXGE_OK) { 3070 FM_SERVICE_RESTORED(nxgep); 3071 } 3072 } 3073 } 3074 3075 if (rxchan_fatal) { 3076 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3077 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3078 channel)); 3079 if (isLDOMguest(nxgep)) { 3080 status = NXGE_ERROR; 3081 } else { 3082 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3083 if (status == NXGE_OK) { 3084 FM_SERVICE_RESTORED(nxgep); 3085 } 3086 } 3087 } 3088 3089 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3090 3091 return (status); 3092 } 3093 3094 /* 3095 * nxge_rdc_hvio_setup 3096 * 3097 * This code appears to setup some Hypervisor variables. 3098 * 3099 * Arguments: 3100 * nxgep 3101 * channel 3102 * 3103 * Notes: 3104 * What does NIU_LP_WORKAROUND mean? 
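 *	(Best guess only, not confirmed anywhere in this file: it
 *	appears to guard the sun4v NIU logical-page path, where the
 *	buffer and control ring addresses/sizes saved here are later
 *	handed to the hypervisor rather than programmed through the
 *	regular logical-page registers.)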
3105 * 3106 * NPI/NXGE function calls: 3107 * na 3108 * 3109 * Context: 3110 * Any domain 3111 */ 3112 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3113 static void 3114 nxge_rdc_hvio_setup( 3115 nxge_t *nxgep, int channel) 3116 { 3117 nxge_dma_common_t *dma_common; 3118 nxge_dma_common_t *dma_control; 3119 rx_rbr_ring_t *ring; 3120 3121 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3122 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3123 3124 ring->hv_set = B_FALSE; 3125 3126 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3127 dma_common->orig_ioaddr_pp; 3128 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3129 dma_common->orig_alength; 3130 3131 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3132 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3133 channel, ring->hv_rx_buf_base_ioaddr_pp, 3134 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3135 dma_common->orig_alength, dma_common->orig_alength)); 3136 3137 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3138 3139 ring->hv_rx_cntl_base_ioaddr_pp = 3140 (uint64_t)dma_control->orig_ioaddr_pp; 3141 ring->hv_rx_cntl_ioaddr_size = 3142 (uint64_t)dma_control->orig_alength; 3143 3144 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3145 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3146 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3147 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3148 dma_control->orig_alength, dma_control->orig_alength)); 3149 } 3150 #endif 3151 3152 /* 3153 * nxge_map_rxdma 3154 * 3155 * Map an RDC into our kernel space. 3156 * 3157 * Arguments: 3158 * nxgep 3159 * channel The channel to map. 3160 * 3161 * Notes: 3162 * 1. Allocate & initialise a memory pool, if necessary. 3163 * 2. Allocate however many receive buffers are required. 3164 * 3. Setup buffers, descriptors, and mailbox. 3165 * 3166 * NPI/NXGE function calls: 3167 * nxge_alloc_rx_mem_pool() 3168 * nxge_alloc_rbb() 3169 * nxge_map_rxdma_channel() 3170 * 3171 * Registers accessed: 3172 * 3173 * Context: 3174 * Any domain 3175 */ 3176 static nxge_status_t 3177 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3178 { 3179 nxge_dma_common_t **data; 3180 nxge_dma_common_t **control; 3181 rx_rbr_ring_t **rbr_ring; 3182 rx_rcr_ring_t **rcr_ring; 3183 rx_mbox_t **mailbox; 3184 uint32_t chunks; 3185 3186 nxge_status_t status; 3187 3188 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3189 3190 if (!nxgep->rx_buf_pool_p) { 3191 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3192 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3193 "<== nxge_map_rxdma: buf not allocated")); 3194 return (NXGE_ERROR); 3195 } 3196 } 3197 3198 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3199 return (NXGE_ERROR); 3200 3201 /* 3202 * Map descriptors from the buffer polls for each dma channel. 3203 */ 3204 3205 /* 3206 * Set up and prepare buffer blocks, descriptors 3207 * and mailbox. 
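 * Per channel, three things are pulled out of the shared pools and
 * handed to nxge_map_rxdma_channel():
 *
 *	rx_buf_pool_p[channel]    receive buffer blocks (RBR data)
 *	rx_cntl_pool_p[channel]   RBR/RCR descriptors and the mailbox
 *	num_chunks[channel]       how many buffer chunks were allocated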
3208 */ 3209 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3210 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3211 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3212 3213 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3214 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3215 3216 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3217 3218 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3219 chunks, control, rcr_ring, mailbox); 3220 if (status != NXGE_OK) { 3221 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3222 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3223 "returned 0x%x", 3224 channel, status)); 3225 return (status); 3226 } 3227 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3228 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3229 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3230 &nxgep->statsp->rdc_stats[channel]; 3231 3232 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3233 if (!isLDOMguest(nxgep)) 3234 nxge_rdc_hvio_setup(nxgep, channel); 3235 #endif 3236 3237 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3238 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3239 3240 return (status); 3241 } 3242 3243 static void 3244 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3245 { 3246 rx_rbr_ring_t *rbr_ring; 3247 rx_rcr_ring_t *rcr_ring; 3248 rx_mbox_t *mailbox; 3249 3250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3251 3252 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3253 !nxgep->rx_mbox_areas_p) 3254 return; 3255 3256 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3257 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3258 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3259 3260 if (!rbr_ring || !rcr_ring || !mailbox) 3261 return; 3262 3263 (void) nxge_unmap_rxdma_channel( 3264 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3265 3266 nxge_free_rxb(nxgep, channel); 3267 3268 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3269 } 3270 3271 nxge_status_t 3272 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3273 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3274 uint32_t num_chunks, 3275 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3276 p_rx_mbox_t *rx_mbox_p) 3277 { 3278 int status = NXGE_OK; 3279 3280 /* 3281 * Set up and prepare buffer blocks, descriptors 3282 * and mailbox. 3283 */ 3284 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3285 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3286 /* 3287 * Receive buffer blocks 3288 */ 3289 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3290 dma_buf_p, rbr_p, num_chunks); 3291 if (status != NXGE_OK) { 3292 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3293 "==> nxge_map_rxdma_channel (channel %d): " 3294 "map buffer failed 0x%x", channel, status)); 3295 goto nxge_map_rxdma_channel_exit; 3296 } 3297 3298 /* 3299 * Receive block ring, completion ring and mailbox. 
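 * On failure, the labels below unwind in reverse order: fail3
 * (currently unreferenced) would first release the descriptor rings
 * and mailbox, then fall into fail2, which releases the buffer ring
 * mapped above.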
3300 */ 3301 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3302 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3303 if (status != NXGE_OK) { 3304 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3305 "==> nxge_map_rxdma_channel (channel %d): " 3306 "map config failed 0x%x", channel, status)); 3307 goto nxge_map_rxdma_channel_fail2; 3308 } 3309 3310 goto nxge_map_rxdma_channel_exit; 3311 3312 nxge_map_rxdma_channel_fail3: 3313 /* Free rbr, rcr */ 3314 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3315 "==> nxge_map_rxdma_channel: free rbr/rcr " 3316 "(status 0x%x channel %d)", 3317 status, channel)); 3318 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3319 *rcr_p, *rx_mbox_p); 3320 3321 nxge_map_rxdma_channel_fail2: 3322 /* Free buffer blocks */ 3323 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3324 "==> nxge_map_rxdma_channel: free rx buffers" 3325 "(nxgep 0x%x status 0x%x channel %d)", 3326 nxgep, status, channel)); 3327 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3328 3329 status = NXGE_ERROR; 3330 3331 nxge_map_rxdma_channel_exit: 3332 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3333 "<== nxge_map_rxdma_channel: " 3334 "(nxgep 0x%x status 0x%x channel %d)", 3335 nxgep, status, channel)); 3336 3337 return (status); 3338 } 3339 3340 /*ARGSUSED*/ 3341 static void 3342 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3343 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3344 { 3345 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3346 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3347 3348 /* 3349 * unmap receive block ring, completion ring and mailbox. 3350 */ 3351 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3352 rcr_p, rx_mbox_p); 3353 3354 /* unmap buffer blocks */ 3355 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3356 3357 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3358 } 3359 3360 /*ARGSUSED*/ 3361 static nxge_status_t 3362 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3363 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3364 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3365 { 3366 p_rx_rbr_ring_t rbrp; 3367 p_rx_rcr_ring_t rcrp; 3368 p_rx_mbox_t mboxp; 3369 p_nxge_dma_common_t cntl_dmap; 3370 p_nxge_dma_common_t dmap; 3371 p_rx_msg_t *rx_msg_ring; 3372 p_rx_msg_t rx_msg_p; 3373 p_rbr_cfig_a_t rcfga_p; 3374 p_rbr_cfig_b_t rcfgb_p; 3375 p_rcrcfig_a_t cfga_p; 3376 p_rcrcfig_b_t cfgb_p; 3377 p_rxdma_cfig1_t cfig1_p; 3378 p_rxdma_cfig2_t cfig2_p; 3379 p_rbr_kick_t kick_p; 3380 uint32_t dmaaddrp; 3381 uint32_t *rbr_vaddrp; 3382 uint32_t bkaddr; 3383 nxge_status_t status = NXGE_OK; 3384 int i; 3385 uint32_t nxge_port_rcr_size; 3386 3387 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3388 "==> nxge_map_rxdma_channel_cfg_ring")); 3389 3390 cntl_dmap = *dma_cntl_p; 3391 3392 /* Map in the receive block ring */ 3393 rbrp = *rbr_p; 3394 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3395 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3396 /* 3397 * Zero out buffer block ring descriptors. 
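 * Once the descriptors are zeroed, RBR configuration register A is
 * assembled from the ring's DMA address and length, roughly:
 *
 *	rcfga.value  = rbr_addr & (STDADDR | STDADDR_BASE masks);
 *	rcfga.value |= (uint64_t)rbb_max << RBR_CFIG_A_LEN_SHIFT;
 *
 * and register B carries the three buffer size codes and the block
 * size code (bufsz0/1/2, bksize).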
3398 */ 3399 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3400 3401 rcfga_p = &(rbrp->rbr_cfga); 3402 rcfgb_p = &(rbrp->rbr_cfgb); 3403 kick_p = &(rbrp->rbr_kick); 3404 rcfga_p->value = 0; 3405 rcfgb_p->value = 0; 3406 kick_p->value = 0; 3407 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3408 rcfga_p->value = (rbrp->rbr_addr & 3409 (RBR_CFIG_A_STDADDR_MASK | 3410 RBR_CFIG_A_STDADDR_BASE_MASK)); 3411 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3412 3413 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3414 rcfgb_p->bits.ldw.vld0 = 1; 3415 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3416 rcfgb_p->bits.ldw.vld1 = 1; 3417 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3418 rcfgb_p->bits.ldw.vld2 = 1; 3419 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3420 3421 /* 3422 * For each buffer block, enter receive block address to the ring. 3423 */ 3424 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3425 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3426 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3427 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3428 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3429 3430 rx_msg_ring = rbrp->rx_msg_ring; 3431 for (i = 0; i < rbrp->tnblocks; i++) { 3432 rx_msg_p = rx_msg_ring[i]; 3433 rx_msg_p->nxgep = nxgep; 3434 rx_msg_p->rx_rbr_p = rbrp; 3435 bkaddr = (uint32_t) 3436 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3437 >> RBR_BKADDR_SHIFT)); 3438 rx_msg_p->free = B_FALSE; 3439 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3440 3441 *rbr_vaddrp++ = bkaddr; 3442 } 3443 3444 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3445 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3446 3447 rbrp->rbr_rd_index = 0; 3448 3449 rbrp->rbr_consumed = 0; 3450 rbrp->rbr_use_bcopy = B_TRUE; 3451 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3452 /* 3453 * Do bcopy on packets greater than bcopy size once 3454 * the lo threshold is reached. 3455 * This lo threshold should be less than the hi threshold. 3456 * 3457 * Do bcopy on every packet once the hi threshold is reached. 
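 *
 * Both thresholds are a fraction of rbb_max scaled by
 * NXGE_RX_BCOPY_SCALE.  A worked example with assumed values
 * (scale of 8, rbb_max of 2048, hi = NXGE_RX_COPY_6, lo =
 * NXGE_RX_COPY_3):
 *
 *	rbr_threshold_hi = 2048 * 6 / 8 = 1536
 *	rbr_threshold_lo = 2048 * 3 / 8 =  768
 *
 * so bcopy kicks in for the smaller buffer size classes once 768
 * buffers are consumed, and for every packet once 1536 are consumed.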
3458 */ 3459 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3460 /* default it to use hi */ 3461 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3462 } 3463 3464 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3465 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3466 } 3467 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3468 3469 switch (nxge_rx_threshold_hi) { 3470 default: 3471 case NXGE_RX_COPY_NONE: 3472 /* Do not do bcopy at all */ 3473 rbrp->rbr_use_bcopy = B_FALSE; 3474 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3475 break; 3476 3477 case NXGE_RX_COPY_1: 3478 case NXGE_RX_COPY_2: 3479 case NXGE_RX_COPY_3: 3480 case NXGE_RX_COPY_4: 3481 case NXGE_RX_COPY_5: 3482 case NXGE_RX_COPY_6: 3483 case NXGE_RX_COPY_7: 3484 rbrp->rbr_threshold_hi = 3485 rbrp->rbb_max * 3486 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3487 break; 3488 3489 case NXGE_RX_COPY_ALL: 3490 rbrp->rbr_threshold_hi = 0; 3491 break; 3492 } 3493 3494 switch (nxge_rx_threshold_lo) { 3495 default: 3496 case NXGE_RX_COPY_NONE: 3497 /* Do not do bcopy at all */ 3498 if (rbrp->rbr_use_bcopy) { 3499 rbrp->rbr_use_bcopy = B_FALSE; 3500 } 3501 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3502 break; 3503 3504 case NXGE_RX_COPY_1: 3505 case NXGE_RX_COPY_2: 3506 case NXGE_RX_COPY_3: 3507 case NXGE_RX_COPY_4: 3508 case NXGE_RX_COPY_5: 3509 case NXGE_RX_COPY_6: 3510 case NXGE_RX_COPY_7: 3511 rbrp->rbr_threshold_lo = 3512 rbrp->rbb_max * 3513 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3514 break; 3515 3516 case NXGE_RX_COPY_ALL: 3517 rbrp->rbr_threshold_lo = 0; 3518 break; 3519 } 3520 3521 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3522 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3523 "rbb_max %d " 3524 "rbrp->rbr_bufsize_type %d " 3525 "rbb_threshold_hi %d " 3526 "rbb_threshold_lo %d", 3527 dma_channel, 3528 rbrp->rbb_max, 3529 rbrp->rbr_bufsize_type, 3530 rbrp->rbr_threshold_hi, 3531 rbrp->rbr_threshold_lo)); 3532 3533 rbrp->page_valid.value = 0; 3534 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3535 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3536 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3537 rbrp->page_hdl.value = 0; 3538 3539 rbrp->page_valid.bits.ldw.page0 = 1; 3540 rbrp->page_valid.bits.ldw.page1 = 1; 3541 3542 /* Map in the receive completion ring */ 3543 rcrp = (p_rx_rcr_ring_t) 3544 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3545 rcrp->rdc = dma_channel; 3546 3547 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3548 rcrp->comp_size = nxge_port_rcr_size; 3549 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3550 3551 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3552 3553 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3554 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3555 sizeof (rcr_entry_t)); 3556 rcrp->comp_rd_index = 0; 3557 rcrp->comp_wt_index = 0; 3558 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3559 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3560 #if defined(__i386) 3561 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3562 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3563 #else 3564 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3565 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3566 #endif 3567 3568 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3569 (nxge_port_rcr_size - 1); 3570 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3571 (nxge_port_rcr_size - 1); 3572 3573 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3574 "==> nxge_map_rxdma_channel_cfg_ring: " 3575 "channel %d " 3576 "rbr_vaddrp $%p " 3577 "rcr_desc_rd_head_p $%p " 3578 "rcr_desc_rd_head_pp 
$%p " 3579 "rcr_desc_rd_last_p $%p " 3580 "rcr_desc_rd_last_pp $%p ", 3581 dma_channel, 3582 rbr_vaddrp, 3583 rcrp->rcr_desc_rd_head_p, 3584 rcrp->rcr_desc_rd_head_pp, 3585 rcrp->rcr_desc_last_p, 3586 rcrp->rcr_desc_last_pp)); 3587 3588 /* 3589 * Zero out buffer block ring descriptors. 3590 */ 3591 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3592 3593 rcrp->intr_timeout = (nxgep->intr_timeout < 3594 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3595 nxgep->intr_timeout; 3596 3597 rcrp->intr_threshold = (nxgep->intr_threshold < 3598 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3599 nxgep->intr_threshold; 3600 3601 rcrp->full_hdr_flag = B_FALSE; 3602 rcrp->sw_priv_hdr_len = 0; 3603 3604 cfga_p = &(rcrp->rcr_cfga); 3605 cfgb_p = &(rcrp->rcr_cfgb); 3606 cfga_p->value = 0; 3607 cfgb_p->value = 0; 3608 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3609 cfga_p->value = (rcrp->rcr_addr & 3610 (RCRCFIG_A_STADDR_MASK | 3611 RCRCFIG_A_STADDR_BASE_MASK)); 3612 3613 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3614 RCRCFIG_A_LEN_SHIF); 3615 3616 /* 3617 * Timeout should be set based on the system clock divider. 3618 * A timeout value of 1 assumes that the 3619 * granularity (1000) is 3 microseconds running at 300MHz. 3620 */ 3621 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3622 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3623 cfgb_p->bits.ldw.entout = 1; 3624 3625 /* Map in the mailbox */ 3626 mboxp = (p_rx_mbox_t) 3627 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3628 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3629 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3630 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3631 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3632 cfig1_p->value = cfig2_p->value = 0; 3633 3634 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3635 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3636 "==> nxge_map_rxdma_channel_cfg_ring: " 3637 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3638 dma_channel, cfig1_p->value, cfig2_p->value, 3639 mboxp->mbox_addr)); 3640 3641 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3642 & 0xfff); 3643 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3644 3645 3646 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3647 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3648 RXDMA_CFIG2_MBADDR_L_MASK); 3649 3650 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3651 3652 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3653 "==> nxge_map_rxdma_channel_cfg_ring: " 3654 "channel %d damaddrp $%p " 3655 "cfg1 0x%016llx cfig2 0x%016llx", 3656 dma_channel, dmaaddrp, 3657 cfig1_p->value, cfig2_p->value)); 3658 3659 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3660 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3661 3662 rbrp->rx_rcr_p = rcrp; 3663 rcrp->rx_rbr_p = rbrp; 3664 *rcr_p = rcrp; 3665 *rx_mbox_p = mboxp; 3666 3667 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3668 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3669 3670 return (status); 3671 } 3672 3673 /*ARGSUSED*/ 3674 static void 3675 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3676 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3677 { 3678 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3679 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3680 rcr_p->rdc)); 3681 3682 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3683 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3684 3685 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3686 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3687 } 3688 3689 static nxge_status_t 3690 
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3691 p_nxge_dma_common_t *dma_buf_p, 3692 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3693 { 3694 p_rx_rbr_ring_t rbrp; 3695 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3696 p_rx_msg_t *rx_msg_ring; 3697 p_rx_msg_t rx_msg_p; 3698 p_mblk_t mblk_p; 3699 3700 rxring_info_t *ring_info; 3701 nxge_status_t status = NXGE_OK; 3702 int i, j, index; 3703 uint32_t size, bsize, nblocks, nmsgs; 3704 3705 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3706 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3707 channel)); 3708 3709 dma_bufp = tmp_bufp = *dma_buf_p; 3710 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3711 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3712 "chunks bufp 0x%016llx", 3713 channel, num_chunks, dma_bufp)); 3714 3715 nmsgs = 0; 3716 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3717 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3718 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3719 "bufp 0x%016llx nblocks %d nmsgs %d", 3720 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3721 nmsgs += tmp_bufp->nblocks; 3722 } 3723 if (!nmsgs) { 3724 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3725 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3726 "no msg blocks", 3727 channel)); 3728 status = NXGE_ERROR; 3729 goto nxge_map_rxdma_channel_buf_ring_exit; 3730 } 3731 3732 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3733 3734 size = nmsgs * sizeof (p_rx_msg_t); 3735 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3736 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3737 KM_SLEEP); 3738 3739 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3740 (void *)nxgep->interrupt_cookie); 3741 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3742 (void *)nxgep->interrupt_cookie); 3743 rbrp->rdc = channel; 3744 rbrp->num_blocks = num_chunks; 3745 rbrp->tnblocks = nmsgs; 3746 rbrp->rbb_max = nmsgs; 3747 rbrp->rbr_max_size = nmsgs; 3748 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3749 3750 /* 3751 * Buffer sizes suggested by NIU architect. 3752 * 256, 512 and 2K. 3753 */ 3754 3755 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3756 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3757 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3758 3759 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3760 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3761 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3762 3763 rbrp->block_size = nxgep->rx_default_block_size; 3764 3765 if (!nxgep->mac.is_jumbo) { 3766 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3767 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3768 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3769 } else { 3770 if (rbrp->block_size >= 0x2000) { 3771 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3772 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3773 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3774 } else { 3775 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3776 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3777 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3778 } 3779 } 3780 3781 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3782 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3783 "actual rbr max %d rbb_max %d nmsgs %d " 3784 "rbrp->block_size %d default_block_size %d " 3785 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3786 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3787 rbrp->block_size, nxgep->rx_default_block_size, 3788 nxge_rbr_size, nxge_rbr_spare_size)); 3789 3790 /* Map in buffers from the buffer pool. 
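 * Each chunk i contributes dma_bufp->nblocks message buffers; the
 * ring_info bookkeeping (dvma_addr, start_index, kaddr, buf_size)
 * records where each chunk begins so a packet's DMA address can be
 * mapped back to a kernel virtual address and an rx_msg ring index
 * later on (see nxge_rxbuf_pp_to_vp and nxge_rxbuf_index_info_init).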
*/ 3791 index = 0; 3792 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3793 bsize = dma_bufp->block_size; 3794 nblocks = dma_bufp->nblocks; 3795 #if defined(__i386) 3796 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3797 #else 3798 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3799 #endif 3800 ring_info->buffer[i].buf_index = i; 3801 ring_info->buffer[i].buf_size = dma_bufp->alength; 3802 ring_info->buffer[i].start_index = index; 3803 #if defined(__i386) 3804 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3805 #else 3806 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3807 #endif 3808 3809 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3810 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3811 "chunk %d" 3812 " nblocks %d chunk_size %x block_size 0x%x " 3813 "dma_bufp $%p", channel, i, 3814 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3815 dma_bufp)); 3816 3817 for (j = 0; j < nblocks; j++) { 3818 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3819 dma_bufp)) == NULL) { 3820 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3821 "allocb failed (index %d i %d j %d)", 3822 index, i, j)); 3823 goto nxge_map_rxdma_channel_buf_ring_fail1; 3824 } 3825 rx_msg_ring[index] = rx_msg_p; 3826 rx_msg_p->block_index = index; 3827 rx_msg_p->shifted_addr = (uint32_t) 3828 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3829 RBR_BKADDR_SHIFT)); 3830 3831 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3832 "index %d j %d rx_msg_p $%p mblk %p", 3833 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3834 3835 mblk_p = rx_msg_p->rx_mblk_p; 3836 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3837 3838 rbrp->rbr_ref_cnt++; 3839 index++; 3840 rx_msg_p->buf_dma.dma_channel = channel; 3841 } 3842 3843 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3844 if (dma_bufp->contig_alloc_type) { 3845 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3846 } 3847 3848 if (dma_bufp->kmem_alloc_type) { 3849 rbrp->rbr_alloc_type = KMEM_ALLOC; 3850 } 3851 3852 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3853 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3854 "chunk %d" 3855 " nblocks %d chunk_size %x block_size 0x%x " 3856 "dma_bufp $%p", 3857 channel, i, 3858 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3859 dma_bufp)); 3860 } 3861 if (i < rbrp->num_blocks) { 3862 goto nxge_map_rxdma_channel_buf_ring_fail1; 3863 } 3864 3865 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3866 "nxge_map_rxdma_channel_buf_ring: done buf init " 3867 "channel %d msg block entries %d", 3868 channel, index)); 3869 ring_info->block_size_mask = bsize - 1; 3870 rbrp->rx_msg_ring = rx_msg_ring; 3871 rbrp->dma_bufp = dma_buf_p; 3872 rbrp->ring_info = ring_info; 3873 3874 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3875 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3876 " nxge_map_rxdma_channel_buf_ring: " 3877 "channel %d done buf info init", channel)); 3878 3879 /* 3880 * Finally, permit nxge_freeb() to call nxge_post_page(). 
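 *
 * rbr_state is a small lifecycle flag for the ring:
 *
 *	RBR_POSTING    ring mapped, nxge_freeb() may repost pages
 *	RBR_UNMAPPING  teardown in progress, post_lock being destroyed
 *	RBR_UNMAPPED   ring torn down; the last nxge_freeb() reference
 *	               frees the rbr structure itself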
3881 */ 3882 rbrp->rbr_state = RBR_POSTING; 3883 3884 *rbr_p = rbrp; 3885 goto nxge_map_rxdma_channel_buf_ring_exit; 3886 3887 nxge_map_rxdma_channel_buf_ring_fail1: 3888 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3889 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3890 channel, status)); 3891 3892 index--; 3893 for (; index >= 0; index--) { 3894 rx_msg_p = rx_msg_ring[index]; 3895 if (rx_msg_p != NULL) { 3896 freeb(rx_msg_p->rx_mblk_p); 3897 rx_msg_ring[index] = NULL; 3898 } 3899 } 3900 nxge_map_rxdma_channel_buf_ring_fail: 3901 MUTEX_DESTROY(&rbrp->post_lock); 3902 MUTEX_DESTROY(&rbrp->lock); 3903 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3904 KMEM_FREE(rx_msg_ring, size); 3905 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3906 3907 status = NXGE_ERROR; 3908 3909 nxge_map_rxdma_channel_buf_ring_exit: 3910 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3911 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3912 3913 return (status); 3914 } 3915 3916 /*ARGSUSED*/ 3917 static void 3918 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3919 p_rx_rbr_ring_t rbr_p) 3920 { 3921 p_rx_msg_t *rx_msg_ring; 3922 p_rx_msg_t rx_msg_p; 3923 rxring_info_t *ring_info; 3924 int i; 3925 uint32_t size; 3926 #ifdef NXGE_DEBUG 3927 int num_chunks; 3928 #endif 3929 3930 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3931 "==> nxge_unmap_rxdma_channel_buf_ring")); 3932 if (rbr_p == NULL) { 3933 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3934 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3935 return; 3936 } 3937 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3938 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3939 rbr_p->rdc)); 3940 3941 rx_msg_ring = rbr_p->rx_msg_ring; 3942 ring_info = rbr_p->ring_info; 3943 3944 if (rx_msg_ring == NULL || ring_info == NULL) { 3945 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3946 "<== nxge_unmap_rxdma_channel_buf_ring: " 3947 "rx_msg_ring $%p ring_info $%p", 3948 rx_msg_p, ring_info)); 3949 return; 3950 } 3951 3952 #ifdef NXGE_DEBUG 3953 num_chunks = rbr_p->num_blocks; 3954 #endif 3955 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3956 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3957 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3958 "tnblocks %d (max %d) size ptrs %d ", 3959 rbr_p->rdc, num_chunks, 3960 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3961 3962 for (i = 0; i < rbr_p->tnblocks; i++) { 3963 rx_msg_p = rx_msg_ring[i]; 3964 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3965 " nxge_unmap_rxdma_channel_buf_ring: " 3966 "rx_msg_p $%p", 3967 rx_msg_p)); 3968 if (rx_msg_p != NULL) { 3969 freeb(rx_msg_p->rx_mblk_p); 3970 rx_msg_ring[i] = NULL; 3971 } 3972 } 3973 3974 /* 3975 * We no longer may use the mutex <post_lock>. By setting 3976 * <rbr_state> to anything but POSTING, we prevent 3977 * nxge_post_page() from accessing a dead mutex. 3978 */ 3979 rbr_p->rbr_state = RBR_UNMAPPING; 3980 MUTEX_DESTROY(&rbr_p->post_lock); 3981 3982 MUTEX_DESTROY(&rbr_p->lock); 3983 3984 if (rbr_p->rbr_ref_cnt == 0) { 3985 /* 3986 * This is the normal state of affairs. 3987 * Need to free the following buffers: 3988 * - data buffers 3989 * - rx_msg ring 3990 * - ring_info 3991 * - rbr ring 3992 */ 3993 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3994 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 3995 nxge_rxdma_databuf_free(rbr_p); 3996 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3997 KMEM_FREE(rx_msg_ring, size); 3998 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3999 } else { 4000 /* 4001 * Some of our buffers are still being used. 4002 * Therefore, tell nxge_freeb() this ring is 4003 * unmapped, so it may free <rbr_p> for us. 
4004 */ 4005 rbr_p->rbr_state = RBR_UNMAPPED; 4006 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4007 "unmap_rxdma_buf_ring: %d %s outstanding.", 4008 rbr_p->rbr_ref_cnt, 4009 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 4010 } 4011 4012 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4013 "<== nxge_unmap_rxdma_channel_buf_ring")); 4014 } 4015 4016 /* 4017 * nxge_rxdma_hw_start_common 4018 * 4019 * Arguments: 4020 * nxgep 4021 * 4022 * Notes: 4023 * 4024 * NPI/NXGE function calls: 4025 * nxge_init_fzc_rx_common(); 4026 * nxge_init_fzc_rxdma_port(); 4027 * 4028 * Registers accessed: 4029 * 4030 * Context: 4031 * Service domain 4032 */ 4033 static nxge_status_t 4034 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 4035 { 4036 nxge_status_t status = NXGE_OK; 4037 4038 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4039 4040 /* 4041 * Load the sharable parameters by writing to the 4042 * function zero control registers. These FZC registers 4043 * should be initialized only once for the entire chip. 4044 */ 4045 (void) nxge_init_fzc_rx_common(nxgep); 4046 4047 /* 4048 * Initialize the RXDMA port specific FZC control configurations. 4049 * These FZC registers are pertaining to each port. 4050 */ 4051 (void) nxge_init_fzc_rxdma_port(nxgep); 4052 4053 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4054 4055 return (status); 4056 } 4057 4058 static nxge_status_t 4059 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 4060 { 4061 int i, ndmas; 4062 p_rx_rbr_rings_t rx_rbr_rings; 4063 p_rx_rbr_ring_t *rbr_rings; 4064 p_rx_rcr_rings_t rx_rcr_rings; 4065 p_rx_rcr_ring_t *rcr_rings; 4066 p_rx_mbox_areas_t rx_mbox_areas_p; 4067 p_rx_mbox_t *rx_mbox_p; 4068 nxge_status_t status = NXGE_OK; 4069 4070 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 4071 4072 rx_rbr_rings = nxgep->rx_rbr_rings; 4073 rx_rcr_rings = nxgep->rx_rcr_rings; 4074 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4075 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4076 "<== nxge_rxdma_hw_start: NULL ring pointers")); 4077 return (NXGE_ERROR); 4078 } 4079 ndmas = rx_rbr_rings->ndmas; 4080 if (ndmas == 0) { 4081 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4082 "<== nxge_rxdma_hw_start: no dma channel allocated")); 4083 return (NXGE_ERROR); 4084 } 4085 4086 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4087 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 4088 4089 rbr_rings = rx_rbr_rings->rbr_rings; 4090 rcr_rings = rx_rcr_rings->rcr_rings; 4091 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 4092 if (rx_mbox_areas_p) { 4093 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 4094 } 4095 4096 i = channel; 4097 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4098 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 4099 ndmas, channel)); 4100 status = nxge_rxdma_start_channel(nxgep, channel, 4101 (p_rx_rbr_ring_t)rbr_rings[i], 4102 (p_rx_rcr_ring_t)rcr_rings[i], 4103 (p_rx_mbox_t)rx_mbox_p[i]); 4104 if (status != NXGE_OK) { 4105 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4106 "==> nxge_rxdma_hw_start: disable " 4107 "(status 0x%x channel %d)", status, channel)); 4108 return (status); 4109 } 4110 4111 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 4112 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4113 rx_rbr_rings, rx_rcr_rings)); 4114 4115 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4116 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 4117 4118 return (status); 4119 } 4120 4121 static void 4122 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 4123 { 4124 p_rx_rbr_rings_t rx_rbr_rings; 4125 p_rx_rcr_rings_t rx_rcr_rings; 4126 4127 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
nxge_rxdma_hw_stop")); 4128 4129 rx_rbr_rings = nxgep->rx_rbr_rings; 4130 rx_rcr_rings = nxgep->rx_rcr_rings; 4131 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4132 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4133 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 4134 return; 4135 } 4136 4137 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4138 "==> nxge_rxdma_hw_stop(channel %d)", 4139 channel)); 4140 (void) nxge_rxdma_stop_channel(nxgep, channel); 4141 4142 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 4143 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4144 rx_rbr_rings, rx_rcr_rings)); 4145 4146 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 4147 } 4148 4149 4150 static nxge_status_t 4151 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 4152 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 4153 4154 { 4155 npi_handle_t handle; 4156 npi_status_t rs = NPI_SUCCESS; 4157 rx_dma_ctl_stat_t cs; 4158 rx_dma_ent_msk_t ent_mask; 4159 nxge_status_t status = NXGE_OK; 4160 4161 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 4162 4163 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4164 4165 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 4166 "npi handle addr $%p acc $%p", 4167 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4168 4169 /* Reset RXDMA channel, but not if you're a guest. */ 4170 if (!isLDOMguest(nxgep)) { 4171 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4172 if (rs != NPI_SUCCESS) { 4173 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4174 "==> nxge_init_fzc_rdc: " 4175 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4176 channel, rs)); 4177 return (NXGE_ERROR | rs); 4178 } 4179 4180 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4181 "==> nxge_rxdma_start_channel: reset done: channel %d", 4182 channel)); 4183 } 4184 4185 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4186 if (isLDOMguest(nxgep)) 4187 (void) nxge_rdc_lp_conf(nxgep, channel); 4188 #endif 4189 4190 /* 4191 * Initialize the RXDMA channel specific FZC control 4192 * configurations. These FZC registers are pertaining 4193 * to each RX channel (logical pages). 4194 */ 4195 if (!isLDOMguest(nxgep)) { 4196 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4197 if (status != NXGE_OK) { 4198 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4199 "==> nxge_rxdma_start_channel: " 4200 "init fzc rxdma failed (0x%08x channel %d)", 4201 status, channel)); 4202 return (status); 4203 } 4204 4205 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4206 "==> nxge_rxdma_start_channel: fzc done")); 4207 } 4208 4209 /* Set up the interrupt event masks. 
*/ 4210 ent_mask.value = 0; 4211 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 4212 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4213 &ent_mask); 4214 if (rs != NPI_SUCCESS) { 4215 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4216 "==> nxge_rxdma_start_channel: " 4217 "init rxdma event masks failed " 4218 "(0x%08x channel %d)", 4219 rs, channel)); 4220 return (NXGE_ERROR | rs); 4221 } 4222 4223 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4224 "==> nxge_rxdma_start_channel: " 4225 "event done: channel %d (mask 0x%016llx)", 4226 channel, ent_mask.value)); 4227 4228 /* Initialize the receive DMA control and status register */ 4229 cs.value = 0; 4230 cs.bits.hdw.mex = 1; 4231 cs.bits.hdw.rcrthres = 1; 4232 cs.bits.hdw.rcrto = 1; 4233 cs.bits.hdw.rbr_empty = 1; 4234 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4235 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4236 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value)); 4237 if (status != NXGE_OK) { 4238 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4239 "==> nxge_rxdma_start_channel: " 4240 "init rxdma control register failed (0x%08x channel %d)", 4241 status, channel)); 4242 return (status); 4243 } 4244 4245 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4246 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4247 4248 /* 4249 * Load RXDMA descriptors, buffers, mailbox, 4250 * initialize the receive DMA channels and 4251 * enable each DMA channel. 4252 */ 4253 status = nxge_enable_rxdma_channel(nxgep, 4254 channel, rbr_p, rcr_p, mbox_p); 4255 4256 if (status != NXGE_OK) { 4257 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4258 " nxge_rxdma_start_channel: " 4259 " enable rxdma failed (0x%08x channel %d)", 4260 status, channel)); 4261 return (status); 4262 } 4263 4264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4265 "==> nxge_rxdma_start_channel: enabled channel %d", channel)); 4266 4267 if (isLDOMguest(nxgep)) { 4268 /* Add interrupt handler for this channel. 
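 * In a guest domain the receive interrupt for this channel is
 * registered through the Hybrid I/O layer (nxge_hio_intr_add) for
 * the VP_BOUND_RX resource.  A failure here is only logged; it does
 * not abort channel start-up.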
*/ 4269 if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4270 != NXGE_OK) { 4271 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4272 " nxge_rxdma_start_channel: " 4273 " nxge_hio_intr_add failed (channel %d)", 4274 channel)); 4275 } 4276 } 4277 4278 ent_mask.value = 0; 4279 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4280 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4281 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4282 &ent_mask); 4283 if (rs != NPI_SUCCESS) { 4284 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4285 "==> nxge_rxdma_start_channel: " 4286 "init rxdma event masks failed (0x%08x channel %d)", 4287 rs, channel)); 4288 return (NXGE_ERROR | rs); 4289 } 4290 4291 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4292 "final event mask set - channel %d (mask 0x%016llx)", channel, ent_mask.value)); 4293 4294 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4295 4296 return (NXGE_OK); 4297 } 4298 4299 static nxge_status_t 4300 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4301 { 4302 npi_handle_t handle; 4303 npi_status_t rs = NPI_SUCCESS; 4304 rx_dma_ctl_stat_t cs; 4305 rx_dma_ent_msk_t ent_mask; 4306 nxge_status_t status = NXGE_OK; 4307 4308 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4309 4310 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4311 4312 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4313 "npi handle addr $%p acc $%p", 4314 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4315 4316 if (!isLDOMguest(nxgep)) { 4317 /* 4318 * Stop RxMAC = A.9.2.6 4319 */ 4320 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4322 "nxge_rxdma_stop_channel: " 4323 "Failed to disable RxMAC")); 4324 } 4325 4326 /* 4327 * Drain IPP Port = A.9.3.6 4328 */ 4329 (void) nxge_ipp_drain(nxgep); 4330 } 4331 4332 /* Reset RXDMA channel */ 4333 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4334 if (rs != NPI_SUCCESS) { 4335 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4336 " nxge_rxdma_stop_channel: " 4337 " reset rxdma failed (0x%08x channel %d)", 4338 rs, channel)); 4339 return (NXGE_ERROR | rs); 4340 } 4341 4342 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4343 "==> nxge_rxdma_stop_channel: reset done")); 4344 4345 /* Set up the interrupt event masks. */ 4346 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4347 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4348 &ent_mask); 4349 if (rs != NPI_SUCCESS) { 4350 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4351 "==> nxge_rxdma_stop_channel: " 4352 "set rxdma event masks failed (0x%08x channel %d)", 4353 rs, channel)); 4354 return (NXGE_ERROR | rs); 4355 } 4356 4357 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4358 "==> nxge_rxdma_stop_channel: event done")); 4359 4360 /* 4361 * Initialize the receive DMA control and status register 4362 */ 4363 cs.value = 0; 4364 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4365 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4366 " to default (all 0s) 0x%016llx", cs.value)); 4367 if (status != NXGE_OK) { 4368 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4369 " nxge_rxdma_stop_channel: init rxdma" 4370 " control register failed (0x%08x channel %d)", 4371 status, channel)); 4372 return (status); 4373 } 4374 4375 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4376 "==> nxge_rxdma_stop_channel: control done")); 4377 4378 /* 4379 * Make sure channel is disabled. 
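 * The channel has already been reset and all of its events masked
 * above; disabling it here leaves the DMA engine quiesced before the
 * RxMAC is re-enabled below.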
4380 */ 4381 status = nxge_disable_rxdma_channel(nxgep, channel); 4382 4383 if (status != NXGE_OK) { 4384 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4385 " nxge_rxdma_stop_channel: " 4386 " init enable rxdma failed (0x%08x channel %d)", 4387 status, channel)); 4388 return (status); 4389 } 4390 4391 if (!isLDOMguest(nxgep)) { 4392 /* 4393 * Enable RxMAC = A.9.2.10 4394 */ 4395 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4396 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4397 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4398 } 4399 } 4400 4401 NXGE_DEBUG_MSG((nxgep, 4402 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4403 4404 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4405 4406 return (NXGE_OK); 4407 } 4408 4409 nxge_status_t 4410 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4411 { 4412 npi_handle_t handle; 4413 p_nxge_rdc_sys_stats_t statsp; 4414 rx_ctl_dat_fifo_stat_t stat; 4415 uint32_t zcp_err_status; 4416 uint32_t ipp_err_status; 4417 nxge_status_t status = NXGE_OK; 4418 npi_status_t rs = NPI_SUCCESS; 4419 boolean_t my_err = B_FALSE; 4420 4421 handle = nxgep->npi_handle; 4422 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4423 4424 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4425 4426 if (rs != NPI_SUCCESS) 4427 return (NXGE_ERROR | rs); 4428 4429 if (stat.bits.ldw.id_mismatch) { 4430 statsp->id_mismatch++; 4431 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4432 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4433 /* Global fatal error encountered */ 4434 } 4435 4436 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4437 switch (nxgep->mac.portnum) { 4438 case 0: 4439 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4440 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4441 my_err = B_TRUE; 4442 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4443 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4444 } 4445 break; 4446 case 1: 4447 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4448 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4449 my_err = B_TRUE; 4450 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4451 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4452 } 4453 break; 4454 case 2: 4455 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4456 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4457 my_err = B_TRUE; 4458 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4459 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4460 } 4461 break; 4462 case 3: 4463 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4464 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4465 my_err = B_TRUE; 4466 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4467 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4468 } 4469 break; 4470 default: 4471 return (NXGE_ERROR); 4472 } 4473 } 4474 4475 if (my_err) { 4476 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4477 zcp_err_status); 4478 if (status != NXGE_OK) 4479 return (status); 4480 } 4481 4482 return (NXGE_OK); 4483 } 4484 4485 static nxge_status_t 4486 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4487 uint32_t zcp_status) 4488 { 4489 boolean_t rxport_fatal = B_FALSE; 4490 p_nxge_rdc_sys_stats_t statsp; 4491 nxge_status_t status = NXGE_OK; 4492 uint8_t portn; 4493 4494 portn = nxgep->mac.portnum; 4495 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4496 4497 if (ipp_status & (0x1 << portn)) { 4498 statsp->ipp_eop_err++; 4499 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4500 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4501 rxport_fatal = B_TRUE; 4502 } 4503 4504 if (zcp_status & (0x1 << 
portn)) { 4505 statsp->zcp_eop_err++; 4506 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4507 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4508 rxport_fatal = B_TRUE; 4509 } 4510 4511 if (rxport_fatal) { 4512 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4513 " nxge_rxdma_handle_port_error: " 4514 " fatal error on Port #%d\n", 4515 portn)); 4516 status = nxge_rx_port_fatal_err_recover(nxgep); 4517 if (status == NXGE_OK) { 4518 FM_SERVICE_RESTORED(nxgep); 4519 } 4520 } 4521 4522 return (status); 4523 } 4524 4525 static nxge_status_t 4526 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4527 { 4528 npi_handle_t handle; 4529 npi_status_t rs = NPI_SUCCESS; 4530 nxge_status_t status = NXGE_OK; 4531 p_rx_rbr_ring_t rbrp; 4532 p_rx_rcr_ring_t rcrp; 4533 p_rx_mbox_t mboxp; 4534 rx_dma_ent_msk_t ent_mask; 4535 p_nxge_dma_common_t dmap; 4536 uint32_t ref_cnt; 4537 p_rx_msg_t rx_msg_p; 4538 int i; 4539 uint32_t nxge_port_rcr_size; 4540 4541 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4542 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4543 "Recovering from RxDMAChannel#%d error...", channel)); 4544 4545 /* 4546 * Stop the dma channel waits for the stop done. 4547 * If the stop done bit is not set, then create 4548 * an error. 4549 */ 4550 4551 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4552 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4553 4554 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel]; 4555 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel]; 4556 4557 MUTEX_ENTER(&rcrp->lock); 4558 MUTEX_ENTER(&rbrp->lock); 4559 MUTEX_ENTER(&rbrp->post_lock); 4560 4561 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4562 4563 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4564 if (rs != NPI_SUCCESS) { 4565 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4566 "nxge_disable_rxdma_channel:failed")); 4567 goto fail; 4568 } 4569 4570 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4571 4572 /* Disable interrupt */ 4573 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4574 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4575 if (rs != NPI_SUCCESS) { 4576 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4577 "nxge_rxdma_stop_channel: " 4578 "set rxdma event masks failed (channel %d)", 4579 channel)); 4580 } 4581 4582 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4583 4584 /* Reset RXDMA channel */ 4585 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4586 if (rs != NPI_SUCCESS) { 4587 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4588 "nxge_rxdma_fatal_err_recover: " 4589 " reset rxdma failed (channel %d)", channel)); 4590 goto fail; 4591 } 4592 4593 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4594 4595 mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 4596 4597 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4598 rbrp->rbr_rd_index = 0; 4599 4600 rcrp->comp_rd_index = 0; 4601 rcrp->comp_wt_index = 0; 4602 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4603 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4604 #if defined(__i386) 4605 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4606 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4607 #else 4608 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4609 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4610 #endif 4611 4612 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4613 (nxge_port_rcr_size - 1); 4614 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4615 (nxge_port_rcr_size - 1); 4616 4617 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4618 bzero((caddr_t)dmap->kaddrp, 
dmap->alength); 4619 4620 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4621 4622 for (i = 0; i < rbrp->rbr_max_size; i++) { 4623 rx_msg_p = rbrp->rx_msg_ring[i]; 4624 ref_cnt = rx_msg_p->ref_cnt; 4625 if (ref_cnt != 1) { 4626 if (rx_msg_p->cur_usage_cnt != 4627 rx_msg_p->max_usage_cnt) { 4628 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4629 "buf[%d]: cur_usage_cnt = %d " 4630 "max_usage_cnt = %d\n", i, 4631 rx_msg_p->cur_usage_cnt, 4632 rx_msg_p->max_usage_cnt)); 4633 } else { 4634 /* Buffer can be re-posted */ 4635 rx_msg_p->free = B_TRUE; 4636 rx_msg_p->cur_usage_cnt = 0; 4637 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4638 rx_msg_p->pkt_buf_size = 0; 4639 } 4640 } 4641 } 4642 4643 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4644 4645 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4646 if (status != NXGE_OK) { 4647 goto fail; 4648 } 4649 4650 MUTEX_EXIT(&rbrp->post_lock); 4651 MUTEX_EXIT(&rbrp->lock); 4652 MUTEX_EXIT(&rcrp->lock); 4653 4654 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4655 "Recovery Successful, RxDMAChannel#%d Restored", 4656 channel)); 4657 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4658 4659 return (NXGE_OK); 4660 fail: 4661 MUTEX_EXIT(&rbrp->post_lock); 4662 MUTEX_EXIT(&rbrp->lock); 4663 MUTEX_EXIT(&rcrp->lock); 4664 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4665 4666 return (NXGE_ERROR | rs); 4667 } 4668 4669 nxge_status_t 4670 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4671 { 4672 nxge_grp_set_t *set = &nxgep->rx_set; 4673 nxge_status_t status = NXGE_OK; 4674 int rdc; 4675 4676 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4677 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4678 "Recovering from RxPort error...")); 4679 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4680 4681 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4682 goto fail; 4683 4684 NXGE_DELAY(1000); 4685 4686 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4687 4688 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4689 if ((1 << rdc) & set->owned.map) { 4690 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4691 != NXGE_OK) { 4692 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4693 "Could not recover channel %d", rdc)); 4694 } 4695 } 4696 } 4697 4698 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4699 4700 /* Reset IPP */ 4701 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4702 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4703 "nxge_rx_port_fatal_err_recover: " 4704 "Failed to reset IPP")); 4705 goto fail; 4706 } 4707 4708 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4709 4710 /* Reset RxMAC */ 4711 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4712 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4713 "nxge_rx_port_fatal_err_recover: " 4714 "Failed to reset RxMAC")); 4715 goto fail; 4716 } 4717 4718 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4719 4720 /* Re-Initialize IPP */ 4721 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4722 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4723 "nxge_rx_port_fatal_err_recover: " 4724 "Failed to init IPP")); 4725 goto fail; 4726 } 4727 4728 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4729 4730 /* Re-Initialize RxMAC */ 4731 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4732 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4733 "nxge_rx_port_fatal_err_recover: " 4734 "Failed to reset RxMAC")); 4735 goto fail; 4736 } 4737 4738 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4739 4740 /* Re-enable RxMAC */ 4741 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4742 
NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4743 "nxge_rx_port_fatal_err_recover: " 4744 "Failed to enable RxMAC")); 4745 goto fail; 4746 } 4747 4748 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4749 "Recovery Successful, RxPort Restored")); 4750 4751 return (NXGE_OK); 4752 fail: 4753 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4754 return (status); 4755 } 4756 4757 void 4758 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4759 { 4760 rx_dma_ctl_stat_t cs; 4761 rx_ctl_dat_fifo_stat_t cdfs; 4762 4763 switch (err_id) { 4764 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4765 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4766 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4767 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4768 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4769 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4770 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4771 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4772 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4773 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4774 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4775 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4776 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4777 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4778 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4779 chan, &cs.value); 4780 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4781 cs.bits.hdw.rcr_ack_err = 1; 4782 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4783 cs.bits.hdw.dc_fifo_err = 1; 4784 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4785 cs.bits.hdw.rcr_sha_par = 1; 4786 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4787 cs.bits.hdw.rbr_pre_par = 1; 4788 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4789 cs.bits.hdw.rbr_tmout = 1; 4790 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4791 cs.bits.hdw.rsp_cnt_err = 1; 4792 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4793 cs.bits.hdw.byte_en_bus = 1; 4794 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4795 cs.bits.hdw.rsp_dat_err = 1; 4796 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4797 cs.bits.hdw.config_err = 1; 4798 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4799 cs.bits.hdw.rcrincon = 1; 4800 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4801 cs.bits.hdw.rcrfull = 1; 4802 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4803 cs.bits.hdw.rbrfull = 1; 4804 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4805 cs.bits.hdw.rbrlogpage = 1; 4806 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4807 cs.bits.hdw.cfiglogpage = 1; 4808 #if defined(__i386) 4809 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4810 cs.value); 4811 #else 4812 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4813 cs.value); 4814 #endif 4815 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4816 chan, cs.value); 4817 break; 4818 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4819 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4820 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4821 cdfs.value = 0; 4822 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4823 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4824 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4825 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4826 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4827 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4828 #if defined(__i386) 4829 cmn_err(CE_NOTE, 4830 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4831 cdfs.value); 4832 #else 4833 cmn_err(CE_NOTE, 4834 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4835 cdfs.value); 4836 #endif 4837 
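	/*
	 * Unlike the per-channel RX_DMA_CTL_STAT_DBG_REG write above,
	 * the control/data FIFO debug register is shared by the ports,
	 * so the injected error bit is selected by shifting 1 by the
	 * port number and the value is written once for the whole chip.
	 */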
NXGE_REG_WR64(nxgep->npi_handle, 4838 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4839 break; 4840 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4841 break; 4842 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4843 break; 4844 } 4845 } 4846 4847 static void 4848 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4849 { 4850 rxring_info_t *ring_info; 4851 int index; 4852 uint32_t chunk_size; 4853 uint64_t kaddr; 4854 uint_t num_blocks; 4855 4856 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4857 4858 if (rbr_p == NULL) { 4859 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4860 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4861 return; 4862 } 4863 4864 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4865 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4866 "<== nxge_rxdma_databuf_free: DDI")); 4867 return; 4868 } 4869 4870 ring_info = rbr_p->ring_info; 4871 if (ring_info == NULL) { 4872 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4873 "==> nxge_rxdma_databuf_free: NULL ring info")); 4874 return; 4875 } 4876 num_blocks = rbr_p->num_blocks; 4877 for (index = 0; index < num_blocks; index++) { 4878 kaddr = ring_info->buffer[index].kaddr; 4879 chunk_size = ring_info->buffer[index].buf_size; 4880 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4881 "==> nxge_rxdma_databuf_free: free chunk %d " 4882 "kaddrp $%p chunk size %d", 4883 index, kaddr, chunk_size)); 4884 if (kaddr == NULL) continue; 4885 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4886 ring_info->buffer[index].kaddr = NULL; 4887 } 4888 4889 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4890 } 4891 4892 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4893 extern void contig_mem_free(void *, size_t); 4894 #endif 4895 4896 void 4897 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4898 { 4899 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4900 4901 if (kaddr == NULL || !buf_size) { 4902 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4903 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4904 kaddr, buf_size)); 4905 return; 4906 } 4907 4908 switch (alloc_type) { 4909 case KMEM_ALLOC: 4910 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4911 "==> nxge_free_buf: freeing kmem $%p size %d", 4912 kaddr, buf_size)); 4913 #if defined(__i386) 4914 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4915 #else 4916 KMEM_FREE((void *)kaddr, buf_size); 4917 #endif 4918 break; 4919 4920 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4921 case CONTIG_MEM_ALLOC: 4922 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4923 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4924 kaddr, buf_size)); 4925 contig_mem_free((void *)kaddr, buf_size); 4926 break; 4927 #endif 4928 4929 default: 4930 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4931 "<== nxge_free_buf: unsupported alloc type %d", 4932 alloc_type)); 4933 return; 4934 } 4935 4936 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 4937 } 4938