/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;
extern uint16_t nxge_rdc_buf_offset;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
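 *
 * Like the other globals in this section, these are normally set from
 * /etc/system before the driver loads, or patched with adb/mdb on a
 * running system.  For example, the ISR packet budget above could be
 * capped with an entry such as (illustrative value only):
 *
 *	set nxge:nxge_max_rx_pkts = 512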
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

extern uint32_t nxge_cksum_offload;

static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
static void nxge_unmap_rxdma(p_nxge_t, int);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
static void nxge_rxdma_hw_stop(p_nxge_t, int);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

static mblk_t *
nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);

static void nxge_receive_packet(p_nxge_t,
	p_rx_rcr_ring_t,
	p_rcr_entry_t,
	boolean_t *,
	mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);


static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);

nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	int i, count, channel;
	nxge_grp_t *group;
	dc_map_t map;
	int dev_gindex;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

	if (!isLDOMguest(nxgep)) {
		if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
			cmn_err(CE_NOTE, "hw_start_common");
			return (NXGE_ERROR);
		}
	}

	/*
	 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
	 * We only have 8 hardware RDC tables, but we may have
	 * up to 16 logical (software-defined) groups of RDCs,
	 * if we make use of layer 3 & 4 hardware classification.
159 */ 160 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 161 if ((1 << i) & set->lg.map) { 162 group = set->group[i]; 163 dev_gindex = 164 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 165 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 166 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 167 if ((1 << channel) & map) { 168 if ((nxge_grp_dc_add(nxgep, 169 group, VP_BOUND_RX, channel))) 170 goto init_rxdma_channels_exit; 171 } 172 } 173 } 174 if (++count == set->lg.count) 175 break; 176 } 177 178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 179 return (NXGE_OK); 180 181 init_rxdma_channels_exit: 182 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 183 if ((1 << i) & set->lg.map) { 184 group = set->group[i]; 185 dev_gindex = 186 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 187 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 188 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 189 if ((1 << channel) & map) { 190 nxge_grp_dc_remove(nxgep, 191 VP_BOUND_RX, channel); 192 } 193 } 194 } 195 if (++count == set->lg.count) 196 break; 197 } 198 199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 200 return (NXGE_ERROR); 201 } 202 203 nxge_status_t 204 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 205 { 206 nxge_status_t status; 207 208 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 209 210 status = nxge_map_rxdma(nxge, channel); 211 if (status != NXGE_OK) { 212 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 213 "<== nxge_init_rxdma: status 0x%x", status)); 214 return (status); 215 } 216 217 #if defined(sun4v) 218 if (isLDOMguest(nxge)) { 219 /* set rcr_ring */ 220 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 221 222 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 223 if (status != NXGE_OK) { 224 nxge_unmap_rxdma(nxge, channel); 225 return (status); 226 } 227 } 228 #endif 229 230 status = nxge_rxdma_hw_start(nxge, channel); 231 if (status != NXGE_OK) { 232 nxge_unmap_rxdma(nxge, channel); 233 } 234 235 if (!nxge->statsp->rdc_ksp[channel]) 236 nxge_setup_rdc_kstats(nxge, channel); 237 238 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 239 "<== nxge_init_rxdma_channel: status 0x%x", status)); 240 241 return (status); 242 } 243 244 void 245 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 246 { 247 nxge_grp_set_t *set = &nxgep->rx_set; 248 int rdc; 249 250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 251 252 if (set->owned.map == 0) { 253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 254 "nxge_uninit_rxdma_channels: no channels")); 255 return; 256 } 257 258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 259 if ((1 << rdc) & set->owned.map) { 260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 261 } 262 } 263 264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 265 } 266 267 void 268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 269 { 270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 271 272 if (nxgep->statsp->rdc_ksp[channel]) { 273 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 274 nxgep->statsp->rdc_ksp[channel] = 0; 275 } 276 277 nxge_rxdma_hw_stop(nxgep, channel); 278 nxge_unmap_rxdma(nxgep, channel); 279 280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 281 } 282 283 nxge_status_t 284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 285 { 286 npi_handle_t handle; 287 npi_status_t rs = NPI_SUCCESS; 288 nxge_status_t status = NXGE_OK; 289 290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 
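	/*
	 * Reset this RDC through the NPI layer; any NPI error is folded
	 * into the NXGE status returned to the caller.
	 */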
291 292 handle = NXGE_DEV_NPI_HANDLE(nxgep); 293 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 294 295 if (rs != NPI_SUCCESS) { 296 status = NXGE_ERROR | rs; 297 } 298 299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 300 301 return (status); 302 } 303 304 void 305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 306 { 307 nxge_grp_set_t *set = &nxgep->rx_set; 308 int rdc; 309 310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 311 312 if (!isLDOMguest(nxgep)) { 313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 314 (void) npi_rxdma_dump_fzc_regs(handle); 315 } 316 317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 318 NXGE_DEBUG_MSG((nxgep, TX_CTL, 319 "nxge_rxdma_regs_dump_channels: " 320 "NULL ring pointer(s)")); 321 return; 322 } 323 324 if (set->owned.map == 0) { 325 NXGE_DEBUG_MSG((nxgep, RX_CTL, 326 "nxge_rxdma_regs_dump_channels: no channels")); 327 return; 328 } 329 330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 331 if ((1 << rdc) & set->owned.map) { 332 rx_rbr_ring_t *ring = 333 nxgep->rx_rbr_rings->rbr_rings[rdc]; 334 if (ring) { 335 (void) nxge_dump_rxdma_channel(nxgep, rdc); 336 } 337 } 338 } 339 340 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 341 } 342 343 nxge_status_t 344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 345 { 346 npi_handle_t handle; 347 npi_status_t rs = NPI_SUCCESS; 348 nxge_status_t status = NXGE_OK; 349 350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 351 352 handle = NXGE_DEV_NPI_HANDLE(nxgep); 353 rs = npi_rxdma_dump_rdc_regs(handle, channel); 354 355 if (rs != NPI_SUCCESS) { 356 status = NXGE_ERROR | rs; 357 } 358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 359 return (status); 360 } 361 362 nxge_status_t 363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 364 p_rx_dma_ent_msk_t mask_p) 365 { 366 npi_handle_t handle; 367 npi_status_t rs = NPI_SUCCESS; 368 nxge_status_t status = NXGE_OK; 369 370 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 371 "<== nxge_init_rxdma_channel_event_mask")); 372 373 handle = NXGE_DEV_NPI_HANDLE(nxgep); 374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 375 if (rs != NPI_SUCCESS) { 376 status = NXGE_ERROR | rs; 377 } 378 379 return (status); 380 } 381 382 nxge_status_t 383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 384 p_rx_dma_ctl_stat_t cs_p) 385 { 386 npi_handle_t handle; 387 npi_status_t rs = NPI_SUCCESS; 388 nxge_status_t status = NXGE_OK; 389 390 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 391 "<== nxge_init_rxdma_channel_cntl_stat")); 392 393 handle = NXGE_DEV_NPI_HANDLE(nxgep); 394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 395 396 if (rs != NPI_SUCCESS) { 397 status = NXGE_ERROR | rs; 398 } 399 400 return (status); 401 } 402 403 /* 404 * nxge_rxdma_cfg_rdcgrp_default_rdc 405 * 406 * Set the default RDC for an RDC Group (Table) 407 * 408 * Arguments: 409 * nxgep 410 * rdcgrp The group to modify 411 * rdc The new default RDC. 
412 * 413 * Notes: 414 * 415 * NPI/NXGE function calls: 416 * npi_rxdma_cfg_rdc_table_default_rdc() 417 * 418 * Registers accessed: 419 * RDC_TBL_REG: FZC_ZCP + 0x10000 420 * 421 * Context: 422 * Service domain 423 */ 424 nxge_status_t 425 nxge_rxdma_cfg_rdcgrp_default_rdc( 426 p_nxge_t nxgep, 427 uint8_t rdcgrp, 428 uint8_t rdc) 429 { 430 npi_handle_t handle; 431 npi_status_t rs = NPI_SUCCESS; 432 p_nxge_dma_pt_cfg_t p_dma_cfgp; 433 p_nxge_rdc_grp_t rdc_grp_p; 434 uint8_t actual_rdcgrp, actual_rdc; 435 436 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 439 440 handle = NXGE_DEV_NPI_HANDLE(nxgep); 441 442 /* 443 * This has to be rewritten. Do we even allow this anymore? 444 */ 445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 446 RDC_MAP_IN(rdc_grp_p->map, rdc); 447 rdc_grp_p->def_rdc = rdc; 448 449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 451 452 rs = npi_rxdma_cfg_rdc_table_default_rdc( 453 handle, actual_rdcgrp, actual_rdc); 454 455 if (rs != NPI_SUCCESS) { 456 return (NXGE_ERROR | rs); 457 } 458 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 460 return (NXGE_OK); 461 } 462 463 nxge_status_t 464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 465 { 466 npi_handle_t handle; 467 468 uint8_t actual_rdc; 469 npi_status_t rs = NPI_SUCCESS; 470 471 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 472 " ==> nxge_rxdma_cfg_port_default_rdc")); 473 474 handle = NXGE_DEV_NPI_HANDLE(nxgep); 475 actual_rdc = rdc; /* XXX Hack! */ 476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 477 478 479 if (rs != NPI_SUCCESS) { 480 return (NXGE_ERROR | rs); 481 } 482 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 483 " <== nxge_rxdma_cfg_port_default_rdc")); 484 485 return (NXGE_OK); 486 } 487 488 nxge_status_t 489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 490 uint16_t pkts) 491 { 492 npi_status_t rs = NPI_SUCCESS; 493 npi_handle_t handle; 494 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 495 " ==> nxge_rxdma_cfg_rcr_threshold")); 496 handle = NXGE_DEV_NPI_HANDLE(nxgep); 497 498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 499 500 if (rs != NPI_SUCCESS) { 501 return (NXGE_ERROR | rs); 502 } 503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 504 return (NXGE_OK); 505 } 506 507 nxge_status_t 508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 509 uint16_t tout, uint8_t enable) 510 { 511 npi_status_t rs = NPI_SUCCESS; 512 npi_handle_t handle; 513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 514 handle = NXGE_DEV_NPI_HANDLE(nxgep); 515 if (enable == 0) { 516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 517 } else { 518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 519 tout); 520 } 521 522 if (rs != NPI_SUCCESS) { 523 return (NXGE_ERROR | rs); 524 } 525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 526 return (NXGE_OK); 527 } 528 529 nxge_status_t 530 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 531 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 532 { 533 npi_handle_t handle; 534 rdc_desc_cfg_t rdc_desc; 535 p_rcrcfig_b_t cfgb_p; 536 npi_status_t rs = NPI_SUCCESS; 537 538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 539 handle = NXGE_DEV_NPI_HANDLE(nxgep); 540 /* 541 * Use configuration data composed at init time. 
542 * Write to hardware the receive ring configurations. 543 */ 544 rdc_desc.mbox_enable = 1; 545 rdc_desc.mbox_addr = mbox_p->mbox_addr; 546 NXGE_DEBUG_MSG((nxgep, RX_CTL, 547 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 548 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 549 550 rdc_desc.rbr_len = rbr_p->rbb_max; 551 rdc_desc.rbr_addr = rbr_p->rbr_addr; 552 553 switch (nxgep->rx_bksize_code) { 554 case RBR_BKSIZE_4K: 555 rdc_desc.page_size = SIZE_4KB; 556 break; 557 case RBR_BKSIZE_8K: 558 rdc_desc.page_size = SIZE_8KB; 559 break; 560 case RBR_BKSIZE_16K: 561 rdc_desc.page_size = SIZE_16KB; 562 break; 563 case RBR_BKSIZE_32K: 564 rdc_desc.page_size = SIZE_32KB; 565 break; 566 } 567 568 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 569 rdc_desc.valid0 = 1; 570 571 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 572 rdc_desc.valid1 = 1; 573 574 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 575 rdc_desc.valid2 = 1; 576 577 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 578 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 579 580 rdc_desc.rcr_len = rcr_p->comp_size; 581 rdc_desc.rcr_addr = rcr_p->rcr_addr; 582 583 cfgb_p = &(rcr_p->rcr_cfgb); 584 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 585 /* For now, disable this timeout in a guest domain. */ 586 if (isLDOMguest(nxgep)) { 587 rdc_desc.rcr_timeout = 0; 588 rdc_desc.rcr_timeout_enable = 0; 589 } else { 590 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 591 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 592 } 593 594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 595 "rbr_len qlen %d pagesize code %d rcr_len %d", 596 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 598 "size 0 %d size 1 %d size 2 %d", 599 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 600 rbr_p->npi_pkt_buf_size2)); 601 602 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) 603 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, 604 &rdc_desc, B_TRUE); 605 else 606 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, 607 &rdc_desc, B_FALSE); 608 if (rs != NPI_SUCCESS) { 609 return (NXGE_ERROR | rs); 610 } 611 612 /* 613 * Enable the timeout and threshold. 614 */ 615 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 616 rdc_desc.rcr_threshold); 617 if (rs != NPI_SUCCESS) { 618 return (NXGE_ERROR | rs); 619 } 620 621 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 622 rdc_desc.rcr_timeout); 623 if (rs != NPI_SUCCESS) { 624 return (NXGE_ERROR | rs); 625 } 626 627 if (!isLDOMguest(nxgep)) { 628 /* Enable the DMA */ 629 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 630 if (rs != NPI_SUCCESS) { 631 return (NXGE_ERROR | rs); 632 } 633 } 634 635 /* Kick the DMA engine. 
*/ 636 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 637 638 if (!isLDOMguest(nxgep)) { 639 /* Clear the rbr empty bit */ 640 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 641 } 642 643 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 644 645 return (NXGE_OK); 646 } 647 648 nxge_status_t 649 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 650 { 651 npi_handle_t handle; 652 npi_status_t rs = NPI_SUCCESS; 653 654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 655 handle = NXGE_DEV_NPI_HANDLE(nxgep); 656 657 /* disable the DMA */ 658 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 659 if (rs != NPI_SUCCESS) { 660 NXGE_DEBUG_MSG((nxgep, RX_CTL, 661 "<== nxge_disable_rxdma_channel:failed (0x%x)", 662 rs)); 663 return (NXGE_ERROR | rs); 664 } 665 666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 667 return (NXGE_OK); 668 } 669 670 nxge_status_t 671 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 672 { 673 npi_handle_t handle; 674 nxge_status_t status = NXGE_OK; 675 676 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 677 "<== nxge_init_rxdma_channel_rcrflush")); 678 679 handle = NXGE_DEV_NPI_HANDLE(nxgep); 680 npi_rxdma_rdc_rcr_flush(handle, channel); 681 682 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 683 "<== nxge_init_rxdma_channel_rcrflsh")); 684 return (status); 685 686 } 687 688 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 689 690 #define TO_LEFT -1 691 #define TO_RIGHT 1 692 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 693 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 694 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 695 #define NO_HINT 0xffffffff 696 697 /*ARGSUSED*/ 698 nxge_status_t 699 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 700 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 701 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 702 { 703 int bufsize; 704 uint64_t pktbuf_pp; 705 uint64_t dvma_addr; 706 rxring_info_t *ring_info; 707 int base_side, end_side; 708 int r_index, l_index, anchor_index; 709 int found, search_done; 710 uint32_t offset, chunk_size, block_size, page_size_mask; 711 uint32_t chunk_index, block_index, total_index; 712 int max_iterations, iteration; 713 rxbuf_index_info_t *bufinfo; 714 715 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 716 717 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 718 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 719 pkt_buf_addr_pp, 720 pktbufsz_type)); 721 #if defined(__i386) 722 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 723 #else 724 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 725 #endif 726 727 switch (pktbufsz_type) { 728 case 0: 729 bufsize = rbr_p->pkt_buf_size0; 730 break; 731 case 1: 732 bufsize = rbr_p->pkt_buf_size1; 733 break; 734 case 2: 735 bufsize = rbr_p->pkt_buf_size2; 736 break; 737 case RCR_SINGLE_BLOCK: 738 bufsize = 0; 739 anchor_index = 0; 740 break; 741 default: 742 return (NXGE_ERROR); 743 } 744 745 if (rbr_p->num_blocks == 1) { 746 anchor_index = 0; 747 ring_info = rbr_p->ring_info; 748 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 749 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 750 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 751 "buf_pp $%p btype %d anchor_index %d " 752 "bufinfo $%p", 753 pkt_buf_addr_pp, 754 pktbufsz_type, 755 anchor_index, 756 bufinfo)); 757 758 goto found_index; 759 } 760 761 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 762 "==> nxge_rxbuf_pp_to_vp: " 763 "buf_pp $%p btype %d anchor_index %d", 764 pkt_buf_addr_pp, 765 pktbufsz_type, 766 anchor_index)); 767 768 ring_info = rbr_p->ring_info; 769 found = B_FALSE; 
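	/*
	 * ring_info->buffer is the rxbuf_index_info_t array that
	 * nxge_rxbuf_index_info_init() sorted by DVMA address; it is
	 * searched below, first through the per-size hint and then,
	 * if the hint misses, with a binary search.
	 */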
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would be a single block being used for the same
	 * buffer sizes at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, then reset the hint for the size.
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * The search uses a binary tree search
		 * algorithm. It assumes that the information is
		 * already sorted in increasing order
		 * info[0] < info[1] < info[2] ....
< info[n-1] 820 * where n is the size of the information array 821 */ 822 r_index = rbr_p->num_blocks - 1; 823 l_index = 0; 824 search_done = B_FALSE; 825 anchor_index = MID_INDEX(r_index, l_index); 826 while (search_done == B_FALSE) { 827 if ((r_index == l_index) || 828 (iteration >= max_iterations)) 829 search_done = B_TRUE; 830 end_side = TO_RIGHT; /* to the right */ 831 base_side = TO_LEFT; /* to the left */ 832 /* read the DVMA address information and sort it */ 833 dvma_addr = bufinfo[anchor_index].dvma_addr; 834 chunk_size = bufinfo[anchor_index].buf_size; 835 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 836 "==> nxge_rxbuf_pp_to_vp: (searching)" 837 "buf_pp $%p btype %d " 838 "anchor_index %d chunk_size %d dvmaaddr $%p", 839 pkt_buf_addr_pp, 840 pktbufsz_type, 841 anchor_index, 842 chunk_size, 843 dvma_addr)); 844 845 if (pktbuf_pp >= dvma_addr) 846 base_side = TO_RIGHT; /* to the right */ 847 if (pktbuf_pp < (dvma_addr + chunk_size)) 848 end_side = TO_LEFT; /* to the left */ 849 850 switch (base_side + end_side) { 851 case IN_MIDDLE: 852 /* found */ 853 found = B_TRUE; 854 search_done = B_TRUE; 855 if ((pktbuf_pp + bufsize) < 856 (dvma_addr + chunk_size)) 857 ring_info->hint[pktbufsz_type] = 858 bufinfo[anchor_index].buf_index; 859 break; 860 case BOTH_RIGHT: 861 /* not found: go to the right */ 862 l_index = anchor_index + 1; 863 anchor_index = MID_INDEX(r_index, l_index); 864 break; 865 866 case BOTH_LEFT: 867 /* not found: go to the left */ 868 r_index = anchor_index - 1; 869 anchor_index = MID_INDEX(r_index, l_index); 870 break; 871 default: /* should not come here */ 872 return (NXGE_ERROR); 873 } 874 iteration++; 875 } 876 877 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 878 "==> nxge_rxbuf_pp_to_vp: (search done)" 879 "buf_pp $%p btype %d anchor_index %d", 880 pkt_buf_addr_pp, 881 pktbufsz_type, 882 anchor_index)); 883 } 884 885 if (found == B_FALSE) { 886 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 887 "==> nxge_rxbuf_pp_to_vp: (search failed)" 888 "buf_pp $%p btype %d anchor_index %d", 889 pkt_buf_addr_pp, 890 pktbufsz_type, 891 anchor_index)); 892 return (NXGE_ERROR); 893 } 894 895 found_index: 896 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 897 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 898 "buf_pp $%p btype %d bufsize %d anchor_index %d", 899 pkt_buf_addr_pp, 900 pktbufsz_type, 901 bufsize, 902 anchor_index)); 903 904 /* index of the first block in this chunk */ 905 chunk_index = bufinfo[anchor_index].start_index; 906 dvma_addr = bufinfo[anchor_index].dvma_addr; 907 page_size_mask = ring_info->block_size_mask; 908 909 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 910 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 911 "buf_pp $%p btype %d bufsize %d " 912 "anchor_index %d chunk_index %d dvma $%p", 913 pkt_buf_addr_pp, 914 pktbufsz_type, 915 bufsize, 916 anchor_index, 917 chunk_index, 918 dvma_addr)); 919 920 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 921 block_size = rbr_p->block_size; /* System block(page) size */ 922 923 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 924 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 925 "buf_pp $%p btype %d bufsize %d " 926 "anchor_index %d chunk_index %d dvma $%p " 927 "offset %d block_size %d", 928 pkt_buf_addr_pp, 929 pktbufsz_type, 930 bufsize, 931 anchor_index, 932 chunk_index, 933 dvma_addr, 934 offset, 935 block_size)); 936 937 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 938 939 block_index = (offset / block_size); /* index within chunk */ 940 total_index = chunk_index + block_index; 941 942 943 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 944 "==> nxge_rxbuf_pp_to_vp: " 945 
"total_index %d dvma_addr $%p " 946 "offset %d block_size %d " 947 "block_index %d ", 948 total_index, dvma_addr, 949 offset, block_size, 950 block_index)); 951 #if defined(__i386) 952 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 953 (uint32_t)offset); 954 #else 955 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 956 (uint64_t)offset); 957 #endif 958 959 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 960 "==> nxge_rxbuf_pp_to_vp: " 961 "total_index %d dvma_addr $%p " 962 "offset %d block_size %d " 963 "block_index %d " 964 "*pkt_buf_addr_p $%p", 965 total_index, dvma_addr, 966 offset, block_size, 967 block_index, 968 *pkt_buf_addr_p)); 969 970 971 *msg_index = total_index; 972 *bufoffset = (offset & page_size_mask); 973 974 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 975 "==> nxge_rxbuf_pp_to_vp: get msg index: " 976 "msg_index %d bufoffset_index %d", 977 *msg_index, 978 *bufoffset)); 979 980 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 981 982 return (NXGE_OK); 983 } 984 985 /* 986 * used by quick sort (qsort) function 987 * to perform comparison 988 */ 989 static int 990 nxge_sort_compare(const void *p1, const void *p2) 991 { 992 993 rxbuf_index_info_t *a, *b; 994 995 a = (rxbuf_index_info_t *)p1; 996 b = (rxbuf_index_info_t *)p2; 997 998 if (a->dvma_addr > b->dvma_addr) 999 return (1); 1000 if (a->dvma_addr < b->dvma_addr) 1001 return (-1); 1002 return (0); 1003 } 1004 1005 1006 1007 /* 1008 * grabbed this sort implementation from common/syscall/avl.c 1009 * 1010 */ 1011 /* 1012 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 1013 * v = Ptr to array/vector of objs 1014 * n = # objs in the array 1015 * s = size of each obj (must be multiples of a word size) 1016 * f = ptr to function to compare two objs 1017 * returns (-1 = less than, 0 = equal, 1 = greater than 1018 */ 1019 void 1020 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1021 { 1022 int g, i, j, ii; 1023 unsigned int *p1, *p2; 1024 unsigned int tmp; 1025 1026 /* No work to do */ 1027 if (v == NULL || n <= 1) 1028 return; 1029 /* Sanity check on arguments */ 1030 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1031 ASSERT(s > 0); 1032 1033 for (g = n / 2; g > 0; g /= 2) { 1034 for (i = g; i < n; i++) { 1035 for (j = i - g; j >= 0 && 1036 (*f)(v + j * s, v + (j + g) * s) == 1; 1037 j -= g) { 1038 p1 = (unsigned *)(v + j * s); 1039 p2 = (unsigned *)(v + (j + g) * s); 1040 for (ii = 0; ii < s / 4; ii++) { 1041 tmp = *p1; 1042 *p1++ = *p2; 1043 *p2++ = tmp; 1044 } 1045 } 1046 } 1047 } 1048 } 1049 1050 /* 1051 * Initialize data structures required for rxdma 1052 * buffer dvma->vmem address lookup 1053 */ 1054 /*ARGSUSED*/ 1055 static nxge_status_t 1056 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1057 { 1058 1059 int index; 1060 rxring_info_t *ring_info; 1061 int max_iteration = 0, max_index = 0; 1062 1063 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1064 1065 ring_info = rbrp->ring_info; 1066 ring_info->hint[0] = NO_HINT; 1067 ring_info->hint[1] = NO_HINT; 1068 ring_info->hint[2] = NO_HINT; 1069 max_index = rbrp->num_blocks; 1070 1071 /* read the DVMA address information and sort it */ 1072 /* do init of the information array */ 1073 1074 1075 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1076 " nxge_rxbuf_index_info_init Sort ptrs")); 1077 1078 /* sort the array */ 1079 nxge_ksort((void *)ring_info->buffer, max_index, 1080 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1081 1082 1083 1084 for (index = 0; index < max_index; index++) { 1085 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1086 " nxge_rxbuf_index_info_init: sorted chunk %d " 1087 " ioaddr $%p kaddr $%p size %x", 1088 index, ring_info->buffer[index].dvma_addr, 1089 ring_info->buffer[index].kaddr, 1090 ring_info->buffer[index].buf_size)); 1091 } 1092 1093 max_iteration = 0; 1094 while (max_index >= (1ULL << max_iteration)) 1095 max_iteration++; 1096 ring_info->max_iterations = max_iteration + 1; 1097 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1098 " nxge_rxbuf_index_info_init Find max iter %d", 1099 ring_info->max_iterations)); 1100 1101 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1102 return (NXGE_OK); 1103 } 1104 1105 /* ARGSUSED */ 1106 void 1107 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1108 { 1109 #ifdef NXGE_DEBUG 1110 1111 uint32_t bptr; 1112 uint64_t pp; 1113 1114 bptr = entry_p->bits.hdw.pkt_buf_addr; 1115 1116 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1117 "\trcr entry $%p " 1118 "\trcr entry 0x%0llx " 1119 "\trcr entry 0x%08x " 1120 "\trcr entry 0x%08x " 1121 "\tvalue 0x%0llx\n" 1122 "\tmulti = %d\n" 1123 "\tpkt_type = 0x%x\n" 1124 "\tzero_copy = %d\n" 1125 "\tnoport = %d\n" 1126 "\tpromis = %d\n" 1127 "\terror = 0x%04x\n" 1128 "\tdcf_err = 0x%01x\n" 1129 "\tl2_len = %d\n" 1130 "\tpktbufsize = %d\n" 1131 "\tpkt_buf_addr = $%p\n" 1132 "\tpkt_buf_addr (<< 6) = $%p\n", 1133 entry_p, 1134 *(int64_t *)entry_p, 1135 *(int32_t *)entry_p, 1136 *(int32_t *)((char *)entry_p + 32), 1137 entry_p->value, 1138 entry_p->bits.hdw.multi, 1139 entry_p->bits.hdw.pkt_type, 1140 entry_p->bits.hdw.zero_copy, 1141 entry_p->bits.hdw.noport, 1142 entry_p->bits.hdw.promis, 1143 entry_p->bits.hdw.error, 1144 entry_p->bits.hdw.dcf_err, 1145 entry_p->bits.hdw.l2_len, 1146 entry_p->bits.hdw.pktbufsz, 1147 bptr, 1148 entry_p->bits.ldw.pkt_buf_addr)); 1149 1150 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1151 RCR_PKT_BUF_ADDR_SHIFT; 1152 1153 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1154 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1155 #endif 1156 } 1157 1158 void 1159 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1160 { 1161 npi_handle_t handle; 1162 rbr_stat_t rbr_stat; 1163 addr44_t hd_addr; 1164 addr44_t tail_addr; 1165 uint16_t qlen; 1166 1167 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1168 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1169 1170 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1171 1172 /* RBR head */ 1173 hd_addr.addr = 0; 1174 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1175 #if defined(__i386) 1176 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1177 (void *)(uint32_t)hd_addr.addr); 1178 #else 1179 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1180 (void *)hd_addr.addr); 1181 #endif 1182 1183 /* RBR stats */ 1184 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1185 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1186 1187 /* RCR tail */ 1188 tail_addr.addr = 0; 1189 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1190 #if defined(__i386) 1191 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1192 (void *)(uint32_t)tail_addr.addr); 1193 #else 1194 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1195 (void *)tail_addr.addr); 1196 #endif 1197 1198 /* RCR qlen */ 1199 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1200 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1201 1202 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1203 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1204 } 1205 1206 nxge_status_t 1207 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1208 { 1209 nxge_grp_set_t 
*set = &nxgep->rx_set; 1210 nxge_status_t status; 1211 npi_status_t rs; 1212 int rdc; 1213 1214 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1215 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1216 1217 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1218 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1219 "<== nxge_rxdma_mode: not initialized")); 1220 return (NXGE_ERROR); 1221 } 1222 1223 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1224 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1225 "<== nxge_tx_port_fatal_err_recover: " 1226 "NULL ring pointer(s)")); 1227 return (NXGE_ERROR); 1228 } 1229 1230 if (set->owned.map == 0) { 1231 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1232 "nxge_rxdma_regs_dump_channels: no channels")); 1233 return (0); 1234 } 1235 1236 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1237 if ((1 << rdc) & set->owned.map) { 1238 rx_rbr_ring_t *ring = 1239 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1240 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1241 if (ring) { 1242 if (enable) { 1243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1244 "==> nxge_rxdma_hw_mode: " 1245 "channel %d (enable)", rdc)); 1246 rs = npi_rxdma_cfg_rdc_enable 1247 (handle, rdc); 1248 } else { 1249 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1250 "==> nxge_rxdma_hw_mode: " 1251 "channel %d disable)", rdc)); 1252 rs = npi_rxdma_cfg_rdc_disable 1253 (handle, rdc); 1254 } 1255 } 1256 } 1257 } 1258 1259 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1260 1261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1262 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1263 1264 return (status); 1265 } 1266 1267 void 1268 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1269 { 1270 npi_handle_t handle; 1271 1272 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1273 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1274 1275 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1276 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1277 1278 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1279 } 1280 1281 void 1282 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1283 { 1284 npi_handle_t handle; 1285 1286 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1287 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1288 1289 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1290 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1291 1292 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1293 } 1294 1295 void 1296 nxge_hw_start_rx(p_nxge_t nxgep) 1297 { 1298 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1299 1300 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1301 (void) nxge_rx_mac_enable(nxgep); 1302 1303 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1304 } 1305 1306 /*ARGSUSED*/ 1307 void 1308 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1309 { 1310 nxge_grp_set_t *set = &nxgep->rx_set; 1311 int rdc; 1312 1313 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1314 1315 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1316 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1317 "<== nxge_tx_port_fatal_err_recover: " 1318 "NULL ring pointer(s)")); 1319 return; 1320 } 1321 1322 if (set->owned.map == 0) { 1323 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1324 "nxge_rxdma_regs_dump_channels: no channels")); 1325 return; 1326 } 1327 1328 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1329 if ((1 << rdc) & set->owned.map) { 1330 rx_rbr_ring_t *ring = 1331 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1332 if (ring) { 1333 nxge_rxdma_hw_stop(nxgep, rdc); 1334 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1335 "==> nxge_fixup_rxdma_rings: " 1336 "channel %d ring $%px", 1337 rdc, 
ring)); 1338 (void) nxge_rxdma_fix_channel(nxgep, rdc); 1339 } 1340 } 1341 } 1342 1343 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1344 } 1345 1346 void 1347 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1348 { 1349 int ndmas; 1350 p_rx_rbr_rings_t rx_rbr_rings; 1351 p_rx_rbr_ring_t *rbr_rings; 1352 p_rx_rcr_rings_t rx_rcr_rings; 1353 p_rx_rcr_ring_t *rcr_rings; 1354 p_rx_mbox_areas_t rx_mbox_areas_p; 1355 p_rx_mbox_t *rx_mbox_p; 1356 p_nxge_dma_pool_t dma_buf_poolp; 1357 p_nxge_dma_pool_t dma_cntl_poolp; 1358 p_rx_rbr_ring_t rbrp; 1359 p_rx_rcr_ring_t rcrp; 1360 p_rx_mbox_t mboxp; 1361 p_nxge_dma_common_t dmap; 1362 nxge_status_t status = NXGE_OK; 1363 1364 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1365 1366 (void) nxge_rxdma_stop_channel(nxgep, channel); 1367 1368 dma_buf_poolp = nxgep->rx_buf_pool_p; 1369 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1370 1371 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1372 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1373 "<== nxge_rxdma_fix_channel: buf not allocated")); 1374 return; 1375 } 1376 1377 ndmas = dma_buf_poolp->ndmas; 1378 if (!ndmas) { 1379 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1380 "<== nxge_rxdma_fix_channel: no dma allocated")); 1381 return; 1382 } 1383 1384 rx_rbr_rings = nxgep->rx_rbr_rings; 1385 rx_rcr_rings = nxgep->rx_rcr_rings; 1386 rbr_rings = rx_rbr_rings->rbr_rings; 1387 rcr_rings = rx_rcr_rings->rcr_rings; 1388 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1389 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1390 1391 /* Reinitialize the receive block and completion rings */ 1392 rbrp = (p_rx_rbr_ring_t)rbr_rings[channel], 1393 rcrp = (p_rx_rcr_ring_t)rcr_rings[channel], 1394 mboxp = (p_rx_mbox_t)rx_mbox_p[channel]; 1395 1396 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1397 rbrp->rbr_rd_index = 0; 1398 rcrp->comp_rd_index = 0; 1399 rcrp->comp_wt_index = 0; 1400 1401 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1402 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1403 1404 status = nxge_rxdma_start_channel(nxgep, channel, 1405 rbrp, rcrp, mboxp); 1406 if (status != NXGE_OK) { 1407 goto nxge_rxdma_fix_channel_fail; 1408 } 1409 1410 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1411 "<== nxge_rxdma_fix_channel: success (0x%08x)", status)); 1412 return; 1413 1414 nxge_rxdma_fix_channel_fail: 1415 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1416 "<== nxge_rxdma_fix_channel: failed (0x%08x)", status)); 1417 } 1418 1419 p_rx_rbr_ring_t 1420 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1421 { 1422 nxge_grp_set_t *set = &nxgep->rx_set; 1423 nxge_channel_t rdc; 1424 1425 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1426 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1427 1428 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1429 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1430 "<== nxge_rxdma_get_rbr_ring: " 1431 "NULL ring pointer(s)")); 1432 return (NULL); 1433 } 1434 1435 if (set->owned.map == 0) { 1436 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1437 "<== nxge_rxdma_get_rbr_ring: no channels")); 1438 return (NULL); 1439 } 1440 1441 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1442 if ((1 << rdc) & set->owned.map) { 1443 rx_rbr_ring_t *ring = 1444 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1445 if (ring) { 1446 if (channel == ring->rdc) { 1447 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1448 "==> nxge_rxdma_get_rbr_ring: " 1449 "channel %d ring $%p", rdc, ring)); 1450 return (ring); 1451 } 1452 } 1453 } 1454 } 1455 1456 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1457 "<== nxge_rxdma_get_rbr_ring: not found")); 1458 1459 return (NULL); 1460 } 1461 
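/*
 * nxge_rxdma_get_rcr_ring
 *
 *	Return the RCR (completion) ring for a channel owned by this
 *	instance, or NULL if the channel is not owned or the ring
 *	pointers have not been set up.
 */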
1462 p_rx_rcr_ring_t 1463 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1464 { 1465 nxge_grp_set_t *set = &nxgep->rx_set; 1466 nxge_channel_t rdc; 1467 1468 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1469 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1470 1471 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1472 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1473 "<== nxge_rxdma_get_rcr_ring: " 1474 "NULL ring pointer(s)")); 1475 return (NULL); 1476 } 1477 1478 if (set->owned.map == 0) { 1479 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1480 "<== nxge_rxdma_get_rbr_ring: no channels")); 1481 return (NULL); 1482 } 1483 1484 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1485 if ((1 << rdc) & set->owned.map) { 1486 rx_rcr_ring_t *ring = 1487 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1488 if (ring) { 1489 if (channel == ring->rdc) { 1490 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1491 "==> nxge_rxdma_get_rcr_ring: " 1492 "channel %d ring $%p", rdc, ring)); 1493 return (ring); 1494 } 1495 } 1496 } 1497 } 1498 1499 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1500 "<== nxge_rxdma_get_rcr_ring: not found")); 1501 1502 return (NULL); 1503 } 1504 1505 /* 1506 * Static functions start here. 1507 */ 1508 static p_rx_msg_t 1509 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1510 { 1511 p_rx_msg_t nxge_mp = NULL; 1512 p_nxge_dma_common_t dmamsg_p; 1513 uchar_t *buffer; 1514 1515 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1516 if (nxge_mp == NULL) { 1517 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1518 "Allocation of a rx msg failed.")); 1519 goto nxge_allocb_exit; 1520 } 1521 1522 nxge_mp->use_buf_pool = B_FALSE; 1523 if (dmabuf_p) { 1524 nxge_mp->use_buf_pool = B_TRUE; 1525 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1526 *dmamsg_p = *dmabuf_p; 1527 dmamsg_p->nblocks = 1; 1528 dmamsg_p->block_size = size; 1529 dmamsg_p->alength = size; 1530 buffer = (uchar_t *)dmabuf_p->kaddrp; 1531 1532 dmabuf_p->kaddrp = (void *) 1533 ((char *)dmabuf_p->kaddrp + size); 1534 dmabuf_p->ioaddr_pp = (void *) 1535 ((char *)dmabuf_p->ioaddr_pp + size); 1536 dmabuf_p->alength -= size; 1537 dmabuf_p->offset += size; 1538 dmabuf_p->dma_cookie.dmac_laddress += size; 1539 dmabuf_p->dma_cookie.dmac_size -= size; 1540 1541 } else { 1542 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1543 if (buffer == NULL) { 1544 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1545 "Allocation of a receive page failed.")); 1546 goto nxge_allocb_fail1; 1547 } 1548 } 1549 1550 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1551 if (nxge_mp->rx_mblk_p == NULL) { 1552 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1553 goto nxge_allocb_fail2; 1554 } 1555 1556 nxge_mp->buffer = buffer; 1557 nxge_mp->block_size = size; 1558 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1559 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1560 nxge_mp->ref_cnt = 1; 1561 nxge_mp->free = B_TRUE; 1562 nxge_mp->rx_use_bcopy = B_FALSE; 1563 1564 atomic_inc_32(&nxge_mblks_pending); 1565 1566 goto nxge_allocb_exit; 1567 1568 nxge_allocb_fail2: 1569 if (!nxge_mp->use_buf_pool) { 1570 KMEM_FREE(buffer, size); 1571 } 1572 1573 nxge_allocb_fail1: 1574 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1575 nxge_mp = NULL; 1576 1577 nxge_allocb_exit: 1578 return (nxge_mp); 1579 } 1580 1581 p_mblk_t 1582 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1583 { 1584 p_mblk_t mp; 1585 1586 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1587 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1588 "offset = 0x%08X " 1589 "size = 0x%08X", 1590 nxge_mp, offset, size)); 1591 
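	/*
	 * Loan the existing receive buffer up the stack (zero-copy):
	 * desballoc() wraps the buffer at the given offset and arranges
	 * for nxge_freeb() to run when the mblk is freed; the reference
	 * count is bumped below to match.
	 */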
1592 mp = desballoc(&nxge_mp->buffer[offset], size, 1593 0, &nxge_mp->freeb); 1594 if (mp == NULL) { 1595 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1596 goto nxge_dupb_exit; 1597 } 1598 atomic_inc_32(&nxge_mp->ref_cnt); 1599 1600 1601 nxge_dupb_exit: 1602 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1603 nxge_mp)); 1604 return (mp); 1605 } 1606 1607 p_mblk_t 1608 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1609 { 1610 p_mblk_t mp; 1611 uchar_t *dp; 1612 1613 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1614 if (mp == NULL) { 1615 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1616 goto nxge_dupb_bcopy_exit; 1617 } 1618 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1619 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1620 mp->b_wptr = dp + size; 1621 1622 nxge_dupb_bcopy_exit: 1623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1624 nxge_mp)); 1625 return (mp); 1626 } 1627 1628 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1629 p_rx_msg_t rx_msg_p); 1630 1631 void 1632 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1633 { 1634 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1635 1636 /* Reuse this buffer */ 1637 rx_msg_p->free = B_FALSE; 1638 rx_msg_p->cur_usage_cnt = 0; 1639 rx_msg_p->max_usage_cnt = 0; 1640 rx_msg_p->pkt_buf_size = 0; 1641 1642 if (rx_rbr_p->rbr_use_bcopy) { 1643 rx_msg_p->rx_use_bcopy = B_FALSE; 1644 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1645 } 1646 1647 /* 1648 * Get the rbr header pointer and its offset index. 1649 */ 1650 MUTEX_ENTER(&rx_rbr_p->post_lock); 1651 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1652 rx_rbr_p->rbr_wrap_mask); 1653 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1654 MUTEX_EXIT(&rx_rbr_p->post_lock); 1655 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1656 rx_rbr_p->rdc, 1); 1657 1658 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1659 "<== nxge_post_page (channel %d post_next_index %d)", 1660 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1661 1662 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1663 } 1664 1665 void 1666 nxge_freeb(p_rx_msg_t rx_msg_p) 1667 { 1668 size_t size; 1669 uchar_t *buffer = NULL; 1670 int ref_cnt; 1671 boolean_t free_state = B_FALSE; 1672 1673 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1674 1675 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1676 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1677 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1678 rx_msg_p, nxge_mblks_pending)); 1679 1680 /* 1681 * First we need to get the free state, then 1682 * atomic decrement the reference count to prevent 1683 * the race condition with the interrupt thread that 1684 * is processing a loaned up buffer block. 1685 */ 1686 free_state = rx_msg_p->free; 1687 ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt); 1688 if (!ref_cnt) { 1689 atomic_dec_32(&nxge_mblks_pending); 1690 buffer = rx_msg_p->buffer; 1691 size = rx_msg_p->block_size; 1692 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1693 "will free: rx_msg_p = $%p (block pending %d)", 1694 rx_msg_p, nxge_mblks_pending)); 1695 1696 if (!rx_msg_p->use_buf_pool) { 1697 KMEM_FREE(buffer, size); 1698 } 1699 1700 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1701 1702 if (ring) { 1703 /* 1704 * Decrement the receive buffer ring's reference 1705 * count, too. 1706 */ 1707 atomic_dec_32(&ring->rbr_ref_cnt); 1708 1709 /* 1710 * Free the receive buffer ring, if 1711 * 1. all the receive buffers have been freed 1712 * 2. 
and we are in the proper state (that is, 1713 * we are not UNMAPPING). 1714 */ 1715 if (ring->rbr_ref_cnt == 0 && 1716 ring->rbr_state == RBR_UNMAPPED) { 1717 /* 1718 * Free receive data buffers, 1719 * buffer index information 1720 * (rxring_info) and 1721 * the message block ring. 1722 */ 1723 NXGE_DEBUG_MSG((NULL, RX_CTL, 1724 "nxge_freeb:rx_msg_p = $%p " 1725 "(block pending %d) free buffers", 1726 rx_msg_p, nxge_mblks_pending)); 1727 nxge_rxdma_databuf_free(ring); 1728 if (ring->ring_info) { 1729 KMEM_FREE(ring->ring_info, 1730 sizeof (rxring_info_t)); 1731 } 1732 1733 if (ring->rx_msg_ring) { 1734 KMEM_FREE(ring->rx_msg_ring, 1735 ring->tnblocks * 1736 sizeof (p_rx_msg_t)); 1737 } 1738 KMEM_FREE(ring, sizeof (*ring)); 1739 } 1740 } 1741 return; 1742 } 1743 1744 /* 1745 * Repost buffer. 1746 */ 1747 if (free_state && (ref_cnt == 1) && ring) { 1748 NXGE_DEBUG_MSG((NULL, RX_CTL, 1749 "nxge_freeb: post page $%p:", rx_msg_p)); 1750 if (ring->rbr_state == RBR_POSTING) 1751 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1752 } 1753 1754 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1755 } 1756 1757 uint_t 1758 nxge_rx_intr(void *arg1, void *arg2) 1759 { 1760 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1761 p_nxge_t nxgep = (p_nxge_t)arg2; 1762 p_nxge_ldg_t ldgp; 1763 uint8_t channel; 1764 npi_handle_t handle; 1765 rx_dma_ctl_stat_t cs; 1766 p_rx_rcr_ring_t rcrp; 1767 mblk_t *mp = NULL; 1768 1769 if (ldvp == NULL) { 1770 NXGE_DEBUG_MSG((NULL, INT_CTL, 1771 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1772 nxgep, ldvp)); 1773 return (DDI_INTR_CLAIMED); 1774 } 1775 1776 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1777 nxgep = ldvp->nxgep; 1778 } 1779 1780 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1781 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1782 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1783 "<== nxge_rx_intr: interface not started or intialized")); 1784 return (DDI_INTR_CLAIMED); 1785 } 1786 1787 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1788 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1789 nxgep, ldvp)); 1790 1791 /* 1792 * Get the PIO handle. 1793 */ 1794 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1795 1796 /* 1797 * Get the ring to enable us to process packets. 1798 */ 1799 rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1800 1801 /* 1802 * The RCR ring lock must be held when packets 1803 * are being processed and the hardware registers are 1804 * being read or written to prevent race condition 1805 * among the interrupt thread, the polling thread 1806 * (will cause fatal errors such as rcrincon bit set) 1807 * and the setting of the poll_flag. 1808 */ 1809 MUTEX_ENTER(&rcrp->lock); 1810 1811 /* 1812 * Get the control and status for this channel. 1813 */ 1814 channel = ldvp->channel; 1815 ldgp = ldvp->ldgp; 1816 1817 if (!isLDOMguest(nxgep) && (!rcrp->started)) { 1818 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1819 "<== nxge_rx_intr: channel is not started")); 1820 1821 /* 1822 * We received an interrupt before the ring is started. 1823 */ 1824 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, 1825 &cs.value); 1826 cs.value &= RX_DMA_CTL_STAT_WR1C; 1827 cs.bits.hdw.mex = 1; 1828 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1829 cs.value); 1830 1831 /* 1832 * Rearm this logical group if this is a single device 1833 * group. 
1834 */ 1835 if (ldgp->nldvs == 1) { 1836 if (isLDOMguest(nxgep)) { 1837 nxge_hio_ldgimgn(nxgep, ldgp); 1838 } else { 1839 ldgimgm_t mgm; 1840 1841 mgm.value = 0; 1842 mgm.bits.ldw.arm = 1; 1843 mgm.bits.ldw.timer = ldgp->ldg_timer; 1844 1845 NXGE_REG_WR64(handle, 1846 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1847 mgm.value); 1848 } 1849 } 1850 MUTEX_EXIT(&rcrp->lock); 1851 return (DDI_INTR_CLAIMED); 1852 } 1853 1854 ASSERT(rcrp->ldgp == ldgp); 1855 ASSERT(rcrp->ldvp == ldvp); 1856 1857 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1858 1859 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1860 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1861 channel, 1862 cs.value, 1863 cs.bits.hdw.rcrto, 1864 cs.bits.hdw.rcrthres)); 1865 1866 if (!rcrp->poll_flag) { 1867 mp = nxge_rx_pkts(nxgep, rcrp, cs, -1); 1868 } 1869 1870 /* error events. */ 1871 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1872 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1873 } 1874 1875 /* 1876 * Enable the mailbox update interrupt if we want 1877 * to use mailbox. We probably don't need to use 1878 * mailbox as it only saves us one pio read. 1879 * Also write 1 to rcrthres and rcrto to clear 1880 * these two edge triggered bits. 1881 */ 1882 cs.value &= RX_DMA_CTL_STAT_WR1C; 1883 cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1; 1884 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1885 cs.value); 1886 1887 /* 1888 * If the polling mode is enabled, disable the interrupt. 1889 */ 1890 if (rcrp->poll_flag) { 1891 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1892 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1893 "(disabling interrupts)", channel, ldgp, ldvp)); 1894 1895 /* 1896 * Disarm this logical group if this is a single device 1897 * group. 1898 */ 1899 if (ldgp->nldvs == 1) { 1900 if (isLDOMguest(nxgep)) { 1901 ldgp->arm = B_FALSE; 1902 nxge_hio_ldgimgn(nxgep, ldgp); 1903 } else { 1904 ldgimgm_t mgm; 1905 mgm.value = 0; 1906 mgm.bits.ldw.arm = 0; 1907 NXGE_REG_WR64(handle, 1908 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1909 mgm.value); 1910 } 1911 } 1912 } else { 1913 /* 1914 * Rearm this logical group if this is a single device 1915 * group. 1916 */ 1917 if (ldgp->nldvs == 1) { 1918 if (isLDOMguest(nxgep)) { 1919 nxge_hio_ldgimgn(nxgep, ldgp); 1920 } else { 1921 ldgimgm_t mgm; 1922 1923 mgm.value = 0; 1924 mgm.bits.ldw.arm = 1; 1925 mgm.bits.ldw.timer = ldgp->ldg_timer; 1926 1927 NXGE_REG_WR64(handle, 1928 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1929 mgm.value); 1930 } 1931 } 1932 1933 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1934 "==> nxge_rx_intr: rdc %d ldgp $%p " 1935 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1936 } 1937 MUTEX_EXIT(&rcrp->lock); 1938 1939 if (mp != NULL) { 1940 mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp, 1941 rcrp->rcr_gen_num); 1942 } 1943 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 1944 return (DDI_INTR_CLAIMED); 1945 } 1946 1947 /* 1948 * This routine is the main packet receive processing function. 1949 * It gets the packet type, error code, and buffer related 1950 * information from the receive completion entry. 1951 * How many completion entries to process is based on the number of packets 1952 * queued by the hardware, a hardware maintained tail pointer 1953 * and a configurable receive packet count. 1954 * 1955 * A chain of message blocks will be created as result of processing 1956 * the completion entries. 
This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets that were removed from the hardware queue.
 *
 * The RCR ring lock is held when entering this function.
 */
static mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
    int bytes_to_pickup)
{
	npi_handle_t		handle;
	uint8_t			channel;
	uint32_t		comp_rd_index;
	p_rcr_entry_t		rcr_desc_rd_head_p;
	p_rcr_entry_t		rcr_desc_rd_head_pp;
	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
	uint16_t		qlen, nrcr_read, npkt_read;
	uint32_t		qlen_hw;
	boolean_t		multi;
	rcrcfig_b_t		rcr_cfg_b;
	int			totallen = 0;
#if defined(_BIG_ENDIAN)
	npi_status_t		rs = NPI_SUCCESS;
#endif

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
	    "channel %d", rcr_p->rdc));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = rcr_p->rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));


#if !defined(_BIG_ENDIAN)
	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
		    "channel %d, get qlen failed 0x%08x",
		    channel, rs));
		return (NULL);
	}
#endif
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));



	if (!qlen) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_pkts:rcr channel %d "
		    "qlen %d (no pkts)", channel, qlen));

		return (NULL);
	}

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued
	 * (A jumbo or multi packet will be counted as only one
	 * packet and it may take up more than one completion entry).
	 */
	qlen_hw = (qlen < nxge_max_rx_pkts) ?
	    qlen : nxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	while (qlen_hw) {

#ifdef NXGE_DEBUG
		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
2047 */ 2048 nxge_receive_packet(nxgep, 2049 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2050 2051 /* 2052 * message chaining modes 2053 */ 2054 if (nmp) { 2055 nmp->b_next = NULL; 2056 if (!multi && !mp_cont) { /* frame fits a partition */ 2057 *tail_mp = nmp; 2058 tail_mp = &nmp->b_next; 2059 totallen += MBLKL(nmp); 2060 nmp = NULL; 2061 } else if (multi && !mp_cont) { /* first segment */ 2062 *tail_mp = nmp; 2063 tail_mp = &nmp->b_cont; 2064 totallen += MBLKL(nmp); 2065 } else if (multi && mp_cont) { /* mid of multi segs */ 2066 *tail_mp = mp_cont; 2067 tail_mp = &mp_cont->b_cont; 2068 totallen += MBLKL(mp_cont); 2069 } else if (!multi && mp_cont) { /* last segment */ 2070 *tail_mp = mp_cont; 2071 tail_mp = &nmp->b_next; 2072 totallen += MBLKL(mp_cont); 2073 nmp = NULL; 2074 } 2075 } 2076 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2077 "==> nxge_rx_pkts: loop: rcr channel %d " 2078 "before updating: multi %d " 2079 "nrcr_read %d " 2080 "npk read %d " 2081 "head_pp $%p index %d ", 2082 channel, 2083 multi, 2084 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2085 comp_rd_index)); 2086 2087 if (!multi) { 2088 qlen_hw--; 2089 npkt_read++; 2090 } 2091 2092 /* 2093 * Update the next read entry. 2094 */ 2095 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2096 rcr_p->comp_wrap_mask); 2097 2098 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2099 rcr_p->rcr_desc_first_p, 2100 rcr_p->rcr_desc_last_p); 2101 2102 nrcr_read++; 2103 2104 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2105 "<== nxge_rx_pkts: (SAM, process one packet) " 2106 "nrcr_read %d", 2107 nrcr_read)); 2108 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2109 "==> nxge_rx_pkts: loop: rcr channel %d " 2110 "multi %d " 2111 "nrcr_read %d " 2112 "npk read %d " 2113 "head_pp $%p index %d ", 2114 channel, 2115 multi, 2116 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2117 comp_rd_index)); 2118 2119 if ((bytes_to_pickup != -1) && 2120 (totallen >= bytes_to_pickup)) { 2121 break; 2122 } 2123 } 2124 2125 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2126 rcr_p->comp_rd_index = comp_rd_index; 2127 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2128 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2129 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2130 2131 rcr_p->intr_timeout = (nxgep->intr_timeout < 2132 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2133 nxgep->intr_timeout; 2134 2135 rcr_p->intr_threshold = (nxgep->intr_threshold < 2136 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2137 nxgep->intr_threshold; 2138 2139 rcr_cfg_b.value = 0x0ULL; 2140 rcr_cfg_b.bits.ldw.entout = 1; 2141 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2142 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2143 2144 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2145 channel, rcr_cfg_b.value); 2146 } 2147 2148 cs.bits.ldw.pktread = npkt_read; 2149 cs.bits.ldw.ptrread = nrcr_read; 2150 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2151 channel, cs.value); 2152 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2153 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2154 "head_pp $%p index %016llx ", 2155 channel, 2156 rcr_p->rcr_desc_rd_head_pp, 2157 rcr_p->comp_rd_index)); 2158 /* 2159 * Update RCR buffer pointer read and number of packets 2160 * read. 
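 * (In outline: the pktread and ptrread fields written to
 * RX_DMA_CTL_STAT above report how many packets and how many
 * completion entries software has consumed, allowing the hardware
 * to decrement its queue length accordingly.)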
2161 */ 2162 2163 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2164 "channel %d", rcr_p->rdc)); 2165 2166 return (head_mp); 2167 } 2168 2169 void 2170 nxge_receive_packet(p_nxge_t nxgep, 2171 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2172 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2173 { 2174 p_mblk_t nmp = NULL; 2175 uint64_t multi; 2176 uint64_t dcf_err; 2177 uint8_t channel; 2178 2179 boolean_t first_entry = B_TRUE; 2180 boolean_t is_tcp_udp = B_FALSE; 2181 boolean_t buffer_free = B_FALSE; 2182 boolean_t error_send_up = B_FALSE; 2183 uint8_t error_type; 2184 uint16_t l2_len; 2185 uint16_t skip_len; 2186 uint8_t pktbufsz_type; 2187 uint64_t rcr_entry; 2188 uint64_t *pkt_buf_addr_pp; 2189 uint64_t *pkt_buf_addr_p; 2190 uint32_t buf_offset; 2191 uint32_t bsize; 2192 uint32_t error_disp_cnt; 2193 uint32_t msg_index; 2194 p_rx_rbr_ring_t rx_rbr_p; 2195 p_rx_msg_t *rx_msg_ring_p; 2196 p_rx_msg_t rx_msg_p; 2197 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2198 nxge_status_t status = NXGE_OK; 2199 boolean_t is_valid = B_FALSE; 2200 p_nxge_rx_ring_stats_t rdc_stats; 2201 uint32_t bytes_read; 2202 uint64_t pkt_type; 2203 uint64_t frag; 2204 boolean_t pkt_too_long_err = B_FALSE; 2205 #ifdef NXGE_DEBUG 2206 int dump_len; 2207 #endif 2208 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2209 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2210 2211 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2212 2213 multi = (rcr_entry & RCR_MULTI_MASK); 2214 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2215 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2216 2217 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2218 frag = (rcr_entry & RCR_FRAG_MASK); 2219 2220 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2221 2222 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2223 RCR_PKTBUFSZ_SHIFT); 2224 #if defined(__i386) 2225 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2226 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2227 #else 2228 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2229 RCR_PKT_BUF_ADDR_SHIFT); 2230 #endif 2231 2232 channel = rcr_p->rdc; 2233 2234 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2235 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2236 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2237 "error_type 0x%x pkt_type 0x%x " 2238 "pktbufsz_type %d ", 2239 rcr_desc_rd_head_p, 2240 rcr_entry, pkt_buf_addr_pp, l2_len, 2241 multi, 2242 error_type, 2243 pkt_type, 2244 pktbufsz_type)); 2245 2246 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2247 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2248 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2249 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2250 rcr_entry, pkt_buf_addr_pp, l2_len, 2251 multi, 2252 error_type, 2253 pkt_type)); 2254 2255 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2256 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2257 "full pkt_buf_addr_pp $%p l2_len %d", 2258 rcr_entry, pkt_buf_addr_pp, l2_len)); 2259 2260 /* get the stats ptr */ 2261 rdc_stats = rcr_p->rdc_stats; 2262 2263 if (!l2_len) { 2264 2265 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2266 "<== nxge_receive_packet: failed: l2 length is 0.")); 2267 return; 2268 } 2269 2270 /* 2271 * Software workaround for BMAC hardware limitation that allows 2272 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2273 * instead of 0x2400 for jumbo. 
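 * As a result, an oversized frame can reach this point instead of
 * being dropped by the MAC; any frame whose L2 length exceeds
 * mac.maxframesize is therefore flagged as pkt_too_long_err below.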
2274 */ 2275 if (l2_len > nxgep->mac.maxframesize) { 2276 pkt_too_long_err = B_TRUE; 2277 } 2278 2279 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2280 l2_len -= ETHERFCSL; 2281 2282 /* shift 6 bits to get the full io address */ 2283 #if defined(__i386) 2284 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2285 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2286 #else 2287 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2288 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2289 #endif 2290 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2291 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2292 "full pkt_buf_addr_pp $%p l2_len %d", 2293 rcr_entry, pkt_buf_addr_pp, l2_len)); 2294 2295 rx_rbr_p = rcr_p->rx_rbr_p; 2296 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2297 2298 if (first_entry) { 2299 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2300 RXDMA_HDR_SIZE_DEFAULT); 2301 2302 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2303 "==> nxge_receive_packet: first entry 0x%016llx " 2304 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2305 rcr_entry, pkt_buf_addr_pp, l2_len, 2306 hdr_size)); 2307 } 2308 2309 MUTEX_ENTER(&rx_rbr_p->lock); 2310 2311 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2312 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2313 "full pkt_buf_addr_pp $%p l2_len %d", 2314 rcr_entry, pkt_buf_addr_pp, l2_len)); 2315 2316 /* 2317 * Packet buffer address in the completion entry points 2318 * to the starting buffer address (offset 0). 2319 * Use the starting buffer address to locate the corresponding 2320 * kernel address. 2321 */ 2322 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2323 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2324 &buf_offset, 2325 &msg_index); 2326 2327 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2328 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2329 "full pkt_buf_addr_pp $%p l2_len %d", 2330 rcr_entry, pkt_buf_addr_pp, l2_len)); 2331 2332 if (status != NXGE_OK) { 2333 MUTEX_EXIT(&rx_rbr_p->lock); 2334 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2335 "<== nxge_receive_packet: found vaddr failed %d", 2336 status)); 2337 return; 2338 } 2339 2340 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2341 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2342 "full pkt_buf_addr_pp $%p l2_len %d", 2343 rcr_entry, pkt_buf_addr_pp, l2_len)); 2344 2345 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2346 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2347 "full pkt_buf_addr_pp $%p l2_len %d", 2348 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2349 2350 rx_msg_p = rx_msg_ring_p[msg_index]; 2351 2352 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2353 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2354 "full pkt_buf_addr_pp $%p l2_len %d", 2355 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2356 2357 switch (pktbufsz_type) { 2358 case RCR_PKTBUFSZ_0: 2359 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2360 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2361 "==> nxge_receive_packet: 0 buf %d", bsize)); 2362 break; 2363 case RCR_PKTBUFSZ_1: 2364 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2365 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2366 "==> nxge_receive_packet: 1 buf %d", bsize)); 2367 break; 2368 case RCR_PKTBUFSZ_2: 2369 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2370 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2371 "==> nxge_receive_packet: 2 buf %d", bsize)); 2372 break; 2373 case RCR_SINGLE_BLOCK: 2374 bsize = rx_msg_p->block_size; 2375 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2376 "==> nxge_receive_packet: single %d", bsize)); 2377 2378 break; 2379 default: 2380 MUTEX_EXIT(&rx_rbr_p->lock); 2381 return; 2382 } 2383 2384 switch (nxge_rdc_buf_offset) { 2385 case SW_OFFSET_NO_OFFSET: 2386 
sw_offset_bytes = 0; 2387 break; 2388 case SW_OFFSET_64: 2389 sw_offset_bytes = 64; 2390 break; 2391 case SW_OFFSET_128: 2392 sw_offset_bytes = 128; 2393 break; 2394 case SW_OFFSET_192: 2395 sw_offset_bytes = 192; 2396 break; 2397 case SW_OFFSET_256: 2398 sw_offset_bytes = 256; 2399 break; 2400 case SW_OFFSET_320: 2401 sw_offset_bytes = 320; 2402 break; 2403 case SW_OFFSET_384: 2404 sw_offset_bytes = 384; 2405 break; 2406 case SW_OFFSET_448: 2407 sw_offset_bytes = 448; 2408 break; 2409 default: 2410 sw_offset_bytes = 0; 2411 break; 2412 } 2413 2414 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2415 (buf_offset + sw_offset_bytes), 2416 (hdr_size + l2_len), 2417 DDI_DMA_SYNC_FORCPU); 2418 2419 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2420 "==> nxge_receive_packet: after first dump:usage count")); 2421 2422 if (rx_msg_p->cur_usage_cnt == 0) { 2423 if (rx_rbr_p->rbr_use_bcopy) { 2424 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2425 if (rx_rbr_p->rbr_consumed < 2426 rx_rbr_p->rbr_threshold_hi) { 2427 if (rx_rbr_p->rbr_threshold_lo == 0 || 2428 ((rx_rbr_p->rbr_consumed >= 2429 rx_rbr_p->rbr_threshold_lo) && 2430 (rx_rbr_p->rbr_bufsize_type >= 2431 pktbufsz_type))) { 2432 rx_msg_p->rx_use_bcopy = B_TRUE; 2433 } 2434 } else { 2435 rx_msg_p->rx_use_bcopy = B_TRUE; 2436 } 2437 } 2438 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2439 "==> nxge_receive_packet: buf %d (new block) ", 2440 bsize)); 2441 2442 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2443 rx_msg_p->pkt_buf_size = bsize; 2444 rx_msg_p->cur_usage_cnt = 1; 2445 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2446 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2447 "==> nxge_receive_packet: buf %d " 2448 "(single block) ", 2449 bsize)); 2450 /* 2451 * Buffer can be reused once the free function 2452 * is called. 2453 */ 2454 rx_msg_p->max_usage_cnt = 1; 2455 buffer_free = B_TRUE; 2456 } else { 2457 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2458 if (rx_msg_p->max_usage_cnt == 1) { 2459 buffer_free = B_TRUE; 2460 } 2461 } 2462 } else { 2463 rx_msg_p->cur_usage_cnt++; 2464 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2465 buffer_free = B_TRUE; 2466 } 2467 } 2468 2469 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2470 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2471 msg_index, l2_len, 2472 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2473 2474 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2475 rdc_stats->ierrors++; 2476 if (dcf_err) { 2477 rdc_stats->dcf_err++; 2478 #ifdef NXGE_DEBUG 2479 if (!rdc_stats->dcf_err) { 2480 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2481 "nxge_receive_packet: channel %d dcf_err rcr" 2482 " 0x%llx", channel, rcr_entry)); 2483 } 2484 #endif 2485 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0, 2486 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2487 } else if (pkt_too_long_err) { 2488 rdc_stats->pkt_too_long_err++; 2489 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2490 " channel %d packet length [%d] > " 2491 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2492 nxgep->mac.maxframesize)); 2493 } else { 2494 /* Update error stats */ 2495 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2496 rdc_stats->errlog.compl_err_type = error_type; 2497 2498 switch (error_type) { 2499 /* 2500 * Do not send FMA ereport for RCR_L2_ERROR and 2501 * RCR_L4_CSUM_ERROR because most likely they indicate 2502 * back pressure rather than HW failures. 
2503 */ 2504 case RCR_L2_ERROR: 2505 rdc_stats->l2_err++; 2506 if (rdc_stats->l2_err < 2507 error_disp_cnt) { 2508 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2509 " nxge_receive_packet:" 2510 " channel %d RCR L2_ERROR", 2511 channel)); 2512 } 2513 break; 2514 case RCR_L4_CSUM_ERROR: 2515 error_send_up = B_TRUE; 2516 rdc_stats->l4_cksum_err++; 2517 if (rdc_stats->l4_cksum_err < 2518 error_disp_cnt) { 2519 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2520 " nxge_receive_packet:" 2521 " channel %d" 2522 " RCR L4_CSUM_ERROR", channel)); 2523 } 2524 break; 2525 /* 2526 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2527 * RCR_ZCP_SOFT_ERROR because they reflect the same 2528 * FFLP and ZCP errors that have been reported by 2529 * nxge_fflp.c and nxge_zcp.c. 2530 */ 2531 case RCR_FFLP_SOFT_ERROR: 2532 error_send_up = B_TRUE; 2533 rdc_stats->fflp_soft_err++; 2534 if (rdc_stats->fflp_soft_err < 2535 error_disp_cnt) { 2536 NXGE_ERROR_MSG((nxgep, 2537 NXGE_ERR_CTL, 2538 " nxge_receive_packet:" 2539 " channel %d" 2540 " RCR FFLP_SOFT_ERROR", channel)); 2541 } 2542 break; 2543 case RCR_ZCP_SOFT_ERROR: 2544 error_send_up = B_TRUE; 2545 rdc_stats->zcp_soft_err++; 2546 if (rdc_stats->zcp_soft_err < 2547 error_disp_cnt) 2548 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2549 " nxge_receive_packet: Channel %d" 2550 " RCR ZCP_SOFT_ERROR", channel)); 2551 break; 2552 default: 2553 rdc_stats->rcr_unknown_err++; 2554 if (rdc_stats->rcr_unknown_err 2555 < error_disp_cnt) { 2556 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2557 " nxge_receive_packet: Channel %d" 2558 " RCR entry 0x%llx error 0x%x", 2559 channel, rcr_entry, error_type)); 2560 } 2561 break; 2562 } 2563 } 2564 2565 /* 2566 * Update and repost buffer block if max usage 2567 * count is reached. 2568 */ 2569 if (error_send_up == B_FALSE) { 2570 atomic_inc_32(&rx_msg_p->ref_cnt); 2571 if (buffer_free == B_TRUE) { 2572 rx_msg_p->free = B_TRUE; 2573 } 2574 2575 MUTEX_EXIT(&rx_rbr_p->lock); 2576 nxge_freeb(rx_msg_p); 2577 return; 2578 } 2579 } 2580 2581 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2582 "==> nxge_receive_packet: DMA sync second ")); 2583 2584 bytes_read = rcr_p->rcvd_pkt_bytes; 2585 skip_len = sw_offset_bytes + hdr_size; 2586 if (!rx_msg_p->rx_use_bcopy) { 2587 /* 2588 * For loaned-up buffers, the driver reference count 2589 * is incremented first and the free state is set afterwards. 2590 */ 2591 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2592 if (first_entry) { 2593 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2594 if (l2_len < bsize - skip_len) { 2595 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2596 } else { 2597 nmp->b_wptr = &nmp->b_rptr[bsize 2598 - skip_len]; 2599 } 2600 } else { 2601 if (l2_len - bytes_read < bsize) { 2602 nmp->b_wptr = 2603 &nmp->b_rptr[l2_len - bytes_read]; 2604 } else { 2605 nmp->b_wptr = &nmp->b_rptr[bsize]; 2606 } 2607 } 2608 } 2609 } else { 2610 if (first_entry) { 2611 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2612 l2_len < bsize - skip_len ? 2613 l2_len : bsize - skip_len); 2614 } else { 2615 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2616 l2_len - bytes_read < bsize ? 2617 l2_len - bytes_read : bsize); 2618 } 2619 } 2620 if (nmp != NULL) { 2621 if (first_entry) { 2622 /* 2623 * Jumbo packets may be received with more than one 2624 * buffer, increment ipackets for the first entry only. 2625 */ 2626 rdc_stats->ipackets++; 2627 2628 /* Update ibytes for kstat. */ 2629 rdc_stats->ibytes += skip_len 2630 + l2_len < bsize ? l2_len : bsize; 2631 /* 2632 * Update the number of bytes read so far for the 2633 * current frame.
2634 */ 2635 bytes_read = nmp->b_wptr - nmp->b_rptr; 2636 } else { 2637 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 2638 l2_len - bytes_read : bsize; 2639 bytes_read += nmp->b_wptr - nmp->b_rptr; 2640 } 2641 2642 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2643 "==> nxge_receive_packet after dupb: " 2644 "rbr consumed %d " 2645 "pktbufsz_type %d " 2646 "nmp $%p rptr $%p wptr $%p " 2647 "buf_offset %d bsize %d l2_len %d skip_len %d", 2648 rx_rbr_p->rbr_consumed, 2649 pktbufsz_type, 2650 nmp, nmp->b_rptr, nmp->b_wptr, 2651 buf_offset, bsize, l2_len, skip_len)); 2652 } else { 2653 cmn_err(CE_WARN, "!nxge_receive_packet: " 2654 "update stats (error)"); 2655 atomic_inc_32(&rx_msg_p->ref_cnt); 2656 if (buffer_free == B_TRUE) { 2657 rx_msg_p->free = B_TRUE; 2658 } 2659 MUTEX_EXIT(&rx_rbr_p->lock); 2660 nxge_freeb(rx_msg_p); 2661 return; 2662 } 2663 2664 if (buffer_free == B_TRUE) { 2665 rx_msg_p->free = B_TRUE; 2666 } 2667 2668 is_valid = (nmp != NULL); 2669 2670 rcr_p->rcvd_pkt_bytes = bytes_read; 2671 2672 MUTEX_EXIT(&rx_rbr_p->lock); 2673 2674 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2675 atomic_inc_32(&rx_msg_p->ref_cnt); 2676 nxge_freeb(rx_msg_p); 2677 } 2678 2679 if (is_valid) { 2680 nmp->b_cont = NULL; 2681 if (first_entry) { 2682 *mp = nmp; 2683 *mp_cont = NULL; 2684 } else { 2685 *mp_cont = nmp; 2686 } 2687 } 2688 2689 /* 2690 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2691 * If a packet is not fragmented and no error bit is set, then 2692 * L4 checksum is OK. 2693 */ 2694 2695 if (is_valid && !multi) { 2696 /* 2697 * If the checksum flag nxge_cksum_offload 2698 * is 1, TCP and UDP packets can be sent 2699 * up with good checksum. If the checksum flag 2700 * is set to 0, checksum reporting will apply to 2701 * TCP packets only (workaround for a hardware bug). 2702 * If the checksum flag nxge_cksum_offload is 2703 * greater than 1, hardware checksum results are 2704 * not reported for either TCP or UDP packets. 2705 */ 2706 if (nxge_cksum_offload == 1) { 2707 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2708 pkt_type == RCR_PKT_IS_UDP) ? 2709 B_TRUE: B_FALSE); 2710 } else if (!nxge_cksum_offload) { 2711 /* TCP checksum only. */ 2712 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2713 B_TRUE: B_FALSE); 2714 } 2715 2716 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2717 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2718 is_valid, multi, is_tcp_udp, frag, error_type)); 2719 2720 if (is_tcp_udp && !frag && !error_type) { 2721 mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK); 2722 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2723 "==> nxge_receive_packet: Full tcp/udp cksum " 2724 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2725 "error %d", 2726 is_valid, multi, is_tcp_udp, frag, error_type)); 2727 } 2728 } 2729 2730 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2731 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2732 2733 *multi_p = (multi == RCR_MULTI_MASK); 2734 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2735 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2736 *multi_p, nmp, *mp, *mp_cont)); 2737 } 2738 2739 /* 2740 * Enable polling for a ring. Interrupt for the ring is disabled when 2741 * the nxge interrupt comes (see nxge_rx_intr).
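 * In outline, the polling handshake works as follows: this routine
 * only sets poll_flag under the ring lock; the next nxge_rx_intr()
 * then stops passing packets up and disarms the logical group, after
 * which the stack pulls packets via nxge_rx_poll() until it calls
 * nxge_disable_poll(), which re-enables mailbox updates and re-arms
 * the group.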
2742 */ 2743 int 2744 nxge_enable_poll(void *arg) 2745 { 2746 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2747 p_rx_rcr_ring_t ringp; 2748 p_nxge_t nxgep; 2749 p_nxge_ldg_t ldgp; 2750 uint32_t channel; 2751 2752 if (ring_handle == NULL) { 2753 ASSERT(ring_handle != NULL); 2754 return (0); 2755 } 2756 2757 nxgep = ring_handle->nxgep; 2758 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2759 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2760 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2761 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2762 ldgp = ringp->ldgp; 2763 if (ldgp == NULL) { 2764 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2765 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2766 ringp->rdc)); 2767 return (0); 2768 } 2769 2770 MUTEX_ENTER(&ringp->lock); 2771 /* enable polling */ 2772 if (ringp->poll_flag == 0) { 2773 ringp->poll_flag = 1; 2774 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2775 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2776 ringp->rdc)); 2777 } 2778 2779 MUTEX_EXIT(&ringp->lock); 2780 return (0); 2781 } 2782 /* 2783 * Disable polling for a ring and enable its interrupt. 2784 */ 2785 int 2786 nxge_disable_poll(void *arg) 2787 { 2788 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2789 p_rx_rcr_ring_t ringp; 2790 p_nxge_t nxgep; 2791 uint32_t channel; 2792 2793 if (ring_handle == NULL) { 2794 ASSERT(ring_handle != NULL); 2795 return (0); 2796 } 2797 2798 nxgep = ring_handle->nxgep; 2799 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2800 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2801 2802 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2803 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2804 2805 MUTEX_ENTER(&ringp->lock); 2806 2807 /* disable polling: enable interrupt */ 2808 if (ringp->poll_flag) { 2809 npi_handle_t handle; 2810 rx_dma_ctl_stat_t cs; 2811 uint8_t channel; 2812 p_nxge_ldg_t ldgp; 2813 2814 /* 2815 * Get the control and status for this channel. 2816 */ 2817 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2818 channel = ringp->rdc; 2819 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2820 channel, &cs.value); 2821 2822 /* 2823 * Enable mailbox update 2824 * Since packets were not read and the hardware uses 2825 * bits pktread and ptrread to update the queue 2826 * length, we need to set both bits to 0. 2827 */ 2828 cs.bits.ldw.pktread = 0; 2829 cs.bits.ldw.ptrread = 0; 2830 cs.bits.hdw.mex = 1; 2831 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2832 cs.value); 2833 2834 /* 2835 * Rearm this logical group if this is a single device 2836 * group. 2837 */ 2838 ldgp = ringp->ldgp; 2839 if (ldgp == NULL) { 2840 ringp->poll_flag = 0; 2841 MUTEX_EXIT(&ringp->lock); 2842 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2843 "==> nxge_disable_poll: no ldgp rdc %d " 2844 "(still set poll to 0", ringp->rdc)); 2845 return (0); 2846 } 2847 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2848 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2849 ringp->rdc, ldgp)); 2850 if (ldgp->nldvs == 1) { 2851 if (isLDOMguest(nxgep)) { 2852 ldgp->arm = B_TRUE; 2853 nxge_hio_ldgimgn(nxgep, ldgp); 2854 } else { 2855 ldgimgm_t mgm; 2856 mgm.value = 0; 2857 mgm.bits.ldw.arm = 1; 2858 mgm.bits.ldw.timer = ldgp->ldg_timer; 2859 NXGE_REG_WR64(handle, 2860 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 2861 mgm.value); 2862 } 2863 } 2864 ringp->poll_flag = 0; 2865 } 2866 2867 MUTEX_EXIT(&ringp->lock); 2868 return (0); 2869 } 2870 2871 /* 2872 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
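 * This is the byte-budget variant of the interrupt path: the budget
 * is handed straight to nxge_rx_pkts(), whose loop stops once
 * totallen reaches bytes_to_pickup (the interrupt path passes -1,
 * meaning no byte limit).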
2873 */ 2874 mblk_t * 2875 nxge_rx_poll(void *arg, int bytes_to_pickup) 2876 { 2877 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2878 p_rx_rcr_ring_t rcr_p; 2879 p_nxge_t nxgep; 2880 npi_handle_t handle; 2881 rx_dma_ctl_stat_t cs; 2882 mblk_t *mblk; 2883 p_nxge_ldv_t ldvp; 2884 uint32_t channel; 2885 2886 nxgep = ring_handle->nxgep; 2887 2888 /* 2889 * Get the control and status for this channel. 2890 */ 2891 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2892 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2893 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2894 MUTEX_ENTER(&rcr_p->lock); 2895 ASSERT(rcr_p->poll_flag == 1); 2896 2897 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2898 2899 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2900 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2901 rcr_p->rdc, rcr_p->poll_flag)); 2902 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2903 2904 ldvp = rcr_p->ldvp; 2905 /* error events. */ 2906 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2907 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2908 } 2909 2910 MUTEX_EXIT(&rcr_p->lock); 2911 2912 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2913 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 2914 return (mblk); 2915 } 2916 2917 2918 /*ARGSUSED*/ 2919 static nxge_status_t 2920 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2921 { 2922 p_nxge_rx_ring_stats_t rdc_stats; 2923 npi_handle_t handle; 2924 npi_status_t rs; 2925 boolean_t rxchan_fatal = B_FALSE; 2926 boolean_t rxport_fatal = B_FALSE; 2927 uint8_t portn; 2928 nxge_status_t status = NXGE_OK; 2929 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2930 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2931 2932 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2933 portn = nxgep->mac.portnum; 2934 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 2935 2936 if (cs.bits.hdw.rbr_tmout) { 2937 rdc_stats->rx_rbr_tmout++; 2938 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2939 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2940 rxchan_fatal = B_TRUE; 2941 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2942 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2943 } 2944 if (cs.bits.hdw.rsp_cnt_err) { 2945 rdc_stats->rsp_cnt_err++; 2946 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2947 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2948 rxchan_fatal = B_TRUE; 2949 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2950 "==> nxge_rx_err_evnts(channel %d): " 2951 "rsp_cnt_err", channel)); 2952 } 2953 if (cs.bits.hdw.byte_en_bus) { 2954 rdc_stats->byte_en_bus++; 2955 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2956 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2957 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2958 "==> nxge_rx_err_evnts(channel %d): " 2959 "fatal error: byte_en_bus", channel)); 2960 rxchan_fatal = B_TRUE; 2961 } 2962 if (cs.bits.hdw.rsp_dat_err) { 2963 rdc_stats->rsp_dat_err++; 2964 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2965 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2966 rxchan_fatal = B_TRUE; 2967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2968 "==> nxge_rx_err_evnts(channel %d): " 2969 "fatal error: rsp_dat_err", channel)); 2970 } 2971 if (cs.bits.hdw.rcr_ack_err) { 2972 rdc_stats->rcr_ack_err++; 2973 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2974 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2975 rxchan_fatal = B_TRUE; 2976 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2977 "==> nxge_rx_err_evnts(channel %d): " 2978 "fatal error: rcr_ack_err", channel)); 2979 } 2980 if (cs.bits.hdw.dc_fifo_err) { 2981 rdc_stats->dc_fifo_err++; 2982 
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2983 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2984 /* This is not a fatal error! */ 2985 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2986 "==> nxge_rx_err_evnts(channel %d): " 2987 "dc_fifo_err", channel)); 2988 rxport_fatal = B_TRUE; 2989 } 2990 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2991 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2992 &rdc_stats->errlog.pre_par, 2993 &rdc_stats->errlog.sha_par)) 2994 != NPI_SUCCESS) { 2995 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2996 "==> nxge_rx_err_evnts(channel %d): " 2997 "rcr_sha_par: get perr", channel)); 2998 return (NXGE_ERROR | rs); 2999 } 3000 if (cs.bits.hdw.rcr_sha_par) { 3001 rdc_stats->rcr_sha_par++; 3002 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3003 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 3004 rxchan_fatal = B_TRUE; 3005 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3006 "==> nxge_rx_err_evnts(channel %d): " 3007 "fatal error: rcr_sha_par", channel)); 3008 } 3009 if (cs.bits.hdw.rbr_pre_par) { 3010 rdc_stats->rbr_pre_par++; 3011 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3012 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 3013 rxchan_fatal = B_TRUE; 3014 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3015 "==> nxge_rx_err_evnts(channel %d): " 3016 "fatal error: rbr_pre_par", channel)); 3017 } 3018 } 3019 /* 3020 * The Following 4 status bits are for information, the system 3021 * is running fine. There is no need to send FMA ereports or 3022 * log messages. 3023 */ 3024 if (cs.bits.hdw.port_drop_pkt) { 3025 rdc_stats->port_drop_pkt++; 3026 } 3027 if (cs.bits.hdw.wred_drop) { 3028 rdc_stats->wred_drop++; 3029 } 3030 if (cs.bits.hdw.rbr_pre_empty) { 3031 rdc_stats->rbr_pre_empty++; 3032 } 3033 if (cs.bits.hdw.rcr_shadow_full) { 3034 rdc_stats->rcr_shadow_full++; 3035 } 3036 if (cs.bits.hdw.config_err) { 3037 rdc_stats->config_err++; 3038 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3039 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3040 rxchan_fatal = B_TRUE; 3041 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3042 "==> nxge_rx_err_evnts(channel %d): " 3043 "config error", channel)); 3044 } 3045 if (cs.bits.hdw.rcrincon) { 3046 rdc_stats->rcrincon++; 3047 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3048 NXGE_FM_EREPORT_RDMC_RCRINCON); 3049 rxchan_fatal = B_TRUE; 3050 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3051 "==> nxge_rx_err_evnts(channel %d): " 3052 "fatal error: rcrincon error", channel)); 3053 } 3054 if (cs.bits.hdw.rcrfull) { 3055 rdc_stats->rcrfull++; 3056 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3057 NXGE_FM_EREPORT_RDMC_RCRFULL); 3058 rxchan_fatal = B_TRUE; 3059 if (rdc_stats->rcrfull < error_disp_cnt) { 3060 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3061 "==> nxge_rx_err_evnts(channel %d): " 3062 "fatal error: rcrfull error", channel)); 3063 } 3064 } 3065 if (cs.bits.hdw.rbr_empty) { 3066 /* 3067 * This bit is for information, there is no need 3068 * send FMA ereport or log a message. 
3069 */ 3070 rdc_stats->rbr_empty++; 3071 } 3072 if (cs.bits.hdw.rbrfull) { 3073 rdc_stats->rbrfull++; 3074 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3075 NXGE_FM_EREPORT_RDMC_RBRFULL); 3076 rxchan_fatal = B_TRUE; 3077 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3078 "==> nxge_rx_err_evnts(channel %d): " 3079 "fatal error: rbr_full error", channel)); 3080 } 3081 if (cs.bits.hdw.rbrlogpage) { 3082 rdc_stats->rbrlogpage++; 3083 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3084 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3085 rxchan_fatal = B_TRUE; 3086 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3087 "==> nxge_rx_err_evnts(channel %d): " 3088 "fatal error: rbr logical page error", channel)); 3089 } 3090 if (cs.bits.hdw.cfiglogpage) { 3091 rdc_stats->cfiglogpage++; 3092 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3093 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3094 rxchan_fatal = B_TRUE; 3095 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3096 "==> nxge_rx_err_evnts(channel %d): " 3097 "fatal error: cfig logical page error", channel)); 3098 } 3099 3100 if (rxport_fatal) { 3101 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3102 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3103 portn)); 3104 if (isLDOMguest(nxgep)) { 3105 status = NXGE_ERROR; 3106 } else { 3107 status = nxge_ipp_fatal_err_recover(nxgep); 3108 if (status == NXGE_OK) { 3109 FM_SERVICE_RESTORED(nxgep); 3110 } 3111 } 3112 } 3113 3114 if (rxchan_fatal) { 3115 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3116 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3117 channel)); 3118 if (isLDOMguest(nxgep)) { 3119 status = NXGE_ERROR; 3120 } else { 3121 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3122 if (status == NXGE_OK) { 3123 FM_SERVICE_RESTORED(nxgep); 3124 } 3125 } 3126 } 3127 3128 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3129 3130 return (status); 3131 } 3132 3133 /* 3134 * nxge_rdc_hvio_setup 3135 * 3136 * This code appears to setup some Hypervisor variables. 3137 * 3138 * Arguments: 3139 * nxgep 3140 * channel 3141 * 3142 * Notes: 3143 * What does NIU_LP_WORKAROUND mean? 
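 *	A sketch of the intent: the routine records the physical (IO)
 *	base addresses and sizes of this channel's data-buffer and
 *	control areas in the RBR ring (hv_rx_buf_* / hv_rx_cntl_*),
 *	presumably so they can later be handed to the hypervisor when
 *	the logical pages are programmed on sun4v.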
3144 * 3145 * NPI/NXGE function calls: 3146 * na 3147 * 3148 * Context: 3149 * Any domain 3150 */ 3151 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3152 static void 3153 nxge_rdc_hvio_setup( 3154 nxge_t *nxgep, int channel) 3155 { 3156 nxge_dma_common_t *dma_common; 3157 nxge_dma_common_t *dma_control; 3158 rx_rbr_ring_t *ring; 3159 3160 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3161 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3162 3163 ring->hv_set = B_FALSE; 3164 3165 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3166 dma_common->orig_ioaddr_pp; 3167 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3168 dma_common->orig_alength; 3169 3170 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3171 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3172 channel, ring->hv_rx_buf_base_ioaddr_pp, 3173 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3174 dma_common->orig_alength, dma_common->orig_alength)); 3175 3176 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3177 3178 ring->hv_rx_cntl_base_ioaddr_pp = 3179 (uint64_t)dma_control->orig_ioaddr_pp; 3180 ring->hv_rx_cntl_ioaddr_size = 3181 (uint64_t)dma_control->orig_alength; 3182 3183 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3184 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3185 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3186 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3187 dma_control->orig_alength, dma_control->orig_alength)); 3188 } 3189 #endif 3190 3191 /* 3192 * nxge_map_rxdma 3193 * 3194 * Map an RDC into our kernel space. 3195 * 3196 * Arguments: 3197 * nxgep 3198 * channel The channel to map. 3199 * 3200 * Notes: 3201 * 1. Allocate & initialise a memory pool, if necessary. 3202 * 2. Allocate however many receive buffers are required. 3203 * 3. Setup buffers, descriptors, and mailbox. 3204 * 3205 * NPI/NXGE function calls: 3206 * nxge_alloc_rx_mem_pool() 3207 * nxge_alloc_rbb() 3208 * nxge_map_rxdma_channel() 3209 * 3210 * Registers accessed: 3211 * 3212 * Context: 3213 * Any domain 3214 */ 3215 static nxge_status_t 3216 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3217 { 3218 nxge_dma_common_t **data; 3219 nxge_dma_common_t **control; 3220 rx_rbr_ring_t **rbr_ring; 3221 rx_rcr_ring_t **rcr_ring; 3222 rx_mbox_t **mailbox; 3223 uint32_t chunks; 3224 3225 nxge_status_t status; 3226 3227 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3228 3229 if (!nxgep->rx_buf_pool_p) { 3230 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3231 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3232 "<== nxge_map_rxdma: buf not allocated")); 3233 return (NXGE_ERROR); 3234 } 3235 } 3236 3237 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3238 return (NXGE_ERROR); 3239 3240 /* 3241 * Map descriptors from the buffer polls for each dma channel. 3242 */ 3243 3244 /* 3245 * Set up and prepare buffer blocks, descriptors 3246 * and mailbox. 
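 * The data buffer pool supplies the RBR (buffer ring) chunks, while
 * the control pool supplies the RCR (completion ring) and mailbox
 * areas; both are handed to nxge_map_rxdma_channel() below.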
3247 */ 3248 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3249 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3250 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3251 3252 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3253 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3254 3255 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3256 3257 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3258 chunks, control, rcr_ring, mailbox); 3259 if (status != NXGE_OK) { 3260 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3261 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3262 "returned 0x%x", 3263 channel, status)); 3264 return (status); 3265 } 3266 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3267 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3268 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3269 &nxgep->statsp->rdc_stats[channel]; 3270 3271 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3272 if (!isLDOMguest(nxgep)) 3273 nxge_rdc_hvio_setup(nxgep, channel); 3274 #endif 3275 3276 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3277 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3278 3279 return (status); 3280 } 3281 3282 static void 3283 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3284 { 3285 rx_rbr_ring_t *rbr_ring; 3286 rx_rcr_ring_t *rcr_ring; 3287 rx_mbox_t *mailbox; 3288 3289 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3290 3291 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3292 !nxgep->rx_mbox_areas_p) 3293 return; 3294 3295 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3296 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3297 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3298 3299 if (!rbr_ring || !rcr_ring || !mailbox) 3300 return; 3301 3302 (void) nxge_unmap_rxdma_channel( 3303 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3304 3305 nxge_free_rxb(nxgep, channel); 3306 3307 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3308 } 3309 3310 nxge_status_t 3311 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3312 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3313 uint32_t num_chunks, 3314 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3315 p_rx_mbox_t *rx_mbox_p) 3316 { 3317 int status = NXGE_OK; 3318 3319 /* 3320 * Set up and prepare buffer blocks, descriptors 3321 * and mailbox. 3322 */ 3323 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3324 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3325 /* 3326 * Receive buffer blocks 3327 */ 3328 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3329 dma_buf_p, rbr_p, num_chunks); 3330 if (status != NXGE_OK) { 3331 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3332 "==> nxge_map_rxdma_channel (channel %d): " 3333 "map buffer failed 0x%x", channel, status)); 3334 goto nxge_map_rxdma_channel_exit; 3335 } 3336 3337 /* 3338 * Receive block ring, completion ring and mailbox. 
3339 */ 3340 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3341 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3342 if (status != NXGE_OK) { 3343 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3344 "==> nxge_map_rxdma_channel (channel %d): " 3345 "map config failed 0x%x", channel, status)); 3346 goto nxge_map_rxdma_channel_fail2; 3347 } 3348 3349 goto nxge_map_rxdma_channel_exit; 3350 3351 nxge_map_rxdma_channel_fail3: 3352 /* Free rbr, rcr */ 3353 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3354 "==> nxge_map_rxdma_channel: free rbr/rcr " 3355 "(status 0x%x channel %d)", 3356 status, channel)); 3357 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3358 *rcr_p, *rx_mbox_p); 3359 3360 nxge_map_rxdma_channel_fail2: 3361 /* Free buffer blocks */ 3362 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3363 "==> nxge_map_rxdma_channel: free rx buffers" 3364 "(nxgep 0x%x status 0x%x channel %d)", 3365 nxgep, status, channel)); 3366 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3367 3368 status = NXGE_ERROR; 3369 3370 nxge_map_rxdma_channel_exit: 3371 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3372 "<== nxge_map_rxdma_channel: " 3373 "(nxgep 0x%x status 0x%x channel %d)", 3374 nxgep, status, channel)); 3375 3376 return (status); 3377 } 3378 3379 /*ARGSUSED*/ 3380 static void 3381 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3382 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3383 { 3384 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3385 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3386 3387 /* 3388 * unmap receive block ring, completion ring and mailbox. 3389 */ 3390 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3391 rcr_p, rx_mbox_p); 3392 3393 /* unmap buffer blocks */ 3394 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3395 3396 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3397 } 3398 3399 /*ARGSUSED*/ 3400 static nxge_status_t 3401 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3402 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3403 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3404 { 3405 p_rx_rbr_ring_t rbrp; 3406 p_rx_rcr_ring_t rcrp; 3407 p_rx_mbox_t mboxp; 3408 p_nxge_dma_common_t cntl_dmap; 3409 p_nxge_dma_common_t dmap; 3410 p_rx_msg_t *rx_msg_ring; 3411 p_rx_msg_t rx_msg_p; 3412 p_rbr_cfig_a_t rcfga_p; 3413 p_rbr_cfig_b_t rcfgb_p; 3414 p_rcrcfig_a_t cfga_p; 3415 p_rcrcfig_b_t cfgb_p; 3416 p_rxdma_cfig1_t cfig1_p; 3417 p_rxdma_cfig2_t cfig2_p; 3418 p_rbr_kick_t kick_p; 3419 uint32_t dmaaddrp; 3420 uint32_t *rbr_vaddrp; 3421 uint32_t bkaddr; 3422 nxge_status_t status = NXGE_OK; 3423 int i; 3424 uint32_t nxge_port_rcr_size; 3425 3426 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3427 "==> nxge_map_rxdma_channel_cfg_ring")); 3428 3429 cntl_dmap = *dma_cntl_p; 3430 3431 /* Map in the receive block ring */ 3432 rbrp = *rbr_p; 3433 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3434 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3435 /* 3436 * Zero out buffer block ring descriptors. 
3437 */ 3438 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3439 3440 rcfga_p = &(rbrp->rbr_cfga); 3441 rcfgb_p = &(rbrp->rbr_cfgb); 3442 kick_p = &(rbrp->rbr_kick); 3443 rcfga_p->value = 0; 3444 rcfgb_p->value = 0; 3445 kick_p->value = 0; 3446 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3447 rcfga_p->value = (rbrp->rbr_addr & 3448 (RBR_CFIG_A_STDADDR_MASK | 3449 RBR_CFIG_A_STDADDR_BASE_MASK)); 3450 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3451 3452 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3453 rcfgb_p->bits.ldw.vld0 = 1; 3454 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3455 rcfgb_p->bits.ldw.vld1 = 1; 3456 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3457 rcfgb_p->bits.ldw.vld2 = 1; 3458 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3459 3460 /* 3461 * For each buffer block, enter receive block address to the ring. 3462 */ 3463 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3464 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3465 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3466 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3467 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3468 3469 rx_msg_ring = rbrp->rx_msg_ring; 3470 for (i = 0; i < rbrp->tnblocks; i++) { 3471 rx_msg_p = rx_msg_ring[i]; 3472 rx_msg_p->nxgep = nxgep; 3473 rx_msg_p->rx_rbr_p = rbrp; 3474 bkaddr = (uint32_t) 3475 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3476 >> RBR_BKADDR_SHIFT)); 3477 rx_msg_p->free = B_FALSE; 3478 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3479 3480 *rbr_vaddrp++ = bkaddr; 3481 } 3482 3483 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3484 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3485 3486 rbrp->rbr_rd_index = 0; 3487 3488 rbrp->rbr_consumed = 0; 3489 rbrp->rbr_use_bcopy = B_TRUE; 3490 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3491 /* 3492 * Do bcopy on packets greater than bcopy size once 3493 * the lo threshold is reached. 3494 * This lo threshold should be less than the hi threshold. 3495 * 3496 * Do bcopy on every packet once the hi threshold is reached. 
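 * As a rough example (assuming NXGE_RX_BCOPY_SCALE is 8): with an
 * rbb_max of 4096 and nxge_rx_threshold_hi of NXGE_RX_COPY_4, the
 * hi threshold becomes 4096 * 4 / 8 = 2048, i.e. every packet is
 * bcopied once half of the buffer blocks are outstanding.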
3497 */ 3498 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3499 /* default it to use hi */ 3500 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3501 } 3502 3503 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3504 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3505 } 3506 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3507 3508 switch (nxge_rx_threshold_hi) { 3509 default: 3510 case NXGE_RX_COPY_NONE: 3511 /* Do not do bcopy at all */ 3512 rbrp->rbr_use_bcopy = B_FALSE; 3513 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3514 break; 3515 3516 case NXGE_RX_COPY_1: 3517 case NXGE_RX_COPY_2: 3518 case NXGE_RX_COPY_3: 3519 case NXGE_RX_COPY_4: 3520 case NXGE_RX_COPY_5: 3521 case NXGE_RX_COPY_6: 3522 case NXGE_RX_COPY_7: 3523 rbrp->rbr_threshold_hi = 3524 rbrp->rbb_max * 3525 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3526 break; 3527 3528 case NXGE_RX_COPY_ALL: 3529 rbrp->rbr_threshold_hi = 0; 3530 break; 3531 } 3532 3533 switch (nxge_rx_threshold_lo) { 3534 default: 3535 case NXGE_RX_COPY_NONE: 3536 /* Do not do bcopy at all */ 3537 if (rbrp->rbr_use_bcopy) { 3538 rbrp->rbr_use_bcopy = B_FALSE; 3539 } 3540 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3541 break; 3542 3543 case NXGE_RX_COPY_1: 3544 case NXGE_RX_COPY_2: 3545 case NXGE_RX_COPY_3: 3546 case NXGE_RX_COPY_4: 3547 case NXGE_RX_COPY_5: 3548 case NXGE_RX_COPY_6: 3549 case NXGE_RX_COPY_7: 3550 rbrp->rbr_threshold_lo = 3551 rbrp->rbb_max * 3552 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3553 break; 3554 3555 case NXGE_RX_COPY_ALL: 3556 rbrp->rbr_threshold_lo = 0; 3557 break; 3558 } 3559 3560 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3561 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3562 "rbb_max %d " 3563 "rbrp->rbr_bufsize_type %d " 3564 "rbb_threshold_hi %d " 3565 "rbb_threshold_lo %d", 3566 dma_channel, 3567 rbrp->rbb_max, 3568 rbrp->rbr_bufsize_type, 3569 rbrp->rbr_threshold_hi, 3570 rbrp->rbr_threshold_lo)); 3571 3572 rbrp->page_valid.value = 0; 3573 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3574 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3575 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3576 rbrp->page_hdl.value = 0; 3577 3578 rbrp->page_valid.bits.ldw.page0 = 1; 3579 rbrp->page_valid.bits.ldw.page1 = 1; 3580 3581 /* Map in the receive completion ring */ 3582 rcrp = (p_rx_rcr_ring_t) 3583 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3584 rcrp->rdc = dma_channel; 3585 3586 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3587 rcrp->comp_size = nxge_port_rcr_size; 3588 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3589 3590 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3591 3592 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3593 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3594 sizeof (rcr_entry_t)); 3595 rcrp->comp_rd_index = 0; 3596 rcrp->comp_wt_index = 0; 3597 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3598 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3599 #if defined(__i386) 3600 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3601 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3602 #else 3603 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3604 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3605 #endif 3606 3607 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3608 (nxge_port_rcr_size - 1); 3609 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3610 (nxge_port_rcr_size - 1); 3611 3612 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3613 "==> nxge_map_rxdma_channel_cfg_ring: " 3614 "channel %d " 3615 "rbr_vaddrp $%p " 3616 "rcr_desc_rd_head_p $%p " 3617 "rcr_desc_rd_head_pp 
$%p " 3618 "rcr_desc_rd_last_p $%p " 3619 "rcr_desc_rd_last_pp $%p ", 3620 dma_channel, 3621 rbr_vaddrp, 3622 rcrp->rcr_desc_rd_head_p, 3623 rcrp->rcr_desc_rd_head_pp, 3624 rcrp->rcr_desc_last_p, 3625 rcrp->rcr_desc_last_pp)); 3626 3627 /* 3628 * Zero out buffer block ring descriptors. 3629 */ 3630 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3631 3632 rcrp->intr_timeout = (nxgep->intr_timeout < 3633 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3634 nxgep->intr_timeout; 3635 3636 rcrp->intr_threshold = (nxgep->intr_threshold < 3637 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3638 nxgep->intr_threshold; 3639 3640 rcrp->full_hdr_flag = B_FALSE; 3641 3642 rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset; 3643 3644 3645 cfga_p = &(rcrp->rcr_cfga); 3646 cfgb_p = &(rcrp->rcr_cfgb); 3647 cfga_p->value = 0; 3648 cfgb_p->value = 0; 3649 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3650 cfga_p->value = (rcrp->rcr_addr & 3651 (RCRCFIG_A_STADDR_MASK | 3652 RCRCFIG_A_STADDR_BASE_MASK)); 3653 3654 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3655 RCRCFIG_A_LEN_SHIF); 3656 3657 /* 3658 * Timeout should be set based on the system clock divider. 3659 * A timeout value of 1 assumes that the 3660 * granularity (1000) is 3 microseconds running at 300MHz. 3661 */ 3662 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3663 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3664 cfgb_p->bits.ldw.entout = 1; 3665 3666 /* Map in the mailbox */ 3667 mboxp = (p_rx_mbox_t) 3668 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3669 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3670 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3671 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3672 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3673 cfig1_p->value = cfig2_p->value = 0; 3674 3675 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3676 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3677 "==> nxge_map_rxdma_channel_cfg_ring: " 3678 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3679 dma_channel, cfig1_p->value, cfig2_p->value, 3680 mboxp->mbox_addr)); 3681 3682 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3683 & 0xfff); 3684 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3685 3686 3687 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3688 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3689 RXDMA_CFIG2_MBADDR_L_MASK); 3690 3691 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3692 3693 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3694 "==> nxge_map_rxdma_channel_cfg_ring: " 3695 "channel %d damaddrp $%p " 3696 "cfg1 0x%016llx cfig2 0x%016llx", 3697 dma_channel, dmaaddrp, 3698 cfig1_p->value, cfig2_p->value)); 3699 3700 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3701 if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) { 3702 switch (rcrp->sw_priv_hdr_len) { 3703 case SW_OFFSET_NO_OFFSET: 3704 case SW_OFFSET_64: 3705 case SW_OFFSET_128: 3706 case SW_OFFSET_192: 3707 cfig2_p->bits.ldw.offset = 3708 rcrp->sw_priv_hdr_len; 3709 cfig2_p->bits.ldw.offset256 = 0; 3710 break; 3711 case SW_OFFSET_256: 3712 case SW_OFFSET_320: 3713 case SW_OFFSET_384: 3714 case SW_OFFSET_448: 3715 cfig2_p->bits.ldw.offset = 3716 rcrp->sw_priv_hdr_len & 0x3; 3717 cfig2_p->bits.ldw.offset256 = 1; 3718 break; 3719 default: 3720 cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET; 3721 cfig2_p->bits.ldw.offset256 = 0; 3722 } 3723 } else { 3724 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3725 } 3726 3727 rbrp->rx_rcr_p = rcrp; 3728 rcrp->rx_rbr_p = rbrp; 3729 *rcr_p = rcrp; 3730 
*rx_mbox_p = mboxp; 3731 3732 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3733 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3734 3735 return (status); 3736 } 3737 3738 /*ARGSUSED*/ 3739 static void 3740 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3741 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3742 { 3743 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3744 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3745 rcr_p->rdc)); 3746 3747 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3748 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3749 3750 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3751 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3752 } 3753 3754 static nxge_status_t 3755 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3756 p_nxge_dma_common_t *dma_buf_p, 3757 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3758 { 3759 p_rx_rbr_ring_t rbrp; 3760 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3761 p_rx_msg_t *rx_msg_ring; 3762 p_rx_msg_t rx_msg_p; 3763 p_mblk_t mblk_p; 3764 3765 rxring_info_t *ring_info; 3766 nxge_status_t status = NXGE_OK; 3767 int i, j, index; 3768 uint32_t size, bsize, nblocks, nmsgs; 3769 3770 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3771 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3772 channel)); 3773 3774 dma_bufp = tmp_bufp = *dma_buf_p; 3775 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3776 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3777 "chunks bufp 0x%016llx", 3778 channel, num_chunks, dma_bufp)); 3779 3780 nmsgs = 0; 3781 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3782 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3783 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3784 "bufp 0x%016llx nblocks %d nmsgs %d", 3785 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3786 nmsgs += tmp_bufp->nblocks; 3787 } 3788 if (!nmsgs) { 3789 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3790 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3791 "no msg blocks", 3792 channel)); 3793 status = NXGE_ERROR; 3794 goto nxge_map_rxdma_channel_buf_ring_exit; 3795 } 3796 3797 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3798 3799 size = nmsgs * sizeof (p_rx_msg_t); 3800 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3801 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3802 KM_SLEEP); 3803 3804 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3805 (void *)nxgep->interrupt_cookie); 3806 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3807 (void *)nxgep->interrupt_cookie); 3808 rbrp->rdc = channel; 3809 rbrp->num_blocks = num_chunks; 3810 rbrp->tnblocks = nmsgs; 3811 rbrp->rbb_max = nmsgs; 3812 rbrp->rbr_max_size = nmsgs; 3813 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3814 3815 /* 3816 * Buffer sizes suggested by NIU architect. 3817 * 256, 512 and 2K. 
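 * Each DMA chunk of block_size bytes is carved into block_size /
 * bufsz packet buffers of a given size code; for example, an 8K
 * block yields 32 256-byte buffers or 4 2K buffers (this is the
 * max_usage_cnt computed in nxge_receive_packet()).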
3818 */ 3819 3820 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3821 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3822 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3823 3824 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3825 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3826 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3827 3828 rbrp->block_size = nxgep->rx_default_block_size; 3829 3830 if (!nxgep->mac.is_jumbo) { 3831 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3832 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3833 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3834 } else { 3835 if (rbrp->block_size >= 0x2000) { 3836 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3837 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3838 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3839 } else { 3840 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3841 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3842 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3843 } 3844 } 3845 3846 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3847 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3848 "actual rbr max %d rbb_max %d nmsgs %d " 3849 "rbrp->block_size %d default_block_size %d " 3850 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3851 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3852 rbrp->block_size, nxgep->rx_default_block_size, 3853 nxge_rbr_size, nxge_rbr_spare_size)); 3854 3855 /* Map in buffers from the buffer pool. */ 3856 index = 0; 3857 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3858 bsize = dma_bufp->block_size; 3859 nblocks = dma_bufp->nblocks; 3860 #if defined(__i386) 3861 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3862 #else 3863 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3864 #endif 3865 ring_info->buffer[i].buf_index = i; 3866 ring_info->buffer[i].buf_size = dma_bufp->alength; 3867 ring_info->buffer[i].start_index = index; 3868 #if defined(__i386) 3869 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3870 #else 3871 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3872 #endif 3873 3874 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3875 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3876 "chunk %d" 3877 " nblocks %d chunk_size %x block_size 0x%x " 3878 "dma_bufp $%p", channel, i, 3879 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3880 dma_bufp)); 3881 3882 for (j = 0; j < nblocks; j++) { 3883 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3884 dma_bufp)) == NULL) { 3885 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3886 "allocb failed (index %d i %d j %d)", 3887 index, i, j)); 3888 goto nxge_map_rxdma_channel_buf_ring_fail1; 3889 } 3890 rx_msg_ring[index] = rx_msg_p; 3891 rx_msg_p->block_index = index; 3892 rx_msg_p->shifted_addr = (uint32_t) 3893 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3894 RBR_BKADDR_SHIFT)); 3895 3896 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3897 "index %d j %d rx_msg_p $%p mblk %p", 3898 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3899 3900 mblk_p = rx_msg_p->rx_mblk_p; 3901 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3902 3903 rbrp->rbr_ref_cnt++; 3904 index++; 3905 rx_msg_p->buf_dma.dma_channel = channel; 3906 } 3907 3908 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3909 if (dma_bufp->contig_alloc_type) { 3910 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3911 } 3912 3913 if (dma_bufp->kmem_alloc_type) { 3914 rbrp->rbr_alloc_type = KMEM_ALLOC; 3915 } 3916 3917 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3918 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3919 "chunk %d" 3920 " nblocks %d chunk_size %x block_size 0x%x " 3921 "dma_bufp $%p", 3922 channel, i, 3923 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3924 
dma_bufp)); 3925 } 3926 if (i < rbrp->num_blocks) { 3927 goto nxge_map_rxdma_channel_buf_ring_fail1; 3928 } 3929 3930 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3931 "nxge_map_rxdma_channel_buf_ring: done buf init " 3932 "channel %d msg block entries %d", 3933 channel, index)); 3934 ring_info->block_size_mask = bsize - 1; 3935 rbrp->rx_msg_ring = rx_msg_ring; 3936 rbrp->dma_bufp = dma_buf_p; 3937 rbrp->ring_info = ring_info; 3938 3939 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3940 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3941 " nxge_map_rxdma_channel_buf_ring: " 3942 "channel %d done buf info init", channel)); 3943 3944 /* 3945 * Finally, permit nxge_freeb() to call nxge_post_page(). 3946 */ 3947 rbrp->rbr_state = RBR_POSTING; 3948 3949 *rbr_p = rbrp; 3950 goto nxge_map_rxdma_channel_buf_ring_exit; 3951 3952 nxge_map_rxdma_channel_buf_ring_fail1: 3953 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3954 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3955 channel, status)); 3956 3957 index--; 3958 for (; index >= 0; index--) { 3959 rx_msg_p = rx_msg_ring[index]; 3960 if (rx_msg_p != NULL) { 3961 freeb(rx_msg_p->rx_mblk_p); 3962 rx_msg_ring[index] = NULL; 3963 } 3964 } 3965 nxge_map_rxdma_channel_buf_ring_fail: 3966 MUTEX_DESTROY(&rbrp->post_lock); 3967 MUTEX_DESTROY(&rbrp->lock); 3968 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3969 KMEM_FREE(rx_msg_ring, size); 3970 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3971 3972 status = NXGE_ERROR; 3973 3974 nxge_map_rxdma_channel_buf_ring_exit: 3975 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3976 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3977 3978 return (status); 3979 } 3980 3981 /*ARGSUSED*/ 3982 static void 3983 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3984 p_rx_rbr_ring_t rbr_p) 3985 { 3986 p_rx_msg_t *rx_msg_ring; 3987 p_rx_msg_t rx_msg_p; 3988 rxring_info_t *ring_info; 3989 int i; 3990 uint32_t size; 3991 #ifdef NXGE_DEBUG 3992 int num_chunks; 3993 #endif 3994 3995 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3996 "==> nxge_unmap_rxdma_channel_buf_ring")); 3997 if (rbr_p == NULL) { 3998 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3999 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 4000 return; 4001 } 4002 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4003 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 4004 rbr_p->rdc)); 4005 4006 rx_msg_ring = rbr_p->rx_msg_ring; 4007 ring_info = rbr_p->ring_info; 4008 4009 if (rx_msg_ring == NULL || ring_info == NULL) { 4010 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4011 "<== nxge_unmap_rxdma_channel_buf_ring: " 4012 "rx_msg_ring $%p ring_info $%p", 4013 rx_msg_p, ring_info)); 4014 return; 4015 } 4016 4017 #ifdef NXGE_DEBUG 4018 num_chunks = rbr_p->num_blocks; 4019 #endif 4020 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 4021 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4022 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 4023 "tnblocks %d (max %d) size ptrs %d ", 4024 rbr_p->rdc, num_chunks, 4025 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 4026 4027 for (i = 0; i < rbr_p->tnblocks; i++) { 4028 rx_msg_p = rx_msg_ring[i]; 4029 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4030 " nxge_unmap_rxdma_channel_buf_ring: " 4031 "rx_msg_p $%p", 4032 rx_msg_p)); 4033 if (rx_msg_p != NULL) { 4034 freeb(rx_msg_p->rx_mblk_p); 4035 rx_msg_ring[i] = NULL; 4036 } 4037 } 4038 4039 /* 4040 * We no longer may use the mutex <post_lock>. By setting 4041 * <rbr_state> to anything but POSTING, we prevent 4042 * nxge_post_page() from accessing a dead mutex. 
4043 */ 4044 rbr_p->rbr_state = RBR_UNMAPPING; 4045 MUTEX_DESTROY(&rbr_p->post_lock); 4046 4047 MUTEX_DESTROY(&rbr_p->lock); 4048 4049 if (rbr_p->rbr_ref_cnt == 0) { 4050 /* 4051 * This is the normal state of affairs. 4052 * Need to free the following buffers: 4053 * - data buffers 4054 * - rx_msg ring 4055 * - ring_info 4056 * - rbr ring 4057 */ 4058 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4059 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4060 nxge_rxdma_databuf_free(rbr_p); 4061 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4062 KMEM_FREE(rx_msg_ring, size); 4063 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4064 } else { 4065 /* 4066 * Some of our buffers are still being used. 4067 * Therefore, tell nxge_freeb() this ring is 4068 * unmapped, so it may free <rbr_p> for us. 4069 */ 4070 rbr_p->rbr_state = RBR_UNMAPPED; 4071 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4072 "unmap_rxdma_buf_ring: %d %s outstanding.", 4073 rbr_p->rbr_ref_cnt, 4074 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 4075 } 4076 4077 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4078 "<== nxge_unmap_rxdma_channel_buf_ring")); 4079 } 4080 4081 /* 4082 * nxge_rxdma_hw_start_common 4083 * 4084 * Arguments: 4085 * nxgep 4086 * 4087 * Notes: 4088 * 4089 * NPI/NXGE function calls: 4090 * nxge_init_fzc_rx_common(); 4091 * nxge_init_fzc_rxdma_port(); 4092 * 4093 * Registers accessed: 4094 * 4095 * Context: 4096 * Service domain 4097 */ 4098 static nxge_status_t 4099 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 4100 { 4101 nxge_status_t status = NXGE_OK; 4102 4103 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4104 4105 /* 4106 * Load the sharable parameters by writing to the 4107 * function zero control registers. These FZC registers 4108 * should be initialized only once for the entire chip. 4109 */ 4110 (void) nxge_init_fzc_rx_common(nxgep); 4111 4112 /* 4113 * Initialize the RXDMA port specific FZC control configurations. 4114 * These FZC registers are pertaining to each port. 
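 * Like nxge_init_fzc_rx_common() above, the call below discards its
 * return value; this routine reports NXGE_OK regardless.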
4115 */ 4116 (void) nxge_init_fzc_rxdma_port(nxgep); 4117 4118 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4119 4120 return (status); 4121 } 4122 4123 static nxge_status_t 4124 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 4125 { 4126 int i, ndmas; 4127 p_rx_rbr_rings_t rx_rbr_rings; 4128 p_rx_rbr_ring_t *rbr_rings; 4129 p_rx_rcr_rings_t rx_rcr_rings; 4130 p_rx_rcr_ring_t *rcr_rings; 4131 p_rx_mbox_areas_t rx_mbox_areas_p; 4132 p_rx_mbox_t *rx_mbox_p; 4133 nxge_status_t status = NXGE_OK; 4134 4135 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 4136 4137 rx_rbr_rings = nxgep->rx_rbr_rings; 4138 rx_rcr_rings = nxgep->rx_rcr_rings; 4139 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4140 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4141 "<== nxge_rxdma_hw_start: NULL ring pointers")); 4142 return (NXGE_ERROR); 4143 } 4144 ndmas = rx_rbr_rings->ndmas; 4145 if (ndmas == 0) { 4146 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4147 "<== nxge_rxdma_hw_start: no dma channel allocated")); 4148 return (NXGE_ERROR); 4149 } 4150 4151 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4152 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 4153 4154 rbr_rings = rx_rbr_rings->rbr_rings; 4155 rcr_rings = rx_rcr_rings->rcr_rings; 4156 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 4157 if (rx_mbox_areas_p) { 4158 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 4159 } 4160 4161 i = channel; 4162 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4163 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 4164 ndmas, channel)); 4165 status = nxge_rxdma_start_channel(nxgep, channel, 4166 (p_rx_rbr_ring_t)rbr_rings[i], 4167 (p_rx_rcr_ring_t)rcr_rings[i], 4168 (p_rx_mbox_t)rx_mbox_p[i]); 4169 if (status != NXGE_OK) { 4170 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4171 "==> nxge_rxdma_hw_start: disable " 4172 "(status 0x%x channel %d)", status, channel)); 4173 return (status); 4174 } 4175 4176 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 4177 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4178 rx_rbr_rings, rx_rcr_rings)); 4179 4180 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4181 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 4182 4183 return (status); 4184 } 4185 4186 static void 4187 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 4188 { 4189 p_rx_rbr_rings_t rx_rbr_rings; 4190 p_rx_rcr_rings_t rx_rcr_rings; 4191 4192 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 4193 4194 rx_rbr_rings = nxgep->rx_rbr_rings; 4195 rx_rcr_rings = nxgep->rx_rcr_rings; 4196 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4197 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4198 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 4199 return; 4200 } 4201 4202 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4203 "==> nxge_rxdma_hw_stop(channel %d)", 4204 channel)); 4205 (void) nxge_rxdma_stop_channel(nxgep, channel); 4206 4207 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 4208 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4209 rx_rbr_rings, rx_rcr_rings)); 4210 4211 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 4212 } 4213 4214 4215 static nxge_status_t 4216 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 4217 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 4218 4219 { 4220 npi_handle_t handle; 4221 npi_status_t rs = NPI_SUCCESS; 4222 rx_dma_ctl_stat_t cs; 4223 rx_dma_ent_msk_t ent_mask; 4224 nxge_status_t status = NXGE_OK; 4225 4226 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 4227 4228 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4229 4230 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 4231 "npi 
handle addr $%p acc $%p", 4232 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4233 4234 /* Reset RXDMA channel, but not if you're a guest. */ 4235 if (!isLDOMguest(nxgep)) { 4236 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4237 if (rs != NPI_SUCCESS) { 4238 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4239 "==> nxge_init_fzc_rdc: " 4240 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4241 channel, rs)); 4242 return (NXGE_ERROR | rs); 4243 } 4244 4245 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4246 "==> nxge_rxdma_start_channel: reset done: channel %d", 4247 channel)); 4248 } 4249 4250 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4251 if (isLDOMguest(nxgep)) 4252 (void) nxge_rdc_lp_conf(nxgep, channel); 4253 #endif 4254 4255 /* 4256 * Initialize the RXDMA channel specific FZC control 4257 * configurations. These FZC registers are pertaining 4258 * to each RX channel (logical pages). 4259 */ 4260 if (!isLDOMguest(nxgep)) { 4261 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4262 if (status != NXGE_OK) { 4263 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4264 "==> nxge_rxdma_start_channel: " 4265 "init fzc rxdma failed (0x%08x channel %d)", 4266 status, channel)); 4267 return (status); 4268 } 4269 4270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4271 "==> nxge_rxdma_start_channel: fzc done")); 4272 } 4273 4274 /* Set up the interrupt event masks. */ 4275 ent_mask.value = 0; 4276 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 4277 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4278 &ent_mask); 4279 if (rs != NPI_SUCCESS) { 4280 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4281 "==> nxge_rxdma_start_channel: " 4282 "init rxdma event masks failed " 4283 "(0x%08x channel %d)", 4284 status, channel)); 4285 return (NXGE_ERROR | rs); 4286 } 4287 4288 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4289 "==> nxge_rxdma_start_channel: " 4290 "event done: channel %d (mask 0x%016llx)", 4291 channel, ent_mask.value)); 4292 4293 /* Initialize the receive DMA control and status register */ 4294 cs.value = 0; 4295 cs.bits.hdw.mex = 1; 4296 cs.bits.hdw.rcrthres = 1; 4297 cs.bits.hdw.rcrto = 1; 4298 cs.bits.hdw.rbr_empty = 1; 4299 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4300 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4301 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 4302 if (status != NXGE_OK) { 4303 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4304 "==> nxge_rxdma_start_channel: " 4305 "init rxdma control register failed (0x%08x channel %d", 4306 status, channel)); 4307 return (status); 4308 } 4309 4310 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4311 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4312 4313 /* 4314 * Load RXDMA descriptors, buffers, mailbox, 4315 * initialise the receive DMA channels and 4316 * enable each DMA channel. 4317 */ 4318 status = nxge_enable_rxdma_channel(nxgep, 4319 channel, rbr_p, rcr_p, mbox_p); 4320 4321 if (status != NXGE_OK) { 4322 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4323 " nxge_rxdma_start_channel: " 4324 " enable rxdma failed (0x%08x channel %d)", 4325 status, channel)); 4326 return (status); 4327 } 4328 4329 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4330 "==> nxge_rxdma_start_channel: enabled channel %d")); 4331 4332 if (isLDOMguest(nxgep)) { 4333 /* Add interrupt handler for this channel. 
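 * Guest (LDOMs) domains register their own RX DMA interrupt handlers
 * through the Hybrid I/O support code (nxge_hio_intr_add()); the
 * service domain sets up its interrupts elsewhere.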
*/ 4334 status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel); 4335 if (status != NXGE_OK) { 4336 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4337 " nxge_rxdma_start_channel: " 4338 " nxge_hio_intr_add failed (0x%08x channel %d)", 4339 status, channel)); 4340 return (status); 4341 } 4342 } 4343 4344 ent_mask.value = 0; 4345 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4346 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4347 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4348 &ent_mask); 4349 if (rs != NPI_SUCCESS) { 4350 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4351 "==> nxge_rxdma_start_channel: " 4352 "init rxdma event masks failed (0x%08x channel %d)", 4353 status, channel)); 4354 return (NXGE_ERROR | rs); 4355 } 4356 4357 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4358 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4359 4360 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4361 4362 return (NXGE_OK); 4363 } 4364 4365 static nxge_status_t 4366 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4367 { 4368 npi_handle_t handle; 4369 npi_status_t rs = NPI_SUCCESS; 4370 rx_dma_ctl_stat_t cs; 4371 rx_dma_ent_msk_t ent_mask; 4372 nxge_status_t status = NXGE_OK; 4373 4374 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4375 4376 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4377 4378 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4379 "npi handle addr $%p acc $%p", 4380 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4381 4382 if (!isLDOMguest(nxgep)) { 4383 /* 4384 * Stop RxMAC = A.9.2.6 4385 */ 4386 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4387 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4388 "nxge_rxdma_stop_channel: " 4389 "Failed to disable RxMAC")); 4390 } 4391 4392 /* 4393 * Drain IPP Port = A.9.3.6 4394 */ 4395 (void) nxge_ipp_drain(nxgep); 4396 } 4397 4398 /* Reset RXDMA channel */ 4399 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4400 if (rs != NPI_SUCCESS) { 4401 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4402 " nxge_rxdma_stop_channel: " 4403 " reset rxdma failed (0x%08x channel %d)", 4404 rs, channel)); 4405 return (NXGE_ERROR | rs); 4406 } 4407 4408 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4409 "==> nxge_rxdma_stop_channel: reset done")); 4410 4411 /* Set up the interrupt event masks. */ 4412 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4413 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4414 &ent_mask); 4415 if (rs != NPI_SUCCESS) { 4416 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4417 "==> nxge_rxdma_stop_channel: " 4418 "set rxdma event masks failed (0x%08x channel %d)", 4419 rs, channel)); 4420 return (NXGE_ERROR | rs); 4421 } 4422 4423 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4424 "==> nxge_rxdma_stop_channel: event done")); 4425 4426 /* 4427 * Initialize the receive DMA control and status register 4428 */ 4429 cs.value = 0; 4430 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4431 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4432 " to default (all 0s) 0x%08x", cs.value)); 4433 if (status != NXGE_OK) { 4434 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4435 " nxge_rxdma_stop_channel: init rxdma" 4436 " control register failed (0x%08x channel %d", 4437 status, channel)); 4438 return (status); 4439 } 4440 4441 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4442 "==> nxge_rxdma_stop_channel: control done")); 4443 4444 /* 4445 * Make sure channel is disabled. 
4446 */ 4447 status = nxge_disable_rxdma_channel(nxgep, channel); 4448 4449 if (status != NXGE_OK) { 4450 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4451 " nxge_rxdma_stop_channel: " 4452 " init enable rxdma failed (0x%08x channel %d)", 4453 status, channel)); 4454 return (status); 4455 } 4456 4457 if (!isLDOMguest(nxgep)) { 4458 /* 4459 * Enable RxMAC = A.9.2.10 4460 */ 4461 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4462 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4463 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4464 } 4465 } 4466 4467 NXGE_DEBUG_MSG((nxgep, 4468 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4469 4470 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4471 4472 return (NXGE_OK); 4473 } 4474 4475 nxge_status_t 4476 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4477 { 4478 npi_handle_t handle; 4479 p_nxge_rdc_sys_stats_t statsp; 4480 rx_ctl_dat_fifo_stat_t stat; 4481 uint32_t zcp_err_status; 4482 uint32_t ipp_err_status; 4483 nxge_status_t status = NXGE_OK; 4484 npi_status_t rs = NPI_SUCCESS; 4485 boolean_t my_err = B_FALSE; 4486 4487 handle = nxgep->npi_handle; 4488 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4489 4490 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4491 4492 if (rs != NPI_SUCCESS) 4493 return (NXGE_ERROR | rs); 4494 4495 if (stat.bits.ldw.id_mismatch) { 4496 statsp->id_mismatch++; 4497 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, 0, 4498 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4499 /* Global fatal error encountered */ 4500 } 4501 4502 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4503 switch (nxgep->mac.portnum) { 4504 case 0: 4505 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4506 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4507 my_err = B_TRUE; 4508 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4509 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4510 } 4511 break; 4512 case 1: 4513 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4514 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4515 my_err = B_TRUE; 4516 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4517 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4518 } 4519 break; 4520 case 2: 4521 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4522 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4523 my_err = B_TRUE; 4524 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4525 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4526 } 4527 break; 4528 case 3: 4529 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4530 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4531 my_err = B_TRUE; 4532 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4533 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4534 } 4535 break; 4536 default: 4537 return (NXGE_ERROR); 4538 } 4539 } 4540 4541 if (my_err) { 4542 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4543 zcp_err_status); 4544 if (status != NXGE_OK) 4545 return (status); 4546 } 4547 4548 return (NXGE_OK); 4549 } 4550 4551 static nxge_status_t 4552 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4553 uint32_t zcp_status) 4554 { 4555 boolean_t rxport_fatal = B_FALSE; 4556 p_nxge_rdc_sys_stats_t statsp; 4557 nxge_status_t status = NXGE_OK; 4558 uint8_t portn; 4559 4560 portn = nxgep->mac.portnum; 4561 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4562 4563 if (ipp_status & (0x1 << portn)) { 4564 statsp->ipp_eop_err++; 4565 NXGE_FM_REPORT_ERROR(nxgep, portn, 0, 4566 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4567 rxport_fatal = B_TRUE; 4568 } 4569 4570 if (zcp_status & (0x1 << 
portn)) { 4571 statsp->zcp_eop_err++; 4572 NXGE_FM_REPORT_ERROR(nxgep, portn, 0, 4573 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4574 rxport_fatal = B_TRUE; 4575 } 4576 4577 if (rxport_fatal) { 4578 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4579 " nxge_rxdma_handle_port_error: " 4580 " fatal error on Port #%d\n", 4581 portn)); 4582 status = nxge_rx_port_fatal_err_recover(nxgep); 4583 if (status == NXGE_OK) { 4584 FM_SERVICE_RESTORED(nxgep); 4585 } 4586 } 4587 4588 return (status); 4589 } 4590 4591 static nxge_status_t 4592 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4593 { 4594 npi_handle_t handle; 4595 npi_status_t rs = NPI_SUCCESS; 4596 nxge_status_t status = NXGE_OK; 4597 p_rx_rbr_ring_t rbrp; 4598 p_rx_rcr_ring_t rcrp; 4599 p_rx_mbox_t mboxp; 4600 rx_dma_ent_msk_t ent_mask; 4601 p_nxge_dma_common_t dmap; 4602 uint32_t ref_cnt; 4603 p_rx_msg_t rx_msg_p; 4604 int i; 4605 uint32_t nxge_port_rcr_size; 4606 4607 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4608 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4609 "Recovering from RxDMAChannel#%d error...", channel)); 4610 4611 /* 4612 * Stop the dma channel waits for the stop done. 4613 * If the stop done bit is not set, then create 4614 * an error. 4615 */ 4616 4617 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4618 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4619 4620 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel]; 4621 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel]; 4622 4623 MUTEX_ENTER(&rbrp->lock); 4624 MUTEX_ENTER(&rbrp->post_lock); 4625 4626 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4627 4628 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4629 if (rs != NPI_SUCCESS) { 4630 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4631 "nxge_disable_rxdma_channel:failed")); 4632 goto fail; 4633 } 4634 4635 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4636 4637 /* Disable interrupt */ 4638 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4639 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4640 if (rs != NPI_SUCCESS) { 4641 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4642 "nxge_rxdma_stop_channel: " 4643 "set rxdma event masks failed (channel %d)", 4644 channel)); 4645 } 4646 4647 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4648 4649 /* Reset RXDMA channel */ 4650 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4651 if (rs != NPI_SUCCESS) { 4652 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4653 "nxge_rxdma_fatal_err_recover: " 4654 " reset rxdma failed (channel %d)", channel)); 4655 goto fail; 4656 } 4657 4658 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4659 4660 mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 4661 4662 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4663 rbrp->rbr_rd_index = 0; 4664 4665 rcrp->comp_rd_index = 0; 4666 rcrp->comp_wt_index = 0; 4667 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4668 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4669 #if defined(__i386) 4670 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4671 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4672 #else 4673 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4674 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4675 #endif 4676 4677 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4678 (nxge_port_rcr_size - 1); 4679 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4680 (nxge_port_rcr_size - 1); 4681 4682 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4683 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4684 4685 
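	/*
	 * Scan the RBR message ring and decide, per buffer, what can be
	 * re-posted.  A buffer whose ref_cnt is already back to 1 needs
	 * no attention.  A buffer that still has extra references but
	 * whose cur_usage_cnt has reached max_usage_cnt has been fully
	 * consumed, so it is marked free and its counters are reset so
	 * that nxge_freeb() can re-post it.  Any other buffer is still
	 * loaned out to the stack and is left alone; only a diagnostic
	 * message is logged for it.
	 */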
cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4686 4687 for (i = 0; i < rbrp->rbr_max_size; i++) { 4688 rx_msg_p = rbrp->rx_msg_ring[i]; 4689 ref_cnt = rx_msg_p->ref_cnt; 4690 if (ref_cnt != 1) { 4691 if (rx_msg_p->cur_usage_cnt != 4692 rx_msg_p->max_usage_cnt) { 4693 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4694 "buf[%d]: cur_usage_cnt = %d " 4695 "max_usage_cnt = %d\n", i, 4696 rx_msg_p->cur_usage_cnt, 4697 rx_msg_p->max_usage_cnt)); 4698 } else { 4699 /* Buffer can be re-posted */ 4700 rx_msg_p->free = B_TRUE; 4701 rx_msg_p->cur_usage_cnt = 0; 4702 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4703 rx_msg_p->pkt_buf_size = 0; 4704 } 4705 } 4706 } 4707 4708 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4709 4710 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4711 if (status != NXGE_OK) { 4712 goto fail; 4713 } 4714 4715 MUTEX_EXIT(&rbrp->post_lock); 4716 MUTEX_EXIT(&rbrp->lock); 4717 4718 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4719 "Recovery Successful, RxDMAChannel#%d Restored", 4720 channel)); 4721 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4722 return (NXGE_OK); 4723 4724 fail: 4725 MUTEX_EXIT(&rbrp->post_lock); 4726 MUTEX_EXIT(&rbrp->lock); 4727 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4728 return (NXGE_ERROR | rs); 4729 } 4730 4731 nxge_status_t 4732 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4733 { 4734 nxge_grp_set_t *set = &nxgep->rx_set; 4735 nxge_status_t status = NXGE_OK; 4736 p_rx_rcr_ring_t rcrp; 4737 int rdc; 4738 4739 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4740 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4741 "Recovering from RxPort error...")); 4742 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4743 4744 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4745 goto fail; 4746 4747 NXGE_DELAY(1000); 4748 4749 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4750 4751 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4752 if ((1 << rdc) & set->owned.map) { 4753 rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc]; 4754 if (rcrp != NULL) { 4755 MUTEX_ENTER(&rcrp->lock); 4756 if (nxge_rxdma_fatal_err_recover(nxgep, 4757 rdc) != NXGE_OK) { 4758 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4759 "Could not recover " 4760 "channel %d", rdc)); 4761 } 4762 MUTEX_EXIT(&rcrp->lock); 4763 } 4764 } 4765 } 4766 4767 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4768 4769 /* Reset IPP */ 4770 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4771 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4772 "nxge_rx_port_fatal_err_recover: " 4773 "Failed to reset IPP")); 4774 goto fail; 4775 } 4776 4777 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4778 4779 /* Reset RxMAC */ 4780 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4782 "nxge_rx_port_fatal_err_recover: " 4783 "Failed to reset RxMAC")); 4784 goto fail; 4785 } 4786 4787 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4788 4789 /* Re-Initialize IPP */ 4790 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4791 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4792 "nxge_rx_port_fatal_err_recover: " 4793 "Failed to init IPP")); 4794 goto fail; 4795 } 4796 4797 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4798 4799 /* Re-Initialize RxMAC */ 4800 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4801 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4802 "nxge_rx_port_fatal_err_recover: " 4803 "Failed to reset RxMAC")); 4804 goto fail; 4805 } 4806 4807 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4808 4809 
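	/*
	 * At this point every RDC owned by this port has been taken
	 * through nxge_rxdma_fatal_err_recover(), and both the IPP and
	 * the RxMAC have been reset and re-initialized.  The final step
	 * is to turn the RxMAC back on so traffic can flow again.
	 */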
/* Re-enable RxMAC */ 4810 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4811 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4812 "nxge_rx_port_fatal_err_recover: " 4813 "Failed to enable RxMAC")); 4814 goto fail; 4815 } 4816 4817 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4818 "Recovery Successful, RxPort Restored")); 4819 4820 return (NXGE_OK); 4821 fail: 4822 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4823 return (status); 4824 } 4825 4826 void 4827 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4828 { 4829 rx_dma_ctl_stat_t cs; 4830 rx_ctl_dat_fifo_stat_t cdfs; 4831 4832 switch (err_id) { 4833 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4834 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4835 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4836 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4837 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4838 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4839 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4840 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4841 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4842 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4843 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4844 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4845 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4846 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4847 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4848 chan, &cs.value); 4849 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4850 cs.bits.hdw.rcr_ack_err = 1; 4851 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4852 cs.bits.hdw.dc_fifo_err = 1; 4853 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4854 cs.bits.hdw.rcr_sha_par = 1; 4855 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4856 cs.bits.hdw.rbr_pre_par = 1; 4857 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4858 cs.bits.hdw.rbr_tmout = 1; 4859 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4860 cs.bits.hdw.rsp_cnt_err = 1; 4861 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4862 cs.bits.hdw.byte_en_bus = 1; 4863 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4864 cs.bits.hdw.rsp_dat_err = 1; 4865 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4866 cs.bits.hdw.config_err = 1; 4867 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4868 cs.bits.hdw.rcrincon = 1; 4869 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4870 cs.bits.hdw.rcrfull = 1; 4871 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4872 cs.bits.hdw.rbrfull = 1; 4873 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4874 cs.bits.hdw.rbrlogpage = 1; 4875 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4876 cs.bits.hdw.cfiglogpage = 1; 4877 #if defined(__i386) 4878 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4879 cs.value); 4880 #else 4881 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4882 cs.value); 4883 #endif 4884 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4885 chan, cs.value); 4886 break; 4887 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4888 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4889 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4890 cdfs.value = 0; 4891 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4892 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4893 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4894 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4895 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4896 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4897 #if defined(__i386) 4898 cmn_err(CE_NOTE, 4899 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4900 cdfs.value); 4901 #else 4902 cmn_err(CE_NOTE, 4903 
"!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4904 cdfs.value); 4905 #endif 4906 NXGE_REG_WR64(nxgep->npi_handle, 4907 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4908 break; 4909 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4910 break; 4911 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4912 break; 4913 } 4914 } 4915 4916 static void 4917 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4918 { 4919 rxring_info_t *ring_info; 4920 int index; 4921 uint32_t chunk_size; 4922 uint64_t kaddr; 4923 uint_t num_blocks; 4924 4925 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4926 4927 if (rbr_p == NULL) { 4928 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4929 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4930 return; 4931 } 4932 4933 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4934 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4935 "<== nxge_rxdma_databuf_free: DDI")); 4936 return; 4937 } 4938 4939 ring_info = rbr_p->ring_info; 4940 if (ring_info == NULL) { 4941 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4942 "==> nxge_rxdma_databuf_free: NULL ring info")); 4943 return; 4944 } 4945 num_blocks = rbr_p->num_blocks; 4946 for (index = 0; index < num_blocks; index++) { 4947 kaddr = ring_info->buffer[index].kaddr; 4948 chunk_size = ring_info->buffer[index].buf_size; 4949 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4950 "==> nxge_rxdma_databuf_free: free chunk %d " 4951 "kaddrp $%p chunk size %d", 4952 index, kaddr, chunk_size)); 4953 if (kaddr == 0) 4954 continue; 4955 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4956 ring_info->buffer[index].kaddr = 0; 4957 } 4958 4959 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4960 } 4961 4962 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4963 extern void contig_mem_free(void *, size_t); 4964 #endif 4965 4966 void 4967 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4968 { 4969 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4970 4971 if (kaddr == 0 || !buf_size) { 4972 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4973 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4974 kaddr, buf_size)); 4975 return; 4976 } 4977 4978 switch (alloc_type) { 4979 case KMEM_ALLOC: 4980 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4981 "==> nxge_free_buf: freeing kmem $%p size %d", 4982 kaddr, buf_size)); 4983 #if defined(__i386) 4984 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4985 #else 4986 KMEM_FREE((void *)kaddr, buf_size); 4987 #endif 4988 break; 4989 4990 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4991 case CONTIG_MEM_ALLOC: 4992 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4993 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4994 kaddr, buf_size)); 4995 contig_mem_free((void *)kaddr, buf_size); 4996 break; 4997 #endif 4998 4999 default: 5000 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5001 "<== nxge_free_buf: unsupported alloc type %d", 5002 alloc_type)); 5003 return; 5004 } 5005 5006 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5007 } 5008