1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/nxge/nxge_impl.h> 27 #include <sys/nxge/nxge_rxdma.h> 28 #include <sys/nxge/nxge_hio.h> 29 30 #if !defined(_BIG_ENDIAN) 31 #include <npi_rx_rd32.h> 32 #endif 33 #include <npi_rx_rd64.h> 34 #include <npi_rx_wr64.h> 35 36 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 37 (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 38 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 39 (rdc + nxgep->pt_config.hw_config.start_rdc) 40 41 /* 42 * Globals: tunable parameters (/etc/system or adb) 43 * 44 */ 45 extern uint32_t nxge_rbr_size; 46 extern uint32_t nxge_rcr_size; 47 extern uint32_t nxge_rbr_spare_size; 48 49 extern uint32_t nxge_mblks_pending; 50 51 /* 52 * Tunable to reduce the amount of time spent in the 53 * ISR doing Rx Processing. 54 */ 55 extern uint32_t nxge_max_rx_pkts; 56 boolean_t nxge_jumbo_enable; 57 58 /* 59 * Tunables to manage the receive buffer blocks. 60 * 61 * nxge_rx_threshold_hi: copy all buffers. 62 * nxge_rx_bcopy_size_type: receive buffer block size type. 63 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
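 *
 * Illustrative note (semantics inferred from the names above and from the
 * bcopy/loan logic later in this file, not stated here): the two thresholds
 * decide when received data is bcopy'd into a fresh mblk instead of the DMA
 * buffer being loaned upstream. As a purely hypothetical tuning sketch, an
 * administrator might override them from /etc/system, e.g.
 *	set nxge:nxge_rx_threshold_hi = 2
 *	set nxge:nxge_rx_threshold_lo = 1
 * The legal values are the nxge_rxbuf_threshold_t enum encodings; check that
 * definition before tuning.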
64 */ 65 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 66 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 67 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 68 69 extern uint32_t nxge_cksum_offload; 70 71 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 72 static void nxge_unmap_rxdma(p_nxge_t, int); 73 74 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 75 76 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 77 static void nxge_rxdma_hw_stop(p_nxge_t, int); 78 79 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 80 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 81 uint32_t, 82 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 83 p_rx_mbox_t *); 84 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 85 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 86 87 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 88 uint16_t, 89 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 90 p_rx_rcr_ring_t *, p_rx_mbox_t *); 91 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 92 p_rx_rcr_ring_t, p_rx_mbox_t); 93 94 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 95 uint16_t, 96 p_nxge_dma_common_t *, 97 p_rx_rbr_ring_t *, uint32_t); 98 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 99 p_rx_rbr_ring_t); 100 101 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 102 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 103 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 104 105 static mblk_t * 106 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 107 108 static void nxge_receive_packet(p_nxge_t, 109 p_rx_rcr_ring_t, 110 p_rcr_entry_t, 111 boolean_t *, 112 mblk_t **, mblk_t **); 113 114 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 115 116 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 117 static void nxge_freeb(p_rx_msg_t); 118 static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 119 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 120 121 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 122 uint32_t, uint32_t); 123 124 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 125 p_rx_rbr_ring_t); 126 127 128 static nxge_status_t 129 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 130 131 nxge_status_t 132 nxge_rx_port_fatal_err_recover(p_nxge_t); 133 134 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 135 136 nxge_status_t 137 nxge_init_rxdma_channels(p_nxge_t nxgep) 138 { 139 nxge_grp_set_t *set = &nxgep->rx_set; 140 int i, count, channel; 141 nxge_grp_t *group; 142 dc_map_t map; 143 int dev_gindex; 144 145 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 146 147 if (!isLDOMguest(nxgep)) { 148 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 149 cmn_err(CE_NOTE, "hw_start_common"); 150 return (NXGE_ERROR); 151 } 152 } 153 154 /* 155 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 156 * We only have 8 hardware RDC tables, but we may have 157 * up to 16 logical (software-defined) groups of RDCS, 158 * if we make use of layer 3 & 4 hardware classification. 
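 *
 * Sketch of the mapping done by the loop below (numbers illustrative only):
 * with def_mac_rxdma_grpid == 2, logical group 3 resolves to device group
 * index 5, and rdc_grps[5].map is then walked bit by bit, calling
 * nxge_grp_dc_add() for every RDC that is set in the mask.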
159 */ 160 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 161 if ((1 << i) & set->lg.map) { 162 group = set->group[i]; 163 dev_gindex = 164 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 165 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 166 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 167 if ((1 << channel) & map) { 168 if ((nxge_grp_dc_add(nxgep, 169 group, VP_BOUND_RX, channel))) 170 goto init_rxdma_channels_exit; 171 } 172 } 173 } 174 if (++count == set->lg.count) 175 break; 176 } 177 178 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 179 return (NXGE_OK); 180 181 init_rxdma_channels_exit: 182 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 183 if ((1 << i) & set->lg.map) { 184 group = set->group[i]; 185 dev_gindex = 186 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 187 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 188 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 189 if ((1 << channel) & map) { 190 nxge_grp_dc_remove(nxgep, 191 VP_BOUND_RX, channel); 192 } 193 } 194 } 195 if (++count == set->lg.count) 196 break; 197 } 198 199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 200 return (NXGE_ERROR); 201 } 202 203 nxge_status_t 204 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 205 { 206 nxge_status_t status; 207 208 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 209 210 status = nxge_map_rxdma(nxge, channel); 211 if (status != NXGE_OK) { 212 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 213 "<== nxge_init_rxdma: status 0x%x", status)); 214 return (status); 215 } 216 217 #if defined(sun4v) 218 if (isLDOMguest(nxge)) { 219 /* set rcr_ring */ 220 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 221 222 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 223 if (status != NXGE_OK) { 224 nxge_unmap_rxdma(nxge, channel); 225 return (status); 226 } 227 } 228 #endif 229 230 status = nxge_rxdma_hw_start(nxge, channel); 231 if (status != NXGE_OK) { 232 nxge_unmap_rxdma(nxge, channel); 233 } 234 235 if (!nxge->statsp->rdc_ksp[channel]) 236 nxge_setup_rdc_kstats(nxge, channel); 237 238 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 239 "<== nxge_init_rxdma_channel: status 0x%x", status)); 240 241 return (status); 242 } 243 244 void 245 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 246 { 247 nxge_grp_set_t *set = &nxgep->rx_set; 248 int rdc; 249 250 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 251 252 if (set->owned.map == 0) { 253 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 254 "nxge_uninit_rxdma_channels: no channels")); 255 return; 256 } 257 258 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 259 if ((1 << rdc) & set->owned.map) { 260 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 261 } 262 } 263 264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 265 } 266 267 void 268 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 269 { 270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 271 272 if (nxgep->statsp->rdc_ksp[channel]) { 273 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 274 nxgep->statsp->rdc_ksp[channel] = 0; 275 } 276 277 nxge_rxdma_hw_stop(nxgep, channel); 278 nxge_unmap_rxdma(nxgep, channel); 279 280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 281 } 282 283 nxge_status_t 284 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 285 { 286 npi_handle_t handle; 287 npi_status_t rs = NPI_SUCCESS; 288 nxge_status_t status = NXGE_OK; 289 290 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 
291 292 handle = NXGE_DEV_NPI_HANDLE(nxgep); 293 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 294 295 if (rs != NPI_SUCCESS) { 296 status = NXGE_ERROR | rs; 297 } 298 299 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 300 301 return (status); 302 } 303 304 void 305 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 306 { 307 nxge_grp_set_t *set = &nxgep->rx_set; 308 int rdc; 309 310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 311 312 if (!isLDOMguest(nxgep)) { 313 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 314 (void) npi_rxdma_dump_fzc_regs(handle); 315 } 316 317 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 318 NXGE_DEBUG_MSG((nxgep, TX_CTL, 319 "nxge_rxdma_regs_dump_channels: " 320 "NULL ring pointer(s)")); 321 return; 322 } 323 324 if (set->owned.map == 0) { 325 NXGE_DEBUG_MSG((nxgep, RX_CTL, 326 "nxge_rxdma_regs_dump_channels: no channels")); 327 return; 328 } 329 330 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 331 if ((1 << rdc) & set->owned.map) { 332 rx_rbr_ring_t *ring = 333 nxgep->rx_rbr_rings->rbr_rings[rdc]; 334 if (ring) { 335 (void) nxge_dump_rxdma_channel(nxgep, rdc); 336 } 337 } 338 } 339 340 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 341 } 342 343 nxge_status_t 344 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 345 { 346 npi_handle_t handle; 347 npi_status_t rs = NPI_SUCCESS; 348 nxge_status_t status = NXGE_OK; 349 350 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 351 352 handle = NXGE_DEV_NPI_HANDLE(nxgep); 353 rs = npi_rxdma_dump_rdc_regs(handle, channel); 354 355 if (rs != NPI_SUCCESS) { 356 status = NXGE_ERROR | rs; 357 } 358 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 359 return (status); 360 } 361 362 nxge_status_t 363 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 364 p_rx_dma_ent_msk_t mask_p) 365 { 366 npi_handle_t handle; 367 npi_status_t rs = NPI_SUCCESS; 368 nxge_status_t status = NXGE_OK; 369 370 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 371 "<== nxge_init_rxdma_channel_event_mask")); 372 373 handle = NXGE_DEV_NPI_HANDLE(nxgep); 374 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 375 if (rs != NPI_SUCCESS) { 376 status = NXGE_ERROR | rs; 377 } 378 379 return (status); 380 } 381 382 nxge_status_t 383 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 384 p_rx_dma_ctl_stat_t cs_p) 385 { 386 npi_handle_t handle; 387 npi_status_t rs = NPI_SUCCESS; 388 nxge_status_t status = NXGE_OK; 389 390 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 391 "<== nxge_init_rxdma_channel_cntl_stat")); 392 393 handle = NXGE_DEV_NPI_HANDLE(nxgep); 394 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 395 396 if (rs != NPI_SUCCESS) { 397 status = NXGE_ERROR | rs; 398 } 399 400 return (status); 401 } 402 403 /* 404 * nxge_rxdma_cfg_rdcgrp_default_rdc 405 * 406 * Set the default RDC for an RDC Group (Table) 407 * 408 * Arguments: 409 * nxgep 410 * rdcgrp The group to modify 411 * rdc The new default RDC. 
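 *
 *	Example (illustrative values only): with def_mac_rxdma_grpid == 4
 *	and start_rdc == 8, a call with rdcgrp == 1 and rdc == 2 programs
 *	hardware table 5 with default RDC 10, via the
 *	NXGE_ACTUAL_RDCGRP()/NXGE_ACTUAL_RDC() macros defined above.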
412 * 413 * Notes: 414 * 415 * NPI/NXGE function calls: 416 * npi_rxdma_cfg_rdc_table_default_rdc() 417 * 418 * Registers accessed: 419 * RDC_TBL_REG: FZC_ZCP + 0x10000 420 * 421 * Context: 422 * Service domain 423 */ 424 nxge_status_t 425 nxge_rxdma_cfg_rdcgrp_default_rdc( 426 p_nxge_t nxgep, 427 uint8_t rdcgrp, 428 uint8_t rdc) 429 { 430 npi_handle_t handle; 431 npi_status_t rs = NPI_SUCCESS; 432 p_nxge_dma_pt_cfg_t p_dma_cfgp; 433 p_nxge_rdc_grp_t rdc_grp_p; 434 uint8_t actual_rdcgrp, actual_rdc; 435 436 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 437 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 438 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 439 440 handle = NXGE_DEV_NPI_HANDLE(nxgep); 441 442 /* 443 * This has to be rewritten. Do we even allow this anymore? 444 */ 445 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 446 RDC_MAP_IN(rdc_grp_p->map, rdc); 447 rdc_grp_p->def_rdc = rdc; 448 449 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 450 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 451 452 rs = npi_rxdma_cfg_rdc_table_default_rdc( 453 handle, actual_rdcgrp, actual_rdc); 454 455 if (rs != NPI_SUCCESS) { 456 return (NXGE_ERROR | rs); 457 } 458 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 459 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 460 return (NXGE_OK); 461 } 462 463 nxge_status_t 464 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 465 { 466 npi_handle_t handle; 467 468 uint8_t actual_rdc; 469 npi_status_t rs = NPI_SUCCESS; 470 471 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 472 " ==> nxge_rxdma_cfg_port_default_rdc")); 473 474 handle = NXGE_DEV_NPI_HANDLE(nxgep); 475 actual_rdc = rdc; /* XXX Hack! */ 476 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 477 478 479 if (rs != NPI_SUCCESS) { 480 return (NXGE_ERROR | rs); 481 } 482 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 483 " <== nxge_rxdma_cfg_port_default_rdc")); 484 485 return (NXGE_OK); 486 } 487 488 nxge_status_t 489 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 490 uint16_t pkts) 491 { 492 npi_status_t rs = NPI_SUCCESS; 493 npi_handle_t handle; 494 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 495 " ==> nxge_rxdma_cfg_rcr_threshold")); 496 handle = NXGE_DEV_NPI_HANDLE(nxgep); 497 498 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 499 500 if (rs != NPI_SUCCESS) { 501 return (NXGE_ERROR | rs); 502 } 503 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 504 return (NXGE_OK); 505 } 506 507 nxge_status_t 508 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 509 uint16_t tout, uint8_t enable) 510 { 511 npi_status_t rs = NPI_SUCCESS; 512 npi_handle_t handle; 513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 514 handle = NXGE_DEV_NPI_HANDLE(nxgep); 515 if (enable == 0) { 516 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 517 } else { 518 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 519 tout); 520 } 521 522 if (rs != NPI_SUCCESS) { 523 return (NXGE_ERROR | rs); 524 } 525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 526 return (NXGE_OK); 527 } 528 529 nxge_status_t 530 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 531 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 532 { 533 npi_handle_t handle; 534 rdc_desc_cfg_t rdc_desc; 535 p_rcrcfig_b_t cfgb_p; 536 npi_status_t rs = NPI_SUCCESS; 537 538 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 539 handle = NXGE_DEV_NPI_HANDLE(nxgep); 540 /* 541 * Use configuration data composed at init time. 
542 * Write to hardware the receive ring configurations. 543 */ 544 rdc_desc.mbox_enable = 1; 545 rdc_desc.mbox_addr = mbox_p->mbox_addr; 546 NXGE_DEBUG_MSG((nxgep, RX_CTL, 547 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 548 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 549 550 rdc_desc.rbr_len = rbr_p->rbb_max; 551 rdc_desc.rbr_addr = rbr_p->rbr_addr; 552 553 switch (nxgep->rx_bksize_code) { 554 case RBR_BKSIZE_4K: 555 rdc_desc.page_size = SIZE_4KB; 556 break; 557 case RBR_BKSIZE_8K: 558 rdc_desc.page_size = SIZE_8KB; 559 break; 560 case RBR_BKSIZE_16K: 561 rdc_desc.page_size = SIZE_16KB; 562 break; 563 case RBR_BKSIZE_32K: 564 rdc_desc.page_size = SIZE_32KB; 565 break; 566 } 567 568 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 569 rdc_desc.valid0 = 1; 570 571 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 572 rdc_desc.valid1 = 1; 573 574 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 575 rdc_desc.valid2 = 1; 576 577 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 578 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 579 580 rdc_desc.rcr_len = rcr_p->comp_size; 581 rdc_desc.rcr_addr = rcr_p->rcr_addr; 582 583 cfgb_p = &(rcr_p->rcr_cfgb); 584 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 585 /* For now, disable this timeout in a guest domain. */ 586 if (isLDOMguest(nxgep)) { 587 rdc_desc.rcr_timeout = 0; 588 rdc_desc.rcr_timeout_enable = 0; 589 } else { 590 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 591 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 592 } 593 594 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 595 "rbr_len qlen %d pagesize code %d rcr_len %d", 596 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 597 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 598 "size 0 %d size 1 %d size 2 %d", 599 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 600 rbr_p->npi_pkt_buf_size2)); 601 602 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 603 if (rs != NPI_SUCCESS) { 604 return (NXGE_ERROR | rs); 605 } 606 607 /* 608 * Enable the timeout and threshold. 609 */ 610 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 611 rdc_desc.rcr_threshold); 612 if (rs != NPI_SUCCESS) { 613 return (NXGE_ERROR | rs); 614 } 615 616 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 617 rdc_desc.rcr_timeout); 618 if (rs != NPI_SUCCESS) { 619 return (NXGE_ERROR | rs); 620 } 621 622 /* Enable the DMA */ 623 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 624 if (rs != NPI_SUCCESS) { 625 return (NXGE_ERROR | rs); 626 } 627 628 /* Kick the DMA engine. 
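 * The kick tells the hardware how many RBR entries were just posted:
 * rbb_max here, since the ring starts out fully populated, and a single
 * entry later in nxge_post_page() when one buffer is returned.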
*/ 629 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 630 /* Clear the rbr empty bit */ 631 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 632 633 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 634 635 return (NXGE_OK); 636 } 637 638 nxge_status_t 639 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 640 { 641 npi_handle_t handle; 642 npi_status_t rs = NPI_SUCCESS; 643 644 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 645 handle = NXGE_DEV_NPI_HANDLE(nxgep); 646 647 /* disable the DMA */ 648 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 649 if (rs != NPI_SUCCESS) { 650 NXGE_DEBUG_MSG((nxgep, RX_CTL, 651 "<== nxge_disable_rxdma_channel:failed (0x%x)", 652 rs)); 653 return (NXGE_ERROR | rs); 654 } 655 656 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 657 return (NXGE_OK); 658 } 659 660 nxge_status_t 661 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 662 { 663 npi_handle_t handle; 664 nxge_status_t status = NXGE_OK; 665 666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 667 "<== nxge_init_rxdma_channel_rcrflush")); 668 669 handle = NXGE_DEV_NPI_HANDLE(nxgep); 670 npi_rxdma_rdc_rcr_flush(handle, channel); 671 672 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 673 "<== nxge_init_rxdma_channel_rcrflsh")); 674 return (status); 675 676 } 677 678 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 679 680 #define TO_LEFT -1 681 #define TO_RIGHT 1 682 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 683 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 684 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 685 #define NO_HINT 0xffffffff 686 687 /*ARGSUSED*/ 688 nxge_status_t 689 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 690 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 691 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 692 { 693 int bufsize; 694 uint64_t pktbuf_pp; 695 uint64_t dvma_addr; 696 rxring_info_t *ring_info; 697 int base_side, end_side; 698 int r_index, l_index, anchor_index; 699 int found, search_done; 700 uint32_t offset, chunk_size, block_size, page_size_mask; 701 uint32_t chunk_index, block_index, total_index; 702 int max_iterations, iteration; 703 rxbuf_index_info_t *bufinfo; 704 705 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 706 707 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 708 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 709 pkt_buf_addr_pp, 710 pktbufsz_type)); 711 #if defined(__i386) 712 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 713 #else 714 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 715 #endif 716 717 switch (pktbufsz_type) { 718 case 0: 719 bufsize = rbr_p->pkt_buf_size0; 720 break; 721 case 1: 722 bufsize = rbr_p->pkt_buf_size1; 723 break; 724 case 2: 725 bufsize = rbr_p->pkt_buf_size2; 726 break; 727 case RCR_SINGLE_BLOCK: 728 bufsize = 0; 729 anchor_index = 0; 730 break; 731 default: 732 return (NXGE_ERROR); 733 } 734 735 if (rbr_p->num_blocks == 1) { 736 anchor_index = 0; 737 ring_info = rbr_p->ring_info; 738 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 739 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 740 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 741 "buf_pp $%p btype %d anchor_index %d " 742 "bufinfo $%p", 743 pkt_buf_addr_pp, 744 pktbufsz_type, 745 anchor_index, 746 bufinfo)); 747 748 goto found_index; 749 } 750 751 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 752 "==> nxge_rxbuf_pp_to_vp: " 753 "buf_pp $%p btype %d anchor_index %d", 754 pkt_buf_addr_pp, 755 pktbufsz_type, 756 anchor_index)); 757 758 ring_info = rbr_p->ring_info; 759 found = B_FALSE; 760 bufinfo = (rxbuf_index_info_t 
*)ring_info->buffer; 761 iteration = 0; 762 max_iterations = ring_info->max_iterations; 763 /* 764 * First check if this block has been seen 765 * recently. This is indicated by a hint which 766 * is initialized when the first buffer of the block 767 * is seen. The hint is reset when the last buffer of 768 * the block has been processed. 769 * As three block sizes are supported, three hints 770 * are kept. The idea behind the hints is that once 771 * the hardware uses a block for a buffer of that 772 * size, it will use it exclusively for that size 773 * and will use it until it is exhausted. It is assumed 774 * that there would be a single block in use for a given 775 * buffer size at any given time. 776 */ 777 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 778 anchor_index = ring_info->hint[pktbufsz_type]; 779 dvma_addr = bufinfo[anchor_index].dvma_addr; 780 chunk_size = bufinfo[anchor_index].buf_size; 781 if ((pktbuf_pp >= dvma_addr) && 782 (pktbuf_pp < (dvma_addr + chunk_size))) { 783 found = B_TRUE; 784 /* 785 * Check if this is the last buffer in the block. 786 * If so, reset the hint for this size. 787 */ 788 789 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 790 ring_info->hint[pktbufsz_type] = NO_HINT; 791 } 792 } 793 794 if (found == B_FALSE) { 795 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 796 "==> nxge_rxbuf_pp_to_vp: (!found)" 797 "buf_pp $%p btype %d anchor_index %d", 798 pkt_buf_addr_pp, 799 pktbufsz_type, 800 anchor_index)); 801 802 /* 803 * This is the first buffer of the block of this 804 * size. Need to search the whole information 805 * array. 806 * The search uses a binary search 807 * algorithm. It assumes that the information is 808 * already sorted in increasing order 809 * info[0] < info[1] < info[2] ....
< info[n-1] 810 * where n is the size of the information array 811 */ 812 r_index = rbr_p->num_blocks - 1; 813 l_index = 0; 814 search_done = B_FALSE; 815 anchor_index = MID_INDEX(r_index, l_index); 816 while (search_done == B_FALSE) { 817 if ((r_index == l_index) || 818 (iteration >= max_iterations)) 819 search_done = B_TRUE; 820 end_side = TO_RIGHT; /* to the right */ 821 base_side = TO_LEFT; /* to the left */ 822 /* read the DVMA address information and sort it */ 823 dvma_addr = bufinfo[anchor_index].dvma_addr; 824 chunk_size = bufinfo[anchor_index].buf_size; 825 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 826 "==> nxge_rxbuf_pp_to_vp: (searching)" 827 "buf_pp $%p btype %d " 828 "anchor_index %d chunk_size %d dvmaaddr $%p", 829 pkt_buf_addr_pp, 830 pktbufsz_type, 831 anchor_index, 832 chunk_size, 833 dvma_addr)); 834 835 if (pktbuf_pp >= dvma_addr) 836 base_side = TO_RIGHT; /* to the right */ 837 if (pktbuf_pp < (dvma_addr + chunk_size)) 838 end_side = TO_LEFT; /* to the left */ 839 840 switch (base_side + end_side) { 841 case IN_MIDDLE: 842 /* found */ 843 found = B_TRUE; 844 search_done = B_TRUE; 845 if ((pktbuf_pp + bufsize) < 846 (dvma_addr + chunk_size)) 847 ring_info->hint[pktbufsz_type] = 848 bufinfo[anchor_index].buf_index; 849 break; 850 case BOTH_RIGHT: 851 /* not found: go to the right */ 852 l_index = anchor_index + 1; 853 anchor_index = MID_INDEX(r_index, l_index); 854 break; 855 856 case BOTH_LEFT: 857 /* not found: go to the left */ 858 r_index = anchor_index - 1; 859 anchor_index = MID_INDEX(r_index, l_index); 860 break; 861 default: /* should not come here */ 862 return (NXGE_ERROR); 863 } 864 iteration++; 865 } 866 867 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 868 "==> nxge_rxbuf_pp_to_vp: (search done)" 869 "buf_pp $%p btype %d anchor_index %d", 870 pkt_buf_addr_pp, 871 pktbufsz_type, 872 anchor_index)); 873 } 874 875 if (found == B_FALSE) { 876 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 877 "==> nxge_rxbuf_pp_to_vp: (search failed)" 878 "buf_pp $%p btype %d anchor_index %d", 879 pkt_buf_addr_pp, 880 pktbufsz_type, 881 anchor_index)); 882 return (NXGE_ERROR); 883 } 884 885 found_index: 886 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 887 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 888 "buf_pp $%p btype %d bufsize %d anchor_index %d", 889 pkt_buf_addr_pp, 890 pktbufsz_type, 891 bufsize, 892 anchor_index)); 893 894 /* index of the first block in this chunk */ 895 chunk_index = bufinfo[anchor_index].start_index; 896 dvma_addr = bufinfo[anchor_index].dvma_addr; 897 page_size_mask = ring_info->block_size_mask; 898 899 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 900 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 901 "buf_pp $%p btype %d bufsize %d " 902 "anchor_index %d chunk_index %d dvma $%p", 903 pkt_buf_addr_pp, 904 pktbufsz_type, 905 bufsize, 906 anchor_index, 907 chunk_index, 908 dvma_addr)); 909 910 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 911 block_size = rbr_p->block_size; /* System block(page) size */ 912 913 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 914 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 915 "buf_pp $%p btype %d bufsize %d " 916 "anchor_index %d chunk_index %d dvma $%p " 917 "offset %d block_size %d", 918 pkt_buf_addr_pp, 919 pktbufsz_type, 920 bufsize, 921 anchor_index, 922 chunk_index, 923 dvma_addr, 924 offset, 925 block_size)); 926 927 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 928 929 block_index = (offset / block_size); /* index within chunk */ 930 total_index = chunk_index + block_index; 931 932 933 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 934 "==> nxge_rxbuf_pp_to_vp: " 935 
"total_index %d dvma_addr $%p " 936 "offset %d block_size %d " 937 "block_index %d ", 938 total_index, dvma_addr, 939 offset, block_size, 940 block_index)); 941 #if defined(__i386) 942 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 943 (uint32_t)offset); 944 #else 945 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 946 (uint64_t)offset); 947 #endif 948 949 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 950 "==> nxge_rxbuf_pp_to_vp: " 951 "total_index %d dvma_addr $%p " 952 "offset %d block_size %d " 953 "block_index %d " 954 "*pkt_buf_addr_p $%p", 955 total_index, dvma_addr, 956 offset, block_size, 957 block_index, 958 *pkt_buf_addr_p)); 959 960 961 *msg_index = total_index; 962 *bufoffset = (offset & page_size_mask); 963 964 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 965 "==> nxge_rxbuf_pp_to_vp: get msg index: " 966 "msg_index %d bufoffset_index %d", 967 *msg_index, 968 *bufoffset)); 969 970 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 971 972 return (NXGE_OK); 973 } 974 975 /* 976 * used by quick sort (qsort) function 977 * to perform comparison 978 */ 979 static int 980 nxge_sort_compare(const void *p1, const void *p2) 981 { 982 983 rxbuf_index_info_t *a, *b; 984 985 a = (rxbuf_index_info_t *)p1; 986 b = (rxbuf_index_info_t *)p2; 987 988 if (a->dvma_addr > b->dvma_addr) 989 return (1); 990 if (a->dvma_addr < b->dvma_addr) 991 return (-1); 992 return (0); 993 } 994 995 996 997 /* 998 * grabbed this sort implementation from common/syscall/avl.c 999 * 1000 */ 1001 /* 1002 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 1003 * v = Ptr to array/vector of objs 1004 * n = # objs in the array 1005 * s = size of each obj (must be multiples of a word size) 1006 * f = ptr to function to compare two objs 1007 * returns (-1 = less than, 0 = equal, 1 = greater than 1008 */ 1009 void 1010 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1011 { 1012 int g, i, j, ii; 1013 unsigned int *p1, *p2; 1014 unsigned int tmp; 1015 1016 /* No work to do */ 1017 if (v == NULL || n <= 1) 1018 return; 1019 /* Sanity check on arguments */ 1020 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1021 ASSERT(s > 0); 1022 1023 for (g = n / 2; g > 0; g /= 2) { 1024 for (i = g; i < n; i++) { 1025 for (j = i - g; j >= 0 && 1026 (*f)(v + j * s, v + (j + g) * s) == 1; 1027 j -= g) { 1028 p1 = (unsigned *)(v + j * s); 1029 p2 = (unsigned *)(v + (j + g) * s); 1030 for (ii = 0; ii < s / 4; ii++) { 1031 tmp = *p1; 1032 *p1++ = *p2; 1033 *p2++ = tmp; 1034 } 1035 } 1036 } 1037 } 1038 } 1039 1040 /* 1041 * Initialize data structures required for rxdma 1042 * buffer dvma->vmem address lookup 1043 */ 1044 /*ARGSUSED*/ 1045 static nxge_status_t 1046 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1047 { 1048 1049 int index; 1050 rxring_info_t *ring_info; 1051 int max_iteration = 0, max_index = 0; 1052 1053 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1054 1055 ring_info = rbrp->ring_info; 1056 ring_info->hint[0] = NO_HINT; 1057 ring_info->hint[1] = NO_HINT; 1058 ring_info->hint[2] = NO_HINT; 1059 max_index = rbrp->num_blocks; 1060 1061 /* read the DVMA address information and sort it */ 1062 /* do init of the information array */ 1063 1064 1065 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1066 " nxge_rxbuf_index_info_init Sort ptrs")); 1067 1068 /* sort the array */ 1069 nxge_ksort((void *)ring_info->buffer, max_index, 1070 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1071 1072 1073 1074 for (index = 0; index < max_index; index++) { 1075 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1076 " nxge_rxbuf_index_info_init: sorted chunk %d " 1077 " ioaddr $%p kaddr $%p size %x", 1078 index, ring_info->buffer[index].dvma_addr, 1079 ring_info->buffer[index].kaddr, 1080 ring_info->buffer[index].buf_size)); 1081 } 1082 1083 max_iteration = 0; 1084 while (max_index >= (1ULL << max_iteration)) 1085 max_iteration++; 1086 ring_info->max_iterations = max_iteration + 1; 1087 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1088 " nxge_rxbuf_index_info_init Find max iter %d", 1089 ring_info->max_iterations)); 1090 1091 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1092 return (NXGE_OK); 1093 } 1094 1095 /* ARGSUSED */ 1096 void 1097 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1098 { 1099 #ifdef NXGE_DEBUG 1100 1101 uint32_t bptr; 1102 uint64_t pp; 1103 1104 bptr = entry_p->bits.hdw.pkt_buf_addr; 1105 1106 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1107 "\trcr entry $%p " 1108 "\trcr entry 0x%0llx " 1109 "\trcr entry 0x%08x " 1110 "\trcr entry 0x%08x " 1111 "\tvalue 0x%0llx\n" 1112 "\tmulti = %d\n" 1113 "\tpkt_type = 0x%x\n" 1114 "\tzero_copy = %d\n" 1115 "\tnoport = %d\n" 1116 "\tpromis = %d\n" 1117 "\terror = 0x%04x\n" 1118 "\tdcf_err = 0x%01x\n" 1119 "\tl2_len = %d\n" 1120 "\tpktbufsize = %d\n" 1121 "\tpkt_buf_addr = $%p\n" 1122 "\tpkt_buf_addr (<< 6) = $%p\n", 1123 entry_p, 1124 *(int64_t *)entry_p, 1125 *(int32_t *)entry_p, 1126 *(int32_t *)((char *)entry_p + 32), 1127 entry_p->value, 1128 entry_p->bits.hdw.multi, 1129 entry_p->bits.hdw.pkt_type, 1130 entry_p->bits.hdw.zero_copy, 1131 entry_p->bits.hdw.noport, 1132 entry_p->bits.hdw.promis, 1133 entry_p->bits.hdw.error, 1134 entry_p->bits.hdw.dcf_err, 1135 entry_p->bits.hdw.l2_len, 1136 entry_p->bits.hdw.pktbufsz, 1137 bptr, 1138 entry_p->bits.ldw.pkt_buf_addr)); 1139 1140 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1141 RCR_PKT_BUF_ADDR_SHIFT; 1142 1143 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1144 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1145 #endif 1146 } 1147 1148 void 1149 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1150 { 1151 npi_handle_t handle; 1152 rbr_stat_t rbr_stat; 1153 addr44_t hd_addr; 1154 addr44_t tail_addr; 1155 uint16_t qlen; 1156 1157 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1158 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1159 1160 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1161 1162 /* RBR head */ 1163 hd_addr.addr = 0; 1164 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1165 #if defined(__i386) 1166 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1167 (void *)(uint32_t)hd_addr.addr); 1168 #else 1169 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1170 (void *)hd_addr.addr); 1171 #endif 1172 1173 /* RBR stats */ 1174 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1175 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1176 1177 /* RCR tail */ 1178 tail_addr.addr = 0; 1179 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1180 #if defined(__i386) 1181 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1182 (void *)(uint32_t)tail_addr.addr); 1183 #else 1184 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1185 (void *)tail_addr.addr); 1186 #endif 1187 1188 /* RCR qlen */ 1189 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1190 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1191 1192 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1193 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1194 } 1195 1196 nxge_status_t 1197 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1198 { 1199 nxge_grp_set_t 
*set = &nxgep->rx_set; 1200 nxge_status_t status; 1201 npi_status_t rs; 1202 int rdc; 1203 1204 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1205 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1206 1207 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1208 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1209 "<== nxge_rxdma_mode: not initialized")); 1210 return (NXGE_ERROR); 1211 } 1212 1213 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1214 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1215 "<== nxge_tx_port_fatal_err_recover: " 1216 "NULL ring pointer(s)")); 1217 return (NXGE_ERROR); 1218 } 1219 1220 if (set->owned.map == 0) { 1221 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1222 "nxge_rxdma_regs_dump_channels: no channels")); 1223 return (NULL); 1224 } 1225 1226 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1227 if ((1 << rdc) & set->owned.map) { 1228 rx_rbr_ring_t *ring = 1229 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1230 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1231 if (ring) { 1232 if (enable) { 1233 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1234 "==> nxge_rxdma_hw_mode: " 1235 "channel %d (enable)", rdc)); 1236 rs = npi_rxdma_cfg_rdc_enable 1237 (handle, rdc); 1238 } else { 1239 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1240 "==> nxge_rxdma_hw_mode: " 1241 "channel %d disable)", rdc)); 1242 rs = npi_rxdma_cfg_rdc_disable 1243 (handle, rdc); 1244 } 1245 } 1246 } 1247 } 1248 1249 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1250 1251 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1252 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1253 1254 return (status); 1255 } 1256 1257 void 1258 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1259 { 1260 npi_handle_t handle; 1261 1262 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1263 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1264 1265 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1266 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1267 1268 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1269 } 1270 1271 void 1272 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1273 { 1274 npi_handle_t handle; 1275 1276 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1277 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1278 1279 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1280 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1281 1282 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1283 } 1284 1285 void 1286 nxge_hw_start_rx(p_nxge_t nxgep) 1287 { 1288 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1289 1290 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1291 (void) nxge_rx_mac_enable(nxgep); 1292 1293 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1294 } 1295 1296 /*ARGSUSED*/ 1297 void 1298 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1299 { 1300 nxge_grp_set_t *set = &nxgep->rx_set; 1301 int rdc; 1302 1303 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1304 1305 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1306 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1307 "<== nxge_tx_port_fatal_err_recover: " 1308 "NULL ring pointer(s)")); 1309 return; 1310 } 1311 1312 if (set->owned.map == 0) { 1313 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1314 "nxge_rxdma_regs_dump_channels: no channels")); 1315 return; 1316 } 1317 1318 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1319 if ((1 << rdc) & set->owned.map) { 1320 rx_rbr_ring_t *ring = 1321 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1322 if (ring) { 1323 nxge_rxdma_hw_stop(nxgep, rdc); 1324 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1325 "==> nxge_fixup_rxdma_rings: " 1326 "channel %d ring $%px", 1327 
rdc, ring)); 1328 (void) nxge_rxdma_fixup_channel 1329 (nxgep, rdc, rdc); 1330 } 1331 } 1332 } 1333 1334 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1335 } 1336 1337 void 1338 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1339 { 1340 int i; 1341 1342 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1343 i = nxge_rxdma_get_ring_index(nxgep, channel); 1344 if (i < 0) { 1345 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1346 "<== nxge_rxdma_fix_channel: no entry found")); 1347 return; 1348 } 1349 1350 nxge_rxdma_fixup_channel(nxgep, channel, i); 1351 1352 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 1353 } 1354 1355 void 1356 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1357 { 1358 int ndmas; 1359 p_rx_rbr_rings_t rx_rbr_rings; 1360 p_rx_rbr_ring_t *rbr_rings; 1361 p_rx_rcr_rings_t rx_rcr_rings; 1362 p_rx_rcr_ring_t *rcr_rings; 1363 p_rx_mbox_areas_t rx_mbox_areas_p; 1364 p_rx_mbox_t *rx_mbox_p; 1365 p_nxge_dma_pool_t dma_buf_poolp; 1366 p_nxge_dma_pool_t dma_cntl_poolp; 1367 p_rx_rbr_ring_t rbrp; 1368 p_rx_rcr_ring_t rcrp; 1369 p_rx_mbox_t mboxp; 1370 p_nxge_dma_common_t dmap; 1371 nxge_status_t status = NXGE_OK; 1372 1373 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 1374 1375 (void) nxge_rxdma_stop_channel(nxgep, channel); 1376 1377 dma_buf_poolp = nxgep->rx_buf_pool_p; 1378 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1379 1380 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1381 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1382 "<== nxge_rxdma_fixup_channel: buf not allocated")); 1383 return; 1384 } 1385 1386 ndmas = dma_buf_poolp->ndmas; 1387 if (!ndmas) { 1388 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1389 "<== nxge_rxdma_fixup_channel: no dma allocated")); 1390 return; 1391 } 1392 1393 rx_rbr_rings = nxgep->rx_rbr_rings; 1394 rx_rcr_rings = nxgep->rx_rcr_rings; 1395 rbr_rings = rx_rbr_rings->rbr_rings; 1396 rcr_rings = rx_rcr_rings->rcr_rings; 1397 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1398 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1399 1400 /* Reinitialize the receive block and completion rings */ 1401 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 1402 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 1403 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 1404 1405 1406 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1407 rbrp->rbr_rd_index = 0; 1408 rcrp->comp_rd_index = 0; 1409 rcrp->comp_wt_index = 0; 1410 1411 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1412 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1413 1414 status = nxge_rxdma_start_channel(nxgep, channel, 1415 rbrp, rcrp, mboxp); 1416 if (status != NXGE_OK) { 1417 goto nxge_rxdma_fixup_channel_fail; 1418 } 1419 if (status != NXGE_OK) { 1420 goto nxge_rxdma_fixup_channel_fail; 1421 } 1422 1423 nxge_rxdma_fixup_channel_fail: 1424 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1425 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 1426 1427 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 1428 } 1429 1430 /* 1431 * Convert an absolute RDC number to a Receive Buffer Ring index. That is, 1432 * map <channel> to an index into nxgep->rx_rbr_rings. 
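 * For example (illustrative), if this port owns RDCs 4, 5 and 6, then
 * channel 5 maps to index 1: the function below simply scans
 * rbr_rings[i]->rdc for a match and returns i.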
1433 * (device ring index -> port ring index) 1434 */ 1435 int 1436 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1437 { 1438 int i, ndmas; 1439 uint16_t rdc; 1440 p_rx_rbr_rings_t rx_rbr_rings; 1441 p_rx_rbr_ring_t *rbr_rings; 1442 1443 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1444 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1445 1446 rx_rbr_rings = nxgep->rx_rbr_rings; 1447 if (rx_rbr_rings == NULL) { 1448 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1449 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1450 return (-1); 1451 } 1452 ndmas = rx_rbr_rings->ndmas; 1453 if (!ndmas) { 1454 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1455 "<== nxge_rxdma_get_ring_index: no channel")); 1456 return (-1); 1457 } 1458 1459 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1460 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1461 1462 rbr_rings = rx_rbr_rings->rbr_rings; 1463 for (i = 0; i < ndmas; i++) { 1464 rdc = rbr_rings[i]->rdc; 1465 if (channel == rdc) { 1466 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1467 "==> nxge_rxdma_get_rbr_ring: channel %d " 1468 "(index %d) ring %d", channel, i, rbr_rings[i])); 1469 return (i); 1470 } 1471 } 1472 1473 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1474 "<== nxge_rxdma_get_rbr_ring_index: not found")); 1475 1476 return (-1); 1477 } 1478 1479 p_rx_rbr_ring_t 1480 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1481 { 1482 nxge_grp_set_t *set = &nxgep->rx_set; 1483 nxge_channel_t rdc; 1484 1485 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1486 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1487 1488 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1489 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1490 "<== nxge_rxdma_get_rbr_ring: " 1491 "NULL ring pointer(s)")); 1492 return (NULL); 1493 } 1494 1495 if (set->owned.map == 0) { 1496 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1497 "<== nxge_rxdma_get_rbr_ring: no channels")); 1498 return (NULL); 1499 } 1500 1501 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1502 if ((1 << rdc) & set->owned.map) { 1503 rx_rbr_ring_t *ring = 1504 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1505 if (ring) { 1506 if (channel == ring->rdc) { 1507 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1508 "==> nxge_rxdma_get_rbr_ring: " 1509 "channel %d ring $%p", rdc, ring)); 1510 return (ring); 1511 } 1512 } 1513 } 1514 } 1515 1516 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1517 "<== nxge_rxdma_get_rbr_ring: not found")); 1518 1519 return (NULL); 1520 } 1521 1522 p_rx_rcr_ring_t 1523 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1524 { 1525 nxge_grp_set_t *set = &nxgep->rx_set; 1526 nxge_channel_t rdc; 1527 1528 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1529 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1530 1531 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1532 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1533 "<== nxge_rxdma_get_rcr_ring: " 1534 "NULL ring pointer(s)")); 1535 return (NULL); 1536 } 1537 1538 if (set->owned.map == 0) { 1539 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1540 "<== nxge_rxdma_get_rbr_ring: no channels")); 1541 return (NULL); 1542 } 1543 1544 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1545 if ((1 << rdc) & set->owned.map) { 1546 rx_rcr_ring_t *ring = 1547 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1548 if (ring) { 1549 if (channel == ring->rdc) { 1550 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1551 "==> nxge_rxdma_get_rcr_ring: " 1552 "channel %d ring $%p", rdc, ring)); 1553 return (ring); 1554 } 1555 } 1556 } 1557 } 1558 1559 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1560 "<== nxge_rxdma_get_rcr_ring: not found")); 1561 1562 return (NULL); 1563 } 1564 1565 /* 1566 * Static functions start here. 
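 *
 * The first of them, nxge_allocb(), carves a message of `size' bytes out of
 * the caller's DMA chunk when one is supplied (advancing kaddrp, ioaddr_pp
 * and the DMA cookie past the carved region), or falls back to a plain kmem
 * buffer otherwise; either way the buffer is wrapped with desballoc() so
 * that nxge_freeb() runs when the mblk is freed.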
1567 */ 1568 static p_rx_msg_t 1569 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1570 { 1571 p_rx_msg_t nxge_mp = NULL; 1572 p_nxge_dma_common_t dmamsg_p; 1573 uchar_t *buffer; 1574 1575 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1576 if (nxge_mp == NULL) { 1577 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1578 "Allocation of a rx msg failed.")); 1579 goto nxge_allocb_exit; 1580 } 1581 1582 nxge_mp->use_buf_pool = B_FALSE; 1583 if (dmabuf_p) { 1584 nxge_mp->use_buf_pool = B_TRUE; 1585 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1586 *dmamsg_p = *dmabuf_p; 1587 dmamsg_p->nblocks = 1; 1588 dmamsg_p->block_size = size; 1589 dmamsg_p->alength = size; 1590 buffer = (uchar_t *)dmabuf_p->kaddrp; 1591 1592 dmabuf_p->kaddrp = (void *) 1593 ((char *)dmabuf_p->kaddrp + size); 1594 dmabuf_p->ioaddr_pp = (void *) 1595 ((char *)dmabuf_p->ioaddr_pp + size); 1596 dmabuf_p->alength -= size; 1597 dmabuf_p->offset += size; 1598 dmabuf_p->dma_cookie.dmac_laddress += size; 1599 dmabuf_p->dma_cookie.dmac_size -= size; 1600 1601 } else { 1602 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1603 if (buffer == NULL) { 1604 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1605 "Allocation of a receive page failed.")); 1606 goto nxge_allocb_fail1; 1607 } 1608 } 1609 1610 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1611 if (nxge_mp->rx_mblk_p == NULL) { 1612 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1613 goto nxge_allocb_fail2; 1614 } 1615 1616 nxge_mp->buffer = buffer; 1617 nxge_mp->block_size = size; 1618 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1619 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1620 nxge_mp->ref_cnt = 1; 1621 nxge_mp->free = B_TRUE; 1622 nxge_mp->rx_use_bcopy = B_FALSE; 1623 1624 atomic_inc_32(&nxge_mblks_pending); 1625 1626 goto nxge_allocb_exit; 1627 1628 nxge_allocb_fail2: 1629 if (!nxge_mp->use_buf_pool) { 1630 KMEM_FREE(buffer, size); 1631 } 1632 1633 nxge_allocb_fail1: 1634 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1635 nxge_mp = NULL; 1636 1637 nxge_allocb_exit: 1638 return (nxge_mp); 1639 } 1640 1641 p_mblk_t 1642 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1643 { 1644 p_mblk_t mp; 1645 1646 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1647 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1648 "offset = 0x%08X " 1649 "size = 0x%08X", 1650 nxge_mp, offset, size)); 1651 1652 mp = desballoc(&nxge_mp->buffer[offset], size, 1653 0, &nxge_mp->freeb); 1654 if (mp == NULL) { 1655 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1656 goto nxge_dupb_exit; 1657 } 1658 atomic_inc_32(&nxge_mp->ref_cnt); 1659 1660 1661 nxge_dupb_exit: 1662 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1663 nxge_mp)); 1664 return (mp); 1665 } 1666 1667 p_mblk_t 1668 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1669 { 1670 p_mblk_t mp; 1671 uchar_t *dp; 1672 1673 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1674 if (mp == NULL) { 1675 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1676 goto nxge_dupb_bcopy_exit; 1677 } 1678 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1679 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1680 mp->b_wptr = dp + size; 1681 1682 nxge_dupb_bcopy_exit: 1683 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1684 nxge_mp)); 1685 return (mp); 1686 } 1687 1688 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1689 p_rx_msg_t rx_msg_p); 1690 1691 void 1692 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1693 { 1694 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, "==> nxge_post_page")); 1695 1696 /* Reuse this buffer */ 1697 rx_msg_p->free = B_FALSE; 1698 rx_msg_p->cur_usage_cnt = 0; 1699 rx_msg_p->max_usage_cnt = 0; 1700 rx_msg_p->pkt_buf_size = 0; 1701 1702 if (rx_rbr_p->rbr_use_bcopy) { 1703 rx_msg_p->rx_use_bcopy = B_FALSE; 1704 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1705 } 1706 1707 /* 1708 * Get the rbr header pointer and its offset index. 1709 */ 1710 MUTEX_ENTER(&rx_rbr_p->post_lock); 1711 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1712 rx_rbr_p->rbr_wrap_mask); 1713 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1714 MUTEX_EXIT(&rx_rbr_p->post_lock); 1715 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1716 rx_rbr_p->rdc, 1); 1717 1718 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1719 "<== nxge_post_page (channel %d post_next_index %d)", 1720 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1721 1722 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1723 } 1724 1725 void 1726 nxge_freeb(p_rx_msg_t rx_msg_p) 1727 { 1728 size_t size; 1729 uchar_t *buffer = NULL; 1730 int ref_cnt; 1731 boolean_t free_state = B_FALSE; 1732 1733 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1734 1735 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1736 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1737 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1738 rx_msg_p, nxge_mblks_pending)); 1739 1740 /* 1741 * First we need to get the free state, then 1742 * atomic decrement the reference count to prevent 1743 * the race condition with the interrupt thread that 1744 * is processing a loaned up buffer block. 1745 */ 1746 free_state = rx_msg_p->free; 1747 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1748 if (!ref_cnt) { 1749 atomic_dec_32(&nxge_mblks_pending); 1750 buffer = rx_msg_p->buffer; 1751 size = rx_msg_p->block_size; 1752 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1753 "will free: rx_msg_p = $%p (block pending %d)", 1754 rx_msg_p, nxge_mblks_pending)); 1755 1756 if (!rx_msg_p->use_buf_pool) { 1757 KMEM_FREE(buffer, size); 1758 } 1759 1760 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1761 1762 if (ring) { 1763 /* 1764 * Decrement the receive buffer ring's reference 1765 * count, too. 1766 */ 1767 atomic_dec_32(&ring->rbr_ref_cnt); 1768 1769 /* 1770 * Free the receive buffer ring, if 1771 * 1. all the receive buffers have been freed 1772 * 2. and we are in the proper state (that is, 1773 * we are not UNMAPPING). 1774 */ 1775 if (ring->rbr_ref_cnt == 0 && 1776 ring->rbr_state == RBR_UNMAPPED) { 1777 /* 1778 * Free receive data buffers, 1779 * buffer index information 1780 * (rxring_info) and 1781 * the message block ring. 1782 */ 1783 NXGE_DEBUG_MSG((NULL, RX_CTL, 1784 "nxge_freeb:rx_msg_p = $%p " 1785 "(block pending %d) free buffers", 1786 rx_msg_p, nxge_mblks_pending)); 1787 nxge_rxdma_databuf_free(ring); 1788 if (ring->ring_info) { 1789 KMEM_FREE(ring->ring_info, 1790 sizeof (rxring_info_t)); 1791 } 1792 1793 if (ring->rx_msg_ring) { 1794 KMEM_FREE(ring->rx_msg_ring, 1795 ring->tnblocks * 1796 sizeof (p_rx_msg_t)); 1797 } 1798 KMEM_FREE(ring, sizeof (*ring)); 1799 } 1800 } 1801 return; 1802 } 1803 1804 /* 1805 * Repost buffer. 
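 * A buffer goes back onto the RBR only when all three conditions hold:
 * it was marked free, its reference count has dropped back to one (only
 * the ring's own reference remains), and the ring is still in the
 * RBR_POSTING state.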
1806 */ 1807 if (free_state && (ref_cnt == 1) && ring) { 1808 NXGE_DEBUG_MSG((NULL, RX_CTL, 1809 "nxge_freeb: post page $%p:", rx_msg_p)); 1810 if (ring->rbr_state == RBR_POSTING) 1811 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1812 } 1813 1814 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1815 } 1816 1817 uint_t 1818 nxge_rx_intr(void *arg1, void *arg2) 1819 { 1820 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1821 p_nxge_t nxgep = (p_nxge_t)arg2; 1822 p_nxge_ldg_t ldgp; 1823 uint8_t channel; 1824 npi_handle_t handle; 1825 rx_dma_ctl_stat_t cs; 1826 p_rx_rcr_ring_t rcr_ring; 1827 mblk_t *mp; 1828 1829 #ifdef NXGE_DEBUG 1830 rxdma_cfig1_t cfg; 1831 #endif 1832 1833 if (ldvp == NULL) { 1834 NXGE_DEBUG_MSG((NULL, INT_CTL, 1835 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1836 nxgep, ldvp)); 1837 1838 return (DDI_INTR_CLAIMED); 1839 } 1840 1841 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1842 nxgep = ldvp->nxgep; 1843 } 1844 1845 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1846 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1847 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1848 "<== nxge_rx_intr: interface not started or intialized")); 1849 return (DDI_INTR_CLAIMED); 1850 } 1851 1852 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1853 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1854 nxgep, ldvp)); 1855 1856 /* 1857 * This interrupt handler is for a specific 1858 * receive dma channel. 1859 */ 1860 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1861 1862 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1863 1864 /* 1865 * The RCR ring lock must be held when packets 1866 * are being processed and the hardware registers are 1867 * being read or written to prevent race condition 1868 * among the interrupt thread, the polling thread 1869 * (will cause fatal errors such as rcrincon bit set) 1870 * and the setting of the poll_flag. 1871 */ 1872 MUTEX_ENTER(&rcr_ring->lock); 1873 1874 /* 1875 * Get the control and status for this channel. 1876 */ 1877 channel = ldvp->channel; 1878 ldgp = ldvp->ldgp; 1879 1880 if (!isLDOMguest(nxgep)) { 1881 if (!nxgep->rx_channel_started[channel]) { 1882 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1883 "<== nxge_rx_intr: channel is not started")); 1884 MUTEX_EXIT(&rcr_ring->lock); 1885 return (DDI_INTR_CLAIMED); 1886 } 1887 } 1888 1889 ASSERT(rcr_ring->ldgp == ldgp); 1890 ASSERT(rcr_ring->ldvp == ldvp); 1891 1892 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1893 1894 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1895 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1896 channel, 1897 cs.value, 1898 cs.bits.hdw.rcrto, 1899 cs.bits.hdw.rcrthres)); 1900 1901 mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1902 1903 /* error events. */ 1904 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1905 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1906 } 1907 1908 /* 1909 * Enable the mailbox update interrupt if we want 1910 * to use mailbox. We probably don't need to use 1911 * mailbox as it only saves us one pio read. 1912 * Also write 1 to rcrthres and rcrto to clear 1913 * these two edge triggered bits. 1914 */ 1915 cs.value &= RX_DMA_CTL_STAT_WR1C; 1916 cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 1917 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1918 cs.value); 1919 1920 /* 1921 * If the polling mode is enabled, disable the interrupt. 
1922 */ 1923 if (rcr_ring->poll_flag) { 1924 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1925 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1926 "(disabling interrupts)", channel, ldgp, ldvp)); 1927 /* 1928 * Disarm this logical group if this is a single device 1929 * group. 1930 */ 1931 if (ldgp->nldvs == 1) { 1932 ldgimgm_t mgm; 1933 mgm.value = 0; 1934 mgm.bits.ldw.arm = 0; 1935 NXGE_REG_WR64(handle, 1936 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 1937 } 1938 } else { 1939 /* 1940 * Rearm this logical group if this is a single device 1941 * group. 1942 */ 1943 if (ldgp->nldvs == 1) { 1944 if (isLDOMguest(nxgep)) { 1945 nxge_hio_ldgimgn(nxgep, ldgp); 1946 } else { 1947 ldgimgm_t mgm; 1948 1949 mgm.value = 0; 1950 mgm.bits.ldw.arm = 1; 1951 mgm.bits.ldw.timer = ldgp->ldg_timer; 1952 1953 NXGE_REG_WR64(handle, 1954 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1955 mgm.value); 1956 } 1957 } 1958 1959 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1960 "==> nxge_rx_intr: rdc %d ldgp $%p " 1961 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1962 } 1963 MUTEX_EXIT(&rcr_ring->lock); 1964 1965 if (mp) { 1966 if (!isLDOMguest(nxgep)) 1967 mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 1968 rcr_ring->rcr_gen_num); 1969 #if defined(sun4v) 1970 else { /* isLDOMguest(nxgep) */ 1971 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1972 nxgep->nxge_hw_p->hio; 1973 nx_vio_fp_t *vio = &nhd->hio.vio; 1974 1975 if (vio->cb.vio_net_rx_cb) { 1976 (*vio->cb.vio_net_rx_cb) 1977 (nxgep->hio_vr->vhp, mp); 1978 } 1979 } 1980 #endif 1981 } 1982 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 1983 return (DDI_INTR_CLAIMED); 1984 } 1985 1986 /* 1987 * Process the packets received in the specified logical device 1988 * and pass up a chain of message blocks to the upper layer. 1989 * The RCR ring lock must be held before calling this function. 
1990 */ 1991 static mblk_t * 1992 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 1993 { 1994 p_mblk_t mp; 1995 p_rx_rcr_ring_t rcrp; 1996 1997 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1998 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 1999 2000 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2001 "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d " 2002 "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle)); 2003 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 2004 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2005 "<== nxge_rx_pkts_vring: no mp")); 2006 return (NULL); 2007 } 2008 2009 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 2010 mp)); 2011 2012 #ifdef NXGE_DEBUG 2013 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2014 "==> nxge_rx_pkts_vring:calling mac_rx " 2015 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 2016 "mac_handle $%p", 2017 mp->b_wptr - mp->b_rptr, 2018 mp, mp->b_cont, mp->b_next, 2019 rcrp, rcrp->rcr_mac_handle)); 2020 2021 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2022 "==> nxge_rx_pkts_vring: dump packets " 2023 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 2024 mp, 2025 mp->b_rptr, 2026 mp->b_wptr, 2027 nxge_dump_packet((char *)mp->b_rptr, 2028 mp->b_wptr - mp->b_rptr))); 2029 if (mp->b_cont) { 2030 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2031 "==> nxge_rx_pkts_vring: dump b_cont packets " 2032 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 2033 mp->b_cont, 2034 mp->b_cont->b_rptr, 2035 mp->b_cont->b_wptr, 2036 nxge_dump_packet((char *)mp->b_cont->b_rptr, 2037 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 2038 } 2039 if (mp->b_next) { 2040 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2041 "==> nxge_rx_pkts_vring: dump next packets " 2042 "(b_rptr $%p): %s", 2043 mp->b_next->b_rptr, 2044 nxge_dump_packet((char *)mp->b_next->b_rptr, 2045 mp->b_next->b_wptr - mp->b_next->b_rptr))); 2046 } 2047 #endif 2048 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2049 "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ", 2050 rcrp->rdc, rcrp->rcr_mac_handle)); 2051 2052 return (mp); 2053 } 2054 2055 2056 /* 2057 * This routine is the main packet receive processing function. 2058 * It gets the packet type, error code, and buffer-related 2059 * information from the receive completion entry. 2060 * How many completion entries to process is based on the number of packets 2061 * queued by the hardware, a hardware-maintained tail pointer 2062 * and a configurable receive packet count. 2063 * 2064 * A chain of message blocks will be created as a result of processing 2065 * the completion entries. This chain of message blocks will be returned and 2066 * a hardware control status register will be updated with the number of 2067 * packets that were removed from the hardware queue. 2068 * 2069 * The RCR ring lock is held when entering this function.
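 *
 * Rough shape of the loop below (a reading aid, not new behavior):
 *
 *	qlen_hw = min(qlen read from the RCR status register, nxge_max_rx_pkts);
 *	while (qlen_hw) {
 *		nxge_receive_packet(...);	chain the resulting mblk
 *		advance comp_rd_index and the descriptor read pointer;
 *		if (bytes_to_pickup was given and has been reached)
 *			break;
 *	}
 *	write pktread/ptrread back to RX_DMA_CTL_STAT;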
2070 */ 2071 static mblk_t * 2072 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 2073 int bytes_to_pickup) 2074 { 2075 npi_handle_t handle; 2076 uint8_t channel; 2077 uint32_t comp_rd_index; 2078 p_rcr_entry_t rcr_desc_rd_head_p; 2079 p_rcr_entry_t rcr_desc_rd_head_pp; 2080 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 2081 uint16_t qlen, nrcr_read, npkt_read; 2082 uint32_t qlen_hw; 2083 boolean_t multi; 2084 rcrcfig_b_t rcr_cfg_b; 2085 int totallen = 0; 2086 #if defined(_BIG_ENDIAN) 2087 npi_status_t rs = NPI_SUCCESS; 2088 #endif 2089 2090 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 2091 "channel %d", rcr_p->rdc)); 2092 2093 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 2094 return (NULL); 2095 } 2096 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2097 channel = rcr_p->rdc; 2098 2099 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2100 "==> nxge_rx_pkts: START: rcr channel %d " 2101 "head_p $%p head_pp $%p index %d ", 2102 channel, rcr_p->rcr_desc_rd_head_p, 2103 rcr_p->rcr_desc_rd_head_pp, 2104 rcr_p->comp_rd_index)); 2105 2106 2107 #if !defined(_BIG_ENDIAN) 2108 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2109 #else 2110 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2111 if (rs != NPI_SUCCESS) { 2112 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2113 "channel %d, get qlen failed 0x%08x", 2114 channel, rs)); 2115 return (NULL); 2116 } 2117 #endif 2118 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2119 "qlen %d", channel, qlen)); 2120 2121 2122 2123 if (!qlen) { 2124 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2125 "==> nxge_rx_pkts:rcr channel %d " 2126 "qlen %d (no pkts)", channel, qlen)); 2127 2128 return (NULL); 2129 } 2130 2131 comp_rd_index = rcr_p->comp_rd_index; 2132 2133 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2134 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2135 nrcr_read = npkt_read = 0; 2136 2137 /* 2138 * Number of packets queued 2139 * (The jumbo or multi packet will be counted as only one 2140 * packets and it may take up more than one completion entry). 2141 */ 2142 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2143 qlen : nxge_max_rx_pkts; 2144 head_mp = NULL; 2145 tail_mp = &head_mp; 2146 nmp = mp_cont = NULL; 2147 multi = B_FALSE; 2148 2149 while (qlen_hw) { 2150 2151 #ifdef NXGE_DEBUG 2152 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2153 #endif 2154 /* 2155 * Process one completion ring entry. 
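 * nxge_receive_packet() returns the first buffer of a frame through nmp
 * and any continuation buffer through mp_cont; multi stays set while
 * further entries of the same frame remain.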
2156 */ 2157 nxge_receive_packet(nxgep, 2158 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2159 2160 /* 2161 * message chaining modes 2162 */ 2163 if (nmp) { 2164 nmp->b_next = NULL; 2165 if (!multi && !mp_cont) { /* frame fits a partition */ 2166 *tail_mp = nmp; 2167 tail_mp = &nmp->b_next; 2168 totallen += MBLKL(nmp); 2169 nmp = NULL; 2170 } else if (multi && !mp_cont) { /* first segment */ 2171 *tail_mp = nmp; 2172 tail_mp = &nmp->b_cont; 2173 totallen += MBLKL(nmp); 2174 } else if (multi && mp_cont) { /* mid of multi segs */ 2175 *tail_mp = mp_cont; 2176 tail_mp = &mp_cont->b_cont; 2177 totallen += MBLKL(mp_cont); 2178 } else if (!multi && mp_cont) { /* last segment */ 2179 *tail_mp = mp_cont; 2180 tail_mp = &nmp->b_next; 2181 totallen += MBLKL(mp_cont); 2182 nmp = NULL; 2183 } 2184 } 2185 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2186 "==> nxge_rx_pkts: loop: rcr channel %d " 2187 "before updating: multi %d " 2188 "nrcr_read %d " 2189 "npk read %d " 2190 "head_pp $%p index %d ", 2191 channel, 2192 multi, 2193 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2194 comp_rd_index)); 2195 2196 if (!multi) { 2197 qlen_hw--; 2198 npkt_read++; 2199 } 2200 2201 /* 2202 * Update the next read entry. 2203 */ 2204 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2205 rcr_p->comp_wrap_mask); 2206 2207 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2208 rcr_p->rcr_desc_first_p, 2209 rcr_p->rcr_desc_last_p); 2210 2211 nrcr_read++; 2212 2213 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2214 "<== nxge_rx_pkts: (SAM, process one packet) " 2215 "nrcr_read %d", 2216 nrcr_read)); 2217 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2218 "==> nxge_rx_pkts: loop: rcr channel %d " 2219 "multi %d " 2220 "nrcr_read %d " 2221 "npk read %d " 2222 "head_pp $%p index %d ", 2223 channel, 2224 multi, 2225 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2226 comp_rd_index)); 2227 2228 if ((bytes_to_pickup != -1) && 2229 (totallen >= bytes_to_pickup)) { 2230 break; 2231 } 2232 } 2233 2234 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2235 rcr_p->comp_rd_index = comp_rd_index; 2236 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2237 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2238 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2239 2240 rcr_p->intr_timeout = (nxgep->intr_timeout < 2241 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2242 nxgep->intr_timeout; 2243 2244 rcr_p->intr_threshold = (nxgep->intr_threshold < 2245 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2246 nxgep->intr_threshold; 2247 2248 rcr_cfg_b.value = 0x0ULL; 2249 rcr_cfg_b.bits.ldw.entout = 1; 2250 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2251 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2252 2253 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2254 channel, rcr_cfg_b.value); 2255 } 2256 2257 cs.bits.ldw.pktread = npkt_read; 2258 cs.bits.ldw.ptrread = nrcr_read; 2259 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2260 channel, cs.value); 2261 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2262 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2263 "head_pp $%p index %016llx ", 2264 channel, 2265 rcr_p->rcr_desc_rd_head_pp, 2266 rcr_p->comp_rd_index)); 2267 /* 2268 * Update RCR buffer pointer read and number of packets 2269 * read. 
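 * The pktread and ptrread values written to RX_DMA_CTL_STAT above tell
 * the hardware how many packets and RCR entries software has consumed,
 * which lets it keep the queue length (read from RCRSTAT_A at the top of
 * this function) up to date.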
2270 */ 2271 2272 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2273 "channel %d", rcr_p->rdc)); 2274 2275 return (head_mp); 2276 } 2277 2278 void 2279 nxge_receive_packet(p_nxge_t nxgep, 2280 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2281 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2282 { 2283 p_mblk_t nmp = NULL; 2284 uint64_t multi; 2285 uint64_t dcf_err; 2286 uint8_t channel; 2287 2288 boolean_t first_entry = B_TRUE; 2289 boolean_t is_tcp_udp = B_FALSE; 2290 boolean_t buffer_free = B_FALSE; 2291 boolean_t error_send_up = B_FALSE; 2292 uint8_t error_type; 2293 uint16_t l2_len; 2294 uint16_t skip_len; 2295 uint8_t pktbufsz_type; 2296 uint64_t rcr_entry; 2297 uint64_t *pkt_buf_addr_pp; 2298 uint64_t *pkt_buf_addr_p; 2299 uint32_t buf_offset; 2300 uint32_t bsize; 2301 uint32_t error_disp_cnt; 2302 uint32_t msg_index; 2303 p_rx_rbr_ring_t rx_rbr_p; 2304 p_rx_msg_t *rx_msg_ring_p; 2305 p_rx_msg_t rx_msg_p; 2306 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2307 nxge_status_t status = NXGE_OK; 2308 boolean_t is_valid = B_FALSE; 2309 p_nxge_rx_ring_stats_t rdc_stats; 2310 uint32_t bytes_read; 2311 uint64_t pkt_type; 2312 uint64_t frag; 2313 boolean_t pkt_too_long_err = B_FALSE; 2314 #ifdef NXGE_DEBUG 2315 int dump_len; 2316 #endif 2317 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2318 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2319 2320 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2321 2322 multi = (rcr_entry & RCR_MULTI_MASK); 2323 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2324 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2325 2326 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2327 frag = (rcr_entry & RCR_FRAG_MASK); 2328 2329 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2330 2331 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2332 RCR_PKTBUFSZ_SHIFT); 2333 #if defined(__i386) 2334 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2335 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2336 #else 2337 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2338 RCR_PKT_BUF_ADDR_SHIFT); 2339 #endif 2340 2341 channel = rcr_p->rdc; 2342 2343 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2344 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2345 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2346 "error_type 0x%x pkt_type 0x%x " 2347 "pktbufsz_type %d ", 2348 rcr_desc_rd_head_p, 2349 rcr_entry, pkt_buf_addr_pp, l2_len, 2350 multi, 2351 error_type, 2352 pkt_type, 2353 pktbufsz_type)); 2354 2355 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2356 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2357 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2358 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2359 rcr_entry, pkt_buf_addr_pp, l2_len, 2360 multi, 2361 error_type, 2362 pkt_type)); 2363 2364 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2365 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2366 "full pkt_buf_addr_pp $%p l2_len %d", 2367 rcr_entry, pkt_buf_addr_pp, l2_len)); 2368 2369 /* get the stats ptr */ 2370 rdc_stats = rcr_p->rdc_stats; 2371 2372 if (!l2_len) { 2373 2374 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2375 "<== nxge_receive_packet: failed: l2 length is 0.")); 2376 return; 2377 } 2378 2379 /* 2380 * Software workaround for BMAC hardware limitation that allows 2381 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2382 * instead of 0x2400 for jumbo. 
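 * Such frames are only flagged here (pkt_too_long_err); they are counted
 * and dropped in the error handling further below.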
2383 */ 2384 if (l2_len > nxgep->mac.maxframesize) { 2385 pkt_too_long_err = B_TRUE; 2386 } 2387 2388 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2389 l2_len -= ETHERFCSL; 2390 2391 /* shift 6 bits to get the full io address */ 2392 #if defined(__i386) 2393 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2394 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2395 #else 2396 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2397 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2398 #endif 2399 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2400 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2401 "full pkt_buf_addr_pp $%p l2_len %d", 2402 rcr_entry, pkt_buf_addr_pp, l2_len)); 2403 2404 rx_rbr_p = rcr_p->rx_rbr_p; 2405 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2406 2407 if (first_entry) { 2408 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2409 RXDMA_HDR_SIZE_DEFAULT); 2410 2411 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2412 "==> nxge_receive_packet: first entry 0x%016llx " 2413 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2414 rcr_entry, pkt_buf_addr_pp, l2_len, 2415 hdr_size)); 2416 } 2417 2418 MUTEX_ENTER(&rx_rbr_p->lock); 2419 2420 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2421 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2422 "full pkt_buf_addr_pp $%p l2_len %d", 2423 rcr_entry, pkt_buf_addr_pp, l2_len)); 2424 2425 /* 2426 * Packet buffer address in the completion entry points 2427 * to the starting buffer address (offset 0). 2428 * Use the starting buffer address to locate the corresponding 2429 * kernel address. 2430 */ 2431 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2432 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2433 &buf_offset, 2434 &msg_index); 2435 2436 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2437 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2438 "full pkt_buf_addr_pp $%p l2_len %d", 2439 rcr_entry, pkt_buf_addr_pp, l2_len)); 2440 2441 if (status != NXGE_OK) { 2442 MUTEX_EXIT(&rx_rbr_p->lock); 2443 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2444 "<== nxge_receive_packet: found vaddr failed %d", 2445 status)); 2446 return; 2447 } 2448 2449 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2450 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2451 "full pkt_buf_addr_pp $%p l2_len %d", 2452 rcr_entry, pkt_buf_addr_pp, l2_len)); 2453 2454 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2455 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2456 "full pkt_buf_addr_pp $%p l2_len %d", 2457 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2458 2459 rx_msg_p = rx_msg_ring_p[msg_index]; 2460 2461 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2462 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2463 "full pkt_buf_addr_pp $%p l2_len %d", 2464 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2465 2466 switch (pktbufsz_type) { 2467 case RCR_PKTBUFSZ_0: 2468 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2469 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2470 "==> nxge_receive_packet: 0 buf %d", bsize)); 2471 break; 2472 case RCR_PKTBUFSZ_1: 2473 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2474 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2475 "==> nxge_receive_packet: 1 buf %d", bsize)); 2476 break; 2477 case RCR_PKTBUFSZ_2: 2478 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2479 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2480 "==> nxge_receive_packet: 2 buf %d", bsize)); 2481 break; 2482 case RCR_SINGLE_BLOCK: 2483 bsize = rx_msg_p->block_size; 2484 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2485 "==> nxge_receive_packet: single %d", bsize)); 2486 2487 break; 2488 default: 2489 MUTEX_EXIT(&rx_rbr_p->lock); 2490 return; 2491 } 2492 2493 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2494 (buf_offset + sw_offset_bytes), 
2495 (hdr_size + l2_len), 2496 DDI_DMA_SYNC_FORCPU); 2497 2498 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2499 "==> nxge_receive_packet: after first dump:usage count")); 2500 2501 if (rx_msg_p->cur_usage_cnt == 0) { 2502 if (rx_rbr_p->rbr_use_bcopy) { 2503 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2504 if (rx_rbr_p->rbr_consumed < 2505 rx_rbr_p->rbr_threshold_hi) { 2506 if (rx_rbr_p->rbr_threshold_lo == 0 || 2507 ((rx_rbr_p->rbr_consumed >= 2508 rx_rbr_p->rbr_threshold_lo) && 2509 (rx_rbr_p->rbr_bufsize_type >= 2510 pktbufsz_type))) { 2511 rx_msg_p->rx_use_bcopy = B_TRUE; 2512 } 2513 } else { 2514 rx_msg_p->rx_use_bcopy = B_TRUE; 2515 } 2516 } 2517 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2518 "==> nxge_receive_packet: buf %d (new block) ", 2519 bsize)); 2520 2521 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2522 rx_msg_p->pkt_buf_size = bsize; 2523 rx_msg_p->cur_usage_cnt = 1; 2524 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2525 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2526 "==> nxge_receive_packet: buf %d " 2527 "(single block) ", 2528 bsize)); 2529 /* 2530 * Buffer can be reused once the free function 2531 * is called. 2532 */ 2533 rx_msg_p->max_usage_cnt = 1; 2534 buffer_free = B_TRUE; 2535 } else { 2536 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2537 if (rx_msg_p->max_usage_cnt == 1) { 2538 buffer_free = B_TRUE; 2539 } 2540 } 2541 } else { 2542 rx_msg_p->cur_usage_cnt++; 2543 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2544 buffer_free = B_TRUE; 2545 } 2546 } 2547 2548 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2549 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2550 msg_index, l2_len, 2551 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2552 2553 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2554 rdc_stats->ierrors++; 2555 if (dcf_err) { 2556 rdc_stats->dcf_err++; 2557 #ifdef NXGE_DEBUG 2558 if (!rdc_stats->dcf_err) { 2559 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2560 "nxge_receive_packet: channel %d dcf_err rcr" 2561 " 0x%llx", channel, rcr_entry)); 2562 } 2563 #endif 2564 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2565 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2566 } else if (pkt_too_long_err) { 2567 rdc_stats->pkt_too_long_err++; 2568 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2569 " channel %d packet length [%d] > " 2570 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2571 nxgep->mac.maxframesize)); 2572 } else { 2573 /* Update error stats */ 2574 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2575 rdc_stats->errlog.compl_err_type = error_type; 2576 2577 switch (error_type) { 2578 /* 2579 * Do not send FMA ereport for RCR_L2_ERROR and 2580 * RCR_L4_CSUM_ERROR because most likely they indicate 2581 * back pressure rather than HW failures. 2582 */ 2583 case RCR_L2_ERROR: 2584 rdc_stats->l2_err++; 2585 if (rdc_stats->l2_err < 2586 error_disp_cnt) { 2587 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2588 " nxge_receive_packet:" 2589 " channel %d RCR L2_ERROR", 2590 channel)); 2591 } 2592 break; 2593 case RCR_L4_CSUM_ERROR: 2594 error_send_up = B_TRUE; 2595 rdc_stats->l4_cksum_err++; 2596 if (rdc_stats->l4_cksum_err < 2597 error_disp_cnt) { 2598 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2599 " nxge_receive_packet:" 2600 " channel %d" 2601 " RCR L4_CSUM_ERROR", channel)); 2602 } 2603 break; 2604 /* 2605 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2606 * RCR_ZCP_SOFT_ERROR because they reflect the same 2607 * FFLP and ZCP errors that have been reported by 2608 * nxge_fflp.c and nxge_zcp.c. 
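 * Both of these soft errors set error_send_up, so the frame is still
 * passed up the stack; errors that leave error_send_up clear cause the
 * buffer to be freed and the entry to be dropped below.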
 */
		case RCR_FFLP_SOFT_ERROR:
			error_send_up = B_TRUE;
			rdc_stats->fflp_soft_err++;
			if (rdc_stats->fflp_soft_err <
			    error_disp_cnt) {
				NXGE_ERROR_MSG((nxgep,
				    NXGE_ERR_CTL,
				    " nxge_receive_packet:"
				    " channel %d"
				    " RCR FFLP_SOFT_ERROR", channel));
			}
			break;
		case RCR_ZCP_SOFT_ERROR:
			error_send_up = B_TRUE;
			rdc_stats->zcp_soft_err++;
			if (rdc_stats->zcp_soft_err <
			    error_disp_cnt)
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    " nxge_receive_packet: Channel %d"
				    " RCR ZCP_SOFT_ERROR", channel));
			break;
		default:
			rdc_stats->rcr_unknown_err++;
			if (rdc_stats->rcr_unknown_err
			    < error_disp_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    " nxge_receive_packet: Channel %d"
				    " RCR entry 0x%llx error 0x%x",
				    channel, rcr_entry, error_type));
			}
			break;
		}
	}

		/*
		 * Update and repost buffer block if max usage
		 * count is reached.
		 */
		if (error_send_up == B_FALSE) {
			atomic_inc_32(&rx_msg_p->ref_cnt);
			if (buffer_free == B_TRUE) {
				rx_msg_p->free = B_TRUE;
			}

			MUTEX_EXIT(&rx_rbr_p->lock);
			nxge_freeb(rx_msg_p);
			return;
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_receive_packet: DMA sync second "));

	bytes_read = rcr_p->rcvd_pkt_bytes;
	skip_len = sw_offset_bytes + hdr_size;
	if (!rx_msg_p->rx_use_bcopy) {
		/*
		 * For loaned-up buffers, the driver reference count is
		 * incremented first, and then the free state is set.
		 */
		if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
			if (first_entry) {
				nmp->b_rptr = &nmp->b_rptr[skip_len];
				if (l2_len < bsize - skip_len) {
					nmp->b_wptr = &nmp->b_rptr[l2_len];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize
					    - skip_len];
				}
			} else {
				if (l2_len - bytes_read < bsize) {
					nmp->b_wptr =
					    &nmp->b_rptr[l2_len - bytes_read];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize];
				}
			}
		}
	} else {
		if (first_entry) {
			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
			    l2_len < bsize - skip_len ?
			    l2_len : bsize - skip_len);
		} else {
			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
			    l2_len - bytes_read < bsize ?
			    l2_len - bytes_read : bsize);
		}
	}
	if (nmp != NULL) {
		if (first_entry) {
			/*
			 * Jumbo packets may be received with more than one
			 * buffer, increment ipackets for the first entry only.
			 */
			rdc_stats->ipackets++;

			/* Update ibytes for kstat. */
			rdc_stats->ibytes += skip_len
			    + l2_len < bsize ? l2_len : bsize;
			/*
			 * Update the number of bytes read so far for the
			 * current frame.
			 */
			bytes_read = nmp->b_wptr - nmp->b_rptr;
		} else {
			rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2717 l2_len - bytes_read : bsize; 2718 bytes_read += nmp->b_wptr - nmp->b_rptr; 2719 } 2720 2721 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2722 "==> nxge_receive_packet after dupb: " 2723 "rbr consumed %d " 2724 "pktbufsz_type %d " 2725 "nmp $%p rptr $%p wptr $%p " 2726 "buf_offset %d bzise %d l2_len %d skip_len %d", 2727 rx_rbr_p->rbr_consumed, 2728 pktbufsz_type, 2729 nmp, nmp->b_rptr, nmp->b_wptr, 2730 buf_offset, bsize, l2_len, skip_len)); 2731 } else { 2732 cmn_err(CE_WARN, "!nxge_receive_packet: " 2733 "update stats (error)"); 2734 atomic_inc_32(&rx_msg_p->ref_cnt); 2735 if (buffer_free == B_TRUE) { 2736 rx_msg_p->free = B_TRUE; 2737 } 2738 MUTEX_EXIT(&rx_rbr_p->lock); 2739 nxge_freeb(rx_msg_p); 2740 return; 2741 } 2742 2743 if (buffer_free == B_TRUE) { 2744 rx_msg_p->free = B_TRUE; 2745 } 2746 2747 is_valid = (nmp != NULL); 2748 2749 rcr_p->rcvd_pkt_bytes = bytes_read; 2750 2751 MUTEX_EXIT(&rx_rbr_p->lock); 2752 2753 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2754 atomic_inc_32(&rx_msg_p->ref_cnt); 2755 nxge_freeb(rx_msg_p); 2756 } 2757 2758 if (is_valid) { 2759 nmp->b_cont = NULL; 2760 if (first_entry) { 2761 *mp = nmp; 2762 *mp_cont = NULL; 2763 } else { 2764 *mp_cont = nmp; 2765 } 2766 } 2767 2768 /* 2769 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2770 * If a packet is not fragmented and no error bit is set, then 2771 * L4 checksum is OK. 2772 */ 2773 2774 if (is_valid && !multi) { 2775 /* 2776 * If the checksum flag nxge_chksum_offload 2777 * is 1, TCP and UDP packets can be sent 2778 * up with good checksum. If the checksum flag 2779 * is set to 0, checksum reporting will apply to 2780 * TCP packets only (workaround for a hardware bug). 2781 * If the checksum flag nxge_cksum_offload is 2782 * greater than 1, both TCP and UDP packets 2783 * will not be reported its hardware checksum results. 2784 */ 2785 if (nxge_cksum_offload == 1) { 2786 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2787 pkt_type == RCR_PKT_IS_UDP) ? 2788 B_TRUE: B_FALSE); 2789 } else if (!nxge_cksum_offload) { 2790 /* TCP checksum only. */ 2791 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2792 B_TRUE: B_FALSE); 2793 } 2794 2795 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2796 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2797 is_valid, multi, is_tcp_udp, frag, error_type)); 2798 2799 if (is_tcp_udp && !frag && !error_type) { 2800 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2801 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2802 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2803 "==> nxge_receive_packet: Full tcp/udp cksum " 2804 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2805 "error %d", 2806 is_valid, multi, is_tcp_udp, frag, error_type)); 2807 } 2808 } 2809 2810 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2811 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2812 2813 *multi_p = (multi == RCR_MULTI_MASK); 2814 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2815 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2816 *multi_p, nmp, *mp, *mp_cont)); 2817 } 2818 2819 /* 2820 * Enable polling for a ring. Interrupt for the ring is disabled when 2821 * the nxge interrupt comes (see nxge_rx_intr). 
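 * This routine only sets poll_flag under the ring lock; the logical
 * group is actually disarmed the next time nxge_rx_intr() runs and sees
 * the flag set.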
2822 */ 2823 int 2824 nxge_enable_poll(void *arg) 2825 { 2826 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2827 p_rx_rcr_ring_t ringp; 2828 p_nxge_t nxgep; 2829 p_nxge_ldg_t ldgp; 2830 uint32_t channel; 2831 2832 if (ring_handle == NULL) { 2833 return (0); 2834 } 2835 2836 nxgep = ring_handle->nxgep; 2837 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2838 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2839 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2840 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2841 ldgp = ringp->ldgp; 2842 if (ldgp == NULL) { 2843 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2844 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2845 ringp->rdc)); 2846 return (0); 2847 } 2848 2849 MUTEX_ENTER(&ringp->lock); 2850 /* enable polling */ 2851 if (ringp->poll_flag == 0) { 2852 ringp->poll_flag = 1; 2853 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2854 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2855 ringp->rdc)); 2856 } 2857 2858 MUTEX_EXIT(&ringp->lock); 2859 return (0); 2860 } 2861 /* 2862 * Disable polling for a ring and enable its interrupt. 2863 */ 2864 int 2865 nxge_disable_poll(void *arg) 2866 { 2867 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2868 p_rx_rcr_ring_t ringp; 2869 p_nxge_t nxgep; 2870 uint32_t channel; 2871 2872 if (ring_handle == NULL) { 2873 return (0); 2874 } 2875 2876 nxgep = ring_handle->nxgep; 2877 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2878 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2879 2880 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2881 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2882 2883 MUTEX_ENTER(&ringp->lock); 2884 2885 /* disable polling: enable interrupt */ 2886 if (ringp->poll_flag) { 2887 npi_handle_t handle; 2888 rx_dma_ctl_stat_t cs; 2889 uint8_t channel; 2890 p_nxge_ldg_t ldgp; 2891 2892 /* 2893 * Get the control and status for this channel. 2894 */ 2895 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2896 channel = ringp->rdc; 2897 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2898 channel, &cs.value); 2899 2900 /* 2901 * Enable mailbox update 2902 * Since packets were not read and the hardware uses 2903 * bits pktread and ptrread to update the queue 2904 * length, we need to set both bits to 0. 2905 */ 2906 cs.bits.ldw.pktread = 0; 2907 cs.bits.ldw.ptrread = 0; 2908 cs.bits.hdw.mex = 1; 2909 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2910 cs.value); 2911 2912 /* 2913 * Rearm this logical group if this is a single device 2914 * group. 2915 */ 2916 ldgp = ringp->ldgp; 2917 if (ldgp == NULL) { 2918 ringp->poll_flag = 0; 2919 MUTEX_EXIT(&ringp->lock); 2920 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2921 "==> nxge_disable_poll: no ldgp rdc %d " 2922 "(still set poll to 0", ringp->rdc)); 2923 return (0); 2924 } 2925 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2926 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2927 ringp->rdc, ldgp)); 2928 if (ldgp->nldvs == 1) { 2929 ldgimgm_t mgm; 2930 mgm.value = 0; 2931 mgm.bits.ldw.arm = 1; 2932 mgm.bits.ldw.timer = ldgp->ldg_timer; 2933 NXGE_REG_WR64(handle, 2934 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 2935 } 2936 ringp->poll_flag = 0; 2937 } 2938 2939 MUTEX_EXIT(&ringp->lock); 2940 return (0); 2941 } 2942 2943 /* 2944 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
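 * Polling must already be enabled on the ring (poll_flag is asserted
 * below).  Error bits found in the control/status word are still
 * handled here via nxge_rx_err_evnts().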
2945 */ 2946 mblk_t * 2947 nxge_rx_poll(void *arg, int bytes_to_pickup) 2948 { 2949 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2950 p_rx_rcr_ring_t rcr_p; 2951 p_nxge_t nxgep; 2952 npi_handle_t handle; 2953 rx_dma_ctl_stat_t cs; 2954 mblk_t *mblk; 2955 p_nxge_ldv_t ldvp; 2956 uint32_t channel; 2957 2958 nxgep = ring_handle->nxgep; 2959 2960 /* 2961 * Get the control and status for this channel. 2962 */ 2963 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2964 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2965 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2966 MUTEX_ENTER(&rcr_p->lock); 2967 ASSERT(rcr_p->poll_flag == 1); 2968 2969 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2970 2971 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2972 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2973 rcr_p->rdc, rcr_p->poll_flag)); 2974 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2975 2976 ldvp = rcr_p->ldvp; 2977 /* error events. */ 2978 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2979 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2980 } 2981 2982 MUTEX_EXIT(&rcr_p->lock); 2983 2984 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2985 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 2986 return (mblk); 2987 } 2988 2989 2990 /*ARGSUSED*/ 2991 static nxge_status_t 2992 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2993 { 2994 p_nxge_rx_ring_stats_t rdc_stats; 2995 npi_handle_t handle; 2996 npi_status_t rs; 2997 boolean_t rxchan_fatal = B_FALSE; 2998 boolean_t rxport_fatal = B_FALSE; 2999 uint8_t portn; 3000 nxge_status_t status = NXGE_OK; 3001 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 3002 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 3003 3004 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3005 portn = nxgep->mac.portnum; 3006 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 3007 3008 if (cs.bits.hdw.rbr_tmout) { 3009 rdc_stats->rx_rbr_tmout++; 3010 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3011 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 3012 rxchan_fatal = B_TRUE; 3013 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3014 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 3015 } 3016 if (cs.bits.hdw.rsp_cnt_err) { 3017 rdc_stats->rsp_cnt_err++; 3018 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3019 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 3020 rxchan_fatal = B_TRUE; 3021 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3022 "==> nxge_rx_err_evnts(channel %d): " 3023 "rsp_cnt_err", channel)); 3024 } 3025 if (cs.bits.hdw.byte_en_bus) { 3026 rdc_stats->byte_en_bus++; 3027 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3028 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 3029 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3030 "==> nxge_rx_err_evnts(channel %d): " 3031 "fatal error: byte_en_bus", channel)); 3032 rxchan_fatal = B_TRUE; 3033 } 3034 if (cs.bits.hdw.rsp_dat_err) { 3035 rdc_stats->rsp_dat_err++; 3036 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3037 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 3038 rxchan_fatal = B_TRUE; 3039 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3040 "==> nxge_rx_err_evnts(channel %d): " 3041 "fatal error: rsp_dat_err", channel)); 3042 } 3043 if (cs.bits.hdw.rcr_ack_err) { 3044 rdc_stats->rcr_ack_err++; 3045 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3046 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 3047 rxchan_fatal = B_TRUE; 3048 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3049 "==> nxge_rx_err_evnts(channel %d): " 3050 "fatal error: rcr_ack_err", channel)); 3051 } 3052 if (cs.bits.hdw.dc_fifo_err) { 3053 rdc_stats->dc_fifo_err++; 3054 
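		/*
		 * Not fatal to this channel, but rxport_fatal is set below
		 * so that port-level (IPP) recovery is attempted.
		 */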
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3055 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 3056 /* This is not a fatal error! */ 3057 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3058 "==> nxge_rx_err_evnts(channel %d): " 3059 "dc_fifo_err", channel)); 3060 rxport_fatal = B_TRUE; 3061 } 3062 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 3063 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 3064 &rdc_stats->errlog.pre_par, 3065 &rdc_stats->errlog.sha_par)) 3066 != NPI_SUCCESS) { 3067 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3068 "==> nxge_rx_err_evnts(channel %d): " 3069 "rcr_sha_par: get perr", channel)); 3070 return (NXGE_ERROR | rs); 3071 } 3072 if (cs.bits.hdw.rcr_sha_par) { 3073 rdc_stats->rcr_sha_par++; 3074 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3075 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 3076 rxchan_fatal = B_TRUE; 3077 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3078 "==> nxge_rx_err_evnts(channel %d): " 3079 "fatal error: rcr_sha_par", channel)); 3080 } 3081 if (cs.bits.hdw.rbr_pre_par) { 3082 rdc_stats->rbr_pre_par++; 3083 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3084 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 3085 rxchan_fatal = B_TRUE; 3086 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3087 "==> nxge_rx_err_evnts(channel %d): " 3088 "fatal error: rbr_pre_par", channel)); 3089 } 3090 } 3091 /* 3092 * The Following 4 status bits are for information, the system 3093 * is running fine. There is no need to send FMA ereports or 3094 * log messages. 3095 */ 3096 if (cs.bits.hdw.port_drop_pkt) { 3097 rdc_stats->port_drop_pkt++; 3098 } 3099 if (cs.bits.hdw.wred_drop) { 3100 rdc_stats->wred_drop++; 3101 } 3102 if (cs.bits.hdw.rbr_pre_empty) { 3103 rdc_stats->rbr_pre_empty++; 3104 } 3105 if (cs.bits.hdw.rcr_shadow_full) { 3106 rdc_stats->rcr_shadow_full++; 3107 } 3108 if (cs.bits.hdw.config_err) { 3109 rdc_stats->config_err++; 3110 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3111 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3112 rxchan_fatal = B_TRUE; 3113 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3114 "==> nxge_rx_err_evnts(channel %d): " 3115 "config error", channel)); 3116 } 3117 if (cs.bits.hdw.rcrincon) { 3118 rdc_stats->rcrincon++; 3119 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3120 NXGE_FM_EREPORT_RDMC_RCRINCON); 3121 rxchan_fatal = B_TRUE; 3122 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3123 "==> nxge_rx_err_evnts(channel %d): " 3124 "fatal error: rcrincon error", channel)); 3125 } 3126 if (cs.bits.hdw.rcrfull) { 3127 rdc_stats->rcrfull++; 3128 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3129 NXGE_FM_EREPORT_RDMC_RCRFULL); 3130 rxchan_fatal = B_TRUE; 3131 if (rdc_stats->rcrfull < error_disp_cnt) 3132 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3133 "==> nxge_rx_err_evnts(channel %d): " 3134 "fatal error: rcrfull error", channel)); 3135 } 3136 if (cs.bits.hdw.rbr_empty) { 3137 /* 3138 * This bit is for information, there is no need 3139 * send FMA ereport or log a message. 
3140 */ 3141 rdc_stats->rbr_empty++; 3142 } 3143 if (cs.bits.hdw.rbrfull) { 3144 rdc_stats->rbrfull++; 3145 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3146 NXGE_FM_EREPORT_RDMC_RBRFULL); 3147 rxchan_fatal = B_TRUE; 3148 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3149 "==> nxge_rx_err_evnts(channel %d): " 3150 "fatal error: rbr_full error", channel)); 3151 } 3152 if (cs.bits.hdw.rbrlogpage) { 3153 rdc_stats->rbrlogpage++; 3154 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3155 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3156 rxchan_fatal = B_TRUE; 3157 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3158 "==> nxge_rx_err_evnts(channel %d): " 3159 "fatal error: rbr logical page error", channel)); 3160 } 3161 if (cs.bits.hdw.cfiglogpage) { 3162 rdc_stats->cfiglogpage++; 3163 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3164 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3165 rxchan_fatal = B_TRUE; 3166 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3167 "==> nxge_rx_err_evnts(channel %d): " 3168 "fatal error: cfig logical page error", channel)); 3169 } 3170 3171 if (rxport_fatal) { 3172 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3173 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3174 portn)); 3175 if (isLDOMguest(nxgep)) { 3176 status = NXGE_ERROR; 3177 } else { 3178 status = nxge_ipp_fatal_err_recover(nxgep); 3179 if (status == NXGE_OK) { 3180 FM_SERVICE_RESTORED(nxgep); 3181 } 3182 } 3183 } 3184 3185 if (rxchan_fatal) { 3186 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3187 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3188 channel)); 3189 if (isLDOMguest(nxgep)) { 3190 status = NXGE_ERROR; 3191 } else { 3192 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3193 if (status == NXGE_OK) { 3194 FM_SERVICE_RESTORED(nxgep); 3195 } 3196 } 3197 } 3198 3199 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3200 3201 return (status); 3202 } 3203 3204 /* 3205 * nxge_rdc_hvio_setup 3206 * 3207 * This code appears to setup some Hypervisor variables. 3208 * 3209 * Arguments: 3210 * nxgep 3211 * channel 3212 * 3213 * Notes: 3214 * What does NIU_LP_WORKAROUND mean? 
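 *	From the code below it appears to gate the sun4v logical-page
 *	setup: the data-buffer and control-ring base I/O addresses and
 *	sizes are recorded in the ring, presumably for the hypervisor
 *	interface.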
3215 * 3216 * NPI/NXGE function calls: 3217 * na 3218 * 3219 * Context: 3220 * Any domain 3221 */ 3222 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3223 static void 3224 nxge_rdc_hvio_setup( 3225 nxge_t *nxgep, int channel) 3226 { 3227 nxge_dma_common_t *dma_common; 3228 nxge_dma_common_t *dma_control; 3229 rx_rbr_ring_t *ring; 3230 3231 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3232 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3233 3234 ring->hv_set = B_FALSE; 3235 3236 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3237 dma_common->orig_ioaddr_pp; 3238 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3239 dma_common->orig_alength; 3240 3241 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3242 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3243 channel, ring->hv_rx_buf_base_ioaddr_pp, 3244 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3245 dma_common->orig_alength, dma_common->orig_alength)); 3246 3247 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3248 3249 ring->hv_rx_cntl_base_ioaddr_pp = 3250 (uint64_t)dma_control->orig_ioaddr_pp; 3251 ring->hv_rx_cntl_ioaddr_size = 3252 (uint64_t)dma_control->orig_alength; 3253 3254 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3255 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3256 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3257 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3258 dma_control->orig_alength, dma_control->orig_alength)); 3259 } 3260 #endif 3261 3262 /* 3263 * nxge_map_rxdma 3264 * 3265 * Map an RDC into our kernel space. 3266 * 3267 * Arguments: 3268 * nxgep 3269 * channel The channel to map. 3270 * 3271 * Notes: 3272 * 1. Allocate & initialise a memory pool, if necessary. 3273 * 2. Allocate however many receive buffers are required. 3274 * 3. Setup buffers, descriptors, and mailbox. 3275 * 3276 * NPI/NXGE function calls: 3277 * nxge_alloc_rx_mem_pool() 3278 * nxge_alloc_rbb() 3279 * nxge_map_rxdma_channel() 3280 * 3281 * Registers accessed: 3282 * 3283 * Context: 3284 * Any domain 3285 */ 3286 static nxge_status_t 3287 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3288 { 3289 nxge_dma_common_t **data; 3290 nxge_dma_common_t **control; 3291 rx_rbr_ring_t **rbr_ring; 3292 rx_rcr_ring_t **rcr_ring; 3293 rx_mbox_t **mailbox; 3294 uint32_t chunks; 3295 3296 nxge_status_t status; 3297 3298 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3299 3300 if (!nxgep->rx_buf_pool_p) { 3301 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3303 "<== nxge_map_rxdma: buf not allocated")); 3304 return (NXGE_ERROR); 3305 } 3306 } 3307 3308 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3309 return (NXGE_ERROR); 3310 3311 /* 3312 * Map descriptors from the buffer polls for each dma channel. 3313 */ 3314 3315 /* 3316 * Set up and prepare buffer blocks, descriptors 3317 * and mailbox. 
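 * The three per-channel resources come from separate pools: data
 * buffers from rx_buf_pool_p, descriptor (control) memory from
 * rx_cntl_pool_p, and the mailbox area from rx_mbox_areas_p.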
3318 */ 3319 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3320 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3321 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3322 3323 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3324 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3325 3326 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3327 3328 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3329 chunks, control, rcr_ring, mailbox); 3330 if (status != NXGE_OK) { 3331 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3332 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3333 "returned 0x%x", 3334 channel, status)); 3335 return (status); 3336 } 3337 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3338 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3339 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3340 &nxgep->statsp->rdc_stats[channel]; 3341 3342 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3343 if (!isLDOMguest(nxgep)) 3344 nxge_rdc_hvio_setup(nxgep, channel); 3345 #endif 3346 3347 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3348 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3349 3350 return (status); 3351 } 3352 3353 static void 3354 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3355 { 3356 rx_rbr_ring_t *rbr_ring; 3357 rx_rcr_ring_t *rcr_ring; 3358 rx_mbox_t *mailbox; 3359 3360 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3361 3362 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3363 !nxgep->rx_mbox_areas_p) 3364 return; 3365 3366 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3367 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3368 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3369 3370 if (!rbr_ring || !rcr_ring || !mailbox) 3371 return; 3372 3373 (void) nxge_unmap_rxdma_channel( 3374 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3375 3376 nxge_free_rxb(nxgep, channel); 3377 3378 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3379 } 3380 3381 nxge_status_t 3382 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3383 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3384 uint32_t num_chunks, 3385 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3386 p_rx_mbox_t *rx_mbox_p) 3387 { 3388 int status = NXGE_OK; 3389 3390 /* 3391 * Set up and prepare buffer blocks, descriptors 3392 * and mailbox. 3393 */ 3394 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3395 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3396 /* 3397 * Receive buffer blocks 3398 */ 3399 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3400 dma_buf_p, rbr_p, num_chunks); 3401 if (status != NXGE_OK) { 3402 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3403 "==> nxge_map_rxdma_channel (channel %d): " 3404 "map buffer failed 0x%x", channel, status)); 3405 goto nxge_map_rxdma_channel_exit; 3406 } 3407 3408 /* 3409 * Receive block ring, completion ring and mailbox. 
3410 */ 3411 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3412 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3413 if (status != NXGE_OK) { 3414 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3415 "==> nxge_map_rxdma_channel (channel %d): " 3416 "map config failed 0x%x", channel, status)); 3417 goto nxge_map_rxdma_channel_fail2; 3418 } 3419 3420 goto nxge_map_rxdma_channel_exit; 3421 3422 nxge_map_rxdma_channel_fail3: 3423 /* Free rbr, rcr */ 3424 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3425 "==> nxge_map_rxdma_channel: free rbr/rcr " 3426 "(status 0x%x channel %d)", 3427 status, channel)); 3428 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3429 *rcr_p, *rx_mbox_p); 3430 3431 nxge_map_rxdma_channel_fail2: 3432 /* Free buffer blocks */ 3433 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3434 "==> nxge_map_rxdma_channel: free rx buffers" 3435 "(nxgep 0x%x status 0x%x channel %d)", 3436 nxgep, status, channel)); 3437 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3438 3439 status = NXGE_ERROR; 3440 3441 nxge_map_rxdma_channel_exit: 3442 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3443 "<== nxge_map_rxdma_channel: " 3444 "(nxgep 0x%x status 0x%x channel %d)", 3445 nxgep, status, channel)); 3446 3447 return (status); 3448 } 3449 3450 /*ARGSUSED*/ 3451 static void 3452 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3453 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3454 { 3455 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3456 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3457 3458 /* 3459 * unmap receive block ring, completion ring and mailbox. 3460 */ 3461 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3462 rcr_p, rx_mbox_p); 3463 3464 /* unmap buffer blocks */ 3465 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3466 3467 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3468 } 3469 3470 /*ARGSUSED*/ 3471 static nxge_status_t 3472 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3473 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3474 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3475 { 3476 p_rx_rbr_ring_t rbrp; 3477 p_rx_rcr_ring_t rcrp; 3478 p_rx_mbox_t mboxp; 3479 p_nxge_dma_common_t cntl_dmap; 3480 p_nxge_dma_common_t dmap; 3481 p_rx_msg_t *rx_msg_ring; 3482 p_rx_msg_t rx_msg_p; 3483 p_rbr_cfig_a_t rcfga_p; 3484 p_rbr_cfig_b_t rcfgb_p; 3485 p_rcrcfig_a_t cfga_p; 3486 p_rcrcfig_b_t cfgb_p; 3487 p_rxdma_cfig1_t cfig1_p; 3488 p_rxdma_cfig2_t cfig2_p; 3489 p_rbr_kick_t kick_p; 3490 uint32_t dmaaddrp; 3491 uint32_t *rbr_vaddrp; 3492 uint32_t bkaddr; 3493 nxge_status_t status = NXGE_OK; 3494 int i; 3495 uint32_t nxge_port_rcr_size; 3496 3497 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3498 "==> nxge_map_rxdma_channel_cfg_ring")); 3499 3500 cntl_dmap = *dma_cntl_p; 3501 3502 /* Map in the receive block ring */ 3503 rbrp = *rbr_p; 3504 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3505 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3506 /* 3507 * Zero out buffer block ring descriptors. 
3508 */ 3509 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3510 3511 rcfga_p = &(rbrp->rbr_cfga); 3512 rcfgb_p = &(rbrp->rbr_cfgb); 3513 kick_p = &(rbrp->rbr_kick); 3514 rcfga_p->value = 0; 3515 rcfgb_p->value = 0; 3516 kick_p->value = 0; 3517 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3518 rcfga_p->value = (rbrp->rbr_addr & 3519 (RBR_CFIG_A_STDADDR_MASK | 3520 RBR_CFIG_A_STDADDR_BASE_MASK)); 3521 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3522 3523 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3524 rcfgb_p->bits.ldw.vld0 = 1; 3525 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3526 rcfgb_p->bits.ldw.vld1 = 1; 3527 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3528 rcfgb_p->bits.ldw.vld2 = 1; 3529 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3530 3531 /* 3532 * For each buffer block, enter receive block address to the ring. 3533 */ 3534 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3535 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3536 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3537 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3538 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3539 3540 rx_msg_ring = rbrp->rx_msg_ring; 3541 for (i = 0; i < rbrp->tnblocks; i++) { 3542 rx_msg_p = rx_msg_ring[i]; 3543 rx_msg_p->nxgep = nxgep; 3544 rx_msg_p->rx_rbr_p = rbrp; 3545 bkaddr = (uint32_t) 3546 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3547 >> RBR_BKADDR_SHIFT)); 3548 rx_msg_p->free = B_FALSE; 3549 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3550 3551 *rbr_vaddrp++ = bkaddr; 3552 } 3553 3554 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3555 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3556 3557 rbrp->rbr_rd_index = 0; 3558 3559 rbrp->rbr_consumed = 0; 3560 rbrp->rbr_use_bcopy = B_TRUE; 3561 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3562 /* 3563 * Do bcopy on packets greater than bcopy size once 3564 * the lo threshold is reached. 3565 * This lo threshold should be less than the hi threshold. 3566 * 3567 * Do bcopy on every packet once the hi threshold is reached. 
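 * The NXGE_RX_COPY_1 .. NXGE_RX_COPY_7 settings scale a threshold as
 * rbb_max * threshold / NXGE_RX_BCOPY_SCALE; NXGE_RX_COPY_NONE turns
 * bcopy off altogether, and NXGE_RX_COPY_ALL for the hi threshold means
 * every packet is copied.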
3568 */ 3569 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3570 /* default it to use hi */ 3571 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3572 } 3573 3574 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3575 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3576 } 3577 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3578 3579 switch (nxge_rx_threshold_hi) { 3580 default: 3581 case NXGE_RX_COPY_NONE: 3582 /* Do not do bcopy at all */ 3583 rbrp->rbr_use_bcopy = B_FALSE; 3584 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3585 break; 3586 3587 case NXGE_RX_COPY_1: 3588 case NXGE_RX_COPY_2: 3589 case NXGE_RX_COPY_3: 3590 case NXGE_RX_COPY_4: 3591 case NXGE_RX_COPY_5: 3592 case NXGE_RX_COPY_6: 3593 case NXGE_RX_COPY_7: 3594 rbrp->rbr_threshold_hi = 3595 rbrp->rbb_max * 3596 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3597 break; 3598 3599 case NXGE_RX_COPY_ALL: 3600 rbrp->rbr_threshold_hi = 0; 3601 break; 3602 } 3603 3604 switch (nxge_rx_threshold_lo) { 3605 default: 3606 case NXGE_RX_COPY_NONE: 3607 /* Do not do bcopy at all */ 3608 if (rbrp->rbr_use_bcopy) { 3609 rbrp->rbr_use_bcopy = B_FALSE; 3610 } 3611 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3612 break; 3613 3614 case NXGE_RX_COPY_1: 3615 case NXGE_RX_COPY_2: 3616 case NXGE_RX_COPY_3: 3617 case NXGE_RX_COPY_4: 3618 case NXGE_RX_COPY_5: 3619 case NXGE_RX_COPY_6: 3620 case NXGE_RX_COPY_7: 3621 rbrp->rbr_threshold_lo = 3622 rbrp->rbb_max * 3623 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3624 break; 3625 3626 case NXGE_RX_COPY_ALL: 3627 rbrp->rbr_threshold_lo = 0; 3628 break; 3629 } 3630 3631 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3632 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3633 "rbb_max %d " 3634 "rbrp->rbr_bufsize_type %d " 3635 "rbb_threshold_hi %d " 3636 "rbb_threshold_lo %d", 3637 dma_channel, 3638 rbrp->rbb_max, 3639 rbrp->rbr_bufsize_type, 3640 rbrp->rbr_threshold_hi, 3641 rbrp->rbr_threshold_lo)); 3642 3643 rbrp->page_valid.value = 0; 3644 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3645 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3646 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3647 rbrp->page_hdl.value = 0; 3648 3649 rbrp->page_valid.bits.ldw.page0 = 1; 3650 rbrp->page_valid.bits.ldw.page1 = 1; 3651 3652 /* Map in the receive completion ring */ 3653 rcrp = (p_rx_rcr_ring_t) 3654 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3655 rcrp->rdc = dma_channel; 3656 3657 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3658 rcrp->comp_size = nxge_port_rcr_size; 3659 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3660 3661 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3662 3663 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3664 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3665 sizeof (rcr_entry_t)); 3666 rcrp->comp_rd_index = 0; 3667 rcrp->comp_wt_index = 0; 3668 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3669 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3670 #if defined(__i386) 3671 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3672 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3673 #else 3674 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3675 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3676 #endif 3677 3678 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3679 (nxge_port_rcr_size - 1); 3680 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3681 (nxge_port_rcr_size - 1); 3682 3683 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3684 "==> nxge_map_rxdma_channel_cfg_ring: " 3685 "channel %d " 3686 "rbr_vaddrp $%p " 3687 "rcr_desc_rd_head_p $%p " 3688 "rcr_desc_rd_head_pp 
$%p " 3689 "rcr_desc_rd_last_p $%p " 3690 "rcr_desc_rd_last_pp $%p ", 3691 dma_channel, 3692 rbr_vaddrp, 3693 rcrp->rcr_desc_rd_head_p, 3694 rcrp->rcr_desc_rd_head_pp, 3695 rcrp->rcr_desc_last_p, 3696 rcrp->rcr_desc_last_pp)); 3697 3698 /* 3699 * Zero out buffer block ring descriptors. 3700 */ 3701 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3702 3703 rcrp->intr_timeout = (nxgep->intr_timeout < 3704 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3705 nxgep->intr_timeout; 3706 3707 rcrp->intr_threshold = (nxgep->intr_threshold < 3708 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3709 nxgep->intr_threshold; 3710 3711 rcrp->full_hdr_flag = B_FALSE; 3712 rcrp->sw_priv_hdr_len = 0; 3713 3714 cfga_p = &(rcrp->rcr_cfga); 3715 cfgb_p = &(rcrp->rcr_cfgb); 3716 cfga_p->value = 0; 3717 cfgb_p->value = 0; 3718 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3719 cfga_p->value = (rcrp->rcr_addr & 3720 (RCRCFIG_A_STADDR_MASK | 3721 RCRCFIG_A_STADDR_BASE_MASK)); 3722 3723 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3724 RCRCFIG_A_LEN_SHIF); 3725 3726 /* 3727 * Timeout should be set based on the system clock divider. 3728 * A timeout value of 1 assumes that the 3729 * granularity (1000) is 3 microseconds running at 300MHz. 3730 */ 3731 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3732 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3733 cfgb_p->bits.ldw.entout = 1; 3734 3735 /* Map in the mailbox */ 3736 mboxp = (p_rx_mbox_t) 3737 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3738 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3739 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3740 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3741 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3742 cfig1_p->value = cfig2_p->value = 0; 3743 3744 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3745 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3746 "==> nxge_map_rxdma_channel_cfg_ring: " 3747 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3748 dma_channel, cfig1_p->value, cfig2_p->value, 3749 mboxp->mbox_addr)); 3750 3751 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3752 & 0xfff); 3753 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3754 3755 3756 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3757 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3758 RXDMA_CFIG2_MBADDR_L_MASK); 3759 3760 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3761 3762 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3763 "==> nxge_map_rxdma_channel_cfg_ring: " 3764 "channel %d damaddrp $%p " 3765 "cfg1 0x%016llx cfig2 0x%016llx", 3766 dma_channel, dmaaddrp, 3767 cfig1_p->value, cfig2_p->value)); 3768 3769 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3770 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3771 3772 rbrp->rx_rcr_p = rcrp; 3773 rcrp->rx_rbr_p = rbrp; 3774 *rcr_p = rcrp; 3775 *rx_mbox_p = mboxp; 3776 3777 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3778 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3779 3780 return (status); 3781 } 3782 3783 /*ARGSUSED*/ 3784 static void 3785 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3786 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3787 { 3788 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3789 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3790 rcr_p->rdc)); 3791 3792 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3793 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3794 3795 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3796 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3797 } 3798 3799 static nxge_status_t 3800 
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3801 p_nxge_dma_common_t *dma_buf_p, 3802 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3803 { 3804 p_rx_rbr_ring_t rbrp; 3805 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3806 p_rx_msg_t *rx_msg_ring; 3807 p_rx_msg_t rx_msg_p; 3808 p_mblk_t mblk_p; 3809 3810 rxring_info_t *ring_info; 3811 nxge_status_t status = NXGE_OK; 3812 int i, j, index; 3813 uint32_t size, bsize, nblocks, nmsgs; 3814 3815 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3816 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3817 channel)); 3818 3819 dma_bufp = tmp_bufp = *dma_buf_p; 3820 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3821 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3822 "chunks bufp 0x%016llx", 3823 channel, num_chunks, dma_bufp)); 3824 3825 nmsgs = 0; 3826 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3827 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3828 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3829 "bufp 0x%016llx nblocks %d nmsgs %d", 3830 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3831 nmsgs += tmp_bufp->nblocks; 3832 } 3833 if (!nmsgs) { 3834 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3835 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3836 "no msg blocks", 3837 channel)); 3838 status = NXGE_ERROR; 3839 goto nxge_map_rxdma_channel_buf_ring_exit; 3840 } 3841 3842 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3843 3844 size = nmsgs * sizeof (p_rx_msg_t); 3845 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3846 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3847 KM_SLEEP); 3848 3849 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3850 (void *)nxgep->interrupt_cookie); 3851 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3852 (void *)nxgep->interrupt_cookie); 3853 rbrp->rdc = channel; 3854 rbrp->num_blocks = num_chunks; 3855 rbrp->tnblocks = nmsgs; 3856 rbrp->rbb_max = nmsgs; 3857 rbrp->rbr_max_size = nmsgs; 3858 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3859 3860 /* 3861 * Buffer sizes suggested by NIU architect. 3862 * 256, 512 and 2K. 3863 */ 3864 3865 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3866 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3867 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3868 3869 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3870 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3871 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3872 3873 rbrp->block_size = nxgep->rx_default_block_size; 3874 3875 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3876 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3877 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3878 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3879 } else { 3880 if (rbrp->block_size >= 0x2000) { 3881 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3882 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3883 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3884 } else { 3885 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3886 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3887 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3888 } 3889 } 3890 3891 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3892 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3893 "actual rbr max %d rbb_max %d nmsgs %d " 3894 "rbrp->block_size %d default_block_size %d " 3895 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3896 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3897 rbrp->block_size, nxgep->rx_default_block_size, 3898 nxge_rbr_size, nxge_rbr_spare_size)); 3899 3900 /* Map in buffers from the buffer pool. 
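 * Each chunk (dma_bufp) contributes dma_bufp->nblocks buffer blocks.
 * ring_info->buffer[] records each chunk's DVMA address, kernel address
 * and starting block index; nxge_rxbuf_index_info_init() below sets up
 * this table, which appears to back the completion-entry address
 * lookups done by nxge_rxbuf_pp_to_vp().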
*/ 3901 index = 0; 3902 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3903 bsize = dma_bufp->block_size; 3904 nblocks = dma_bufp->nblocks; 3905 #if defined(__i386) 3906 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3907 #else 3908 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3909 #endif 3910 ring_info->buffer[i].buf_index = i; 3911 ring_info->buffer[i].buf_size = dma_bufp->alength; 3912 ring_info->buffer[i].start_index = index; 3913 #if defined(__i386) 3914 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3915 #else 3916 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3917 #endif 3918 3919 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3920 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3921 "chunk %d" 3922 " nblocks %d chunk_size %x block_size 0x%x " 3923 "dma_bufp $%p", channel, i, 3924 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3925 dma_bufp)); 3926 3927 for (j = 0; j < nblocks; j++) { 3928 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3929 dma_bufp)) == NULL) { 3930 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3931 "allocb failed (index %d i %d j %d)", 3932 index, i, j)); 3933 goto nxge_map_rxdma_channel_buf_ring_fail1; 3934 } 3935 rx_msg_ring[index] = rx_msg_p; 3936 rx_msg_p->block_index = index; 3937 rx_msg_p->shifted_addr = (uint32_t) 3938 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3939 RBR_BKADDR_SHIFT)); 3940 3941 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3942 "index %d j %d rx_msg_p $%p mblk %p", 3943 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3944 3945 mblk_p = rx_msg_p->rx_mblk_p; 3946 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3947 3948 rbrp->rbr_ref_cnt++; 3949 index++; 3950 rx_msg_p->buf_dma.dma_channel = channel; 3951 } 3952 3953 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3954 if (dma_bufp->contig_alloc_type) { 3955 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3956 } 3957 3958 if (dma_bufp->kmem_alloc_type) { 3959 rbrp->rbr_alloc_type = KMEM_ALLOC; 3960 } 3961 3962 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3963 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3964 "chunk %d" 3965 " nblocks %d chunk_size %x block_size 0x%x " 3966 "dma_bufp $%p", 3967 channel, i, 3968 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3969 dma_bufp)); 3970 } 3971 if (i < rbrp->num_blocks) { 3972 goto nxge_map_rxdma_channel_buf_ring_fail1; 3973 } 3974 3975 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3976 "nxge_map_rxdma_channel_buf_ring: done buf init " 3977 "channel %d msg block entries %d", 3978 channel, index)); 3979 ring_info->block_size_mask = bsize - 1; 3980 rbrp->rx_msg_ring = rx_msg_ring; 3981 rbrp->dma_bufp = dma_buf_p; 3982 rbrp->ring_info = ring_info; 3983 3984 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3985 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3986 " nxge_map_rxdma_channel_buf_ring: " 3987 "channel %d done buf info init", channel)); 3988 3989 /* 3990 * Finally, permit nxge_freeb() to call nxge_post_page(). 
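 * RBR_POSTING is the normal run state; the unmap path moves the ring
 * out of this state before destroying post_lock (see
 * nxge_unmap_rxdma_channel_buf_ring()).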
/*ARGSUSED*/
static void
nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
    p_rx_rbr_ring_t rbr_p)
{
    p_rx_msg_t *rx_msg_ring;
    p_rx_msg_t rx_msg_p;
    rxring_info_t *ring_info;
    int i;
    uint32_t size;
#ifdef NXGE_DEBUG
    int num_chunks;
#endif

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_unmap_rxdma_channel_buf_ring"));
    if (rbr_p == NULL) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
        return;
    }
    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
        rbr_p->rdc));

    rx_msg_ring = rbr_p->rx_msg_ring;
    ring_info = rbr_p->ring_info;

    if (rx_msg_ring == NULL || ring_info == NULL) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_unmap_rxdma_channel_buf_ring: "
            "rx_msg_ring $%p ring_info $%p",
            rx_msg_ring, ring_info));
        return;
    }

#ifdef NXGE_DEBUG
    num_chunks = rbr_p->num_blocks;
#endif
    size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
        "tnblocks %d (max %d) size ptrs %d ",
        rbr_p->rdc, num_chunks,
        rbr_p->tnblocks, rbr_p->rbr_max_size, size));

    for (i = 0; i < rbr_p->tnblocks; i++) {
        rx_msg_p = rx_msg_ring[i];
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            " nxge_unmap_rxdma_channel_buf_ring: "
            "rx_msg_p $%p",
            rx_msg_p));
        if (rx_msg_p != NULL) {
            freeb(rx_msg_p->rx_mblk_p);
            rx_msg_ring[i] = NULL;
        }
    }

    /*
     * We may no longer use the mutex <post_lock>. By setting
     * <rbr_state> to anything but POSTING, we prevent
     * nxge_post_page() from accessing a dead mutex.
     */
    rbr_p->rbr_state = RBR_UNMAPPING;
    MUTEX_DESTROY(&rbr_p->post_lock);

    MUTEX_DESTROY(&rbr_p->lock);

    if (rbr_p->rbr_ref_cnt == 0) {
        /*
         * This is the normal state of affairs.
         * Need to free the following buffers:
         *  - data buffers
         *  - rx_msg ring
         *  - ring_info
         *  - rbr ring
         */
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "unmap_rxdma_buf_ring: No outstanding - freeing "));
        nxge_rxdma_databuf_free(rbr_p);
        KMEM_FREE(ring_info, sizeof (rxring_info_t));
        KMEM_FREE(rx_msg_ring, size);
        KMEM_FREE(rbr_p, sizeof (*rbr_p));
    } else {
        /*
         * Some of our buffers are still being used.
         * Therefore, tell nxge_freeb() this ring is
         * unmapped, so it may free <rbr_p> for us.
         */
        rbr_p->rbr_state = RBR_UNMAPPED;
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "unmap_rxdma_buf_ring: %d %s outstanding.",
            rbr_p->rbr_ref_cnt,
            rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "<== nxge_unmap_rxdma_channel_buf_ring"));
}

/*
 * nxge_rxdma_hw_start_common
 *
 * Arguments:
 *	nxgep
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_init_fzc_rx_common();
 *	nxge_init_fzc_rxdma_port();
 *
 * Registers accessed:
 *
 * Context:
 *	Service domain
 */
static nxge_status_t
nxge_rxdma_hw_start_common(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));

    /*
     * Load the sharable parameters by writing to the
     * function zero control registers. These FZC registers
     * should be initialized only once for the entire chip.
     */
    (void) nxge_init_fzc_rx_common(nxgep);

    /*
     * Initialize the RXDMA port specific FZC control configurations.
     * These FZC registers pertain to each port.
     */
    (void) nxge_init_fzc_rxdma_port(nxgep);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));

    return (status);
}

static nxge_status_t
nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
{
    int i, ndmas;
    p_rx_rbr_rings_t rx_rbr_rings;
    p_rx_rbr_ring_t *rbr_rings;
    p_rx_rcr_rings_t rx_rcr_rings;
    p_rx_rcr_ring_t *rcr_rings;
    p_rx_mbox_areas_t rx_mbox_areas_p;
    p_rx_mbox_t *rx_mbox_p;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));

    rx_rbr_rings = nxgep->rx_rbr_rings;
    rx_rcr_rings = nxgep->rx_rcr_rings;
    if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_start: NULL ring pointers"));
        return (NXGE_ERROR);
    }
    ndmas = rx_rbr_rings->ndmas;
    if (ndmas == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_start: no dma channel allocated"));
        return (NXGE_ERROR);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));

    rbr_rings = rx_rbr_rings->rbr_rings;
    rcr_rings = rx_rcr_rings->rcr_rings;
    rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
    if (rx_mbox_areas_p) {
        rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
    }

    i = channel;
    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
        ndmas, channel));
    status = nxge_rxdma_start_channel(nxgep, channel,
        (p_rx_rbr_ring_t)rbr_rings[i],
        (p_rx_rcr_ring_t)rcr_rings[i],
        (p_rx_mbox_t)rx_mbox_p[i]);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rxdma_hw_start: disable "
            "(status 0x%x channel %d)", status, channel));
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
        "rx_rbr_rings 0x%016llx rings 0x%016llx",
        rx_rbr_rings, rx_rcr_rings));

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "<== nxge_rxdma_hw_start: (status 0x%x)", status));

    return (status);
}
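
/*
 * Illustrative sketch (not compiled into the driver): nxge_rxdma_hw_stop()
 * and nxge_rxdma_hw_start() each operate on a single DMA channel, so a
 * simple way to bounce one channel is to pair them. The helper name and
 * the NXGE_RXDMA_EXAMPLE guard are hypothetical; the rings referenced by
 * nxgep->rx_rbr_rings/rx_rcr_rings must already be mapped.
 */
#ifdef NXGE_RXDMA_EXAMPLE
static nxge_status_t
nxge_example_restart_rxdma_channel(p_nxge_t nxgep, int channel)
{
    /* Quiesce the channel first ... */
    nxge_rxdma_hw_stop(nxgep, channel);

    /* ... then bring it back up using the rings already mapped. */
    return (nxge_rxdma_hw_start(nxgep, channel));
}
#endif    /* NXGE_RXDMA_EXAMPLE */
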
static void
nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
{
    p_rx_rbr_rings_t rx_rbr_rings;
    p_rx_rcr_rings_t rx_rcr_rings;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));

    rx_rbr_rings = nxgep->rx_rbr_rings;
    rx_rcr_rings = nxgep->rx_rcr_rings;
    if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_stop: NULL ring pointers"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_hw_stop(channel %d)",
        channel));
    (void) nxge_rxdma_stop_channel(nxgep, channel);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
        "rx_rbr_rings 0x%016llx rings 0x%016llx",
        rx_rbr_rings, rx_rcr_rings));

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
}


static nxge_status_t
nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    rx_dma_ctl_stat_t cs;
    rx_dma_ent_msk_t ent_mask;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
        "npi handle addr $%p acc $%p",
        nxgep->npi_handle.regp, nxgep->npi_handle.regh));

    /* Reset RXDMA channel, but not if you're a guest. */
    if (!isLDOMguest(nxgep)) {
        rs = npi_rxdma_cfg_rdc_reset(handle, channel);
        if (rs != NPI_SUCCESS) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_rxdma_start_channel: "
                "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
                channel, rs));
            return (NXGE_ERROR | rs);
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_rxdma_start_channel: reset done: channel %d",
            channel));
    }

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    if (isLDOMguest(nxgep))
        (void) nxge_rdc_lp_conf(nxgep, channel);
#endif

    /*
     * Initialize the RXDMA channel specific FZC control
     * configurations. These FZC registers pertain
     * to each RX channel (logical pages).
     */
    if (!isLDOMguest(nxgep)) {
        status = nxge_init_fzc_rxdma_channel(nxgep, channel);
        if (status != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_rxdma_start_channel: "
                "init fzc rxdma failed (0x%08x channel %d)",
                status, channel));
            return (status);
        }

        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_rxdma_start_channel: fzc done"));
    }

    /* Set up the interrupt event masks. */
    ent_mask.value = 0;
    ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
    rs = npi_rxdma_event_mask(handle, OP_SET, channel,
        &ent_mask);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rxdma_start_channel: "
            "init rxdma event masks failed "
            "(0x%08x channel %d)",
            rs, channel));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_start_channel: "
        "event done: channel %d (mask 0x%016llx)",
        channel, ent_mask.value));

    /* Initialize the receive DMA control and status register */
    cs.value = 0;
    cs.bits.hdw.mex = 1;
    cs.bits.hdw.rcrthres = 1;
    cs.bits.hdw.rcrto = 1;
    cs.bits.hdw.rbr_empty = 1;
    status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
        "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rxdma_start_channel: "
            "init rxdma control register failed (0x%08x channel %d)",
            status, channel));
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
        "control done - channel %d cs 0x%016llx", channel, cs.value));

    /*
     * Load RXDMA descriptors, buffers, mailbox,
     * initialise the receive DMA channels and
     * enable each DMA channel.
     */
    status = nxge_enable_rxdma_channel(nxgep,
        channel, rbr_p, rcr_p, mbox_p);

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_start_channel: "
            " enable rxdma failed (0x%08x channel %d)",
            status, channel));
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_start_channel: enabled channel %d", channel));

    if (isLDOMguest(nxgep)) {
        /* Add interrupt handler for this channel. */
        if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel)
            != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                " nxge_rxdma_start_channel: "
                " nxge_hio_intr_add failed (0x%08x channel %d)",
                status, channel));
        }
    }

    ent_mask.value = 0;
    ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
        RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
    rs = npi_rxdma_event_mask(handle, OP_SET, channel,
        &ent_mask);
    if (rs != NPI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "==> nxge_rxdma_start_channel: "
            "init rxdma event masks failed (0x%08x channel %d)",
            rs, channel));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
        "control done - channel %d cs 0x%016llx", channel, cs.value));

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));

    return (NXGE_OK);
}
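
/*
 * Illustrative sketch (guarded out of the build): the control/status
 * programming above arms the RCR threshold and RCR time-out interrupts
 * and enables mailbox updates by setting the corresponding bits of
 * rx_dma_ctl_stat_t before handing it to
 * nxge_init_rxdma_channel_cntl_stat(). The helper below isolates just
 * that step; its name and the NXGE_RXDMA_EXAMPLE guard are hypothetical.
 */
#ifdef NXGE_RXDMA_EXAMPLE
static nxge_status_t
nxge_example_arm_rcr_interrupts(p_nxge_t nxgep, uint16_t channel)
{
    rx_dma_ctl_stat_t cs;

    cs.value = 0;
    cs.bits.hdw.mex = 1;        /* mailbox updates */
    cs.bits.hdw.rcrthres = 1;   /* RCR packet-count threshold */
    cs.bits.hdw.rcrto = 1;      /* RCR time-out */
    cs.bits.hdw.rbr_empty = 1;  /* RBR empty notification */

    return (nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs));
}
#endif    /* NXGE_RXDMA_EXAMPLE */
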
static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    rx_dma_ctl_stat_t cs;
    rx_dma_ent_msk_t ent_mask;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
        "npi handle addr $%p acc $%p",
        nxgep->npi_handle.regp, nxgep->npi_handle.regh));

    if (!isLDOMguest(nxgep)) {
        /*
         * Stop RxMAC = A.9.2.6
         */
        if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_rxdma_stop_channel: "
                "Failed to disable RxMAC"));
        }

        /*
         * Drain IPP Port = A.9.3.6
         */
        (void) nxge_ipp_drain(nxgep);
    }

    /* Reset RXDMA channel */
    rs = npi_rxdma_cfg_rdc_reset(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_stop_channel: "
            " reset rxdma failed (0x%08x channel %d)",
            rs, channel));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_stop_channel: reset done"));

    /* Set up the interrupt event masks. */
    ent_mask.value = RX_DMA_ENT_MSK_ALL;
    rs = npi_rxdma_event_mask(handle, OP_SET, channel,
        &ent_mask);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rxdma_stop_channel: "
            "set rxdma event masks failed (0x%08x channel %d)",
            rs, channel));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_stop_channel: event done"));

    /*
     * Initialize the receive DMA control and status register
     */
    cs.value = 0;
    status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
        " to default (all 0s) 0x%08x", cs.value));
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_stop_channel: init rxdma"
            " control register failed (0x%08x channel %d)",
            status, channel));
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_stop_channel: control done"));

    /*
     * Make sure channel is disabled.
     */
    status = nxge_disable_rxdma_channel(nxgep, channel);

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_stop_channel: "
            " disable rxdma failed (0x%08x channel %d)",
            status, channel));
        return (status);
    }

    if (!isLDOMguest(nxgep)) {
        /*
         * Enable RxMAC = A.9.2.10
         */
        if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_rxdma_stop_channel: Rx MAC still disabled"));
        }
    }

    NXGE_DEBUG_MSG((nxgep,
        RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

    return (NXGE_OK);
}
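
/*
 * Illustrative sketch (guarded out of the build): both the stop path above
 * and the fatal-error recovery below silence a channel by writing
 * RX_DMA_ENT_MSK_ALL through npi_rxdma_event_mask() before resetting the
 * RDC. The helper below shows that masking step on its own; its name and
 * the NXGE_RXDMA_EXAMPLE guard are hypothetical.
 */
#ifdef NXGE_RXDMA_EXAMPLE
static npi_status_t
nxge_example_mask_all_rxdma_events(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rx_dma_ent_msk_t ent_mask;

    /* A set bit in the event mask disables that interrupt source. */
    ent_mask.value = RX_DMA_ENT_MSK_ALL;
    return (npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask));
}
#endif    /* NXGE_RXDMA_EXAMPLE */
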
nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
    npi_handle_t handle;
    p_nxge_rdc_sys_stats_t statsp;
    rx_ctl_dat_fifo_stat_t stat;
    uint32_t zcp_err_status;
    uint32_t ipp_err_status;
    nxge_status_t status = NXGE_OK;
    npi_status_t rs = NPI_SUCCESS;
    boolean_t my_err = B_FALSE;

    handle = nxgep->npi_handle;
    statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

    rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);

    if (rs != NPI_SUCCESS)
        return (NXGE_ERROR | rs);

    if (stat.bits.ldw.id_mismatch) {
        statsp->id_mismatch++;
        NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
            NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
        /* Global fatal error encountered */
    }

    if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
        switch (nxgep->mac.portnum) {
        case 0:
            if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
                (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
                my_err = B_TRUE;
                zcp_err_status = stat.bits.ldw.zcp_eop_err;
                ipp_err_status = stat.bits.ldw.ipp_eop_err;
            }
            break;
        case 1:
            if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
                (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
                my_err = B_TRUE;
                zcp_err_status = stat.bits.ldw.zcp_eop_err;
                ipp_err_status = stat.bits.ldw.ipp_eop_err;
            }
            break;
        case 2:
            if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
                (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
                my_err = B_TRUE;
                zcp_err_status = stat.bits.ldw.zcp_eop_err;
                ipp_err_status = stat.bits.ldw.ipp_eop_err;
            }
            break;
        case 3:
            if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
                (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
                my_err = B_TRUE;
                zcp_err_status = stat.bits.ldw.zcp_eop_err;
                ipp_err_status = stat.bits.ldw.ipp_eop_err;
            }
            break;
        default:
            return (NXGE_ERROR);
        }
    }

    if (my_err) {
        status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
            zcp_err_status);
        if (status != NXGE_OK)
            return (status);
    }

    return (NXGE_OK);
}
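
/*
 * Illustrative sketch (guarded out of the build): the per-port switch above
 * tests the same condition for each port against its FIFO_EOP_PORTn bit.
 * Assuming FIFO_EOP_PORTn is simply bit n -- which is what the
 * (0x1 << portn) tests in nxge_rxdma_handle_port_errors() below suggest --
 * the check collapses to a single mask test, as sketched here. The helper
 * name and the NXGE_RXDMA_EXAMPLE guard are hypothetical.
 */
#ifdef NXGE_RXDMA_EXAMPLE
static boolean_t
nxge_example_port_eop_error(rx_ctl_dat_fifo_stat_t stat, uint8_t portn)
{
    uint32_t port_bit = (0x1 << portn);

    return (((stat.bits.ldw.zcp_eop_err & port_bit) ||
        (stat.bits.ldw.ipp_eop_err & port_bit)) ? B_TRUE : B_FALSE);
}
#endif    /* NXGE_RXDMA_EXAMPLE */
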
static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
    uint32_t zcp_status)
{
    boolean_t rxport_fatal = B_FALSE;
    p_nxge_rdc_sys_stats_t statsp;
    nxge_status_t status = NXGE_OK;
    uint8_t portn;

    portn = nxgep->mac.portnum;
    statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

    if (ipp_status & (0x1 << portn)) {
        statsp->ipp_eop_err++;
        NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
            NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
        rxport_fatal = B_TRUE;
    }

    if (zcp_status & (0x1 << portn)) {
        statsp->zcp_eop_err++;
        NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
            NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
        rxport_fatal = B_TRUE;
    }

    if (rxport_fatal) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_handle_port_error: "
            " fatal error on Port #%d\n",
            portn));
        status = nxge_rx_port_fatal_err_recover(nxgep);
        if (status == NXGE_OK) {
            FM_SERVICE_RESTORED(nxgep);
        }
    }

    return (status);
}

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;
    p_rx_rbr_ring_t rbrp;
    p_rx_rcr_ring_t rcrp;
    p_rx_mbox_t mboxp;
    rx_dma_ent_msk_t ent_mask;
    p_nxge_dma_common_t dmap;
    int ring_idx;
    uint32_t ref_cnt;
    p_rx_msg_t rx_msg_p;
    int i;
    uint32_t nxge_port_rcr_size;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "Recovering from RxDMAChannel#%d error...", channel));

    /*
     * Stop the DMA channel and wait for the stop-done indication.
     * If the stop-done bit is not set, then flag an error.
     */

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

    ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
    rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
    rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];

    MUTEX_ENTER(&rcrp->lock);
    MUTEX_ENTER(&rbrp->lock);
    MUTEX_ENTER(&rbrp->post_lock);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

    rs = npi_rxdma_cfg_rdc_disable(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_disable_rxdma_channel:failed"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

    /* Disable interrupt */
    ent_mask.value = RX_DMA_ENT_MSK_ALL;
    rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rxdma_stop_channel: "
            "set rxdma event masks failed (channel %d)",
            channel));
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

    /* Reset RXDMA channel */
    rs = npi_rxdma_cfg_rdc_reset(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rxdma_fatal_err_recover: "
            " reset rxdma failed (channel %d)", channel));
        goto fail;
    }

    nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

    mboxp =
        (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];

    rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
    rbrp->rbr_rd_index = 0;

    rcrp->comp_rd_index = 0;
    rcrp->comp_wt_index = 0;
    rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
        (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
    rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
        (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
    rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
        (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

    rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
        (nxge_port_rcr_size - 1);
    rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
        (nxge_port_rcr_size - 1);

    dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
    bzero((caddr_t)dmap->kaddrp, dmap->alength);

    cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

    for (i = 0; i < rbrp->rbr_max_size; i++) {
        rx_msg_p = rbrp->rx_msg_ring[i];
        ref_cnt = rx_msg_p->ref_cnt;
        if (ref_cnt != 1) {
            if (rx_msg_p->cur_usage_cnt !=
                rx_msg_p->max_usage_cnt) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "buf[%d]: cur_usage_cnt = %d "
                    "max_usage_cnt = %d\n", i,
                    rx_msg_p->cur_usage_cnt,
                    rx_msg_p->max_usage_cnt));
            } else {
                /* Buffer can be re-posted */
                rx_msg_p->free = B_TRUE;
                rx_msg_p->cur_usage_cnt = 0;
                rx_msg_p->max_usage_cnt = 0xbaddcafe;
                rx_msg_p->pkt_buf_size = 0;
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

    status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
    if (status != NXGE_OK) {
        goto fail;
    }

    MUTEX_EXIT(&rbrp->post_lock);
    MUTEX_EXIT(&rbrp->lock);
    MUTEX_EXIT(&rcrp->lock);

    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "Recovery Successful, RxDMAChannel#%d Restored",
        channel));
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));

    return (NXGE_OK);
fail:
    MUTEX_EXIT(&rbrp->post_lock);
    MUTEX_EXIT(&rbrp->lock);
    MUTEX_EXIT(&rcrp->lock);
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));

    return (NXGE_ERROR | rs);
}
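
/*
 * Illustrative sketch (guarded out of the build): the recovery path above
 * quiesces a channel while holding rcrp->lock, then rbrp->lock, then
 * rbrp->post_lock, and releases them in the reverse order on both the
 * success and failure paths. The pair of helpers below simply restates
 * that ordering; their names and the NXGE_RXDMA_EXAMPLE guard are
 * hypothetical.
 */
#ifdef NXGE_RXDMA_EXAMPLE
static void
nxge_example_rxdma_quiesce_enter(p_rx_rbr_ring_t rbrp, p_rx_rcr_ring_t rcrp)
{
    /* Acquire in the same order used by nxge_rxdma_fatal_err_recover(). */
    MUTEX_ENTER(&rcrp->lock);
    MUTEX_ENTER(&rbrp->lock);
    MUTEX_ENTER(&rbrp->post_lock);
}

static void
nxge_example_rxdma_quiesce_exit(p_rx_rbr_ring_t rbrp, p_rx_rcr_ring_t rcrp)
{
    /* Release in reverse acquisition order. */
    MUTEX_EXIT(&rbrp->post_lock);
    MUTEX_EXIT(&rbrp->lock);
    MUTEX_EXIT(&rcrp->lock);
}
#endif    /* NXGE_RXDMA_EXAMPLE */
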
nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    nxge_status_t status = NXGE_OK;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "Recovering from RxPort error..."));
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));

    if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
        goto fail;

    NXGE_DELAY(1000);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            if (nxge_rxdma_fatal_err_recover(nxgep, rdc)
                != NXGE_OK) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "Could not recover channel %d", rdc));
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));

    /* Reset IPP */
    if (nxge_ipp_reset(nxgep) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to reset IPP"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

    /* Reset RxMAC */
    if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to reset RxMAC"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

    /* Re-Initialize IPP */
    if (nxge_ipp_init(nxgep) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to init IPP"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

    /* Re-Initialize RxMAC */
    if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to init RxMAC"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));

    /* Re-enable RxMAC */
    if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to enable RxMAC"));
        goto fail;
    }

    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "Recovery Successful, RxPort Restored"));

    return (NXGE_OK);
fail:
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
    return (status);
}

void
nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
{
    rx_dma_ctl_stat_t cs;
    rx_ctl_dat_fifo_stat_t cdfs;

    switch (err_id) {
    case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
    case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
    case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
    case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
    case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
    case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
    case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
    case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
    case NXGE_FM_EREPORT_RDMC_RCRINCON:
    case NXGE_FM_EREPORT_RDMC_RCRFULL:
    case NXGE_FM_EREPORT_RDMC_RBRFULL:
    case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
    case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
    case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
        RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
            chan, &cs.value);
        if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
            cs.bits.hdw.rcr_ack_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
            cs.bits.hdw.dc_fifo_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
            cs.bits.hdw.rcr_sha_par = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
            cs.bits.hdw.rbr_pre_par = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
            cs.bits.hdw.rbr_tmout = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
            cs.bits.hdw.rsp_cnt_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
            cs.bits.hdw.byte_en_bus = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
            cs.bits.hdw.rsp_dat_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
            cs.bits.hdw.config_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
            cs.bits.hdw.rcrincon = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
            cs.bits.hdw.rcrfull = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
            cs.bits.hdw.rbrfull = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
            cs.bits.hdw.rbrlogpage = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
            cs.bits.hdw.cfiglogpage = 1;
#if defined(__i386)
        cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
            cs.value);
#else
        cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
            cs.value);
#endif
        RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
            chan, cs.value);
        break;
    case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
    case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
    case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
        cdfs.value = 0;
        if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
            cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
        else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
            cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
        else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
            cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
#if defined(__i386)
        cmn_err(CE_NOTE,
            "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
            cdfs.value);
#else
        cmn_err(CE_NOTE,
            "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
            cdfs.value);
#endif
        NXGE_REG_WR64(nxgep->npi_handle,
            RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
        break;
    case NXGE_FM_EREPORT_RDMC_DCF_ERR:
        break;
    case NXGE_FM_EREPORT_RDMC_RCR_ERR:
        break;
    }
}

static void
nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
{
    rxring_info_t *ring_info;
    int index;
    uint32_t chunk_size;
    uint64_t kaddr;
    uint_t num_blocks;

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));

    if (rbr_p == NULL) {
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
            "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
        return;
    }

    if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
            "==> nxge_rxdma_databuf_free: DDI"));
        return;
    }

    ring_info = rbr_p->ring_info;
    if (ring_info == NULL) {
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
            "==> nxge_rxdma_databuf_free: NULL ring info"));
        return;
    }
    num_blocks = rbr_p->num_blocks;
    for (index = 0; index < num_blocks; index++) {
        kaddr = ring_info->buffer[index].kaddr;
        chunk_size = ring_info->buffer[index].buf_size;
        NXGE_DEBUG_MSG((NULL, DMA_CTL,
            "==> nxge_rxdma_databuf_free: free chunk %d "
            "kaddrp $%p chunk size %d",
            index, kaddr, chunk_size));
        if (kaddr == NULL)
            continue;
        nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
        ring_info->buffer[index].kaddr = NULL;
    }

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
extern void contig_mem_free(void *, size_t);
#endif

void
nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
{
    NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));

    if (kaddr == NULL || !buf_size) {
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
            "==> nxge_free_buf: invalid kaddr $%p size to free %d",
            kaddr, buf_size));
        return;
    }

    switch (alloc_type) {
    case KMEM_ALLOC:
        NXGE_DEBUG_MSG((NULL, DMA_CTL,
            "==> nxge_free_buf: freeing kmem $%p size %d",
            kaddr, buf_size));
#if defined(__i386)
        KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
#else
        KMEM_FREE((void *)kaddr, buf_size);
#endif
        break;

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
    case CONTIG_MEM_ALLOC:
        NXGE_DEBUG_MSG((NULL, DMA_CTL,
            "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
            kaddr, buf_size));
        contig_mem_free((void *)kaddr, buf_size);
        break;
#endif

    default:
        NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
            "<== nxge_free_buf: unsupported alloc type %d",
            alloc_type));
        return;
    }

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
}
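
/*
 * Illustrative sketch (guarded out of the build): nxge_rxdma_inject_err()
 * above exercises the error paths by setting bits in the
 * RX_DMA_CTL_STAT_DBG_REG / RX_CTL_DAT_FIFO_STAT_DBG_REG debug registers.
 * A debug-only caller might drive it as below; the function name and the
 * NXGE_RXDMA_EXAMPLE guard are hypothetical.
 */
#ifdef NXGE_RXDMA_EXAMPLE
static void
nxge_example_inject_rbr_timeout(p_nxge_t nxgep, uint8_t channel)
{
    /* Fault injection: fake an RBR timeout on the given channel. */
    nxge_rxdma_inject_err(nxgep, NXGE_FM_EREPORT_RDMC_RBR_TMOUT, channel);
}
#endif    /* NXGE_RXDMA_EXAMPLE */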