/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * XXX: This is a tunable to limit the number of packets each interrupt
 * handles. 0 (default) means that each interrupt takes as many packets
 * as it finds.
 */
extern int		nxge_max_intr_pkts;

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t	nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

extern uint16_t nxge_rcr_timeout;
extern uint16_t nxge_rcr_threshold;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
74 */ 75 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 76 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 77 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 78 79 extern uint32_t nxge_cksum_offload; 80 81 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 82 static void nxge_unmap_rxdma(p_nxge_t, int); 83 84 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 85 86 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 87 static void nxge_rxdma_hw_stop(p_nxge_t, int); 88 89 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 90 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 91 uint32_t, 92 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 93 p_rx_mbox_t *); 94 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 95 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 96 97 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 98 uint16_t, 99 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 100 p_rx_rcr_ring_t *, p_rx_mbox_t *); 101 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 102 p_rx_rcr_ring_t, p_rx_mbox_t); 103 104 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 105 uint16_t, 106 p_nxge_dma_common_t *, 107 p_rx_rbr_ring_t *, uint32_t); 108 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 109 p_rx_rbr_ring_t); 110 111 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 112 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 113 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 114 115 static mblk_t * 116 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 117 118 static void nxge_receive_packet(p_nxge_t, 119 p_rx_rcr_ring_t, 120 p_rcr_entry_t, 121 boolean_t *, 122 mblk_t **, mblk_t **); 123 124 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 125 126 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 127 static void nxge_freeb(p_rx_msg_t); 128 static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 129 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 130 131 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 132 uint32_t, uint32_t); 133 134 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 135 p_rx_rbr_ring_t); 136 137 138 static nxge_status_t 139 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 140 141 nxge_status_t 142 nxge_rx_port_fatal_err_recover(p_nxge_t); 143 144 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 145 146 nxge_status_t 147 nxge_init_rxdma_channels(p_nxge_t nxgep) 148 { 149 nxge_grp_set_t *set = &nxgep->rx_set; 150 int i, count, channel; 151 nxge_grp_t *group; 152 dc_map_t map; 153 int dev_gindex; 154 155 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 156 157 if (!isLDOMguest(nxgep)) { 158 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 159 cmn_err(CE_NOTE, "hw_start_common"); 160 return (NXGE_ERROR); 161 } 162 } 163 164 /* 165 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 166 * We only have 8 hardware RDC tables, but we may have 167 * up to 16 logical (software-defined) groups of RDCS, 168 * if we make use of layer 3 & 4 hardware classification. 
169 */ 170 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 171 if ((1 << i) & set->lg.map) { 172 group = set->group[i]; 173 dev_gindex = 174 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 175 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 176 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 177 if ((1 << channel) & map) { 178 if ((nxge_grp_dc_add(nxgep, 179 group, VP_BOUND_RX, channel))) 180 goto init_rxdma_channels_exit; 181 } 182 } 183 } 184 if (++count == set->lg.count) 185 break; 186 } 187 188 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 189 return (NXGE_OK); 190 191 init_rxdma_channels_exit: 192 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 193 if ((1 << i) & set->lg.map) { 194 group = set->group[i]; 195 dev_gindex = 196 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 197 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 198 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 199 if ((1 << channel) & map) { 200 nxge_grp_dc_remove(nxgep, 201 VP_BOUND_RX, channel); 202 } 203 } 204 } 205 if (++count == set->lg.count) 206 break; 207 } 208 209 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 210 return (NXGE_ERROR); 211 } 212 213 nxge_status_t 214 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 215 { 216 nxge_status_t status; 217 218 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 219 220 status = nxge_map_rxdma(nxge, channel); 221 if (status != NXGE_OK) { 222 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 223 "<== nxge_init_rxdma: status 0x%x", status)); 224 return (status); 225 } 226 227 #if defined(sun4v) 228 if (isLDOMguest(nxge)) { 229 /* set rcr_ring */ 230 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 231 232 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 233 if (status != NXGE_OK) { 234 nxge_unmap_rxdma(nxge, channel); 235 return (status); 236 } 237 } 238 #endif 239 240 status = nxge_rxdma_hw_start(nxge, channel); 241 if (status != NXGE_OK) { 242 nxge_unmap_rxdma(nxge, channel); 243 } 244 245 if (!nxge->statsp->rdc_ksp[channel]) 246 nxge_setup_rdc_kstats(nxge, channel); 247 248 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 249 "<== nxge_init_rxdma_channel: status 0x%x", status)); 250 251 return (status); 252 } 253 254 void 255 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 256 { 257 nxge_grp_set_t *set = &nxgep->rx_set; 258 int rdc; 259 260 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 261 262 if (set->owned.map == 0) { 263 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 264 "nxge_uninit_rxdma_channels: no channels")); 265 return; 266 } 267 268 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 269 if ((1 << rdc) & set->owned.map) { 270 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 271 } 272 } 273 274 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 275 } 276 277 void 278 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 279 { 280 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 281 282 if (nxgep->statsp->rdc_ksp[channel]) { 283 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 284 nxgep->statsp->rdc_ksp[channel] = 0; 285 } 286 287 nxge_rxdma_hw_stop(nxgep, channel); 288 nxge_unmap_rxdma(nxgep, channel); 289 290 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 291 } 292 293 nxge_status_t 294 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 295 { 296 npi_handle_t handle; 297 npi_status_t rs = NPI_SUCCESS; 298 nxge_status_t status = NXGE_OK; 299 300 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 
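	/*
	 * The reset is a single NPI call; an NPI failure is folded into
	 * the returned status as (NXGE_ERROR | rs).
	 */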
301 302 handle = NXGE_DEV_NPI_HANDLE(nxgep); 303 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 304 305 if (rs != NPI_SUCCESS) { 306 status = NXGE_ERROR | rs; 307 } 308 309 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 310 311 return (status); 312 } 313 314 void 315 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 316 { 317 nxge_grp_set_t *set = &nxgep->rx_set; 318 int rdc; 319 320 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 321 322 if (!isLDOMguest(nxgep)) { 323 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 324 (void) npi_rxdma_dump_fzc_regs(handle); 325 } 326 327 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 328 NXGE_DEBUG_MSG((nxgep, TX_CTL, 329 "nxge_rxdma_regs_dump_channels: " 330 "NULL ring pointer(s)")); 331 return; 332 } 333 334 if (set->owned.map == 0) { 335 NXGE_DEBUG_MSG((nxgep, RX_CTL, 336 "nxge_rxdma_regs_dump_channels: no channels")); 337 return; 338 } 339 340 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 341 if ((1 << rdc) & set->owned.map) { 342 rx_rbr_ring_t *ring = 343 nxgep->rx_rbr_rings->rbr_rings[rdc]; 344 if (ring) { 345 (void) nxge_dump_rxdma_channel(nxgep, rdc); 346 } 347 } 348 } 349 350 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 351 } 352 353 nxge_status_t 354 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 355 { 356 npi_handle_t handle; 357 npi_status_t rs = NPI_SUCCESS; 358 nxge_status_t status = NXGE_OK; 359 360 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 361 362 handle = NXGE_DEV_NPI_HANDLE(nxgep); 363 rs = npi_rxdma_dump_rdc_regs(handle, channel); 364 365 if (rs != NPI_SUCCESS) { 366 status = NXGE_ERROR | rs; 367 } 368 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 369 return (status); 370 } 371 372 nxge_status_t 373 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 374 p_rx_dma_ent_msk_t mask_p) 375 { 376 npi_handle_t handle; 377 npi_status_t rs = NPI_SUCCESS; 378 nxge_status_t status = NXGE_OK; 379 380 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 381 "<== nxge_init_rxdma_channel_event_mask")); 382 383 handle = NXGE_DEV_NPI_HANDLE(nxgep); 384 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 385 if (rs != NPI_SUCCESS) { 386 status = NXGE_ERROR | rs; 387 } 388 389 return (status); 390 } 391 392 nxge_status_t 393 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 394 p_rx_dma_ctl_stat_t cs_p) 395 { 396 npi_handle_t handle; 397 npi_status_t rs = NPI_SUCCESS; 398 nxge_status_t status = NXGE_OK; 399 400 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 401 "<== nxge_init_rxdma_channel_cntl_stat")); 402 403 handle = NXGE_DEV_NPI_HANDLE(nxgep); 404 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 405 406 if (rs != NPI_SUCCESS) { 407 status = NXGE_ERROR | rs; 408 } 409 410 return (status); 411 } 412 413 /* 414 * nxge_rxdma_cfg_rdcgrp_default_rdc 415 * 416 * Set the default RDC for an RDC Group (Table) 417 * 418 * Arguments: 419 * nxgep 420 * rdcgrp The group to modify 421 * rdc The new default RDC. 
422 * 423 * Notes: 424 * 425 * NPI/NXGE function calls: 426 * npi_rxdma_cfg_rdc_table_default_rdc() 427 * 428 * Registers accessed: 429 * RDC_TBL_REG: FZC_ZCP + 0x10000 430 * 431 * Context: 432 * Service domain 433 */ 434 nxge_status_t 435 nxge_rxdma_cfg_rdcgrp_default_rdc( 436 p_nxge_t nxgep, 437 uint8_t rdcgrp, 438 uint8_t rdc) 439 { 440 npi_handle_t handle; 441 npi_status_t rs = NPI_SUCCESS; 442 p_nxge_dma_pt_cfg_t p_dma_cfgp; 443 p_nxge_rdc_grp_t rdc_grp_p; 444 uint8_t actual_rdcgrp, actual_rdc; 445 446 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 447 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 448 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 449 450 handle = NXGE_DEV_NPI_HANDLE(nxgep); 451 452 /* 453 * This has to be rewritten. Do we even allow this anymore? 454 */ 455 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 456 RDC_MAP_IN(rdc_grp_p->map, rdc); 457 rdc_grp_p->def_rdc = rdc; 458 459 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 460 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 461 462 rs = npi_rxdma_cfg_rdc_table_default_rdc( 463 handle, actual_rdcgrp, actual_rdc); 464 465 if (rs != NPI_SUCCESS) { 466 return (NXGE_ERROR | rs); 467 } 468 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 469 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 470 return (NXGE_OK); 471 } 472 473 nxge_status_t 474 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 475 { 476 npi_handle_t handle; 477 478 uint8_t actual_rdc; 479 npi_status_t rs = NPI_SUCCESS; 480 481 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 482 " ==> nxge_rxdma_cfg_port_default_rdc")); 483 484 handle = NXGE_DEV_NPI_HANDLE(nxgep); 485 actual_rdc = rdc; /* XXX Hack! */ 486 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 487 488 489 if (rs != NPI_SUCCESS) { 490 return (NXGE_ERROR | rs); 491 } 492 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 493 " <== nxge_rxdma_cfg_port_default_rdc")); 494 495 return (NXGE_OK); 496 } 497 498 nxge_status_t 499 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 500 uint16_t pkts) 501 { 502 npi_status_t rs = NPI_SUCCESS; 503 npi_handle_t handle; 504 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 505 " ==> nxge_rxdma_cfg_rcr_threshold")); 506 handle = NXGE_DEV_NPI_HANDLE(nxgep); 507 508 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 509 510 if (rs != NPI_SUCCESS) { 511 return (NXGE_ERROR | rs); 512 } 513 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 514 return (NXGE_OK); 515 } 516 517 nxge_status_t 518 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 519 uint16_t tout, uint8_t enable) 520 { 521 npi_status_t rs = NPI_SUCCESS; 522 npi_handle_t handle; 523 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 524 handle = NXGE_DEV_NPI_HANDLE(nxgep); 525 if (enable == 0) { 526 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 527 } else { 528 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 529 tout); 530 } 531 532 if (rs != NPI_SUCCESS) { 533 return (NXGE_ERROR | rs); 534 } 535 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 536 return (NXGE_OK); 537 } 538 539 nxge_status_t 540 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 541 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 542 { 543 npi_handle_t handle; 544 rdc_desc_cfg_t rdc_desc; 545 p_rcrcfig_b_t cfgb_p; 546 npi_status_t rs = NPI_SUCCESS; 547 548 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 549 handle = NXGE_DEV_NPI_HANDLE(nxgep); 550 /* 551 * Use configuration data composed at init time. 
552 * Write to hardware the receive ring configurations. 553 */ 554 rdc_desc.mbox_enable = 1; 555 rdc_desc.mbox_addr = mbox_p->mbox_addr; 556 NXGE_DEBUG_MSG((nxgep, RX_CTL, 557 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 558 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 559 560 rdc_desc.rbr_len = rbr_p->rbb_max; 561 rdc_desc.rbr_addr = rbr_p->rbr_addr; 562 563 switch (nxgep->rx_bksize_code) { 564 case RBR_BKSIZE_4K: 565 rdc_desc.page_size = SIZE_4KB; 566 break; 567 case RBR_BKSIZE_8K: 568 rdc_desc.page_size = SIZE_8KB; 569 break; 570 case RBR_BKSIZE_16K: 571 rdc_desc.page_size = SIZE_16KB; 572 break; 573 case RBR_BKSIZE_32K: 574 rdc_desc.page_size = SIZE_32KB; 575 break; 576 } 577 578 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 579 rdc_desc.valid0 = 1; 580 581 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 582 rdc_desc.valid1 = 1; 583 584 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 585 rdc_desc.valid2 = 1; 586 587 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 588 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 589 590 rdc_desc.rcr_len = rcr_p->comp_size; 591 rdc_desc.rcr_addr = rcr_p->rcr_addr; 592 593 cfgb_p = &(rcr_p->rcr_cfgb); 594 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 595 /* For now, disable this timeout in a guest domain. */ 596 if (isLDOMguest(nxgep)) { 597 rdc_desc.rcr_timeout = 0; 598 rdc_desc.rcr_timeout_enable = 0; 599 } else { 600 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 601 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 602 } 603 604 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 605 "rbr_len qlen %d pagesize code %d rcr_len %d", 606 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 607 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 608 "size 0 %d size 1 %d size 2 %d", 609 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 610 rbr_p->npi_pkt_buf_size2)); 611 612 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 613 if (rs != NPI_SUCCESS) { 614 return (NXGE_ERROR | rs); 615 } 616 617 /* 618 * Enable the timeout and threshold. 619 */ 620 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 621 rdc_desc.rcr_threshold); 622 if (rs != NPI_SUCCESS) { 623 return (NXGE_ERROR | rs); 624 } 625 626 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 627 rdc_desc.rcr_timeout); 628 if (rs != NPI_SUCCESS) { 629 return (NXGE_ERROR | rs); 630 } 631 632 /* Enable the DMA */ 633 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 634 if (rs != NPI_SUCCESS) { 635 return (NXGE_ERROR | rs); 636 } 637 638 /* Kick the DMA engine. 
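	 * The initial kick posts all rbb_max RBR entries to the hardware.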
*/ 639 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 640 /* Clear the rbr empty bit */ 641 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 642 643 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 644 645 return (NXGE_OK); 646 } 647 648 nxge_status_t 649 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 650 { 651 npi_handle_t handle; 652 npi_status_t rs = NPI_SUCCESS; 653 654 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 655 handle = NXGE_DEV_NPI_HANDLE(nxgep); 656 657 /* disable the DMA */ 658 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 659 if (rs != NPI_SUCCESS) { 660 NXGE_DEBUG_MSG((nxgep, RX_CTL, 661 "<== nxge_disable_rxdma_channel:failed (0x%x)", 662 rs)); 663 return (NXGE_ERROR | rs); 664 } 665 666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 667 return (NXGE_OK); 668 } 669 670 nxge_status_t 671 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 672 { 673 npi_handle_t handle; 674 nxge_status_t status = NXGE_OK; 675 676 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 677 "<== nxge_init_rxdma_channel_rcrflush")); 678 679 handle = NXGE_DEV_NPI_HANDLE(nxgep); 680 npi_rxdma_rdc_rcr_flush(handle, channel); 681 682 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 683 "<== nxge_init_rxdma_channel_rcrflsh")); 684 return (status); 685 686 } 687 688 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 689 690 #define TO_LEFT -1 691 #define TO_RIGHT 1 692 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 693 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 694 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 695 #define NO_HINT 0xffffffff 696 697 /*ARGSUSED*/ 698 nxge_status_t 699 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 700 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 701 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 702 { 703 int bufsize; 704 uint64_t pktbuf_pp; 705 uint64_t dvma_addr; 706 rxring_info_t *ring_info; 707 int base_side, end_side; 708 int r_index, l_index, anchor_index; 709 int found, search_done; 710 uint32_t offset, chunk_size, block_size, page_size_mask; 711 uint32_t chunk_index, block_index, total_index; 712 int max_iterations, iteration; 713 rxbuf_index_info_t *bufinfo; 714 715 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 716 717 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 718 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 719 pkt_buf_addr_pp, 720 pktbufsz_type)); 721 #if defined(__i386) 722 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 723 #else 724 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 725 #endif 726 727 switch (pktbufsz_type) { 728 case 0: 729 bufsize = rbr_p->pkt_buf_size0; 730 break; 731 case 1: 732 bufsize = rbr_p->pkt_buf_size1; 733 break; 734 case 2: 735 bufsize = rbr_p->pkt_buf_size2; 736 break; 737 case RCR_SINGLE_BLOCK: 738 bufsize = 0; 739 anchor_index = 0; 740 break; 741 default: 742 return (NXGE_ERROR); 743 } 744 745 if (rbr_p->num_blocks == 1) { 746 anchor_index = 0; 747 ring_info = rbr_p->ring_info; 748 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 749 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 750 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 751 "buf_pp $%p btype %d anchor_index %d " 752 "bufinfo $%p", 753 pkt_buf_addr_pp, 754 pktbufsz_type, 755 anchor_index, 756 bufinfo)); 757 758 goto found_index; 759 } 760 761 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 762 "==> nxge_rxbuf_pp_to_vp: " 763 "buf_pp $%p btype %d anchor_index %d", 764 pkt_buf_addr_pp, 765 pktbufsz_type, 766 anchor_index)); 767 768 ring_info = rbr_p->ring_info; 769 found = B_FALSE; 770 bufinfo = (rxbuf_index_info_t 
	    *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen recently.
	 * This is indicated by a hint which is initialized when
	 * the first buffer of the block is seen. The hint is reset
	 * when the last buffer of the block has been processed.
	 * As three block sizes are supported, three hints are kept.
	 * The idea behind the hints is that once the hardware uses
	 * a block for a buffer of that size, it will use it
	 * exclusively for that size and will use it until it is
	 * exhausted. It is assumed that there would be a single
	 * block in use for a given buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, reset the hint for this size.
			 */
			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this size,
		 * so the whole information array has to be searched.
		 * The search uses a binary search over the array,
		 * which assumes that the entries are already sorted
		 * in increasing order:
		 * info[0] < info[1] < info[2] ....
< info[n-1] 820 * where n is the size of the information array 821 */ 822 r_index = rbr_p->num_blocks - 1; 823 l_index = 0; 824 search_done = B_FALSE; 825 anchor_index = MID_INDEX(r_index, l_index); 826 while (search_done == B_FALSE) { 827 if ((r_index == l_index) || 828 (iteration >= max_iterations)) 829 search_done = B_TRUE; 830 end_side = TO_RIGHT; /* to the right */ 831 base_side = TO_LEFT; /* to the left */ 832 /* read the DVMA address information and sort it */ 833 dvma_addr = bufinfo[anchor_index].dvma_addr; 834 chunk_size = bufinfo[anchor_index].buf_size; 835 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 836 "==> nxge_rxbuf_pp_to_vp: (searching)" 837 "buf_pp $%p btype %d " 838 "anchor_index %d chunk_size %d dvmaaddr $%p", 839 pkt_buf_addr_pp, 840 pktbufsz_type, 841 anchor_index, 842 chunk_size, 843 dvma_addr)); 844 845 if (pktbuf_pp >= dvma_addr) 846 base_side = TO_RIGHT; /* to the right */ 847 if (pktbuf_pp < (dvma_addr + chunk_size)) 848 end_side = TO_LEFT; /* to the left */ 849 850 switch (base_side + end_side) { 851 case IN_MIDDLE: 852 /* found */ 853 found = B_TRUE; 854 search_done = B_TRUE; 855 if ((pktbuf_pp + bufsize) < 856 (dvma_addr + chunk_size)) 857 ring_info->hint[pktbufsz_type] = 858 bufinfo[anchor_index].buf_index; 859 break; 860 case BOTH_RIGHT: 861 /* not found: go to the right */ 862 l_index = anchor_index + 1; 863 anchor_index = MID_INDEX(r_index, l_index); 864 break; 865 866 case BOTH_LEFT: 867 /* not found: go to the left */ 868 r_index = anchor_index - 1; 869 anchor_index = MID_INDEX(r_index, l_index); 870 break; 871 default: /* should not come here */ 872 return (NXGE_ERROR); 873 } 874 iteration++; 875 } 876 877 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 878 "==> nxge_rxbuf_pp_to_vp: (search done)" 879 "buf_pp $%p btype %d anchor_index %d", 880 pkt_buf_addr_pp, 881 pktbufsz_type, 882 anchor_index)); 883 } 884 885 if (found == B_FALSE) { 886 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 887 "==> nxge_rxbuf_pp_to_vp: (search failed)" 888 "buf_pp $%p btype %d anchor_index %d", 889 pkt_buf_addr_pp, 890 pktbufsz_type, 891 anchor_index)); 892 return (NXGE_ERROR); 893 } 894 895 found_index: 896 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 897 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 898 "buf_pp $%p btype %d bufsize %d anchor_index %d", 899 pkt_buf_addr_pp, 900 pktbufsz_type, 901 bufsize, 902 anchor_index)); 903 904 /* index of the first block in this chunk */ 905 chunk_index = bufinfo[anchor_index].start_index; 906 dvma_addr = bufinfo[anchor_index].dvma_addr; 907 page_size_mask = ring_info->block_size_mask; 908 909 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 910 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 911 "buf_pp $%p btype %d bufsize %d " 912 "anchor_index %d chunk_index %d dvma $%p", 913 pkt_buf_addr_pp, 914 pktbufsz_type, 915 bufsize, 916 anchor_index, 917 chunk_index, 918 dvma_addr)); 919 920 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 921 block_size = rbr_p->block_size; /* System block(page) size */ 922 923 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 924 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 925 "buf_pp $%p btype %d bufsize %d " 926 "anchor_index %d chunk_index %d dvma $%p " 927 "offset %d block_size %d", 928 pkt_buf_addr_pp, 929 pktbufsz_type, 930 bufsize, 931 anchor_index, 932 chunk_index, 933 dvma_addr, 934 offset, 935 block_size)); 936 937 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 938 939 block_index = (offset / block_size); /* index within chunk */ 940 total_index = chunk_index + block_index; 941 942 943 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 944 "==> nxge_rxbuf_pp_to_vp: " 945 
"total_index %d dvma_addr $%p " 946 "offset %d block_size %d " 947 "block_index %d ", 948 total_index, dvma_addr, 949 offset, block_size, 950 block_index)); 951 #if defined(__i386) 952 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 953 (uint32_t)offset); 954 #else 955 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 956 (uint64_t)offset); 957 #endif 958 959 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 960 "==> nxge_rxbuf_pp_to_vp: " 961 "total_index %d dvma_addr $%p " 962 "offset %d block_size %d " 963 "block_index %d " 964 "*pkt_buf_addr_p $%p", 965 total_index, dvma_addr, 966 offset, block_size, 967 block_index, 968 *pkt_buf_addr_p)); 969 970 971 *msg_index = total_index; 972 *bufoffset = (offset & page_size_mask); 973 974 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 975 "==> nxge_rxbuf_pp_to_vp: get msg index: " 976 "msg_index %d bufoffset_index %d", 977 *msg_index, 978 *bufoffset)); 979 980 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 981 982 return (NXGE_OK); 983 } 984 985 /* 986 * used by quick sort (qsort) function 987 * to perform comparison 988 */ 989 static int 990 nxge_sort_compare(const void *p1, const void *p2) 991 { 992 993 rxbuf_index_info_t *a, *b; 994 995 a = (rxbuf_index_info_t *)p1; 996 b = (rxbuf_index_info_t *)p2; 997 998 if (a->dvma_addr > b->dvma_addr) 999 return (1); 1000 if (a->dvma_addr < b->dvma_addr) 1001 return (-1); 1002 return (0); 1003 } 1004 1005 1006 1007 /* 1008 * grabbed this sort implementation from common/syscall/avl.c 1009 * 1010 */ 1011 /* 1012 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 1013 * v = Ptr to array/vector of objs 1014 * n = # objs in the array 1015 * s = size of each obj (must be multiples of a word size) 1016 * f = ptr to function to compare two objs 1017 * returns (-1 = less than, 0 = equal, 1 = greater than 1018 */ 1019 void 1020 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1021 { 1022 int g, i, j, ii; 1023 unsigned int *p1, *p2; 1024 unsigned int tmp; 1025 1026 /* No work to do */ 1027 if (v == NULL || n <= 1) 1028 return; 1029 /* Sanity check on arguments */ 1030 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1031 ASSERT(s > 0); 1032 1033 for (g = n / 2; g > 0; g /= 2) { 1034 for (i = g; i < n; i++) { 1035 for (j = i - g; j >= 0 && 1036 (*f)(v + j * s, v + (j + g) * s) == 1; 1037 j -= g) { 1038 p1 = (unsigned *)(v + j * s); 1039 p2 = (unsigned *)(v + (j + g) * s); 1040 for (ii = 0; ii < s / 4; ii++) { 1041 tmp = *p1; 1042 *p1++ = *p2; 1043 *p2++ = tmp; 1044 } 1045 } 1046 } 1047 } 1048 } 1049 1050 /* 1051 * Initialize data structures required for rxdma 1052 * buffer dvma->vmem address lookup 1053 */ 1054 /*ARGSUSED*/ 1055 static nxge_status_t 1056 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1057 { 1058 1059 int index; 1060 rxring_info_t *ring_info; 1061 int max_iteration = 0, max_index = 0; 1062 1063 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1064 1065 ring_info = rbrp->ring_info; 1066 ring_info->hint[0] = NO_HINT; 1067 ring_info->hint[1] = NO_HINT; 1068 ring_info->hint[2] = NO_HINT; 1069 max_index = rbrp->num_blocks; 1070 1071 /* read the DVMA address information and sort it */ 1072 /* do init of the information array */ 1073 1074 1075 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1076 " nxge_rxbuf_index_info_init Sort ptrs")); 1077 1078 /* sort the array */ 1079 nxge_ksort((void *)ring_info->buffer, max_index, 1080 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1081 1082 1083 1084 for (index = 0; index < max_index; index++) { 1085 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1086 " nxge_rxbuf_index_info_init: sorted chunk %d " 1087 " ioaddr $%p kaddr $%p size %x", 1088 index, ring_info->buffer[index].dvma_addr, 1089 ring_info->buffer[index].kaddr, 1090 ring_info->buffer[index].buf_size)); 1091 } 1092 1093 max_iteration = 0; 1094 while (max_index >= (1ULL << max_iteration)) 1095 max_iteration++; 1096 ring_info->max_iterations = max_iteration + 1; 1097 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1098 " nxge_rxbuf_index_info_init Find max iter %d", 1099 ring_info->max_iterations)); 1100 1101 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1102 return (NXGE_OK); 1103 } 1104 1105 /* ARGSUSED */ 1106 void 1107 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1108 { 1109 #ifdef NXGE_DEBUG 1110 1111 uint32_t bptr; 1112 uint64_t pp; 1113 1114 bptr = entry_p->bits.hdw.pkt_buf_addr; 1115 1116 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1117 "\trcr entry $%p " 1118 "\trcr entry 0x%0llx " 1119 "\trcr entry 0x%08x " 1120 "\trcr entry 0x%08x " 1121 "\tvalue 0x%0llx\n" 1122 "\tmulti = %d\n" 1123 "\tpkt_type = 0x%x\n" 1124 "\tzero_copy = %d\n" 1125 "\tnoport = %d\n" 1126 "\tpromis = %d\n" 1127 "\terror = 0x%04x\n" 1128 "\tdcf_err = 0x%01x\n" 1129 "\tl2_len = %d\n" 1130 "\tpktbufsize = %d\n" 1131 "\tpkt_buf_addr = $%p\n" 1132 "\tpkt_buf_addr (<< 6) = $%p\n", 1133 entry_p, 1134 *(int64_t *)entry_p, 1135 *(int32_t *)entry_p, 1136 *(int32_t *)((char *)entry_p + 32), 1137 entry_p->value, 1138 entry_p->bits.hdw.multi, 1139 entry_p->bits.hdw.pkt_type, 1140 entry_p->bits.hdw.zero_copy, 1141 entry_p->bits.hdw.noport, 1142 entry_p->bits.hdw.promis, 1143 entry_p->bits.hdw.error, 1144 entry_p->bits.hdw.dcf_err, 1145 entry_p->bits.hdw.l2_len, 1146 entry_p->bits.hdw.pktbufsz, 1147 bptr, 1148 entry_p->bits.ldw.pkt_buf_addr)); 1149 1150 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1151 RCR_PKT_BUF_ADDR_SHIFT; 1152 1153 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1154 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1155 #endif 1156 } 1157 1158 void 1159 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1160 { 1161 npi_handle_t handle; 1162 rbr_stat_t rbr_stat; 1163 addr44_t hd_addr; 1164 addr44_t tail_addr; 1165 uint16_t qlen; 1166 1167 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1168 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1169 1170 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1171 1172 /* RBR head */ 1173 hd_addr.addr = 0; 1174 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1175 #if defined(__i386) 1176 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1177 (void *)(uint32_t)hd_addr.addr); 1178 #else 1179 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1180 (void *)hd_addr.addr); 1181 #endif 1182 1183 /* RBR stats */ 1184 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1185 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1186 1187 /* RCR tail */ 1188 tail_addr.addr = 0; 1189 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1190 #if defined(__i386) 1191 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1192 (void *)(uint32_t)tail_addr.addr); 1193 #else 1194 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1195 (void *)tail_addr.addr); 1196 #endif 1197 1198 /* RCR qlen */ 1199 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1200 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1201 1202 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1203 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1204 } 1205 1206 nxge_status_t 1207 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1208 { 1209 nxge_grp_set_t 
*set = &nxgep->rx_set; 1210 nxge_status_t status; 1211 npi_status_t rs; 1212 int rdc; 1213 1214 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1215 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1216 1217 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1218 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1219 "<== nxge_rxdma_mode: not initialized")); 1220 return (NXGE_ERROR); 1221 } 1222 1223 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1224 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1225 "<== nxge_tx_port_fatal_err_recover: " 1226 "NULL ring pointer(s)")); 1227 return (NXGE_ERROR); 1228 } 1229 1230 if (set->owned.map == 0) { 1231 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1232 "nxge_rxdma_regs_dump_channels: no channels")); 1233 return (NULL); 1234 } 1235 1236 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1237 if ((1 << rdc) & set->owned.map) { 1238 rx_rbr_ring_t *ring = 1239 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1240 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1241 if (ring) { 1242 if (enable) { 1243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1244 "==> nxge_rxdma_hw_mode: " 1245 "channel %d (enable)", rdc)); 1246 rs = npi_rxdma_cfg_rdc_enable 1247 (handle, rdc); 1248 } else { 1249 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1250 "==> nxge_rxdma_hw_mode: " 1251 "channel %d disable)", rdc)); 1252 rs = npi_rxdma_cfg_rdc_disable 1253 (handle, rdc); 1254 } 1255 } 1256 } 1257 } 1258 1259 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1260 1261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1262 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1263 1264 return (status); 1265 } 1266 1267 void 1268 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1269 { 1270 npi_handle_t handle; 1271 1272 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1273 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1274 1275 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1276 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1277 1278 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1279 } 1280 1281 void 1282 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1283 { 1284 npi_handle_t handle; 1285 1286 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1287 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1288 1289 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1290 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1291 1292 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1293 } 1294 1295 void 1296 nxge_hw_start_rx(p_nxge_t nxgep) 1297 { 1298 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1299 1300 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1301 (void) nxge_rx_mac_enable(nxgep); 1302 1303 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1304 } 1305 1306 /*ARGSUSED*/ 1307 void 1308 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1309 { 1310 nxge_grp_set_t *set = &nxgep->rx_set; 1311 int rdc; 1312 1313 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1314 1315 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1316 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1317 "<== nxge_tx_port_fatal_err_recover: " 1318 "NULL ring pointer(s)")); 1319 return; 1320 } 1321 1322 if (set->owned.map == 0) { 1323 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1324 "nxge_rxdma_regs_dump_channels: no channels")); 1325 return; 1326 } 1327 1328 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1329 if ((1 << rdc) & set->owned.map) { 1330 rx_rbr_ring_t *ring = 1331 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1332 if (ring) { 1333 nxge_rxdma_hw_stop(nxgep, rdc); 1334 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1335 "==> nxge_fixup_rxdma_rings: " 1336 "channel %d ring $%px", 1337 
				    rdc, ring));
				(void) nxge_rxdma_fixup_channel(
				    nxgep, rdc, rdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}

void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	int		i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));

	i = nxge_rxdma_get_ring_index(nxgep, channel);
	if (i < 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_fix_channel: no entry found"));
		return;
	}

	nxge_rxdma_fixup_channel(nxgep, channel, i);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
}

void
nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
{
	int			ndmas;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p;
	p_nxge_dma_pool_t	dma_buf_poolp;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	p_nxge_dma_common_t	dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

	(void) nxge_rxdma_stop_channel(nxgep, channel);

	dma_buf_poolp = nxgep->rx_buf_pool_p;
	dma_cntl_poolp = nxgep->rx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: buf not allocated"));
		return;
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: no dma allocated"));
		return;
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

	/* Reinitialize the receive block and completion rings */
	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	status = nxge_rxdma_start_channel(nxgep, channel,
	    rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

/*
 * Convert an absolute RDC number to a Receive Buffer Ring index. That is,
 * map <channel> to an index into nxgep->rx_rbr_rings.
1443 * (device ring index -> port ring index) 1444 */ 1445 int 1446 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1447 { 1448 int i, ndmas; 1449 uint16_t rdc; 1450 p_rx_rbr_rings_t rx_rbr_rings; 1451 p_rx_rbr_ring_t *rbr_rings; 1452 1453 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1454 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1455 1456 rx_rbr_rings = nxgep->rx_rbr_rings; 1457 if (rx_rbr_rings == NULL) { 1458 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1459 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1460 return (-1); 1461 } 1462 ndmas = rx_rbr_rings->ndmas; 1463 if (!ndmas) { 1464 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1465 "<== nxge_rxdma_get_ring_index: no channel")); 1466 return (-1); 1467 } 1468 1469 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1470 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1471 1472 rbr_rings = rx_rbr_rings->rbr_rings; 1473 for (i = 0; i < ndmas; i++) { 1474 rdc = rbr_rings[i]->rdc; 1475 if (channel == rdc) { 1476 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1477 "==> nxge_rxdma_get_rbr_ring: channel %d " 1478 "(index %d) ring %d", channel, i, rbr_rings[i])); 1479 return (i); 1480 } 1481 } 1482 1483 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1484 "<== nxge_rxdma_get_rbr_ring_index: not found")); 1485 1486 return (-1); 1487 } 1488 1489 p_rx_rbr_ring_t 1490 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1491 { 1492 nxge_grp_set_t *set = &nxgep->rx_set; 1493 nxge_channel_t rdc; 1494 1495 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1496 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1497 1498 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1499 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1500 "<== nxge_rxdma_get_rbr_ring: " 1501 "NULL ring pointer(s)")); 1502 return (NULL); 1503 } 1504 1505 if (set->owned.map == 0) { 1506 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1507 "<== nxge_rxdma_get_rbr_ring: no channels")); 1508 return (NULL); 1509 } 1510 1511 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1512 if ((1 << rdc) & set->owned.map) { 1513 rx_rbr_ring_t *ring = 1514 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1515 if (ring) { 1516 if (channel == ring->rdc) { 1517 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1518 "==> nxge_rxdma_get_rbr_ring: " 1519 "channel %d ring $%p", rdc, ring)); 1520 return (ring); 1521 } 1522 } 1523 } 1524 } 1525 1526 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1527 "<== nxge_rxdma_get_rbr_ring: not found")); 1528 1529 return (NULL); 1530 } 1531 1532 p_rx_rcr_ring_t 1533 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1534 { 1535 nxge_grp_set_t *set = &nxgep->rx_set; 1536 nxge_channel_t rdc; 1537 1538 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1539 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1540 1541 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1542 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1543 "<== nxge_rxdma_get_rcr_ring: " 1544 "NULL ring pointer(s)")); 1545 return (NULL); 1546 } 1547 1548 if (set->owned.map == 0) { 1549 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1550 "<== nxge_rxdma_get_rbr_ring: no channels")); 1551 return (NULL); 1552 } 1553 1554 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1555 if ((1 << rdc) & set->owned.map) { 1556 rx_rcr_ring_t *ring = 1557 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1558 if (ring) { 1559 if (channel == ring->rdc) { 1560 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1561 "==> nxge_rxdma_get_rcr_ring: " 1562 "channel %d ring $%p", rdc, ring)); 1563 return (ring); 1564 } 1565 } 1566 } 1567 } 1568 1569 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1570 "<== nxge_rxdma_get_rcr_ring: not found")); 1571 1572 return (NULL); 1573 } 1574 1575 /* 1576 * Static functions start here. 
1577 */ 1578 static p_rx_msg_t 1579 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1580 { 1581 p_rx_msg_t nxge_mp = NULL; 1582 p_nxge_dma_common_t dmamsg_p; 1583 uchar_t *buffer; 1584 1585 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1586 if (nxge_mp == NULL) { 1587 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1588 "Allocation of a rx msg failed.")); 1589 goto nxge_allocb_exit; 1590 } 1591 1592 nxge_mp->use_buf_pool = B_FALSE; 1593 if (dmabuf_p) { 1594 nxge_mp->use_buf_pool = B_TRUE; 1595 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1596 *dmamsg_p = *dmabuf_p; 1597 dmamsg_p->nblocks = 1; 1598 dmamsg_p->block_size = size; 1599 dmamsg_p->alength = size; 1600 buffer = (uchar_t *)dmabuf_p->kaddrp; 1601 1602 dmabuf_p->kaddrp = (void *) 1603 ((char *)dmabuf_p->kaddrp + size); 1604 dmabuf_p->ioaddr_pp = (void *) 1605 ((char *)dmabuf_p->ioaddr_pp + size); 1606 dmabuf_p->alength -= size; 1607 dmabuf_p->offset += size; 1608 dmabuf_p->dma_cookie.dmac_laddress += size; 1609 dmabuf_p->dma_cookie.dmac_size -= size; 1610 1611 } else { 1612 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1613 if (buffer == NULL) { 1614 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1615 "Allocation of a receive page failed.")); 1616 goto nxge_allocb_fail1; 1617 } 1618 } 1619 1620 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1621 if (nxge_mp->rx_mblk_p == NULL) { 1622 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1623 goto nxge_allocb_fail2; 1624 } 1625 1626 nxge_mp->buffer = buffer; 1627 nxge_mp->block_size = size; 1628 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1629 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1630 nxge_mp->ref_cnt = 1; 1631 nxge_mp->free = B_TRUE; 1632 nxge_mp->rx_use_bcopy = B_FALSE; 1633 1634 atomic_inc_32(&nxge_mblks_pending); 1635 1636 goto nxge_allocb_exit; 1637 1638 nxge_allocb_fail2: 1639 if (!nxge_mp->use_buf_pool) { 1640 KMEM_FREE(buffer, size); 1641 } 1642 1643 nxge_allocb_fail1: 1644 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1645 nxge_mp = NULL; 1646 1647 nxge_allocb_exit: 1648 return (nxge_mp); 1649 } 1650 1651 p_mblk_t 1652 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1653 { 1654 p_mblk_t mp; 1655 1656 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1657 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1658 "offset = 0x%08X " 1659 "size = 0x%08X", 1660 nxge_mp, offset, size)); 1661 1662 mp = desballoc(&nxge_mp->buffer[offset], size, 1663 0, &nxge_mp->freeb); 1664 if (mp == NULL) { 1665 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1666 goto nxge_dupb_exit; 1667 } 1668 atomic_inc_32(&nxge_mp->ref_cnt); 1669 1670 1671 nxge_dupb_exit: 1672 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1673 nxge_mp)); 1674 return (mp); 1675 } 1676 1677 p_mblk_t 1678 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1679 { 1680 p_mblk_t mp; 1681 uchar_t *dp; 1682 1683 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1684 if (mp == NULL) { 1685 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1686 goto nxge_dupb_bcopy_exit; 1687 } 1688 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1689 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1690 mp->b_wptr = dp + size; 1691 1692 nxge_dupb_bcopy_exit: 1693 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1694 nxge_mp)); 1695 return (mp); 1696 } 1697 1698 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1699 p_rx_msg_t rx_msg_p); 1700 1701 void 1702 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1703 { 1704 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, "==> nxge_post_page")); 1705 1706 /* Reuse this buffer */ 1707 rx_msg_p->free = B_FALSE; 1708 rx_msg_p->cur_usage_cnt = 0; 1709 rx_msg_p->max_usage_cnt = 0; 1710 rx_msg_p->pkt_buf_size = 0; 1711 1712 if (rx_rbr_p->rbr_use_bcopy) { 1713 rx_msg_p->rx_use_bcopy = B_FALSE; 1714 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1715 } 1716 1717 /* 1718 * Get the rbr header pointer and its offset index. 1719 */ 1720 MUTEX_ENTER(&rx_rbr_p->post_lock); 1721 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1722 rx_rbr_p->rbr_wrap_mask); 1723 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1724 MUTEX_EXIT(&rx_rbr_p->post_lock); 1725 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1726 rx_rbr_p->rdc, 1); 1727 1728 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1729 "<== nxge_post_page (channel %d post_next_index %d)", 1730 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1731 1732 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1733 } 1734 1735 void 1736 nxge_freeb(p_rx_msg_t rx_msg_p) 1737 { 1738 size_t size; 1739 uchar_t *buffer = NULL; 1740 int ref_cnt; 1741 boolean_t free_state = B_FALSE; 1742 1743 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1744 1745 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1746 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1747 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1748 rx_msg_p, nxge_mblks_pending)); 1749 1750 /* 1751 * First we need to get the free state, then 1752 * atomic decrement the reference count to prevent 1753 * the race condition with the interrupt thread that 1754 * is processing a loaned up buffer block. 1755 */ 1756 free_state = rx_msg_p->free; 1757 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1758 if (!ref_cnt) { 1759 atomic_dec_32(&nxge_mblks_pending); 1760 buffer = rx_msg_p->buffer; 1761 size = rx_msg_p->block_size; 1762 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1763 "will free: rx_msg_p = $%p (block pending %d)", 1764 rx_msg_p, nxge_mblks_pending)); 1765 1766 if (!rx_msg_p->use_buf_pool) { 1767 KMEM_FREE(buffer, size); 1768 } 1769 1770 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1771 1772 if (ring) { 1773 /* 1774 * Decrement the receive buffer ring's reference 1775 * count, too. 1776 */ 1777 atomic_dec_32(&ring->rbr_ref_cnt); 1778 1779 /* 1780 * Free the receive buffer ring, if 1781 * 1. all the receive buffers have been freed 1782 * 2. and we are in the proper state (that is, 1783 * we are not UNMAPPING). 1784 */ 1785 if (ring->rbr_ref_cnt == 0 && 1786 ring->rbr_state == RBR_UNMAPPED) { 1787 /* 1788 * Free receive data buffers, 1789 * buffer index information 1790 * (rxring_info) and 1791 * the message block ring. 1792 */ 1793 NXGE_DEBUG_MSG((NULL, RX_CTL, 1794 "nxge_freeb:rx_msg_p = $%p " 1795 "(block pending %d) free buffers", 1796 rx_msg_p, nxge_mblks_pending)); 1797 nxge_rxdma_databuf_free(ring); 1798 if (ring->ring_info) { 1799 KMEM_FREE(ring->ring_info, 1800 sizeof (rxring_info_t)); 1801 } 1802 1803 if (ring->rx_msg_ring) { 1804 KMEM_FREE(ring->rx_msg_ring, 1805 ring->tnblocks * 1806 sizeof (p_rx_msg_t)); 1807 } 1808 KMEM_FREE(ring, sizeof (*ring)); 1809 } 1810 } 1811 return; 1812 } 1813 1814 /* 1815 * Repost buffer. 
1816 */ 1817 if (free_state && (ref_cnt == 1) && ring) { 1818 NXGE_DEBUG_MSG((NULL, RX_CTL, 1819 "nxge_freeb: post page $%p:", rx_msg_p)); 1820 if (ring->rbr_state == RBR_POSTING) 1821 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1822 } 1823 1824 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1825 } 1826 1827 uint_t 1828 nxge_rx_intr(void *arg1, void *arg2) 1829 { 1830 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1831 p_nxge_t nxgep = (p_nxge_t)arg2; 1832 p_nxge_ldg_t ldgp; 1833 uint8_t channel; 1834 npi_handle_t handle; 1835 rx_dma_ctl_stat_t cs; 1836 p_rx_rcr_ring_t rcr_ring; 1837 mblk_t *mp; 1838 1839 #ifdef NXGE_DEBUG 1840 rxdma_cfig1_t cfg; 1841 #endif 1842 1843 if (ldvp == NULL) { 1844 NXGE_DEBUG_MSG((NULL, INT_CTL, 1845 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1846 nxgep, ldvp)); 1847 1848 return (DDI_INTR_CLAIMED); 1849 } 1850 1851 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1852 nxgep = ldvp->nxgep; 1853 } 1854 1855 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1856 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1857 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1858 "<== nxge_rx_intr: interface not started or intialized")); 1859 return (DDI_INTR_CLAIMED); 1860 } 1861 1862 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1863 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1864 nxgep, ldvp)); 1865 1866 /* 1867 * This interrupt handler is for a specific 1868 * receive dma channel. 1869 */ 1870 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1871 1872 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1873 1874 /* 1875 * The RCR ring lock must be held when packets 1876 * are being processed and the hardware registers are 1877 * being read or written to prevent race condition 1878 * among the interrupt thread, the polling thread 1879 * (will cause fatal errors such as rcrincon bit set) 1880 * and the setting of the poll_flag. 1881 */ 1882 MUTEX_ENTER(&rcr_ring->lock); 1883 1884 /* 1885 * Get the control and status for this channel. 1886 */ 1887 channel = ldvp->channel; 1888 ldgp = ldvp->ldgp; 1889 1890 if (!isLDOMguest(nxgep)) { 1891 if (!nxgep->rx_channel_started[channel]) { 1892 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1893 "<== nxge_rx_intr: channel is not started")); 1894 MUTEX_EXIT(&rcr_ring->lock); 1895 return (DDI_INTR_CLAIMED); 1896 } 1897 } 1898 1899 ASSERT(rcr_ring->ldgp == ldgp); 1900 ASSERT(rcr_ring->ldvp == ldvp); 1901 1902 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1903 1904 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1905 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1906 channel, 1907 cs.value, 1908 cs.bits.hdw.rcrto, 1909 cs.bits.hdw.rcrthres)); 1910 1911 mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1912 1913 /* error events. */ 1914 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1915 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1916 } 1917 1918 /* 1919 * Enable the mailbox update interrupt if we want 1920 * to use mailbox. We probably don't need to use 1921 * mailbox as it only saves us one pio read. 1922 * Also write 1 to rcrthres and rcrto to clear 1923 * these two edge triggered bits. 1924 */ 1925 cs.value &= RX_DMA_CTL_STAT_WR1C; 1926 cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 1927 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1928 cs.value); 1929 1930 /* 1931 * If the polling mode is enabled, disable the interrupt. 
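	 * Only a logical group that this channel has to itself
	 * (ldgp->nldvs == 1) is disarmed here; shared groups stay armed.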
1932 */ 1933 if (rcr_ring->poll_flag) { 1934 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1935 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1936 "(disabling interrupts)", channel, ldgp, ldvp)); 1937 /* 1938 * Disarm this logical group if this is a single device 1939 * group. 1940 */ 1941 if (ldgp->nldvs == 1) { 1942 ldgimgm_t mgm; 1943 mgm.value = 0; 1944 mgm.bits.ldw.arm = 0; 1945 NXGE_REG_WR64(handle, 1946 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 1947 } 1948 } else { 1949 /* 1950 * Rearm this logical group if this is a single device 1951 * group. 1952 */ 1953 if (ldgp->nldvs == 1) { 1954 if (isLDOMguest(nxgep)) { 1955 nxge_hio_ldgimgn(nxgep, ldgp); 1956 } else { 1957 ldgimgm_t mgm; 1958 1959 mgm.value = 0; 1960 mgm.bits.ldw.arm = 1; 1961 mgm.bits.ldw.timer = ldgp->ldg_timer; 1962 1963 NXGE_REG_WR64(handle, 1964 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1965 mgm.value); 1966 } 1967 } 1968 1969 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1970 "==> nxge_rx_intr: rdc %d ldgp $%p " 1971 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1972 } 1973 MUTEX_EXIT(&rcr_ring->lock); 1974 1975 if (mp) { 1976 if (!isLDOMguest(nxgep)) 1977 mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 1978 rcr_ring->rcr_gen_num); 1979 #if defined(sun4v) 1980 else { /* isLDOMguest(nxgep) */ 1981 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1982 nxgep->nxge_hw_p->hio; 1983 nx_vio_fp_t *vio = &nhd->hio.vio; 1984 1985 if (vio->cb.vio_net_rx_cb) { 1986 (*vio->cb.vio_net_rx_cb) 1987 (nxgep->hio_vr->vhp, mp); 1988 } 1989 } 1990 #endif 1991 } 1992 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 1993 return (DDI_INTR_CLAIMED); 1994 } 1995 1996 /* 1997 * Process the packets received in the specified logical device 1998 * and pass up a chain of message blocks to the upper layer. 1999 * The RCR ring lock must be held before calling this function. 
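 * This interrupt-path wrapper calls nxge_rx_pkts() with a byte budget of -1
 * (no byte limit); the amount of work done per call is still bounded by
 * nxge_max_rx_pkts and, outside of poll mode, nxge_max_intr_pkts.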
 */
static mblk_t *
nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs)
{
	p_mblk_t		mp;
	p_rx_rcr_ring_t		rcrp;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
	rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex];

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d "
	    "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle));
	if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts_vring: no mp"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
	    mp));

#ifdef NXGE_DEBUG
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring:calling mac_rx "
	    "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
	    "mac_handle $%p",
	    mp->b_wptr - mp->b_rptr,
	    mp, mp->b_cont, mp->b_next,
	    rcrp, rcrp->rcr_mac_handle));

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring: dump packets "
	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
	    mp,
	    mp->b_rptr,
	    mp->b_wptr,
	    nxge_dump_packet((char *)mp->b_rptr,
	    mp->b_wptr - mp->b_rptr)));
	if (mp->b_cont) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump b_cont packets "
		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
		    mp->b_cont,
		    mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr,
		    nxge_dump_packet((char *)mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
	}
	if (mp->b_next) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump next packets "
		    "(b_rptr $%p): %s",
		    mp->b_next->b_rptr,
		    nxge_dump_packet((char *)mp->b_next->b_rptr,
		    mp->b_next->b_wptr - mp->b_next->b_rptr)));
	}
#endif
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ",
	    rcrp->rdc, rcrp->rcr_mac_handle));

	return (mp);
}


/*
 * This routine is the main packet receive processing function.
 * It gets the packet type, error code, and buffer related
 * information from the receive completion entry.
 * How many completion entries to process is based on the number of packets
 * queued by the hardware, a hardware maintained tail pointer
 * and a configurable receive packet count.
 *
 * A chain of message blocks will be created as a result of processing
 * the completion entries. This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets that were removed from the hardware queue.
 *
 * The RCR ring lock is held when entering this function.
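 * A bytes_to_pickup value of -1 disables the byte budget (this is what the
 * interrupt path passes); a positive value makes the loop stop once at least
 * that many bytes have been chained.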
2080 */ 2081 static mblk_t * 2082 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 2083 int bytes_to_pickup) 2084 { 2085 npi_handle_t handle; 2086 uint8_t channel; 2087 uint32_t comp_rd_index; 2088 p_rcr_entry_t rcr_desc_rd_head_p; 2089 p_rcr_entry_t rcr_desc_rd_head_pp; 2090 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 2091 uint16_t qlen, nrcr_read, npkt_read; 2092 uint32_t qlen_hw; 2093 boolean_t multi; 2094 rcrcfig_b_t rcr_cfg_b; 2095 int totallen = 0; 2096 #if defined(_BIG_ENDIAN) 2097 npi_status_t rs = NPI_SUCCESS; 2098 #endif 2099 2100 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 2101 "channel %d", rcr_p->rdc)); 2102 2103 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 2104 return (NULL); 2105 } 2106 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2107 channel = rcr_p->rdc; 2108 2109 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2110 "==> nxge_rx_pkts: START: rcr channel %d " 2111 "head_p $%p head_pp $%p index %d ", 2112 channel, rcr_p->rcr_desc_rd_head_p, 2113 rcr_p->rcr_desc_rd_head_pp, 2114 rcr_p->comp_rd_index)); 2115 2116 2117 #if !defined(_BIG_ENDIAN) 2118 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2119 #else 2120 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2121 if (rs != NPI_SUCCESS) { 2122 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2123 "channel %d, get qlen failed 0x%08x", 2124 channel, rs)); 2125 return (NULL); 2126 } 2127 #endif 2128 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2129 "qlen %d", channel, qlen)); 2130 2131 2132 2133 if (!qlen) { 2134 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2135 "==> nxge_rx_pkts:rcr channel %d " 2136 "qlen %d (no pkts)", channel, qlen)); 2137 2138 return (NULL); 2139 } 2140 2141 comp_rd_index = rcr_p->comp_rd_index; 2142 2143 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2144 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2145 nrcr_read = npkt_read = 0; 2146 2147 /* 2148 * Number of packets queued 2149 * (a jumbo or multi-buffer packet is counted as only one 2150 * packet even though it may take up more than one completion entry). 2151 */ 2152 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2153 qlen : nxge_max_rx_pkts; 2154 head_mp = NULL; 2155 tail_mp = &head_mp; 2156 nmp = mp_cont = NULL; 2157 multi = B_FALSE; 2158 2159 while (qlen_hw) { 2160 2161 #ifdef NXGE_DEBUG 2162 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2163 #endif 2164 /* 2165 * Process one completion ring entry.
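 * nxge_receive_packet() returns the decoded buffer through
 * multi/nmp/mp_cont; the chaining cases that follow link it up
 * roughly as below (tail_mp always points at the next link to fill):
 *	!multi && !mp_cont	whole frame in one buffer: link by b_next
 *	multi && !mp_cont	first buffer of a multi-buffer frame
 *	multi && mp_cont	middle buffer: keep linking by b_cont
 *	!multi && mp_cont	last buffer: close the frame, next b_next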
2166 */ 2167 nxge_receive_packet(nxgep, 2168 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2169 2170 /* 2171 * message chaining modes 2172 */ 2173 if (nmp) { 2174 nmp->b_next = NULL; 2175 if (!multi && !mp_cont) { /* frame fits a partition */ 2176 *tail_mp = nmp; 2177 tail_mp = &nmp->b_next; 2178 totallen += MBLKL(nmp); 2179 nmp = NULL; 2180 } else if (multi && !mp_cont) { /* first segment */ 2181 *tail_mp = nmp; 2182 tail_mp = &nmp->b_cont; 2183 totallen += MBLKL(nmp); 2184 } else if (multi && mp_cont) { /* mid of multi segs */ 2185 *tail_mp = mp_cont; 2186 tail_mp = &mp_cont->b_cont; 2187 totallen += MBLKL(mp_cont); 2188 } else if (!multi && mp_cont) { /* last segment */ 2189 *tail_mp = mp_cont; 2190 tail_mp = &nmp->b_next; 2191 totallen += MBLKL(mp_cont); 2192 nmp = NULL; 2193 } 2194 } 2195 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2196 "==> nxge_rx_pkts: loop: rcr channel %d " 2197 "before updating: multi %d " 2198 "nrcr_read %d " 2199 "npk read %d " 2200 "head_pp $%p index %d ", 2201 channel, 2202 multi, 2203 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2204 comp_rd_index)); 2205 2206 if (!multi) { 2207 qlen_hw--; 2208 npkt_read++; 2209 } 2210 2211 /* 2212 * Update the next read entry. 2213 */ 2214 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2215 rcr_p->comp_wrap_mask); 2216 2217 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2218 rcr_p->rcr_desc_first_p, 2219 rcr_p->rcr_desc_last_p); 2220 2221 nrcr_read++; 2222 2223 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2224 "<== nxge_rx_pkts: (SAM, process one packet) " 2225 "nrcr_read %d", 2226 nrcr_read)); 2227 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2228 "==> nxge_rx_pkts: loop: rcr channel %d " 2229 "multi %d " 2230 "nrcr_read %d " 2231 "npk read %d " 2232 "head_pp $%p index %d ", 2233 channel, 2234 multi, 2235 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2236 comp_rd_index)); 2237 2238 if ((bytes_to_pickup != -1) && 2239 (totallen >= bytes_to_pickup)) { 2240 break; 2241 } 2242 2243 /* limit the number of packets for interrupt */ 2244 if (!(rcr_p->poll_flag)) { 2245 if (npkt_read == nxge_max_intr_pkts) { 2246 break; 2247 } 2248 } 2249 } 2250 2251 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2252 rcr_p->comp_rd_index = comp_rd_index; 2253 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2254 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2255 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2256 2257 rcr_p->intr_timeout = (nxgep->intr_timeout < 2258 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2259 nxgep->intr_timeout; 2260 2261 rcr_p->intr_threshold = (nxgep->intr_threshold < 2262 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2263 nxgep->intr_threshold; 2264 2265 rcr_cfg_b.value = 0x0ULL; 2266 rcr_cfg_b.bits.ldw.entout = 1; 2267 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2268 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2269 2270 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2271 channel, rcr_cfg_b.value); 2272 } 2273 2274 cs.bits.ldw.pktread = npkt_read; 2275 cs.bits.ldw.ptrread = nrcr_read; 2276 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2277 channel, cs.value); 2278 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2279 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2280 "head_pp $%p index %016llx ", 2281 channel, 2282 rcr_p->rcr_desc_rd_head_pp, 2283 rcr_p->comp_rd_index)); 2284 /* 2285 * Update RCR buffer pointer read and number of packets 2286 * read. 
2287 */ 2288 2289 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2290 "channel %d", rcr_p->rdc)); 2291 2292 return (head_mp); 2293 } 2294 2295 void 2296 nxge_receive_packet(p_nxge_t nxgep, 2297 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2298 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2299 { 2300 p_mblk_t nmp = NULL; 2301 uint64_t multi; 2302 uint64_t dcf_err; 2303 uint8_t channel; 2304 2305 boolean_t first_entry = B_TRUE; 2306 boolean_t is_tcp_udp = B_FALSE; 2307 boolean_t buffer_free = B_FALSE; 2308 boolean_t error_send_up = B_FALSE; 2309 uint8_t error_type; 2310 uint16_t l2_len; 2311 uint16_t skip_len; 2312 uint8_t pktbufsz_type; 2313 uint64_t rcr_entry; 2314 uint64_t *pkt_buf_addr_pp; 2315 uint64_t *pkt_buf_addr_p; 2316 uint32_t buf_offset; 2317 uint32_t bsize; 2318 uint32_t error_disp_cnt; 2319 uint32_t msg_index; 2320 p_rx_rbr_ring_t rx_rbr_p; 2321 p_rx_msg_t *rx_msg_ring_p; 2322 p_rx_msg_t rx_msg_p; 2323 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2324 nxge_status_t status = NXGE_OK; 2325 boolean_t is_valid = B_FALSE; 2326 p_nxge_rx_ring_stats_t rdc_stats; 2327 uint32_t bytes_read; 2328 uint64_t pkt_type; 2329 uint64_t frag; 2330 boolean_t pkt_too_long_err = B_FALSE; 2331 #ifdef NXGE_DEBUG 2332 int dump_len; 2333 #endif 2334 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2335 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2336 2337 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2338 2339 multi = (rcr_entry & RCR_MULTI_MASK); 2340 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2341 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2342 2343 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2344 frag = (rcr_entry & RCR_FRAG_MASK); 2345 2346 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2347 2348 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2349 RCR_PKTBUFSZ_SHIFT); 2350 #if defined(__i386) 2351 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2352 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2353 #else 2354 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2355 RCR_PKT_BUF_ADDR_SHIFT); 2356 #endif 2357 2358 channel = rcr_p->rdc; 2359 2360 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2361 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2362 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2363 "error_type 0x%x pkt_type 0x%x " 2364 "pktbufsz_type %d ", 2365 rcr_desc_rd_head_p, 2366 rcr_entry, pkt_buf_addr_pp, l2_len, 2367 multi, 2368 error_type, 2369 pkt_type, 2370 pktbufsz_type)); 2371 2372 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2373 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2374 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2375 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2376 rcr_entry, pkt_buf_addr_pp, l2_len, 2377 multi, 2378 error_type, 2379 pkt_type)); 2380 2381 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2382 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2383 "full pkt_buf_addr_pp $%p l2_len %d", 2384 rcr_entry, pkt_buf_addr_pp, l2_len)); 2385 2386 /* get the stats ptr */ 2387 rdc_stats = rcr_p->rdc_stats; 2388 2389 if (!l2_len) { 2390 2391 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2392 "<== nxge_receive_packet: failed: l2 length is 0.")); 2393 return; 2394 } 2395 2396 /* 2397 * Software workaround for BMAC hardware limitation that allows 2398 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2399 * instead of 0x2400 for jumbo. 
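 *
 * Illustrative example (assumes the standard non-jumbo setting of
 * mac.maxframesize, nominally 1522): the BMAC may still pass a frame
 * of, say, 1525 bytes since it accepts up to 1526, so the check below
 * flags it as pkt_too_long_err instead of handing it up the stack.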
2400 */ 2401 if (l2_len > nxgep->mac.maxframesize) { 2402 pkt_too_long_err = B_TRUE; 2403 } 2404 2405 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2406 l2_len -= ETHERFCSL; 2407 2408 /* shift 6 bits to get the full io address */ 2409 #if defined(__i386) 2410 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2411 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2412 #else 2413 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2414 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2415 #endif 2416 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2417 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2418 "full pkt_buf_addr_pp $%p l2_len %d", 2419 rcr_entry, pkt_buf_addr_pp, l2_len)); 2420 2421 rx_rbr_p = rcr_p->rx_rbr_p; 2422 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2423 2424 if (first_entry) { 2425 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2426 RXDMA_HDR_SIZE_DEFAULT); 2427 2428 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2429 "==> nxge_receive_packet: first entry 0x%016llx " 2430 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2431 rcr_entry, pkt_buf_addr_pp, l2_len, 2432 hdr_size)); 2433 } 2434 2435 MUTEX_ENTER(&rx_rbr_p->lock); 2436 2437 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2438 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2439 "full pkt_buf_addr_pp $%p l2_len %d", 2440 rcr_entry, pkt_buf_addr_pp, l2_len)); 2441 2442 /* 2443 * Packet buffer address in the completion entry points 2444 * to the starting buffer address (offset 0). 2445 * Use the starting buffer address to locate the corresponding 2446 * kernel address. 2447 */ 2448 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2449 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2450 &buf_offset, 2451 &msg_index); 2452 2453 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2454 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2455 "full pkt_buf_addr_pp $%p l2_len %d", 2456 rcr_entry, pkt_buf_addr_pp, l2_len)); 2457 2458 if (status != NXGE_OK) { 2459 MUTEX_EXIT(&rx_rbr_p->lock); 2460 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2461 "<== nxge_receive_packet: found vaddr failed %d", 2462 status)); 2463 return; 2464 } 2465 2466 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2467 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2468 "full pkt_buf_addr_pp $%p l2_len %d", 2469 rcr_entry, pkt_buf_addr_pp, l2_len)); 2470 2471 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2472 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2473 "full pkt_buf_addr_pp $%p l2_len %d", 2474 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2475 2476 rx_msg_p = rx_msg_ring_p[msg_index]; 2477 2478 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2479 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2480 "full pkt_buf_addr_pp $%p l2_len %d", 2481 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2482 2483 switch (pktbufsz_type) { 2484 case RCR_PKTBUFSZ_0: 2485 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2486 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2487 "==> nxge_receive_packet: 0 buf %d", bsize)); 2488 break; 2489 case RCR_PKTBUFSZ_1: 2490 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2491 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2492 "==> nxge_receive_packet: 1 buf %d", bsize)); 2493 break; 2494 case RCR_PKTBUFSZ_2: 2495 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2496 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2497 "==> nxge_receive_packet: 2 buf %d", bsize)); 2498 break; 2499 case RCR_SINGLE_BLOCK: 2500 bsize = rx_msg_p->block_size; 2501 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2502 "==> nxge_receive_packet: single %d", bsize)); 2503 2504 break; 2505 default: 2506 MUTEX_EXIT(&rx_rbr_p->lock); 2507 return; 2508 } 2509 2510 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2511 (buf_offset + sw_offset_bytes), 
2512 (hdr_size + l2_len), 2513 DDI_DMA_SYNC_FORCPU); 2514 2515 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2516 "==> nxge_receive_packet: after first dump:usage count")); 2517 2518 if (rx_msg_p->cur_usage_cnt == 0) { 2519 if (rx_rbr_p->rbr_use_bcopy) { 2520 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2521 if (rx_rbr_p->rbr_consumed < 2522 rx_rbr_p->rbr_threshold_hi) { 2523 if (rx_rbr_p->rbr_threshold_lo == 0 || 2524 ((rx_rbr_p->rbr_consumed >= 2525 rx_rbr_p->rbr_threshold_lo) && 2526 (rx_rbr_p->rbr_bufsize_type >= 2527 pktbufsz_type))) { 2528 rx_msg_p->rx_use_bcopy = B_TRUE; 2529 } 2530 } else { 2531 rx_msg_p->rx_use_bcopy = B_TRUE; 2532 } 2533 } 2534 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2535 "==> nxge_receive_packet: buf %d (new block) ", 2536 bsize)); 2537 2538 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2539 rx_msg_p->pkt_buf_size = bsize; 2540 rx_msg_p->cur_usage_cnt = 1; 2541 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2542 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2543 "==> nxge_receive_packet: buf %d " 2544 "(single block) ", 2545 bsize)); 2546 /* 2547 * Buffer can be reused once the free function 2548 * is called. 2549 */ 2550 rx_msg_p->max_usage_cnt = 1; 2551 buffer_free = B_TRUE; 2552 } else { 2553 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2554 if (rx_msg_p->max_usage_cnt == 1) { 2555 buffer_free = B_TRUE; 2556 } 2557 } 2558 } else { 2559 rx_msg_p->cur_usage_cnt++; 2560 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2561 buffer_free = B_TRUE; 2562 } 2563 } 2564 2565 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2566 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2567 msg_index, l2_len, 2568 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2569 2570 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2571 rdc_stats->ierrors++; 2572 if (dcf_err) { 2573 rdc_stats->dcf_err++; 2574 #ifdef NXGE_DEBUG 2575 if (!rdc_stats->dcf_err) { 2576 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2577 "nxge_receive_packet: channel %d dcf_err rcr" 2578 " 0x%llx", channel, rcr_entry)); 2579 } 2580 #endif 2581 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2582 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2583 } else if (pkt_too_long_err) { 2584 rdc_stats->pkt_too_long_err++; 2585 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2586 " channel %d packet length [%d] > " 2587 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2588 nxgep->mac.maxframesize)); 2589 } else { 2590 /* Update error stats */ 2591 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2592 rdc_stats->errlog.compl_err_type = error_type; 2593 2594 switch (error_type) { 2595 /* 2596 * Do not send FMA ereport for RCR_L2_ERROR and 2597 * RCR_L4_CSUM_ERROR because most likely they indicate 2598 * back pressure rather than HW failures. 2599 */ 2600 case RCR_L2_ERROR: 2601 rdc_stats->l2_err++; 2602 if (rdc_stats->l2_err < 2603 error_disp_cnt) { 2604 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2605 " nxge_receive_packet:" 2606 " channel %d RCR L2_ERROR", 2607 channel)); 2608 } 2609 break; 2610 case RCR_L4_CSUM_ERROR: 2611 error_send_up = B_TRUE; 2612 rdc_stats->l4_cksum_err++; 2613 if (rdc_stats->l4_cksum_err < 2614 error_disp_cnt) { 2615 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2616 " nxge_receive_packet:" 2617 " channel %d" 2618 " RCR L4_CSUM_ERROR", channel)); 2619 } 2620 break; 2621 /* 2622 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2623 * RCR_ZCP_SOFT_ERROR because they reflect the same 2624 * FFLP and ZCP errors that have been reported by 2625 * nxge_fflp.c and nxge_zcp.c. 
2626 */ 2627 case RCR_FFLP_SOFT_ERROR: 2628 error_send_up = B_TRUE; 2629 rdc_stats->fflp_soft_err++; 2630 if (rdc_stats->fflp_soft_err < 2631 error_disp_cnt) { 2632 NXGE_ERROR_MSG((nxgep, 2633 NXGE_ERR_CTL, 2634 " nxge_receive_packet:" 2635 " channel %d" 2636 " RCR FFLP_SOFT_ERROR", channel)); 2637 } 2638 break; 2639 case RCR_ZCP_SOFT_ERROR: 2640 error_send_up = B_TRUE; 2641 rdc_stats->zcp_soft_err++; 2642 if (rdc_stats->zcp_soft_err < 2643 error_disp_cnt) 2644 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2645 " nxge_receive_packet: Channel %d" 2646 " RCR ZCP_SOFT_ERROR", channel)); 2647 break; 2648 default: 2649 rdc_stats->rcr_unknown_err++; 2650 if (rdc_stats->rcr_unknown_err 2651 < error_disp_cnt) { 2652 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2653 " nxge_receive_packet: Channel %d" 2654 " RCR entry 0x%llx error 0x%x", 2655 channel, rcr_entry, error_type)); 2656 } 2657 break; 2658 } 2659 } 2660 2661 /* 2662 * Update and repost buffer block if max usage 2663 * count is reached. 2664 */ 2665 if (error_send_up == B_FALSE) { 2666 atomic_inc_32(&rx_msg_p->ref_cnt); 2667 if (buffer_free == B_TRUE) { 2668 rx_msg_p->free = B_TRUE; 2669 } 2670 2671 MUTEX_EXIT(&rx_rbr_p->lock); 2672 nxge_freeb(rx_msg_p); 2673 return; 2674 } 2675 } 2676 2677 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2678 "==> nxge_receive_packet: DMA sync second ")); 2679 2680 bytes_read = rcr_p->rcvd_pkt_bytes; 2681 skip_len = sw_offset_bytes + hdr_size; 2682 if (!rx_msg_p->rx_use_bcopy) { 2683 /* 2684 * For loaned up buffers, the driver reference count 2685 * will be incremented first and then the free state. 2686 */ 2687 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2688 if (first_entry) { 2689 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2690 if (l2_len < bsize - skip_len) { 2691 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2692 } else { 2693 nmp->b_wptr = &nmp->b_rptr[bsize 2694 - skip_len]; 2695 } 2696 } else { 2697 if (l2_len - bytes_read < bsize) { 2698 nmp->b_wptr = 2699 &nmp->b_rptr[l2_len - bytes_read]; 2700 } else { 2701 nmp->b_wptr = &nmp->b_rptr[bsize]; 2702 } 2703 } 2704 } 2705 } else { 2706 if (first_entry) { 2707 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2708 l2_len < bsize - skip_len ? 2709 l2_len : bsize - skip_len); 2710 } else { 2711 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2712 l2_len - bytes_read < bsize ? 2713 l2_len - bytes_read : bsize); 2714 } 2715 } 2716 if (nmp != NULL) { 2717 if (first_entry) { 2718 /* 2719 * Jumbo packets may be received with more than one 2720 * buffer; increment ipackets for the first entry only. 2721 */ 2722 rdc_stats->ipackets++; 2723 2724 /* Update ibytes for kstat. */ 2725 rdc_stats->ibytes += skip_len 2726 + l2_len < bsize ? l2_len : bsize; 2727 /* 2728 * Update the number of bytes read so far for the 2729 * current frame. 2730 */ 2731 bytes_read = nmp->b_wptr - nmp->b_rptr; 2732 } else { 2733 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2734 l2_len - bytes_read : bsize; 2735 bytes_read += nmp->b_wptr - nmp->b_rptr; 2736 } 2737 2738 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2739 "==> nxge_receive_packet after dupb: " 2740 "rbr consumed %d " 2741 "pktbufsz_type %d " 2742 "nmp $%p rptr $%p wptr $%p " 2743 "buf_offset %d bsize %d l2_len %d skip_len %d", 2744 rx_rbr_p->rbr_consumed, 2745 pktbufsz_type, 2746 nmp, nmp->b_rptr, nmp->b_wptr, 2747 buf_offset, bsize, l2_len, skip_len)); 2748 } else { 2749 cmn_err(CE_WARN, "!nxge_receive_packet: " 2750 "update stats (error)"); 2751 atomic_inc_32(&rx_msg_p->ref_cnt); 2752 if (buffer_free == B_TRUE) { 2753 rx_msg_p->free = B_TRUE; 2754 } 2755 MUTEX_EXIT(&rx_rbr_p->lock); 2756 nxge_freeb(rx_msg_p); 2757 return; 2758 } 2759 2760 if (buffer_free == B_TRUE) { 2761 rx_msg_p->free = B_TRUE; 2762 } 2763 2764 is_valid = (nmp != NULL); 2765 2766 rcr_p->rcvd_pkt_bytes = bytes_read; 2767 2768 MUTEX_EXIT(&rx_rbr_p->lock); 2769 2770 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2771 atomic_inc_32(&rx_msg_p->ref_cnt); 2772 nxge_freeb(rx_msg_p); 2773 } 2774 2775 if (is_valid) { 2776 nmp->b_cont = NULL; 2777 if (first_entry) { 2778 *mp = nmp; 2779 *mp_cont = NULL; 2780 } else { 2781 *mp_cont = nmp; 2782 } 2783 } 2784 2785 /* 2786 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2787 * If a packet is not fragmented and no error bit is set, then 2788 * L4 checksum is OK. 2789 */ 2790 2791 if (is_valid && !multi) { 2792 /* 2793 * If the checksum flag nxge_cksum_offload 2794 * is 1, TCP and UDP packets can be sent 2795 * up with good checksums. If the checksum flag 2796 * is set to 0, checksum reporting will apply to 2797 * TCP packets only (workaround for a hardware bug). 2798 * If the checksum flag nxge_cksum_offload is 2799 * greater than 1, hardware checksum results will 2800 * not be reported for either TCP or UDP packets. 2801 */ 2802 if (nxge_cksum_offload == 1) { 2803 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2804 pkt_type == RCR_PKT_IS_UDP) ? 2805 B_TRUE: B_FALSE); 2806 } else if (!nxge_cksum_offload) { 2807 /* TCP checksum only. */ 2808 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2809 B_TRUE: B_FALSE); 2810 } 2811 2812 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2813 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2814 is_valid, multi, is_tcp_udp, frag, error_type)); 2815 2816 if (is_tcp_udp && !frag && !error_type) { 2817 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2818 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2819 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2820 "==> nxge_receive_packet: Full tcp/udp cksum " 2821 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2822 "error %d", 2823 is_valid, multi, is_tcp_udp, frag, error_type)); 2824 } 2825 } 2826 2827 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2828 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2829 2830 *multi_p = (multi == RCR_MULTI_MASK); 2831 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2832 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2833 *multi_p, nmp, *mp, *mp_cont)); 2834 } 2835 2836 /* 2837 * Enable polling for a ring. Interrupt for the ring is disabled when 2838 * the nxge interrupt comes (see nxge_rx_intr).
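 *
 * Rough lifecycle as driven by the MAC layer (illustrative; these
 * entry points are normally reached through the ring's registered
 * callbacks rather than called directly):
 *
 *	(void) nxge_enable_poll(ring_handle);	interrupt off, poll on
 *	mp = nxge_rx_poll(ring_handle, nbytes);	pull up to ~nbytes
 *	(void) nxge_disable_poll(ring_handle);	poll off, re-arm interrupt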
2839 */ 2840 int 2841 nxge_enable_poll(void *arg) 2842 { 2843 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2844 p_rx_rcr_ring_t ringp; 2845 p_nxge_t nxgep; 2846 p_nxge_ldg_t ldgp; 2847 uint32_t channel; 2848 2849 if (ring_handle == NULL) { 2850 return (0); 2851 } 2852 2853 nxgep = ring_handle->nxgep; 2854 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2855 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2856 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2857 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2858 ldgp = ringp->ldgp; 2859 if (ldgp == NULL) { 2860 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2861 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2862 ringp->rdc)); 2863 return (0); 2864 } 2865 2866 MUTEX_ENTER(&ringp->lock); 2867 /* enable polling */ 2868 if (ringp->poll_flag == 0) { 2869 ringp->poll_flag = 1; 2870 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2871 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2872 ringp->rdc)); 2873 } 2874 2875 MUTEX_EXIT(&ringp->lock); 2876 return (0); 2877 } 2878 /* 2879 * Disable polling for a ring and enable its interrupt. 2880 */ 2881 int 2882 nxge_disable_poll(void *arg) 2883 { 2884 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2885 p_rx_rcr_ring_t ringp; 2886 p_nxge_t nxgep; 2887 uint32_t channel; 2888 2889 if (ring_handle == NULL) { 2890 return (0); 2891 } 2892 2893 nxgep = ring_handle->nxgep; 2894 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2895 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2896 2897 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2898 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2899 2900 MUTEX_ENTER(&ringp->lock); 2901 2902 /* disable polling: enable interrupt */ 2903 if (ringp->poll_flag) { 2904 npi_handle_t handle; 2905 rx_dma_ctl_stat_t cs; 2906 uint8_t channel; 2907 p_nxge_ldg_t ldgp; 2908 2909 /* 2910 * Get the control and status for this channel. 2911 */ 2912 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2913 channel = ringp->rdc; 2914 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2915 channel, &cs.value); 2916 2917 /* 2918 * Enable mailbox update 2919 * Since packets were not read and the hardware uses 2920 * bits pktread and ptrread to update the queue 2921 * length, we need to set both bits to 0. 2922 */ 2923 cs.bits.ldw.pktread = 0; 2924 cs.bits.ldw.ptrread = 0; 2925 cs.bits.hdw.mex = 1; 2926 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2927 cs.value); 2928 2929 /* 2930 * Rearm this logical group if this is a single device 2931 * group. 2932 */ 2933 ldgp = ringp->ldgp; 2934 if (ldgp == NULL) { 2935 ringp->poll_flag = 0; 2936 MUTEX_EXIT(&ringp->lock); 2937 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2938 "==> nxge_disable_poll: no ldgp rdc %d " 2939 "(still set poll to 0", ringp->rdc)); 2940 return (0); 2941 } 2942 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2943 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2944 ringp->rdc, ldgp)); 2945 if (ldgp->nldvs == 1) { 2946 ldgimgm_t mgm; 2947 mgm.value = 0; 2948 mgm.bits.ldw.arm = 1; 2949 mgm.bits.ldw.timer = ldgp->ldg_timer; 2950 NXGE_REG_WR64(handle, 2951 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 2952 } 2953 ringp->poll_flag = 0; 2954 } 2955 2956 MUTEX_EXIT(&ringp->lock); 2957 return (0); 2958 } 2959 2960 /* 2961 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
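 * Returns a chain of mblks whose total length is roughly bounded by
 * bytes_to_pickup, or NULL if nothing is pending. The ring must
 * already be in poll mode (nxge_enable_poll() was called); the ASSERT
 * below enforces that, and any error bits found in the control/status
 * word are handed to nxge_rx_err_evnts().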
2962 */ 2963 mblk_t * 2964 nxge_rx_poll(void *arg, int bytes_to_pickup) 2965 { 2966 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2967 p_rx_rcr_ring_t rcr_p; 2968 p_nxge_t nxgep; 2969 npi_handle_t handle; 2970 rx_dma_ctl_stat_t cs; 2971 mblk_t *mblk; 2972 p_nxge_ldv_t ldvp; 2973 uint32_t channel; 2974 2975 nxgep = ring_handle->nxgep; 2976 2977 /* 2978 * Get the control and status for this channel. 2979 */ 2980 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2981 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2982 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2983 MUTEX_ENTER(&rcr_p->lock); 2984 ASSERT(rcr_p->poll_flag == 1); 2985 2986 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2987 2988 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2989 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2990 rcr_p->rdc, rcr_p->poll_flag)); 2991 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2992 2993 ldvp = rcr_p->ldvp; 2994 /* error events. */ 2995 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2996 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2997 } 2998 2999 MUTEX_EXIT(&rcr_p->lock); 3000 3001 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 3002 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 3003 return (mblk); 3004 } 3005 3006 3007 /*ARGSUSED*/ 3008 static nxge_status_t 3009 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 3010 { 3011 p_nxge_rx_ring_stats_t rdc_stats; 3012 npi_handle_t handle; 3013 npi_status_t rs; 3014 boolean_t rxchan_fatal = B_FALSE; 3015 boolean_t rxport_fatal = B_FALSE; 3016 uint8_t portn; 3017 nxge_status_t status = NXGE_OK; 3018 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 3019 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 3020 3021 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3022 portn = nxgep->mac.portnum; 3023 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 3024 3025 if (cs.bits.hdw.rbr_tmout) { 3026 rdc_stats->rx_rbr_tmout++; 3027 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3028 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 3029 rxchan_fatal = B_TRUE; 3030 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3031 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 3032 } 3033 if (cs.bits.hdw.rsp_cnt_err) { 3034 rdc_stats->rsp_cnt_err++; 3035 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3036 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 3037 rxchan_fatal = B_TRUE; 3038 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3039 "==> nxge_rx_err_evnts(channel %d): " 3040 "rsp_cnt_err", channel)); 3041 } 3042 if (cs.bits.hdw.byte_en_bus) { 3043 rdc_stats->byte_en_bus++; 3044 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3045 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 3046 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3047 "==> nxge_rx_err_evnts(channel %d): " 3048 "fatal error: byte_en_bus", channel)); 3049 rxchan_fatal = B_TRUE; 3050 } 3051 if (cs.bits.hdw.rsp_dat_err) { 3052 rdc_stats->rsp_dat_err++; 3053 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3054 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 3055 rxchan_fatal = B_TRUE; 3056 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3057 "==> nxge_rx_err_evnts(channel %d): " 3058 "fatal error: rsp_dat_err", channel)); 3059 } 3060 if (cs.bits.hdw.rcr_ack_err) { 3061 rdc_stats->rcr_ack_err++; 3062 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3063 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 3064 rxchan_fatal = B_TRUE; 3065 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3066 "==> nxge_rx_err_evnts(channel %d): " 3067 "fatal error: rcr_ack_err", channel)); 3068 } 3069 if (cs.bits.hdw.dc_fifo_err) { 3070 rdc_stats->dc_fifo_err++; 3071 
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3072 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 3073 /* This is not a fatal error! */ 3074 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3075 "==> nxge_rx_err_evnts(channel %d): " 3076 "dc_fifo_err", channel)); 3077 rxport_fatal = B_TRUE; 3078 } 3079 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 3080 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 3081 &rdc_stats->errlog.pre_par, 3082 &rdc_stats->errlog.sha_par)) 3083 != NPI_SUCCESS) { 3084 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3085 "==> nxge_rx_err_evnts(channel %d): " 3086 "rcr_sha_par: get perr", channel)); 3087 return (NXGE_ERROR | rs); 3088 } 3089 if (cs.bits.hdw.rcr_sha_par) { 3090 rdc_stats->rcr_sha_par++; 3091 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3092 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 3093 rxchan_fatal = B_TRUE; 3094 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3095 "==> nxge_rx_err_evnts(channel %d): " 3096 "fatal error: rcr_sha_par", channel)); 3097 } 3098 if (cs.bits.hdw.rbr_pre_par) { 3099 rdc_stats->rbr_pre_par++; 3100 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3101 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 3102 rxchan_fatal = B_TRUE; 3103 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3104 "==> nxge_rx_err_evnts(channel %d): " 3105 "fatal error: rbr_pre_par", channel)); 3106 } 3107 } 3108 /* 3109 * The Following 4 status bits are for information, the system 3110 * is running fine. There is no need to send FMA ereports or 3111 * log messages. 3112 */ 3113 if (cs.bits.hdw.port_drop_pkt) { 3114 rdc_stats->port_drop_pkt++; 3115 } 3116 if (cs.bits.hdw.wred_drop) { 3117 rdc_stats->wred_drop++; 3118 } 3119 if (cs.bits.hdw.rbr_pre_empty) { 3120 rdc_stats->rbr_pre_empty++; 3121 } 3122 if (cs.bits.hdw.rcr_shadow_full) { 3123 rdc_stats->rcr_shadow_full++; 3124 } 3125 if (cs.bits.hdw.config_err) { 3126 rdc_stats->config_err++; 3127 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3128 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3129 rxchan_fatal = B_TRUE; 3130 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3131 "==> nxge_rx_err_evnts(channel %d): " 3132 "config error", channel)); 3133 } 3134 if (cs.bits.hdw.rcrincon) { 3135 rdc_stats->rcrincon++; 3136 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3137 NXGE_FM_EREPORT_RDMC_RCRINCON); 3138 rxchan_fatal = B_TRUE; 3139 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3140 "==> nxge_rx_err_evnts(channel %d): " 3141 "fatal error: rcrincon error", channel)); 3142 } 3143 if (cs.bits.hdw.rcrfull) { 3144 rdc_stats->rcrfull++; 3145 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3146 NXGE_FM_EREPORT_RDMC_RCRFULL); 3147 rxchan_fatal = B_TRUE; 3148 if (rdc_stats->rcrfull < error_disp_cnt) 3149 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3150 "==> nxge_rx_err_evnts(channel %d): " 3151 "fatal error: rcrfull error", channel)); 3152 } 3153 if (cs.bits.hdw.rbr_empty) { 3154 /* 3155 * This bit is for information, there is no need 3156 * send FMA ereport or log a message. 
3157 */ 3158 rdc_stats->rbr_empty++; 3159 } 3160 if (cs.bits.hdw.rbrfull) { 3161 rdc_stats->rbrfull++; 3162 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3163 NXGE_FM_EREPORT_RDMC_RBRFULL); 3164 rxchan_fatal = B_TRUE; 3165 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3166 "==> nxge_rx_err_evnts(channel %d): " 3167 "fatal error: rbr_full error", channel)); 3168 } 3169 if (cs.bits.hdw.rbrlogpage) { 3170 rdc_stats->rbrlogpage++; 3171 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3172 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3173 rxchan_fatal = B_TRUE; 3174 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3175 "==> nxge_rx_err_evnts(channel %d): " 3176 "fatal error: rbr logical page error", channel)); 3177 } 3178 if (cs.bits.hdw.cfiglogpage) { 3179 rdc_stats->cfiglogpage++; 3180 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3181 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3182 rxchan_fatal = B_TRUE; 3183 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3184 "==> nxge_rx_err_evnts(channel %d): " 3185 "fatal error: cfig logical page error", channel)); 3186 } 3187 3188 if (rxport_fatal) { 3189 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3190 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3191 portn)); 3192 if (isLDOMguest(nxgep)) { 3193 status = NXGE_ERROR; 3194 } else { 3195 status = nxge_ipp_fatal_err_recover(nxgep); 3196 if (status == NXGE_OK) { 3197 FM_SERVICE_RESTORED(nxgep); 3198 } 3199 } 3200 } 3201 3202 if (rxchan_fatal) { 3203 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3204 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3205 channel)); 3206 if (isLDOMguest(nxgep)) { 3207 status = NXGE_ERROR; 3208 } else { 3209 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3210 if (status == NXGE_OK) { 3211 FM_SERVICE_RESTORED(nxgep); 3212 } 3213 } 3214 } 3215 3216 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3217 3218 return (status); 3219 } 3220 3221 /* 3222 * nxge_rdc_hvio_setup 3223 * 3224 * This code appears to setup some Hypervisor variables. 3225 * 3226 * Arguments: 3227 * nxgep 3228 * channel 3229 * 3230 * Notes: 3231 * What does NIU_LP_WORKAROUND mean? 
3232 * 3233 * NPI/NXGE function calls: 3234 * na 3235 * 3236 * Context: 3237 * Any domain 3238 */ 3239 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3240 static void 3241 nxge_rdc_hvio_setup( 3242 nxge_t *nxgep, int channel) 3243 { 3244 nxge_dma_common_t *dma_common; 3245 nxge_dma_common_t *dma_control; 3246 rx_rbr_ring_t *ring; 3247 3248 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3249 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3250 3251 ring->hv_set = B_FALSE; 3252 3253 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3254 dma_common->orig_ioaddr_pp; 3255 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3256 dma_common->orig_alength; 3257 3258 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3259 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3260 channel, ring->hv_rx_buf_base_ioaddr_pp, 3261 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3262 dma_common->orig_alength, dma_common->orig_alength)); 3263 3264 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3265 3266 ring->hv_rx_cntl_base_ioaddr_pp = 3267 (uint64_t)dma_control->orig_ioaddr_pp; 3268 ring->hv_rx_cntl_ioaddr_size = 3269 (uint64_t)dma_control->orig_alength; 3270 3271 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3272 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3273 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3274 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3275 dma_control->orig_alength, dma_control->orig_alength)); 3276 } 3277 #endif 3278 3279 /* 3280 * nxge_map_rxdma 3281 * 3282 * Map an RDC into our kernel space. 3283 * 3284 * Arguments: 3285 * nxgep 3286 * channel The channel to map. 3287 * 3288 * Notes: 3289 * 1. Allocate & initialise a memory pool, if necessary. 3290 * 2. Allocate however many receive buffers are required. 3291 * 3. Setup buffers, descriptors, and mailbox. 3292 * 3293 * NPI/NXGE function calls: 3294 * nxge_alloc_rx_mem_pool() 3295 * nxge_alloc_rbb() 3296 * nxge_map_rxdma_channel() 3297 * 3298 * Registers accessed: 3299 * 3300 * Context: 3301 * Any domain 3302 */ 3303 static nxge_status_t 3304 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3305 { 3306 nxge_dma_common_t **data; 3307 nxge_dma_common_t **control; 3308 rx_rbr_ring_t **rbr_ring; 3309 rx_rcr_ring_t **rcr_ring; 3310 rx_mbox_t **mailbox; 3311 uint32_t chunks; 3312 3313 nxge_status_t status; 3314 3315 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3316 3317 if (!nxgep->rx_buf_pool_p) { 3318 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3319 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3320 "<== nxge_map_rxdma: buf not allocated")); 3321 return (NXGE_ERROR); 3322 } 3323 } 3324 3325 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3326 return (NXGE_ERROR); 3327 3328 /* 3329 * Timeout should be set based on the system clock divider. 3330 * A timeout value of 1 assumes that the 3331 * granularity (1000) is 3 microseconds running at 300MHz. 3332 */ 3333 3334 nxgep->intr_threshold = nxge_rcr_threshold; 3335 nxgep->intr_timeout = nxge_rcr_timeout; 3336 3337 /* 3338 * Map descriptors from the buffer polls for each dma channel. 3339 */ 3340 3341 /* 3342 * Set up and prepare buffer blocks, descriptors 3343 * and mailbox. 
3344 */ 3345 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3346 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3347 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3348 3349 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3350 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3351 3352 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3353 3354 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3355 chunks, control, rcr_ring, mailbox); 3356 if (status != NXGE_OK) { 3357 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3358 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3359 "returned 0x%x", 3360 channel, status)); 3361 return (status); 3362 } 3363 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3364 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3365 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3366 &nxgep->statsp->rdc_stats[channel]; 3367 3368 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3369 if (!isLDOMguest(nxgep)) 3370 nxge_rdc_hvio_setup(nxgep, channel); 3371 #endif 3372 3373 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3374 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3375 3376 return (status); 3377 } 3378 3379 static void 3380 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3381 { 3382 rx_rbr_ring_t *rbr_ring; 3383 rx_rcr_ring_t *rcr_ring; 3384 rx_mbox_t *mailbox; 3385 3386 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3387 3388 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3389 !nxgep->rx_mbox_areas_p) 3390 return; 3391 3392 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3393 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3394 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3395 3396 if (!rbr_ring || !rcr_ring || !mailbox) 3397 return; 3398 3399 (void) nxge_unmap_rxdma_channel( 3400 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3401 3402 nxge_free_rxb(nxgep, channel); 3403 3404 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3405 } 3406 3407 nxge_status_t 3408 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3409 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3410 uint32_t num_chunks, 3411 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3412 p_rx_mbox_t *rx_mbox_p) 3413 { 3414 int status = NXGE_OK; 3415 3416 /* 3417 * Set up and prepare buffer blocks, descriptors 3418 * and mailbox. 3419 */ 3420 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3421 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3422 /* 3423 * Receive buffer blocks 3424 */ 3425 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3426 dma_buf_p, rbr_p, num_chunks); 3427 if (status != NXGE_OK) { 3428 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3429 "==> nxge_map_rxdma_channel (channel %d): " 3430 "map buffer failed 0x%x", channel, status)); 3431 goto nxge_map_rxdma_channel_exit; 3432 } 3433 3434 /* 3435 * Receive block ring, completion ring and mailbox. 
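 * nxge_map_rxdma_channel_cfg_ring() builds the software images of the
 * per-channel hardware state: it fills in the RBR descriptor entries
 * and the RBR config/kick register values, allocates and initializes
 * the RCR ring structure, and sets up the mailbox used for
 * completion and queue-length updates.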
3436 */ 3437 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3438 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3439 if (status != NXGE_OK) { 3440 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3441 "==> nxge_map_rxdma_channel (channel %d): " 3442 "map config failed 0x%x", channel, status)); 3443 goto nxge_map_rxdma_channel_fail2; 3444 } 3445 3446 goto nxge_map_rxdma_channel_exit; 3447 3448 nxge_map_rxdma_channel_fail3: 3449 /* Free rbr, rcr */ 3450 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3451 "==> nxge_map_rxdma_channel: free rbr/rcr " 3452 "(status 0x%x channel %d)", 3453 status, channel)); 3454 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3455 *rcr_p, *rx_mbox_p); 3456 3457 nxge_map_rxdma_channel_fail2: 3458 /* Free buffer blocks */ 3459 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3460 "==> nxge_map_rxdma_channel: free rx buffers" 3461 "(nxgep 0x%x status 0x%x channel %d)", 3462 nxgep, status, channel)); 3463 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3464 3465 status = NXGE_ERROR; 3466 3467 nxge_map_rxdma_channel_exit: 3468 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3469 "<== nxge_map_rxdma_channel: " 3470 "(nxgep 0x%x status 0x%x channel %d)", 3471 nxgep, status, channel)); 3472 3473 return (status); 3474 } 3475 3476 /*ARGSUSED*/ 3477 static void 3478 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3479 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3480 { 3481 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3482 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3483 3484 /* 3485 * unmap receive block ring, completion ring and mailbox. 3486 */ 3487 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3488 rcr_p, rx_mbox_p); 3489 3490 /* unmap buffer blocks */ 3491 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3492 3493 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3494 } 3495 3496 /*ARGSUSED*/ 3497 static nxge_status_t 3498 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3499 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3500 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3501 { 3502 p_rx_rbr_ring_t rbrp; 3503 p_rx_rcr_ring_t rcrp; 3504 p_rx_mbox_t mboxp; 3505 p_nxge_dma_common_t cntl_dmap; 3506 p_nxge_dma_common_t dmap; 3507 p_rx_msg_t *rx_msg_ring; 3508 p_rx_msg_t rx_msg_p; 3509 p_rbr_cfig_a_t rcfga_p; 3510 p_rbr_cfig_b_t rcfgb_p; 3511 p_rcrcfig_a_t cfga_p; 3512 p_rcrcfig_b_t cfgb_p; 3513 p_rxdma_cfig1_t cfig1_p; 3514 p_rxdma_cfig2_t cfig2_p; 3515 p_rbr_kick_t kick_p; 3516 uint32_t dmaaddrp; 3517 uint32_t *rbr_vaddrp; 3518 uint32_t bkaddr; 3519 nxge_status_t status = NXGE_OK; 3520 int i; 3521 uint32_t nxge_port_rcr_size; 3522 3523 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3524 "==> nxge_map_rxdma_channel_cfg_ring")); 3525 3526 cntl_dmap = *dma_cntl_p; 3527 3528 /* Map in the receive block ring */ 3529 rbrp = *rbr_p; 3530 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3531 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3532 /* 3533 * Zero out buffer block ring descriptors. 
3534 */ 3535 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3536 3537 rcfga_p = &(rbrp->rbr_cfga); 3538 rcfgb_p = &(rbrp->rbr_cfgb); 3539 kick_p = &(rbrp->rbr_kick); 3540 rcfga_p->value = 0; 3541 rcfgb_p->value = 0; 3542 kick_p->value = 0; 3543 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3544 rcfga_p->value = (rbrp->rbr_addr & 3545 (RBR_CFIG_A_STDADDR_MASK | 3546 RBR_CFIG_A_STDADDR_BASE_MASK)); 3547 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3548 3549 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3550 rcfgb_p->bits.ldw.vld0 = 1; 3551 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3552 rcfgb_p->bits.ldw.vld1 = 1; 3553 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3554 rcfgb_p->bits.ldw.vld2 = 1; 3555 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3556 3557 /* 3558 * For each buffer block, enter receive block address to the ring. 3559 */ 3560 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3561 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3562 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3563 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3564 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3565 3566 rx_msg_ring = rbrp->rx_msg_ring; 3567 for (i = 0; i < rbrp->tnblocks; i++) { 3568 rx_msg_p = rx_msg_ring[i]; 3569 rx_msg_p->nxgep = nxgep; 3570 rx_msg_p->rx_rbr_p = rbrp; 3571 bkaddr = (uint32_t) 3572 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3573 >> RBR_BKADDR_SHIFT)); 3574 rx_msg_p->free = B_FALSE; 3575 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3576 3577 *rbr_vaddrp++ = bkaddr; 3578 } 3579 3580 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3581 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3582 3583 rbrp->rbr_rd_index = 0; 3584 3585 rbrp->rbr_consumed = 0; 3586 rbrp->rbr_use_bcopy = B_TRUE; 3587 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3588 /* 3589 * Do bcopy on packets greater than bcopy size once 3590 * the lo threshold is reached. 3591 * This lo threshold should be less than the hi threshold. 3592 * 3593 * Do bcopy on every packet once the hi threshold is reached. 
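 *
 * Worked example (illustrative values; assumes NXGE_RX_BCOPY_SCALE
 * is 8): with rbb_max = 4096, nxge_rx_threshold_hi = NXGE_RX_COPY_6
 * and nxge_rx_threshold_lo = NXGE_RX_COPY_3, the switches below give
 *	rbr_threshold_hi = 4096 * 6 / 8 = 3072
 *	rbr_threshold_lo = 4096 * 3 / 8 = 1536
 * so buffers are loaned up while fewer than 1536 are outstanding,
 * copied selectively (by buffer size type) between 1536 and 3072,
 * and always copied once 3072 are outstanding.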
3594 */ 3595 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3596 /* default it to use hi */ 3597 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3598 } 3599 3600 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3601 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3602 } 3603 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3604 3605 switch (nxge_rx_threshold_hi) { 3606 default: 3607 case NXGE_RX_COPY_NONE: 3608 /* Do not do bcopy at all */ 3609 rbrp->rbr_use_bcopy = B_FALSE; 3610 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3611 break; 3612 3613 case NXGE_RX_COPY_1: 3614 case NXGE_RX_COPY_2: 3615 case NXGE_RX_COPY_3: 3616 case NXGE_RX_COPY_4: 3617 case NXGE_RX_COPY_5: 3618 case NXGE_RX_COPY_6: 3619 case NXGE_RX_COPY_7: 3620 rbrp->rbr_threshold_hi = 3621 rbrp->rbb_max * 3622 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3623 break; 3624 3625 case NXGE_RX_COPY_ALL: 3626 rbrp->rbr_threshold_hi = 0; 3627 break; 3628 } 3629 3630 switch (nxge_rx_threshold_lo) { 3631 default: 3632 case NXGE_RX_COPY_NONE: 3633 /* Do not do bcopy at all */ 3634 if (rbrp->rbr_use_bcopy) { 3635 rbrp->rbr_use_bcopy = B_FALSE; 3636 } 3637 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3638 break; 3639 3640 case NXGE_RX_COPY_1: 3641 case NXGE_RX_COPY_2: 3642 case NXGE_RX_COPY_3: 3643 case NXGE_RX_COPY_4: 3644 case NXGE_RX_COPY_5: 3645 case NXGE_RX_COPY_6: 3646 case NXGE_RX_COPY_7: 3647 rbrp->rbr_threshold_lo = 3648 rbrp->rbb_max * 3649 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3650 break; 3651 3652 case NXGE_RX_COPY_ALL: 3653 rbrp->rbr_threshold_lo = 0; 3654 break; 3655 } 3656 3657 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3658 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3659 "rbb_max %d " 3660 "rbrp->rbr_bufsize_type %d " 3661 "rbb_threshold_hi %d " 3662 "rbb_threshold_lo %d", 3663 dma_channel, 3664 rbrp->rbb_max, 3665 rbrp->rbr_bufsize_type, 3666 rbrp->rbr_threshold_hi, 3667 rbrp->rbr_threshold_lo)); 3668 3669 rbrp->page_valid.value = 0; 3670 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3671 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3672 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3673 rbrp->page_hdl.value = 0; 3674 3675 rbrp->page_valid.bits.ldw.page0 = 1; 3676 rbrp->page_valid.bits.ldw.page1 = 1; 3677 3678 /* Map in the receive completion ring */ 3679 rcrp = (p_rx_rcr_ring_t) 3680 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3681 rcrp->rdc = dma_channel; 3682 3683 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3684 rcrp->comp_size = nxge_port_rcr_size; 3685 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3686 3687 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3688 3689 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3690 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3691 sizeof (rcr_entry_t)); 3692 rcrp->comp_rd_index = 0; 3693 rcrp->comp_wt_index = 0; 3694 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3695 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3696 #if defined(__i386) 3697 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3698 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3699 #else 3700 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3701 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3702 #endif 3703 3704 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3705 (nxge_port_rcr_size - 1); 3706 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3707 (nxge_port_rcr_size - 1); 3708 3709 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3710 "==> nxge_map_rxdma_channel_cfg_ring: " 3711 "channel %d " 3712 "rbr_vaddrp $%p " 3713 "rcr_desc_rd_head_p $%p " 3714 "rcr_desc_rd_head_pp 
$%p " 3715 "rcr_desc_rd_last_p $%p " 3716 "rcr_desc_rd_last_pp $%p ", 3717 dma_channel, 3718 rbr_vaddrp, 3719 rcrp->rcr_desc_rd_head_p, 3720 rcrp->rcr_desc_rd_head_pp, 3721 rcrp->rcr_desc_last_p, 3722 rcrp->rcr_desc_last_pp)); 3723 3724 /* 3725 * Zero out buffer block ring descriptors. 3726 */ 3727 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3728 3729 rcrp->intr_timeout = (nxgep->intr_timeout < 3730 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3731 nxgep->intr_timeout; 3732 3733 rcrp->intr_threshold = (nxgep->intr_threshold < 3734 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3735 nxgep->intr_threshold; 3736 3737 rcrp->full_hdr_flag = B_FALSE; 3738 rcrp->sw_priv_hdr_len = 0; 3739 3740 cfga_p = &(rcrp->rcr_cfga); 3741 cfgb_p = &(rcrp->rcr_cfgb); 3742 cfga_p->value = 0; 3743 cfgb_p->value = 0; 3744 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3745 cfga_p->value = (rcrp->rcr_addr & 3746 (RCRCFIG_A_STADDR_MASK | 3747 RCRCFIG_A_STADDR_BASE_MASK)); 3748 3749 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3750 RCRCFIG_A_LEN_SHIF); 3751 3752 /* 3753 * Timeout should be set based on the system clock divider. 3754 * A timeout value of 1 assumes that the 3755 * granularity (1000) is 3 microseconds running at 300MHz. 3756 */ 3757 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3758 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3759 cfgb_p->bits.ldw.entout = 1; 3760 3761 /* Map in the mailbox */ 3762 mboxp = (p_rx_mbox_t) 3763 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3764 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3765 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3766 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3767 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3768 cfig1_p->value = cfig2_p->value = 0; 3769 3770 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3771 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3772 "==> nxge_map_rxdma_channel_cfg_ring: " 3773 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3774 dma_channel, cfig1_p->value, cfig2_p->value, 3775 mboxp->mbox_addr)); 3776 3777 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3778 & 0xfff); 3779 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3780 3781 3782 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3783 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3784 RXDMA_CFIG2_MBADDR_L_MASK); 3785 3786 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3787 3788 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3789 "==> nxge_map_rxdma_channel_cfg_ring: " 3790 "channel %d damaddrp $%p " 3791 "cfg1 0x%016llx cfig2 0x%016llx", 3792 dma_channel, dmaaddrp, 3793 cfig1_p->value, cfig2_p->value)); 3794 3795 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3796 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3797 3798 rbrp->rx_rcr_p = rcrp; 3799 rcrp->rx_rbr_p = rbrp; 3800 *rcr_p = rcrp; 3801 *rx_mbox_p = mboxp; 3802 3803 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3804 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3805 3806 return (status); 3807 } 3808 3809 /*ARGSUSED*/ 3810 static void 3811 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3812 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3813 { 3814 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3815 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3816 rcr_p->rdc)); 3817 3818 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3819 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3820 3821 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3822 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3823 } 3824 3825 static nxge_status_t 3826 
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3827 p_nxge_dma_common_t *dma_buf_p, 3828 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3829 { 3830 p_rx_rbr_ring_t rbrp; 3831 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3832 p_rx_msg_t *rx_msg_ring; 3833 p_rx_msg_t rx_msg_p; 3834 p_mblk_t mblk_p; 3835 3836 rxring_info_t *ring_info; 3837 nxge_status_t status = NXGE_OK; 3838 int i, j, index; 3839 uint32_t size, bsize, nblocks, nmsgs; 3840 3841 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3842 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3843 channel)); 3844 3845 dma_bufp = tmp_bufp = *dma_buf_p; 3846 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3847 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3848 "chunks bufp 0x%016llx", 3849 channel, num_chunks, dma_bufp)); 3850 3851 nmsgs = 0; 3852 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3853 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3854 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3855 "bufp 0x%016llx nblocks %d nmsgs %d", 3856 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3857 nmsgs += tmp_bufp->nblocks; 3858 } 3859 if (!nmsgs) { 3860 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3861 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3862 "no msg blocks", 3863 channel)); 3864 status = NXGE_ERROR; 3865 goto nxge_map_rxdma_channel_buf_ring_exit; 3866 } 3867 3868 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3869 3870 size = nmsgs * sizeof (p_rx_msg_t); 3871 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3872 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3873 KM_SLEEP); 3874 3875 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3876 (void *)nxgep->interrupt_cookie); 3877 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3878 (void *)nxgep->interrupt_cookie); 3879 rbrp->rdc = channel; 3880 rbrp->num_blocks = num_chunks; 3881 rbrp->tnblocks = nmsgs; 3882 rbrp->rbb_max = nmsgs; 3883 rbrp->rbr_max_size = nmsgs; 3884 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3885 3886 /* 3887 * Buffer sizes suggested by the NIU architect: 3888 * 256 bytes, 1K and 2K (4K or 8K when jumbo is enabled). 3889 */ 3890 3891 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3892 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3893 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3894 3895 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3896 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3897 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3898 3899 rbrp->block_size = nxgep->rx_default_block_size; 3900 3901 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3902 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3903 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3904 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3905 } else { 3906 if (rbrp->block_size >= 0x2000) { 3907 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3908 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3909 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3910 } else { 3911 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3912 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3913 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3914 } 3915 } 3916 3917 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3918 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3919 "actual rbr max %d rbb_max %d nmsgs %d " 3920 "rbrp->block_size %d default_block_size %d " 3921 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3922 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3923 rbrp->block_size, nxgep->rx_default_block_size, 3924 nxge_rbr_size, nxge_rbr_spare_size)); 3925 3926 /* Map in buffers from the buffer pool.
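 * For each DMA chunk the loop below records its I/O and kernel
 * addresses in ring_info (used later to translate a hardware packet
 * address back to a kernel buffer), then carves the chunk into
 * <nblocks> blocks, wrapping each one in an rx_msg_t via nxge_allocb()
 * and storing it in rx_msg_ring[].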
*/ 3927 index = 0; 3928 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3929 bsize = dma_bufp->block_size; 3930 nblocks = dma_bufp->nblocks; 3931 #if defined(__i386) 3932 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3933 #else 3934 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3935 #endif 3936 ring_info->buffer[i].buf_index = i; 3937 ring_info->buffer[i].buf_size = dma_bufp->alength; 3938 ring_info->buffer[i].start_index = index; 3939 #if defined(__i386) 3940 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3941 #else 3942 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3943 #endif 3944 3945 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3946 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3947 "chunk %d" 3948 " nblocks %d chunk_size %x block_size 0x%x " 3949 "dma_bufp $%p", channel, i, 3950 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3951 dma_bufp)); 3952 3953 for (j = 0; j < nblocks; j++) { 3954 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3955 dma_bufp)) == NULL) { 3956 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3957 "allocb failed (index %d i %d j %d)", 3958 index, i, j)); 3959 goto nxge_map_rxdma_channel_buf_ring_fail1; 3960 } 3961 rx_msg_ring[index] = rx_msg_p; 3962 rx_msg_p->block_index = index; 3963 rx_msg_p->shifted_addr = (uint32_t) 3964 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3965 RBR_BKADDR_SHIFT)); 3966 3967 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3968 "index %d j %d rx_msg_p $%p mblk %p", 3969 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3970 3971 mblk_p = rx_msg_p->rx_mblk_p; 3972 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3973 3974 rbrp->rbr_ref_cnt++; 3975 index++; 3976 rx_msg_p->buf_dma.dma_channel = channel; 3977 } 3978 3979 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3980 if (dma_bufp->contig_alloc_type) { 3981 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3982 } 3983 3984 if (dma_bufp->kmem_alloc_type) { 3985 rbrp->rbr_alloc_type = KMEM_ALLOC; 3986 } 3987 3988 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3989 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3990 "chunk %d" 3991 " nblocks %d chunk_size %x block_size 0x%x " 3992 "dma_bufp $%p", 3993 channel, i, 3994 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3995 dma_bufp)); 3996 } 3997 if (i < rbrp->num_blocks) { 3998 goto nxge_map_rxdma_channel_buf_ring_fail1; 3999 } 4000 4001 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4002 "nxge_map_rxdma_channel_buf_ring: done buf init " 4003 "channel %d msg block entries %d", 4004 channel, index)); 4005 ring_info->block_size_mask = bsize - 1; 4006 rbrp->rx_msg_ring = rx_msg_ring; 4007 rbrp->dma_bufp = dma_buf_p; 4008 rbrp->ring_info = ring_info; 4009 4010 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 4011 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4012 " nxge_map_rxdma_channel_buf_ring: " 4013 "channel %d done buf info init", channel)); 4014 4015 /* 4016 * Finally, permit nxge_freeb() to call nxge_post_page(). 
4017 */ 4018 rbrp->rbr_state = RBR_POSTING; 4019 4020 *rbr_p = rbrp; 4021 goto nxge_map_rxdma_channel_buf_ring_exit; 4022 4023 nxge_map_rxdma_channel_buf_ring_fail1: 4024 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4025 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 4026 channel, status)); 4027 4028 index--; 4029 for (; index >= 0; index--) { 4030 rx_msg_p = rx_msg_ring[index]; 4031 if (rx_msg_p != NULL) { 4032 freeb(rx_msg_p->rx_mblk_p); 4033 rx_msg_ring[index] = NULL; 4034 } 4035 } 4036 nxge_map_rxdma_channel_buf_ring_fail: 4037 MUTEX_DESTROY(&rbrp->post_lock); 4038 MUTEX_DESTROY(&rbrp->lock); 4039 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4040 KMEM_FREE(rx_msg_ring, size); 4041 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 4042 4043 status = NXGE_ERROR; 4044 4045 nxge_map_rxdma_channel_buf_ring_exit: 4046 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4047 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 4048 4049 return (status); 4050 } 4051 4052 /*ARGSUSED*/ 4053 static void 4054 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 4055 p_rx_rbr_ring_t rbr_p) 4056 { 4057 p_rx_msg_t *rx_msg_ring; 4058 p_rx_msg_t rx_msg_p; 4059 rxring_info_t *ring_info; 4060 int i; 4061 uint32_t size; 4062 #ifdef NXGE_DEBUG 4063 int num_chunks; 4064 #endif 4065 4066 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4067 "==> nxge_unmap_rxdma_channel_buf_ring")); 4068 if (rbr_p == NULL) { 4069 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4070 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 4071 return; 4072 } 4073 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4074 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 4075 rbr_p->rdc)); 4076 4077 rx_msg_ring = rbr_p->rx_msg_ring; 4078 ring_info = rbr_p->ring_info; 4079 4080 if (rx_msg_ring == NULL || ring_info == NULL) { 4081 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4082 "<== nxge_unmap_rxdma_channel_buf_ring: " 4083 "rx_msg_ring $%p ring_info $%p", 4084 rx_msg_p, ring_info)); 4085 return; 4086 } 4087 4088 #ifdef NXGE_DEBUG 4089 num_chunks = rbr_p->num_blocks; 4090 #endif 4091 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 4092 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4093 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 4094 "tnblocks %d (max %d) size ptrs %d ", 4095 rbr_p->rdc, num_chunks, 4096 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 4097 4098 for (i = 0; i < rbr_p->tnblocks; i++) { 4099 rx_msg_p = rx_msg_ring[i]; 4100 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4101 " nxge_unmap_rxdma_channel_buf_ring: " 4102 "rx_msg_p $%p", 4103 rx_msg_p)); 4104 if (rx_msg_p != NULL) { 4105 freeb(rx_msg_p->rx_mblk_p); 4106 rx_msg_ring[i] = NULL; 4107 } 4108 } 4109 4110 /* 4111 * We no longer may use the mutex <post_lock>. By setting 4112 * <rbr_state> to anything but POSTING, we prevent 4113 * nxge_post_page() from accessing a dead mutex. 4114 */ 4115 rbr_p->rbr_state = RBR_UNMAPPING; 4116 MUTEX_DESTROY(&rbr_p->post_lock); 4117 4118 MUTEX_DESTROY(&rbr_p->lock); 4119 4120 if (rbr_p->rbr_ref_cnt == 0) { 4121 /* 4122 * This is the normal state of affairs. 4123 * Need to free the following buffers: 4124 * - data buffers 4125 * - rx_msg ring 4126 * - ring_info 4127 * - rbr ring 4128 */ 4129 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4130 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4131 nxge_rxdma_databuf_free(rbr_p); 4132 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4133 KMEM_FREE(rx_msg_ring, size); 4134 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4135 } else { 4136 /* 4137 * Some of our buffers are still being used. 4138 * Therefore, tell nxge_freeb() this ring is 4139 * unmapped, so it may free <rbr_p> for us. 
4140 */ 4141 rbr_p->rbr_state = RBR_UNMAPPED; 4142 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4143 "unmap_rxdma_buf_ring: %d %s outstanding.", 4144 rbr_p->rbr_ref_cnt, 4145 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 4146 } 4147 4148 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4149 "<== nxge_unmap_rxdma_channel_buf_ring")); 4150 } 4151 4152 /* 4153 * nxge_rxdma_hw_start_common 4154 * 4155 * Arguments: 4156 * nxgep 4157 * 4158 * Notes: 4159 * 4160 * NPI/NXGE function calls: 4161 * nxge_init_fzc_rx_common(); 4162 * nxge_init_fzc_rxdma_port(); 4163 * 4164 * Registers accessed: 4165 * 4166 * Context: 4167 * Service domain 4168 */ 4169 static nxge_status_t 4170 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 4171 { 4172 nxge_status_t status = NXGE_OK; 4173 4174 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4175 4176 /* 4177 * Load the sharable parameters by writing to the 4178 * function zero control registers. These FZC registers 4179 * should be initialized only once for the entire chip. 4180 */ 4181 (void) nxge_init_fzc_rx_common(nxgep); 4182 4183 /* 4184 * Initialize the RXDMA port specific FZC control configurations. 4185 * These FZC registers are pertaining to each port. 4186 */ 4187 (void) nxge_init_fzc_rxdma_port(nxgep); 4188 4189 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4190 4191 return (status); 4192 } 4193 4194 static nxge_status_t 4195 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 4196 { 4197 int i, ndmas; 4198 p_rx_rbr_rings_t rx_rbr_rings; 4199 p_rx_rbr_ring_t *rbr_rings; 4200 p_rx_rcr_rings_t rx_rcr_rings; 4201 p_rx_rcr_ring_t *rcr_rings; 4202 p_rx_mbox_areas_t rx_mbox_areas_p; 4203 p_rx_mbox_t *rx_mbox_p; 4204 nxge_status_t status = NXGE_OK; 4205 4206 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 4207 4208 rx_rbr_rings = nxgep->rx_rbr_rings; 4209 rx_rcr_rings = nxgep->rx_rcr_rings; 4210 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4211 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4212 "<== nxge_rxdma_hw_start: NULL ring pointers")); 4213 return (NXGE_ERROR); 4214 } 4215 ndmas = rx_rbr_rings->ndmas; 4216 if (ndmas == 0) { 4217 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4218 "<== nxge_rxdma_hw_start: no dma channel allocated")); 4219 return (NXGE_ERROR); 4220 } 4221 4222 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4223 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 4224 4225 rbr_rings = rx_rbr_rings->rbr_rings; 4226 rcr_rings = rx_rcr_rings->rcr_rings; 4227 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 4228 if (rx_mbox_areas_p) { 4229 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 4230 } 4231 4232 i = channel; 4233 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4234 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 4235 ndmas, channel)); 4236 status = nxge_rxdma_start_channel(nxgep, channel, 4237 (p_rx_rbr_ring_t)rbr_rings[i], 4238 (p_rx_rcr_ring_t)rcr_rings[i], 4239 (p_rx_mbox_t)rx_mbox_p[i]); 4240 if (status != NXGE_OK) { 4241 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4242 "==> nxge_rxdma_hw_start: disable " 4243 "(status 0x%x channel %d)", status, channel)); 4244 return (status); 4245 } 4246 4247 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 4248 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4249 rx_rbr_rings, rx_rcr_rings)); 4250 4251 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4252 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 4253 4254 return (status); 4255 } 4256 4257 static void 4258 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 4259 { 4260 p_rx_rbr_rings_t rx_rbr_rings; 4261 p_rx_rcr_rings_t rx_rcr_rings; 4262 4263 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
nxge_rxdma_hw_stop")); 4264 4265 rx_rbr_rings = nxgep->rx_rbr_rings; 4266 rx_rcr_rings = nxgep->rx_rcr_rings; 4267 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4268 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4269 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 4270 return; 4271 } 4272 4273 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4274 "==> nxge_rxdma_hw_stop(channel %d)", 4275 channel)); 4276 (void) nxge_rxdma_stop_channel(nxgep, channel); 4277 4278 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 4279 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4280 rx_rbr_rings, rx_rcr_rings)); 4281 4282 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 4283 } 4284 4285 4286 static nxge_status_t 4287 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 4288 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 4289 4290 { 4291 npi_handle_t handle; 4292 npi_status_t rs = NPI_SUCCESS; 4293 rx_dma_ctl_stat_t cs; 4294 rx_dma_ent_msk_t ent_mask; 4295 nxge_status_t status = NXGE_OK; 4296 4297 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 4298 4299 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4300 4301 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 4302 "npi handle addr $%p acc $%p", 4303 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4304 4305 /* Reset RXDMA channel, but not if you're a guest. */ 4306 if (!isLDOMguest(nxgep)) { 4307 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4308 if (rs != NPI_SUCCESS) { 4309 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4310 "==> nxge_init_fzc_rdc: " 4311 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4312 channel, rs)); 4313 return (NXGE_ERROR | rs); 4314 } 4315 4316 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4317 "==> nxge_rxdma_start_channel: reset done: channel %d", 4318 channel)); 4319 } 4320 4321 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4322 if (isLDOMguest(nxgep)) 4323 (void) nxge_rdc_lp_conf(nxgep, channel); 4324 #endif 4325 4326 /* 4327 * Initialize the RXDMA channel specific FZC control 4328 * configurations. These FZC registers are pertaining 4329 * to each RX channel (logical pages). 4330 */ 4331 if (!isLDOMguest(nxgep)) { 4332 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4333 if (status != NXGE_OK) { 4334 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4335 "==> nxge_rxdma_start_channel: " 4336 "init fzc rxdma failed (0x%08x channel %d)", 4337 status, channel)); 4338 return (status); 4339 } 4340 4341 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4342 "==> nxge_rxdma_start_channel: fzc done")); 4343 } 4344 4345 /* Set up the interrupt event masks. 
*/ 4346 ent_mask.value = 0; 4347 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 4348 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4349 &ent_mask); 4350 if (rs != NPI_SUCCESS) { 4351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4352 "==> nxge_rxdma_start_channel: " 4353 "init rxdma event masks failed " 4354 "(0x%08x channel %d)", 4355 rs, channel)); 4356 return (NXGE_ERROR | rs); 4357 } 4358 4359 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4360 "==> nxge_rxdma_start_channel: " 4361 "event done: channel %d (mask 0x%016llx)", 4362 channel, ent_mask.value)); 4363 4364 /* Initialize the receive DMA control and status register */ 4365 cs.value = 0; 4366 cs.bits.hdw.mex = 1; 4367 cs.bits.hdw.rcrthres = 1; 4368 cs.bits.hdw.rcrto = 1; 4369 cs.bits.hdw.rbr_empty = 1; 4370 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4371 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4372 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value)); 4373 if (status != NXGE_OK) { 4374 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4375 "==> nxge_rxdma_start_channel: " 4376 "init rxdma control register failed (0x%08x channel %d)", 4377 status, channel)); 4378 return (status); 4379 } 4380 4381 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4382 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4383 4384 /* 4385 * Load RXDMA descriptors, buffers, mailbox, 4386 * initialize the receive DMA channel, and 4387 * enable it. 4388 */ 4389 status = nxge_enable_rxdma_channel(nxgep, 4390 channel, rbr_p, rcr_p, mbox_p); 4391 4392 if (status != NXGE_OK) { 4393 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4394 " nxge_rxdma_start_channel: " 4395 " enable rxdma failed (0x%08x channel %d)", 4396 status, channel)); 4397 return (status); 4398 } 4399 4400 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4401 "==> nxge_rxdma_start_channel: enabled channel %d", channel)); 4402 4403 if (isLDOMguest(nxgep)) { 4404 /* Add interrupt handler for this channel. 
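* In a guest domain the RX interrupt is registered through the hybrid I/O framework (nxge_hio_intr_add) rather than the service domain's interrupt path.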
*/ 4405 if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4406 != NXGE_OK) { 4407 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4408 " nxge_rxdma_start_channel: " 4409 " nxge_hio_intr_add failed (0x%08x channel %d)", 4410 status, channel)); 4411 } 4412 } 4413 4414 ent_mask.value = 0; 4415 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4416 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4417 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4418 &ent_mask); 4419 if (rs != NPI_SUCCESS) { 4420 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4421 "==> nxge_rxdma_start_channel: " 4422 "init rxdma event masks failed (0x%08x channel %d)", 4423 status, channel)); 4424 return (NXGE_ERROR | rs); 4425 } 4426 4427 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4428 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4429 4430 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4431 4432 return (NXGE_OK); 4433 } 4434 4435 static nxge_status_t 4436 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4437 { 4438 npi_handle_t handle; 4439 npi_status_t rs = NPI_SUCCESS; 4440 rx_dma_ctl_stat_t cs; 4441 rx_dma_ent_msk_t ent_mask; 4442 nxge_status_t status = NXGE_OK; 4443 4444 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4445 4446 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4447 4448 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4449 "npi handle addr $%p acc $%p", 4450 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4451 4452 if (!isLDOMguest(nxgep)) { 4453 /* 4454 * Stop RxMAC = A.9.2.6 4455 */ 4456 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4457 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4458 "nxge_rxdma_stop_channel: " 4459 "Failed to disable RxMAC")); 4460 } 4461 4462 /* 4463 * Drain IPP Port = A.9.3.6 4464 */ 4465 (void) nxge_ipp_drain(nxgep); 4466 } 4467 4468 /* Reset RXDMA channel */ 4469 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4470 if (rs != NPI_SUCCESS) { 4471 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4472 " nxge_rxdma_stop_channel: " 4473 " reset rxdma failed (0x%08x channel %d)", 4474 rs, channel)); 4475 return (NXGE_ERROR | rs); 4476 } 4477 4478 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4479 "==> nxge_rxdma_stop_channel: reset done")); 4480 4481 /* Set up the interrupt event masks. */ 4482 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4483 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4484 &ent_mask); 4485 if (rs != NPI_SUCCESS) { 4486 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4487 "==> nxge_rxdma_stop_channel: " 4488 "set rxdma event masks failed (0x%08x channel %d)", 4489 rs, channel)); 4490 return (NXGE_ERROR | rs); 4491 } 4492 4493 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4494 "==> nxge_rxdma_stop_channel: event done")); 4495 4496 /* 4497 * Initialize the receive DMA control and status register 4498 */ 4499 cs.value = 0; 4500 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4501 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4502 " to default (all 0s) 0x%08x", cs.value)); 4503 if (status != NXGE_OK) { 4504 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4505 " nxge_rxdma_stop_channel: init rxdma" 4506 " control register failed (0x%08x channel %d", 4507 status, channel)); 4508 return (status); 4509 } 4510 4511 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4512 "==> nxge_rxdma_stop_channel: control done")); 4513 4514 /* 4515 * Make sure channel is disabled. 
4516 */ 4517 status = nxge_disable_rxdma_channel(nxgep, channel); 4518 4519 if (status != NXGE_OK) { 4520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4521 " nxge_rxdma_stop_channel: " 4522 " init enable rxdma failed (0x%08x channel %d)", 4523 status, channel)); 4524 return (status); 4525 } 4526 4527 if (!isLDOMguest(nxgep)) { 4528 /* 4529 * Enable RxMAC = A.9.2.10 4530 */ 4531 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4532 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4533 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4534 } 4535 } 4536 4537 NXGE_DEBUG_MSG((nxgep, 4538 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4539 4540 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4541 4542 return (NXGE_OK); 4543 } 4544 4545 nxge_status_t 4546 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4547 { 4548 npi_handle_t handle; 4549 p_nxge_rdc_sys_stats_t statsp; 4550 rx_ctl_dat_fifo_stat_t stat; 4551 uint32_t zcp_err_status; 4552 uint32_t ipp_err_status; 4553 nxge_status_t status = NXGE_OK; 4554 npi_status_t rs = NPI_SUCCESS; 4555 boolean_t my_err = B_FALSE; 4556 4557 handle = nxgep->npi_handle; 4558 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4559 4560 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4561 4562 if (rs != NPI_SUCCESS) 4563 return (NXGE_ERROR | rs); 4564 4565 if (stat.bits.ldw.id_mismatch) { 4566 statsp->id_mismatch++; 4567 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4568 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4569 /* Global fatal error encountered */ 4570 } 4571 4572 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4573 switch (nxgep->mac.portnum) { 4574 case 0: 4575 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4576 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4577 my_err = B_TRUE; 4578 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4579 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4580 } 4581 break; 4582 case 1: 4583 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4584 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4585 my_err = B_TRUE; 4586 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4587 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4588 } 4589 break; 4590 case 2: 4591 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4592 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4593 my_err = B_TRUE; 4594 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4595 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4596 } 4597 break; 4598 case 3: 4599 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4600 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4601 my_err = B_TRUE; 4602 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4603 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4604 } 4605 break; 4606 default: 4607 return (NXGE_ERROR); 4608 } 4609 } 4610 4611 if (my_err) { 4612 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4613 zcp_err_status); 4614 if (status != NXGE_OK) 4615 return (status); 4616 } 4617 4618 return (NXGE_OK); 4619 } 4620 4621 static nxge_status_t 4622 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4623 uint32_t zcp_status) 4624 { 4625 boolean_t rxport_fatal = B_FALSE; 4626 p_nxge_rdc_sys_stats_t statsp; 4627 nxge_status_t status = NXGE_OK; 4628 uint8_t portn; 4629 4630 portn = nxgep->mac.portnum; 4631 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4632 4633 if (ipp_status & (0x1 << portn)) { 4634 statsp->ipp_eop_err++; 4635 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4636 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4637 rxport_fatal = B_TRUE; 4638 } 4639 4640 if (zcp_status & (0x1 << 
portn)) { 4641 statsp->zcp_eop_err++; 4642 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4643 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4644 rxport_fatal = B_TRUE; 4645 } 4646 4647 if (rxport_fatal) { 4648 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4649 " nxge_rxdma_handle_port_error: " 4650 " fatal error on Port #%d\n", 4651 portn)); 4652 status = nxge_rx_port_fatal_err_recover(nxgep); 4653 if (status == NXGE_OK) { 4654 FM_SERVICE_RESTORED(nxgep); 4655 } 4656 } 4657 4658 return (status); 4659 } 4660 4661 static nxge_status_t 4662 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4663 { 4664 npi_handle_t handle; 4665 npi_status_t rs = NPI_SUCCESS; 4666 nxge_status_t status = NXGE_OK; 4667 p_rx_rbr_ring_t rbrp; 4668 p_rx_rcr_ring_t rcrp; 4669 p_rx_mbox_t mboxp; 4670 rx_dma_ent_msk_t ent_mask; 4671 p_nxge_dma_common_t dmap; 4672 int ring_idx; 4673 uint32_t ref_cnt; 4674 p_rx_msg_t rx_msg_p; 4675 int i; 4676 uint32_t nxge_port_rcr_size; 4677 4678 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4679 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4680 "Recovering from RxDMAChannel#%d error...", channel)); 4681 4682 /* 4683 * Stop the dma channel waits for the stop done. 4684 * If the stop done bit is not set, then create 4685 * an error. 4686 */ 4687 4688 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4689 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4690 4691 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4692 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4693 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4694 4695 MUTEX_ENTER(&rcrp->lock); 4696 MUTEX_ENTER(&rbrp->lock); 4697 MUTEX_ENTER(&rbrp->post_lock); 4698 4699 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4700 4701 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4702 if (rs != NPI_SUCCESS) { 4703 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4704 "nxge_disable_rxdma_channel:failed")); 4705 goto fail; 4706 } 4707 4708 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4709 4710 /* Disable interrupt */ 4711 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4712 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4713 if (rs != NPI_SUCCESS) { 4714 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4715 "nxge_rxdma_stop_channel: " 4716 "set rxdma event masks failed (channel %d)", 4717 channel)); 4718 } 4719 4720 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4721 4722 /* Reset RXDMA channel */ 4723 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4724 if (rs != NPI_SUCCESS) { 4725 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4726 "nxge_rxdma_fatal_err_recover: " 4727 " reset rxdma failed (channel %d)", channel)); 4728 goto fail; 4729 } 4730 4731 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4732 4733 mboxp = 4734 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4735 4736 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4737 rbrp->rbr_rd_index = 0; 4738 4739 rcrp->comp_rd_index = 0; 4740 rcrp->comp_wt_index = 0; 4741 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4742 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4743 #if defined(__i386) 4744 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4745 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4746 #else 4747 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4748 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4749 #endif 4750 4751 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4752 (nxge_port_rcr_size - 1); 4753 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4754 (nxge_port_rcr_size - 1); 4755 
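/* Zero the RCR completion descriptor area before re-posting buffers and restarting the channel. */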
4756 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4757 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4758 4759 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4760 4761 for (i = 0; i < rbrp->rbr_max_size; i++) { 4762 rx_msg_p = rbrp->rx_msg_ring[i]; 4763 ref_cnt = rx_msg_p->ref_cnt; 4764 if (ref_cnt != 1) { 4765 if (rx_msg_p->cur_usage_cnt != 4766 rx_msg_p->max_usage_cnt) { 4767 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4768 "buf[%d]: cur_usage_cnt = %d " 4769 "max_usage_cnt = %d\n", i, 4770 rx_msg_p->cur_usage_cnt, 4771 rx_msg_p->max_usage_cnt)); 4772 } else { 4773 /* Buffer can be re-posted */ 4774 rx_msg_p->free = B_TRUE; 4775 rx_msg_p->cur_usage_cnt = 0; 4776 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4777 rx_msg_p->pkt_buf_size = 0; 4778 } 4779 } 4780 } 4781 4782 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4783 4784 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4785 if (status != NXGE_OK) { 4786 goto fail; 4787 } 4788 4789 MUTEX_EXIT(&rbrp->post_lock); 4790 MUTEX_EXIT(&rbrp->lock); 4791 MUTEX_EXIT(&rcrp->lock); 4792 4793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4794 "Recovery Successful, RxDMAChannel#%d Restored", 4795 channel)); 4796 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4797 4798 return (NXGE_OK); 4799 fail: 4800 MUTEX_EXIT(&rbrp->post_lock); 4801 MUTEX_EXIT(&rbrp->lock); 4802 MUTEX_EXIT(&rcrp->lock); 4803 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4804 4805 return (NXGE_ERROR | rs); 4806 } 4807 4808 nxge_status_t 4809 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4810 { 4811 nxge_grp_set_t *set = &nxgep->rx_set; 4812 nxge_status_t status = NXGE_OK; 4813 int rdc; 4814 4815 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4816 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4817 "Recovering from RxPort error...")); 4818 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4819 4820 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4821 goto fail; 4822 4823 NXGE_DELAY(1000); 4824 4825 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4826 4827 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4828 if ((1 << rdc) & set->owned.map) { 4829 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4830 != NXGE_OK) { 4831 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4832 "Could not recover channel %d", rdc)); 4833 } 4834 } 4835 } 4836 4837 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4838 4839 /* Reset IPP */ 4840 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4841 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4842 "nxge_rx_port_fatal_err_recover: " 4843 "Failed to reset IPP")); 4844 goto fail; 4845 } 4846 4847 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4848 4849 /* Reset RxMAC */ 4850 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4851 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4852 "nxge_rx_port_fatal_err_recover: " 4853 "Failed to reset RxMAC")); 4854 goto fail; 4855 } 4856 4857 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4858 4859 /* Re-Initialize IPP */ 4860 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4861 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4862 "nxge_rx_port_fatal_err_recover: " 4863 "Failed to init IPP")); 4864 goto fail; 4865 } 4866 4867 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4868 4869 /* Re-Initialize RxMAC */ 4870 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4871 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4872 "nxge_rx_port_fatal_err_recover: " 4873 "Failed to reset RxMAC")); 4874 goto fail; 4875 } 4876 4877 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4878 4879 /* 
Re-enable RxMAC */ 4880 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4881 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4882 "nxge_rx_port_fatal_err_recover: " 4883 "Failed to enable RxMAC")); 4884 goto fail; 4885 } 4886 4887 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4888 "Recovery Successful, RxPort Restored")); 4889 4890 return (NXGE_OK); 4891 fail: 4892 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4893 return (status); 4894 } 4895 4896 void 4897 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4898 { 4899 rx_dma_ctl_stat_t cs; 4900 rx_ctl_dat_fifo_stat_t cdfs; 4901 4902 switch (err_id) { 4903 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4904 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4905 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4906 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4907 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4908 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4909 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4910 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4911 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4912 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4913 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4914 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4915 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4916 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4917 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4918 chan, &cs.value); 4919 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4920 cs.bits.hdw.rcr_ack_err = 1; 4921 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4922 cs.bits.hdw.dc_fifo_err = 1; 4923 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4924 cs.bits.hdw.rcr_sha_par = 1; 4925 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4926 cs.bits.hdw.rbr_pre_par = 1; 4927 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4928 cs.bits.hdw.rbr_tmout = 1; 4929 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4930 cs.bits.hdw.rsp_cnt_err = 1; 4931 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4932 cs.bits.hdw.byte_en_bus = 1; 4933 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4934 cs.bits.hdw.rsp_dat_err = 1; 4935 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4936 cs.bits.hdw.config_err = 1; 4937 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4938 cs.bits.hdw.rcrincon = 1; 4939 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4940 cs.bits.hdw.rcrfull = 1; 4941 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4942 cs.bits.hdw.rbrfull = 1; 4943 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4944 cs.bits.hdw.rbrlogpage = 1; 4945 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4946 cs.bits.hdw.cfiglogpage = 1; 4947 #if defined(__i386) 4948 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4949 cs.value); 4950 #else 4951 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4952 cs.value); 4953 #endif 4954 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4955 chan, cs.value); 4956 break; 4957 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4958 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4959 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4960 cdfs.value = 0; 4961 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4962 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4963 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4964 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4965 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4966 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4967 #if defined(__i386) 4968 cmn_err(CE_NOTE, 4969 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4970 cdfs.value); 4971 #else 4972 cmn_err(CE_NOTE, 4973 "!Write 
0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4974 cdfs.value); 4975 #endif 4976 NXGE_REG_WR64(nxgep->npi_handle, 4977 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4978 break; 4979 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4980 break; 4981 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4982 break; 4983 } 4984 } 4985 4986 static void 4987 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4988 { 4989 rxring_info_t *ring_info; 4990 int index; 4991 uint32_t chunk_size; 4992 uint64_t kaddr; 4993 uint_t num_blocks; 4994 4995 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4996 4997 if (rbr_p == NULL) { 4998 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4999 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 5000 return; 5001 } 5002 5003 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 5004 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5005 "==> nxge_rxdma_databuf_free: DDI")); 5006 return; 5007 } 5008 5009 ring_info = rbr_p->ring_info; 5010 if (ring_info == NULL) { 5011 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5012 "==> nxge_rxdma_databuf_free: NULL ring info")); 5013 return; 5014 } 5015 num_blocks = rbr_p->num_blocks; 5016 for (index = 0; index < num_blocks; index++) { 5017 kaddr = ring_info->buffer[index].kaddr; 5018 chunk_size = ring_info->buffer[index].buf_size; 5019 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5020 "==> nxge_rxdma_databuf_free: free chunk %d " 5021 "kaddrp $%p chunk size %d", 5022 index, kaddr, chunk_size)); 5023 if (kaddr == NULL) continue; 5024 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 5025 ring_info->buffer[index].kaddr = NULL; 5026 } 5027 5028 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 5029 } 5030 5031 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5032 extern void contig_mem_free(void *, size_t); 5033 #endif 5034 5035 void 5036 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 5037 { 5038 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 5039 5040 if (kaddr == NULL || !buf_size) { 5041 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5042 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 5043 kaddr, buf_size)); 5044 return; 5045 } 5046 5047 switch (alloc_type) { 5048 case KMEM_ALLOC: 5049 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5050 "==> nxge_free_buf: freeing kmem $%p size %d", 5051 kaddr, buf_size)); 5052 #if defined(__i386) 5053 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 5054 #else 5055 KMEM_FREE((void *)kaddr, buf_size); 5056 #endif 5057 break; 5058 5059 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5060 case CONTIG_MEM_ALLOC: 5061 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5062 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 5063 kaddr, buf_size)); 5064 contig_mem_free((void *)kaddr, buf_size); 5065 break; 5066 #endif 5067 5068 default: 5069 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5070 "<== nxge_free_buf: unsupported alloc type %d", 5071 alloc_type)); 5072 return; 5073 } 5074 5075 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5076 } 5077