/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * XXX: This is a tunable to limit the number of packets each interrupt
 * handles.  0 (default) means that each interrupt takes as many packets
 * as it finds.
 */
extern int		nxge_max_intr_pkts;

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t		nxge_rbr_size;
extern uint32_t		nxge_rcr_size;
extern uint32_t		nxge_rbr_spare_size;

extern uint32_t		nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx processing.
 */
extern uint32_t		nxge_max_rx_pkts;
boolean_t		nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
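 *
 * (Interpretation, based on the rx_use_bcopy handling later in this
 * file, not an authoritative specification: once RBR buffer consumption
 * crosses the "hi" threshold every packet is bcopied instead of loaning
 * the DMA buffer upstream; between "lo" and "hi" only packets no larger
 * than the configured block size type are copied.)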
71 */ 72 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 73 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 74 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 75 76 extern uint32_t nxge_cksum_offload; 77 78 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 79 static void nxge_unmap_rxdma(p_nxge_t, int); 80 81 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 82 83 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 84 static void nxge_rxdma_hw_stop(p_nxge_t, int); 85 86 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 87 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 88 uint32_t, 89 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 90 p_rx_mbox_t *); 91 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 92 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 93 94 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 95 uint16_t, 96 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 97 p_rx_rcr_ring_t *, p_rx_mbox_t *); 98 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 99 p_rx_rcr_ring_t, p_rx_mbox_t); 100 101 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 102 uint16_t, 103 p_nxge_dma_common_t *, 104 p_rx_rbr_ring_t *, uint32_t); 105 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 106 p_rx_rbr_ring_t); 107 108 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 109 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 110 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 111 112 static mblk_t * 113 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 114 115 static void nxge_receive_packet(p_nxge_t, 116 p_rx_rcr_ring_t, 117 p_rcr_entry_t, 118 boolean_t *, 119 mblk_t **, mblk_t **); 120 121 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 122 123 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 124 static void nxge_freeb(p_rx_msg_t); 125 static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 126 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 127 128 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 129 uint32_t, uint32_t); 130 131 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 132 p_rx_rbr_ring_t); 133 134 135 static nxge_status_t 136 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 137 138 nxge_status_t 139 nxge_rx_port_fatal_err_recover(p_nxge_t); 140 141 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 142 143 nxge_status_t 144 nxge_init_rxdma_channels(p_nxge_t nxgep) 145 { 146 nxge_grp_set_t *set = &nxgep->rx_set; 147 int i, count, channel; 148 nxge_grp_t *group; 149 dc_map_t map; 150 int dev_gindex; 151 152 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 153 154 if (!isLDOMguest(nxgep)) { 155 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 156 cmn_err(CE_NOTE, "hw_start_common"); 157 return (NXGE_ERROR); 158 } 159 } 160 161 /* 162 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 163 * We only have 8 hardware RDC tables, but we may have 164 * up to 16 logical (software-defined) groups of RDCS, 165 * if we make use of layer 3 & 4 hardware classification. 
166 */ 167 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 168 if ((1 << i) & set->lg.map) { 169 group = set->group[i]; 170 dev_gindex = 171 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 172 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 173 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 174 if ((1 << channel) & map) { 175 if ((nxge_grp_dc_add(nxgep, 176 group, VP_BOUND_RX, channel))) 177 goto init_rxdma_channels_exit; 178 } 179 } 180 } 181 if (++count == set->lg.count) 182 break; 183 } 184 185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 186 return (NXGE_OK); 187 188 init_rxdma_channels_exit: 189 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 190 if ((1 << i) & set->lg.map) { 191 group = set->group[i]; 192 dev_gindex = 193 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 194 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 195 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 196 if ((1 << channel) & map) { 197 nxge_grp_dc_remove(nxgep, 198 VP_BOUND_RX, channel); 199 } 200 } 201 } 202 if (++count == set->lg.count) 203 break; 204 } 205 206 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 207 return (NXGE_ERROR); 208 } 209 210 nxge_status_t 211 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 212 { 213 nxge_status_t status; 214 215 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 216 217 status = nxge_map_rxdma(nxge, channel); 218 if (status != NXGE_OK) { 219 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 220 "<== nxge_init_rxdma: status 0x%x", status)); 221 return (status); 222 } 223 224 #if defined(sun4v) 225 if (isLDOMguest(nxge)) { 226 /* set rcr_ring */ 227 p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 228 229 status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 230 if (status != NXGE_OK) { 231 nxge_unmap_rxdma(nxge, channel); 232 return (status); 233 } 234 } 235 #endif 236 237 status = nxge_rxdma_hw_start(nxge, channel); 238 if (status != NXGE_OK) { 239 nxge_unmap_rxdma(nxge, channel); 240 } 241 242 if (!nxge->statsp->rdc_ksp[channel]) 243 nxge_setup_rdc_kstats(nxge, channel); 244 245 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 246 "<== nxge_init_rxdma_channel: status 0x%x", status)); 247 248 return (status); 249 } 250 251 void 252 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 253 { 254 nxge_grp_set_t *set = &nxgep->rx_set; 255 int rdc; 256 257 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 258 259 if (set->owned.map == 0) { 260 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 261 "nxge_uninit_rxdma_channels: no channels")); 262 return; 263 } 264 265 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 266 if ((1 << rdc) & set->owned.map) { 267 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 268 } 269 } 270 271 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 272 } 273 274 void 275 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 276 { 277 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 278 279 if (nxgep->statsp->rdc_ksp[channel]) { 280 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 281 nxgep->statsp->rdc_ksp[channel] = 0; 282 } 283 284 nxge_rxdma_hw_stop(nxgep, channel); 285 nxge_unmap_rxdma(nxgep, channel); 286 287 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 288 } 289 290 nxge_status_t 291 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 292 { 293 npi_handle_t handle; 294 npi_status_t rs = NPI_SUCCESS; 295 nxge_status_t status = NXGE_OK; 296 297 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 
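	/*
	 * Ask the NPI layer to reset this RDC; a failing NPI status is
	 * folded into the NXGE error code returned below.
	 */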
298 299 handle = NXGE_DEV_NPI_HANDLE(nxgep); 300 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 301 302 if (rs != NPI_SUCCESS) { 303 status = NXGE_ERROR | rs; 304 } 305 306 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 307 308 return (status); 309 } 310 311 void 312 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 313 { 314 nxge_grp_set_t *set = &nxgep->rx_set; 315 int rdc; 316 317 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 318 319 if (!isLDOMguest(nxgep)) { 320 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 321 (void) npi_rxdma_dump_fzc_regs(handle); 322 } 323 324 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 325 NXGE_DEBUG_MSG((nxgep, TX_CTL, 326 "nxge_rxdma_regs_dump_channels: " 327 "NULL ring pointer(s)")); 328 return; 329 } 330 331 if (set->owned.map == 0) { 332 NXGE_DEBUG_MSG((nxgep, RX_CTL, 333 "nxge_rxdma_regs_dump_channels: no channels")); 334 return; 335 } 336 337 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 338 if ((1 << rdc) & set->owned.map) { 339 rx_rbr_ring_t *ring = 340 nxgep->rx_rbr_rings->rbr_rings[rdc]; 341 if (ring) { 342 (void) nxge_dump_rxdma_channel(nxgep, rdc); 343 } 344 } 345 } 346 347 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 348 } 349 350 nxge_status_t 351 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 352 { 353 npi_handle_t handle; 354 npi_status_t rs = NPI_SUCCESS; 355 nxge_status_t status = NXGE_OK; 356 357 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 358 359 handle = NXGE_DEV_NPI_HANDLE(nxgep); 360 rs = npi_rxdma_dump_rdc_regs(handle, channel); 361 362 if (rs != NPI_SUCCESS) { 363 status = NXGE_ERROR | rs; 364 } 365 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 366 return (status); 367 } 368 369 nxge_status_t 370 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 371 p_rx_dma_ent_msk_t mask_p) 372 { 373 npi_handle_t handle; 374 npi_status_t rs = NPI_SUCCESS; 375 nxge_status_t status = NXGE_OK; 376 377 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 378 "<== nxge_init_rxdma_channel_event_mask")); 379 380 handle = NXGE_DEV_NPI_HANDLE(nxgep); 381 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 382 if (rs != NPI_SUCCESS) { 383 status = NXGE_ERROR | rs; 384 } 385 386 return (status); 387 } 388 389 nxge_status_t 390 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 391 p_rx_dma_ctl_stat_t cs_p) 392 { 393 npi_handle_t handle; 394 npi_status_t rs = NPI_SUCCESS; 395 nxge_status_t status = NXGE_OK; 396 397 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 398 "<== nxge_init_rxdma_channel_cntl_stat")); 399 400 handle = NXGE_DEV_NPI_HANDLE(nxgep); 401 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 402 403 if (rs != NPI_SUCCESS) { 404 status = NXGE_ERROR | rs; 405 } 406 407 return (status); 408 } 409 410 /* 411 * nxge_rxdma_cfg_rdcgrp_default_rdc 412 * 413 * Set the default RDC for an RDC Group (Table) 414 * 415 * Arguments: 416 * nxgep 417 * rdcgrp The group to modify 418 * rdc The new default RDC. 
419 * 420 * Notes: 421 * 422 * NPI/NXGE function calls: 423 * npi_rxdma_cfg_rdc_table_default_rdc() 424 * 425 * Registers accessed: 426 * RDC_TBL_REG: FZC_ZCP + 0x10000 427 * 428 * Context: 429 * Service domain 430 */ 431 nxge_status_t 432 nxge_rxdma_cfg_rdcgrp_default_rdc( 433 p_nxge_t nxgep, 434 uint8_t rdcgrp, 435 uint8_t rdc) 436 { 437 npi_handle_t handle; 438 npi_status_t rs = NPI_SUCCESS; 439 p_nxge_dma_pt_cfg_t p_dma_cfgp; 440 p_nxge_rdc_grp_t rdc_grp_p; 441 uint8_t actual_rdcgrp, actual_rdc; 442 443 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 444 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 445 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 446 447 handle = NXGE_DEV_NPI_HANDLE(nxgep); 448 449 /* 450 * This has to be rewritten. Do we even allow this anymore? 451 */ 452 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 453 RDC_MAP_IN(rdc_grp_p->map, rdc); 454 rdc_grp_p->def_rdc = rdc; 455 456 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 457 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 458 459 rs = npi_rxdma_cfg_rdc_table_default_rdc( 460 handle, actual_rdcgrp, actual_rdc); 461 462 if (rs != NPI_SUCCESS) { 463 return (NXGE_ERROR | rs); 464 } 465 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 466 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 467 return (NXGE_OK); 468 } 469 470 nxge_status_t 471 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 472 { 473 npi_handle_t handle; 474 475 uint8_t actual_rdc; 476 npi_status_t rs = NPI_SUCCESS; 477 478 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 479 " ==> nxge_rxdma_cfg_port_default_rdc")); 480 481 handle = NXGE_DEV_NPI_HANDLE(nxgep); 482 actual_rdc = rdc; /* XXX Hack! */ 483 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 484 485 486 if (rs != NPI_SUCCESS) { 487 return (NXGE_ERROR | rs); 488 } 489 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 490 " <== nxge_rxdma_cfg_port_default_rdc")); 491 492 return (NXGE_OK); 493 } 494 495 nxge_status_t 496 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 497 uint16_t pkts) 498 { 499 npi_status_t rs = NPI_SUCCESS; 500 npi_handle_t handle; 501 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 502 " ==> nxge_rxdma_cfg_rcr_threshold")); 503 handle = NXGE_DEV_NPI_HANDLE(nxgep); 504 505 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 506 507 if (rs != NPI_SUCCESS) { 508 return (NXGE_ERROR | rs); 509 } 510 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 511 return (NXGE_OK); 512 } 513 514 nxge_status_t 515 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 516 uint16_t tout, uint8_t enable) 517 { 518 npi_status_t rs = NPI_SUCCESS; 519 npi_handle_t handle; 520 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 521 handle = NXGE_DEV_NPI_HANDLE(nxgep); 522 if (enable == 0) { 523 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 524 } else { 525 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 526 tout); 527 } 528 529 if (rs != NPI_SUCCESS) { 530 return (NXGE_ERROR | rs); 531 } 532 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 533 return (NXGE_OK); 534 } 535 536 nxge_status_t 537 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 538 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 539 { 540 npi_handle_t handle; 541 rdc_desc_cfg_t rdc_desc; 542 p_rcrcfig_b_t cfgb_p; 543 npi_status_t rs = NPI_SUCCESS; 544 545 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 546 handle = NXGE_DEV_NPI_HANDLE(nxgep); 547 /* 548 * Use configuration data composed at init time. 
549 * Write to hardware the receive ring configurations. 550 */ 551 rdc_desc.mbox_enable = 1; 552 rdc_desc.mbox_addr = mbox_p->mbox_addr; 553 NXGE_DEBUG_MSG((nxgep, RX_CTL, 554 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 555 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 556 557 rdc_desc.rbr_len = rbr_p->rbb_max; 558 rdc_desc.rbr_addr = rbr_p->rbr_addr; 559 560 switch (nxgep->rx_bksize_code) { 561 case RBR_BKSIZE_4K: 562 rdc_desc.page_size = SIZE_4KB; 563 break; 564 case RBR_BKSIZE_8K: 565 rdc_desc.page_size = SIZE_8KB; 566 break; 567 case RBR_BKSIZE_16K: 568 rdc_desc.page_size = SIZE_16KB; 569 break; 570 case RBR_BKSIZE_32K: 571 rdc_desc.page_size = SIZE_32KB; 572 break; 573 } 574 575 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 576 rdc_desc.valid0 = 1; 577 578 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 579 rdc_desc.valid1 = 1; 580 581 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 582 rdc_desc.valid2 = 1; 583 584 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 585 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 586 587 rdc_desc.rcr_len = rcr_p->comp_size; 588 rdc_desc.rcr_addr = rcr_p->rcr_addr; 589 590 cfgb_p = &(rcr_p->rcr_cfgb); 591 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 592 /* For now, disable this timeout in a guest domain. */ 593 if (isLDOMguest(nxgep)) { 594 rdc_desc.rcr_timeout = 0; 595 rdc_desc.rcr_timeout_enable = 0; 596 } else { 597 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 598 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 599 } 600 601 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 602 "rbr_len qlen %d pagesize code %d rcr_len %d", 603 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 604 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 605 "size 0 %d size 1 %d size 2 %d", 606 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 607 rbr_p->npi_pkt_buf_size2)); 608 609 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 610 if (rs != NPI_SUCCESS) { 611 return (NXGE_ERROR | rs); 612 } 613 614 /* 615 * Enable the timeout and threshold. 616 */ 617 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 618 rdc_desc.rcr_threshold); 619 if (rs != NPI_SUCCESS) { 620 return (NXGE_ERROR | rs); 621 } 622 623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 624 rdc_desc.rcr_timeout); 625 if (rs != NPI_SUCCESS) { 626 return (NXGE_ERROR | rs); 627 } 628 629 /* Enable the DMA */ 630 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 631 if (rs != NPI_SUCCESS) { 632 return (NXGE_ERROR | rs); 633 } 634 635 /* Kick the DMA engine. 
*/ 636 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 637 /* Clear the rbr empty bit */ 638 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 639 640 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 641 642 return (NXGE_OK); 643 } 644 645 nxge_status_t 646 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 647 { 648 npi_handle_t handle; 649 npi_status_t rs = NPI_SUCCESS; 650 651 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 652 handle = NXGE_DEV_NPI_HANDLE(nxgep); 653 654 /* disable the DMA */ 655 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 656 if (rs != NPI_SUCCESS) { 657 NXGE_DEBUG_MSG((nxgep, RX_CTL, 658 "<== nxge_disable_rxdma_channel:failed (0x%x)", 659 rs)); 660 return (NXGE_ERROR | rs); 661 } 662 663 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 664 return (NXGE_OK); 665 } 666 667 nxge_status_t 668 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 669 { 670 npi_handle_t handle; 671 nxge_status_t status = NXGE_OK; 672 673 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 674 "<== nxge_init_rxdma_channel_rcrflush")); 675 676 handle = NXGE_DEV_NPI_HANDLE(nxgep); 677 npi_rxdma_rdc_rcr_flush(handle, channel); 678 679 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 680 "<== nxge_init_rxdma_channel_rcrflsh")); 681 return (status); 682 683 } 684 685 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 686 687 #define TO_LEFT -1 688 #define TO_RIGHT 1 689 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 690 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 691 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 692 #define NO_HINT 0xffffffff 693 694 /*ARGSUSED*/ 695 nxge_status_t 696 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 697 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 698 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 699 { 700 int bufsize; 701 uint64_t pktbuf_pp; 702 uint64_t dvma_addr; 703 rxring_info_t *ring_info; 704 int base_side, end_side; 705 int r_index, l_index, anchor_index; 706 int found, search_done; 707 uint32_t offset, chunk_size, block_size, page_size_mask; 708 uint32_t chunk_index, block_index, total_index; 709 int max_iterations, iteration; 710 rxbuf_index_info_t *bufinfo; 711 712 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 713 714 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 715 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 716 pkt_buf_addr_pp, 717 pktbufsz_type)); 718 #if defined(__i386) 719 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 720 #else 721 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 722 #endif 723 724 switch (pktbufsz_type) { 725 case 0: 726 bufsize = rbr_p->pkt_buf_size0; 727 break; 728 case 1: 729 bufsize = rbr_p->pkt_buf_size1; 730 break; 731 case 2: 732 bufsize = rbr_p->pkt_buf_size2; 733 break; 734 case RCR_SINGLE_BLOCK: 735 bufsize = 0; 736 anchor_index = 0; 737 break; 738 default: 739 return (NXGE_ERROR); 740 } 741 742 if (rbr_p->num_blocks == 1) { 743 anchor_index = 0; 744 ring_info = rbr_p->ring_info; 745 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 746 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 747 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 748 "buf_pp $%p btype %d anchor_index %d " 749 "bufinfo $%p", 750 pkt_buf_addr_pp, 751 pktbufsz_type, 752 anchor_index, 753 bufinfo)); 754 755 goto found_index; 756 } 757 758 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 759 "==> nxge_rxbuf_pp_to_vp: " 760 "buf_pp $%p btype %d anchor_index %d", 761 pkt_buf_addr_pp, 762 pktbufsz_type, 763 anchor_index)); 764 765 ring_info = rbr_p->ring_info; 766 found = B_FALSE; 767 bufinfo = (rxbuf_index_info_t 
*)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently.  This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen.  The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept.  The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted.  It is assumed
	 * that only a single block is in use for a given
	 * buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, reset the hint for this size.
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size.  The whole information array has to be
		 * searched.  The search is a binary search, which
		 * assumes that the information is already sorted
		 * in increasing order:
		 * info[0] < info[1] < info[2] ....
< info[n-1] 817 * where n is the size of the information array 818 */ 819 r_index = rbr_p->num_blocks - 1; 820 l_index = 0; 821 search_done = B_FALSE; 822 anchor_index = MID_INDEX(r_index, l_index); 823 while (search_done == B_FALSE) { 824 if ((r_index == l_index) || 825 (iteration >= max_iterations)) 826 search_done = B_TRUE; 827 end_side = TO_RIGHT; /* to the right */ 828 base_side = TO_LEFT; /* to the left */ 829 /* read the DVMA address information and sort it */ 830 dvma_addr = bufinfo[anchor_index].dvma_addr; 831 chunk_size = bufinfo[anchor_index].buf_size; 832 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 833 "==> nxge_rxbuf_pp_to_vp: (searching)" 834 "buf_pp $%p btype %d " 835 "anchor_index %d chunk_size %d dvmaaddr $%p", 836 pkt_buf_addr_pp, 837 pktbufsz_type, 838 anchor_index, 839 chunk_size, 840 dvma_addr)); 841 842 if (pktbuf_pp >= dvma_addr) 843 base_side = TO_RIGHT; /* to the right */ 844 if (pktbuf_pp < (dvma_addr + chunk_size)) 845 end_side = TO_LEFT; /* to the left */ 846 847 switch (base_side + end_side) { 848 case IN_MIDDLE: 849 /* found */ 850 found = B_TRUE; 851 search_done = B_TRUE; 852 if ((pktbuf_pp + bufsize) < 853 (dvma_addr + chunk_size)) 854 ring_info->hint[pktbufsz_type] = 855 bufinfo[anchor_index].buf_index; 856 break; 857 case BOTH_RIGHT: 858 /* not found: go to the right */ 859 l_index = anchor_index + 1; 860 anchor_index = MID_INDEX(r_index, l_index); 861 break; 862 863 case BOTH_LEFT: 864 /* not found: go to the left */ 865 r_index = anchor_index - 1; 866 anchor_index = MID_INDEX(r_index, l_index); 867 break; 868 default: /* should not come here */ 869 return (NXGE_ERROR); 870 } 871 iteration++; 872 } 873 874 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 875 "==> nxge_rxbuf_pp_to_vp: (search done)" 876 "buf_pp $%p btype %d anchor_index %d", 877 pkt_buf_addr_pp, 878 pktbufsz_type, 879 anchor_index)); 880 } 881 882 if (found == B_FALSE) { 883 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 884 "==> nxge_rxbuf_pp_to_vp: (search failed)" 885 "buf_pp $%p btype %d anchor_index %d", 886 pkt_buf_addr_pp, 887 pktbufsz_type, 888 anchor_index)); 889 return (NXGE_ERROR); 890 } 891 892 found_index: 893 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 894 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 895 "buf_pp $%p btype %d bufsize %d anchor_index %d", 896 pkt_buf_addr_pp, 897 pktbufsz_type, 898 bufsize, 899 anchor_index)); 900 901 /* index of the first block in this chunk */ 902 chunk_index = bufinfo[anchor_index].start_index; 903 dvma_addr = bufinfo[anchor_index].dvma_addr; 904 page_size_mask = ring_info->block_size_mask; 905 906 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 907 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 908 "buf_pp $%p btype %d bufsize %d " 909 "anchor_index %d chunk_index %d dvma $%p", 910 pkt_buf_addr_pp, 911 pktbufsz_type, 912 bufsize, 913 anchor_index, 914 chunk_index, 915 dvma_addr)); 916 917 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 918 block_size = rbr_p->block_size; /* System block(page) size */ 919 920 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 921 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 922 "buf_pp $%p btype %d bufsize %d " 923 "anchor_index %d chunk_index %d dvma $%p " 924 "offset %d block_size %d", 925 pkt_buf_addr_pp, 926 pktbufsz_type, 927 bufsize, 928 anchor_index, 929 chunk_index, 930 dvma_addr, 931 offset, 932 block_size)); 933 934 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 935 936 block_index = (offset / block_size); /* index within chunk */ 937 total_index = chunk_index + block_index; 938 939 940 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 941 "==> nxge_rxbuf_pp_to_vp: " 942 
"total_index %d dvma_addr $%p " 943 "offset %d block_size %d " 944 "block_index %d ", 945 total_index, dvma_addr, 946 offset, block_size, 947 block_index)); 948 #if defined(__i386) 949 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 950 (uint32_t)offset); 951 #else 952 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 953 (uint64_t)offset); 954 #endif 955 956 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 957 "==> nxge_rxbuf_pp_to_vp: " 958 "total_index %d dvma_addr $%p " 959 "offset %d block_size %d " 960 "block_index %d " 961 "*pkt_buf_addr_p $%p", 962 total_index, dvma_addr, 963 offset, block_size, 964 block_index, 965 *pkt_buf_addr_p)); 966 967 968 *msg_index = total_index; 969 *bufoffset = (offset & page_size_mask); 970 971 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 972 "==> nxge_rxbuf_pp_to_vp: get msg index: " 973 "msg_index %d bufoffset_index %d", 974 *msg_index, 975 *bufoffset)); 976 977 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 978 979 return (NXGE_OK); 980 } 981 982 /* 983 * used by quick sort (qsort) function 984 * to perform comparison 985 */ 986 static int 987 nxge_sort_compare(const void *p1, const void *p2) 988 { 989 990 rxbuf_index_info_t *a, *b; 991 992 a = (rxbuf_index_info_t *)p1; 993 b = (rxbuf_index_info_t *)p2; 994 995 if (a->dvma_addr > b->dvma_addr) 996 return (1); 997 if (a->dvma_addr < b->dvma_addr) 998 return (-1); 999 return (0); 1000 } 1001 1002 1003 1004 /* 1005 * grabbed this sort implementation from common/syscall/avl.c 1006 * 1007 */ 1008 /* 1009 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 1010 * v = Ptr to array/vector of objs 1011 * n = # objs in the array 1012 * s = size of each obj (must be multiples of a word size) 1013 * f = ptr to function to compare two objs 1014 * returns (-1 = less than, 0 = equal, 1 = greater than 1015 */ 1016 void 1017 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1018 { 1019 int g, i, j, ii; 1020 unsigned int *p1, *p2; 1021 unsigned int tmp; 1022 1023 /* No work to do */ 1024 if (v == NULL || n <= 1) 1025 return; 1026 /* Sanity check on arguments */ 1027 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1028 ASSERT(s > 0); 1029 1030 for (g = n / 2; g > 0; g /= 2) { 1031 for (i = g; i < n; i++) { 1032 for (j = i - g; j >= 0 && 1033 (*f)(v + j * s, v + (j + g) * s) == 1; 1034 j -= g) { 1035 p1 = (unsigned *)(v + j * s); 1036 p2 = (unsigned *)(v + (j + g) * s); 1037 for (ii = 0; ii < s / 4; ii++) { 1038 tmp = *p1; 1039 *p1++ = *p2; 1040 *p2++ = tmp; 1041 } 1042 } 1043 } 1044 } 1045 } 1046 1047 /* 1048 * Initialize data structures required for rxdma 1049 * buffer dvma->vmem address lookup 1050 */ 1051 /*ARGSUSED*/ 1052 static nxge_status_t 1053 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1054 { 1055 1056 int index; 1057 rxring_info_t *ring_info; 1058 int max_iteration = 0, max_index = 0; 1059 1060 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1061 1062 ring_info = rbrp->ring_info; 1063 ring_info->hint[0] = NO_HINT; 1064 ring_info->hint[1] = NO_HINT; 1065 ring_info->hint[2] = NO_HINT; 1066 max_index = rbrp->num_blocks; 1067 1068 /* read the DVMA address information and sort it */ 1069 /* do init of the information array */ 1070 1071 1072 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1073 " nxge_rxbuf_index_info_init Sort ptrs")); 1074 1075 /* sort the array */ 1076 nxge_ksort((void *)ring_info->buffer, max_index, 1077 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1078 1079 1080 1081 for (index = 0; index < max_index; index++) { 1082 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1083 " nxge_rxbuf_index_info_init: sorted chunk %d " 1084 " ioaddr $%p kaddr $%p size %x", 1085 index, ring_info->buffer[index].dvma_addr, 1086 ring_info->buffer[index].kaddr, 1087 ring_info->buffer[index].buf_size)); 1088 } 1089 1090 max_iteration = 0; 1091 while (max_index >= (1ULL << max_iteration)) 1092 max_iteration++; 1093 ring_info->max_iterations = max_iteration + 1; 1094 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1095 " nxge_rxbuf_index_info_init Find max iter %d", 1096 ring_info->max_iterations)); 1097 1098 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1099 return (NXGE_OK); 1100 } 1101 1102 /* ARGSUSED */ 1103 void 1104 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1105 { 1106 #ifdef NXGE_DEBUG 1107 1108 uint32_t bptr; 1109 uint64_t pp; 1110 1111 bptr = entry_p->bits.hdw.pkt_buf_addr; 1112 1113 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1114 "\trcr entry $%p " 1115 "\trcr entry 0x%0llx " 1116 "\trcr entry 0x%08x " 1117 "\trcr entry 0x%08x " 1118 "\tvalue 0x%0llx\n" 1119 "\tmulti = %d\n" 1120 "\tpkt_type = 0x%x\n" 1121 "\tzero_copy = %d\n" 1122 "\tnoport = %d\n" 1123 "\tpromis = %d\n" 1124 "\terror = 0x%04x\n" 1125 "\tdcf_err = 0x%01x\n" 1126 "\tl2_len = %d\n" 1127 "\tpktbufsize = %d\n" 1128 "\tpkt_buf_addr = $%p\n" 1129 "\tpkt_buf_addr (<< 6) = $%p\n", 1130 entry_p, 1131 *(int64_t *)entry_p, 1132 *(int32_t *)entry_p, 1133 *(int32_t *)((char *)entry_p + 32), 1134 entry_p->value, 1135 entry_p->bits.hdw.multi, 1136 entry_p->bits.hdw.pkt_type, 1137 entry_p->bits.hdw.zero_copy, 1138 entry_p->bits.hdw.noport, 1139 entry_p->bits.hdw.promis, 1140 entry_p->bits.hdw.error, 1141 entry_p->bits.hdw.dcf_err, 1142 entry_p->bits.hdw.l2_len, 1143 entry_p->bits.hdw.pktbufsz, 1144 bptr, 1145 entry_p->bits.ldw.pkt_buf_addr)); 1146 1147 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1148 RCR_PKT_BUF_ADDR_SHIFT; 1149 1150 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1151 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1152 #endif 1153 } 1154 1155 void 1156 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1157 { 1158 npi_handle_t handle; 1159 rbr_stat_t rbr_stat; 1160 addr44_t hd_addr; 1161 addr44_t tail_addr; 1162 uint16_t qlen; 1163 1164 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1165 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1166 1167 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1168 1169 /* RBR head */ 1170 hd_addr.addr = 0; 1171 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1172 #if defined(__i386) 1173 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1174 (void *)(uint32_t)hd_addr.addr); 1175 #else 1176 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1177 (void *)hd_addr.addr); 1178 #endif 1179 1180 /* RBR stats */ 1181 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1182 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1183 1184 /* RCR tail */ 1185 tail_addr.addr = 0; 1186 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1187 #if defined(__i386) 1188 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1189 (void *)(uint32_t)tail_addr.addr); 1190 #else 1191 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1192 (void *)tail_addr.addr); 1193 #endif 1194 1195 /* RCR qlen */ 1196 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1197 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1198 1199 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1200 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1201 } 1202 1203 nxge_status_t 1204 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1205 { 1206 nxge_grp_set_t 
*set = &nxgep->rx_set; 1207 nxge_status_t status; 1208 npi_status_t rs; 1209 int rdc; 1210 1211 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1212 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1213 1214 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1215 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1216 "<== nxge_rxdma_mode: not initialized")); 1217 return (NXGE_ERROR); 1218 } 1219 1220 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1221 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1222 "<== nxge_tx_port_fatal_err_recover: " 1223 "NULL ring pointer(s)")); 1224 return (NXGE_ERROR); 1225 } 1226 1227 if (set->owned.map == 0) { 1228 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1229 "nxge_rxdma_regs_dump_channels: no channels")); 1230 return (NULL); 1231 } 1232 1233 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1234 if ((1 << rdc) & set->owned.map) { 1235 rx_rbr_ring_t *ring = 1236 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1237 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1238 if (ring) { 1239 if (enable) { 1240 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1241 "==> nxge_rxdma_hw_mode: " 1242 "channel %d (enable)", rdc)); 1243 rs = npi_rxdma_cfg_rdc_enable 1244 (handle, rdc); 1245 } else { 1246 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1247 "==> nxge_rxdma_hw_mode: " 1248 "channel %d disable)", rdc)); 1249 rs = npi_rxdma_cfg_rdc_disable 1250 (handle, rdc); 1251 } 1252 } 1253 } 1254 } 1255 1256 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1257 1258 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1259 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1260 1261 return (status); 1262 } 1263 1264 void 1265 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1266 { 1267 npi_handle_t handle; 1268 1269 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1270 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1271 1272 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1273 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1274 1275 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1276 } 1277 1278 void 1279 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1280 { 1281 npi_handle_t handle; 1282 1283 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1284 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1285 1286 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1287 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1288 1289 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1290 } 1291 1292 void 1293 nxge_hw_start_rx(p_nxge_t nxgep) 1294 { 1295 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1296 1297 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1298 (void) nxge_rx_mac_enable(nxgep); 1299 1300 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1301 } 1302 1303 /*ARGSUSED*/ 1304 void 1305 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1306 { 1307 nxge_grp_set_t *set = &nxgep->rx_set; 1308 int rdc; 1309 1310 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1311 1312 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1313 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1314 "<== nxge_tx_port_fatal_err_recover: " 1315 "NULL ring pointer(s)")); 1316 return; 1317 } 1318 1319 if (set->owned.map == 0) { 1320 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1321 "nxge_rxdma_regs_dump_channels: no channels")); 1322 return; 1323 } 1324 1325 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1326 if ((1 << rdc) & set->owned.map) { 1327 rx_rbr_ring_t *ring = 1328 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1329 if (ring) { 1330 nxge_rxdma_hw_stop(nxgep, rdc); 1331 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1332 "==> nxge_fixup_rxdma_rings: " 1333 "channel %d ring $%px", 1334 
rdc, ring));
				(void) nxge_rxdma_fixup_channel
				    (nxgep, rdc, rdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}

void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	int		i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
	i = nxge_rxdma_get_ring_index(nxgep, channel);
	if (i < 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_fix_channel: no entry found"));
		return;
	}

	nxge_rxdma_fixup_channel(nxgep, channel, i);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
}

void
nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
{
	int			ndmas;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p;
	p_nxge_dma_pool_t	dma_buf_poolp;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	p_nxge_dma_common_t	dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

	(void) nxge_rxdma_stop_channel(nxgep, channel);

	dma_buf_poolp = nxgep->rx_buf_pool_p;
	dma_cntl_poolp = nxgep->rx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: buf not allocated"));
		return;
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: no dma allocated"));
		return;
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

	/* Reinitialize the receive block and completion rings */
	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];


	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	status = nxge_rxdma_start_channel(nxgep, channel,
	    rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto nxge_rxdma_fixup_channel_fail;
	}

nxge_rxdma_fixup_channel_fail:
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

/*
 * Convert an absolute RDC number to a Receive Buffer Ring index.  That is,
 * map <channel> to an index into nxgep->rx_rbr_rings.
1440 * (device ring index -> port ring index) 1441 */ 1442 int 1443 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1444 { 1445 int i, ndmas; 1446 uint16_t rdc; 1447 p_rx_rbr_rings_t rx_rbr_rings; 1448 p_rx_rbr_ring_t *rbr_rings; 1449 1450 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1451 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1452 1453 rx_rbr_rings = nxgep->rx_rbr_rings; 1454 if (rx_rbr_rings == NULL) { 1455 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1456 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1457 return (-1); 1458 } 1459 ndmas = rx_rbr_rings->ndmas; 1460 if (!ndmas) { 1461 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1462 "<== nxge_rxdma_get_ring_index: no channel")); 1463 return (-1); 1464 } 1465 1466 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1467 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1468 1469 rbr_rings = rx_rbr_rings->rbr_rings; 1470 for (i = 0; i < ndmas; i++) { 1471 rdc = rbr_rings[i]->rdc; 1472 if (channel == rdc) { 1473 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1474 "==> nxge_rxdma_get_rbr_ring: channel %d " 1475 "(index %d) ring %d", channel, i, rbr_rings[i])); 1476 return (i); 1477 } 1478 } 1479 1480 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1481 "<== nxge_rxdma_get_rbr_ring_index: not found")); 1482 1483 return (-1); 1484 } 1485 1486 p_rx_rbr_ring_t 1487 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1488 { 1489 nxge_grp_set_t *set = &nxgep->rx_set; 1490 nxge_channel_t rdc; 1491 1492 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1493 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1494 1495 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1496 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1497 "<== nxge_rxdma_get_rbr_ring: " 1498 "NULL ring pointer(s)")); 1499 return (NULL); 1500 } 1501 1502 if (set->owned.map == 0) { 1503 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1504 "<== nxge_rxdma_get_rbr_ring: no channels")); 1505 return (NULL); 1506 } 1507 1508 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1509 if ((1 << rdc) & set->owned.map) { 1510 rx_rbr_ring_t *ring = 1511 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1512 if (ring) { 1513 if (channel == ring->rdc) { 1514 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1515 "==> nxge_rxdma_get_rbr_ring: " 1516 "channel %d ring $%p", rdc, ring)); 1517 return (ring); 1518 } 1519 } 1520 } 1521 } 1522 1523 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1524 "<== nxge_rxdma_get_rbr_ring: not found")); 1525 1526 return (NULL); 1527 } 1528 1529 p_rx_rcr_ring_t 1530 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1531 { 1532 nxge_grp_set_t *set = &nxgep->rx_set; 1533 nxge_channel_t rdc; 1534 1535 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1536 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1537 1538 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1539 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1540 "<== nxge_rxdma_get_rcr_ring: " 1541 "NULL ring pointer(s)")); 1542 return (NULL); 1543 } 1544 1545 if (set->owned.map == 0) { 1546 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1547 "<== nxge_rxdma_get_rbr_ring: no channels")); 1548 return (NULL); 1549 } 1550 1551 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1552 if ((1 << rdc) & set->owned.map) { 1553 rx_rcr_ring_t *ring = 1554 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1555 if (ring) { 1556 if (channel == ring->rdc) { 1557 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1558 "==> nxge_rxdma_get_rcr_ring: " 1559 "channel %d ring $%p", rdc, ring)); 1560 return (ring); 1561 } 1562 } 1563 } 1564 } 1565 1566 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1567 "<== nxge_rxdma_get_rcr_ring: not found")); 1568 1569 return (NULL); 1570 } 1571 1572 /* 1573 * Static functions start here. 
1574 */ 1575 static p_rx_msg_t 1576 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1577 { 1578 p_rx_msg_t nxge_mp = NULL; 1579 p_nxge_dma_common_t dmamsg_p; 1580 uchar_t *buffer; 1581 1582 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1583 if (nxge_mp == NULL) { 1584 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1585 "Allocation of a rx msg failed.")); 1586 goto nxge_allocb_exit; 1587 } 1588 1589 nxge_mp->use_buf_pool = B_FALSE; 1590 if (dmabuf_p) { 1591 nxge_mp->use_buf_pool = B_TRUE; 1592 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1593 *dmamsg_p = *dmabuf_p; 1594 dmamsg_p->nblocks = 1; 1595 dmamsg_p->block_size = size; 1596 dmamsg_p->alength = size; 1597 buffer = (uchar_t *)dmabuf_p->kaddrp; 1598 1599 dmabuf_p->kaddrp = (void *) 1600 ((char *)dmabuf_p->kaddrp + size); 1601 dmabuf_p->ioaddr_pp = (void *) 1602 ((char *)dmabuf_p->ioaddr_pp + size); 1603 dmabuf_p->alength -= size; 1604 dmabuf_p->offset += size; 1605 dmabuf_p->dma_cookie.dmac_laddress += size; 1606 dmabuf_p->dma_cookie.dmac_size -= size; 1607 1608 } else { 1609 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1610 if (buffer == NULL) { 1611 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1612 "Allocation of a receive page failed.")); 1613 goto nxge_allocb_fail1; 1614 } 1615 } 1616 1617 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1618 if (nxge_mp->rx_mblk_p == NULL) { 1619 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1620 goto nxge_allocb_fail2; 1621 } 1622 1623 nxge_mp->buffer = buffer; 1624 nxge_mp->block_size = size; 1625 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1626 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1627 nxge_mp->ref_cnt = 1; 1628 nxge_mp->free = B_TRUE; 1629 nxge_mp->rx_use_bcopy = B_FALSE; 1630 1631 atomic_inc_32(&nxge_mblks_pending); 1632 1633 goto nxge_allocb_exit; 1634 1635 nxge_allocb_fail2: 1636 if (!nxge_mp->use_buf_pool) { 1637 KMEM_FREE(buffer, size); 1638 } 1639 1640 nxge_allocb_fail1: 1641 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1642 nxge_mp = NULL; 1643 1644 nxge_allocb_exit: 1645 return (nxge_mp); 1646 } 1647 1648 p_mblk_t 1649 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1650 { 1651 p_mblk_t mp; 1652 1653 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1654 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1655 "offset = 0x%08X " 1656 "size = 0x%08X", 1657 nxge_mp, offset, size)); 1658 1659 mp = desballoc(&nxge_mp->buffer[offset], size, 1660 0, &nxge_mp->freeb); 1661 if (mp == NULL) { 1662 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1663 goto nxge_dupb_exit; 1664 } 1665 atomic_inc_32(&nxge_mp->ref_cnt); 1666 1667 1668 nxge_dupb_exit: 1669 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1670 nxge_mp)); 1671 return (mp); 1672 } 1673 1674 p_mblk_t 1675 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1676 { 1677 p_mblk_t mp; 1678 uchar_t *dp; 1679 1680 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1681 if (mp == NULL) { 1682 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1683 goto nxge_dupb_bcopy_exit; 1684 } 1685 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1686 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1687 mp->b_wptr = dp + size; 1688 1689 nxge_dupb_bcopy_exit: 1690 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1691 nxge_mp)); 1692 return (mp); 1693 } 1694 1695 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1696 p_rx_msg_t rx_msg_p); 1697 1698 void 1699 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1700 { 1701 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, "==> nxge_post_page")); 1702 1703 /* Reuse this buffer */ 1704 rx_msg_p->free = B_FALSE; 1705 rx_msg_p->cur_usage_cnt = 0; 1706 rx_msg_p->max_usage_cnt = 0; 1707 rx_msg_p->pkt_buf_size = 0; 1708 1709 if (rx_rbr_p->rbr_use_bcopy) { 1710 rx_msg_p->rx_use_bcopy = B_FALSE; 1711 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1712 } 1713 1714 /* 1715 * Get the rbr header pointer and its offset index. 1716 */ 1717 MUTEX_ENTER(&rx_rbr_p->post_lock); 1718 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1719 rx_rbr_p->rbr_wrap_mask); 1720 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1721 MUTEX_EXIT(&rx_rbr_p->post_lock); 1722 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1723 rx_rbr_p->rdc, 1); 1724 1725 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1726 "<== nxge_post_page (channel %d post_next_index %d)", 1727 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1728 1729 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1730 } 1731 1732 void 1733 nxge_freeb(p_rx_msg_t rx_msg_p) 1734 { 1735 size_t size; 1736 uchar_t *buffer = NULL; 1737 int ref_cnt; 1738 boolean_t free_state = B_FALSE; 1739 1740 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1741 1742 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1743 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1744 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1745 rx_msg_p, nxge_mblks_pending)); 1746 1747 /* 1748 * First we need to get the free state, then 1749 * atomic decrement the reference count to prevent 1750 * the race condition with the interrupt thread that 1751 * is processing a loaned up buffer block. 1752 */ 1753 free_state = rx_msg_p->free; 1754 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1755 if (!ref_cnt) { 1756 atomic_dec_32(&nxge_mblks_pending); 1757 buffer = rx_msg_p->buffer; 1758 size = rx_msg_p->block_size; 1759 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1760 "will free: rx_msg_p = $%p (block pending %d)", 1761 rx_msg_p, nxge_mblks_pending)); 1762 1763 if (!rx_msg_p->use_buf_pool) { 1764 KMEM_FREE(buffer, size); 1765 } 1766 1767 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1768 1769 if (ring) { 1770 /* 1771 * Decrement the receive buffer ring's reference 1772 * count, too. 1773 */ 1774 atomic_dec_32(&ring->rbr_ref_cnt); 1775 1776 /* 1777 * Free the receive buffer ring, if 1778 * 1. all the receive buffers have been freed 1779 * 2. and we are in the proper state (that is, 1780 * we are not UNMAPPING). 1781 */ 1782 if (ring->rbr_ref_cnt == 0 && 1783 ring->rbr_state == RBR_UNMAPPED) { 1784 /* 1785 * Free receive data buffers, 1786 * buffer index information 1787 * (rxring_info) and 1788 * the message block ring. 1789 */ 1790 NXGE_DEBUG_MSG((NULL, RX_CTL, 1791 "nxge_freeb:rx_msg_p = $%p " 1792 "(block pending %d) free buffers", 1793 rx_msg_p, nxge_mblks_pending)); 1794 nxge_rxdma_databuf_free(ring); 1795 if (ring->ring_info) { 1796 KMEM_FREE(ring->ring_info, 1797 sizeof (rxring_info_t)); 1798 } 1799 1800 if (ring->rx_msg_ring) { 1801 KMEM_FREE(ring->rx_msg_ring, 1802 ring->tnblocks * 1803 sizeof (p_rx_msg_t)); 1804 } 1805 KMEM_FREE(ring, sizeof (*ring)); 1806 } 1807 } 1808 return; 1809 } 1810 1811 /* 1812 * Repost buffer. 
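	 * The buffer is handed back to the RBR only when the last loaned
	 * reference has been dropped (ref_cnt is back down to 1), the
	 * buffer was marked free, and the ring is still in the POSTING
	 * state.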
1813 */ 1814 if (free_state && (ref_cnt == 1) && ring) { 1815 NXGE_DEBUG_MSG((NULL, RX_CTL, 1816 "nxge_freeb: post page $%p:", rx_msg_p)); 1817 if (ring->rbr_state == RBR_POSTING) 1818 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1819 } 1820 1821 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1822 } 1823 1824 uint_t 1825 nxge_rx_intr(void *arg1, void *arg2) 1826 { 1827 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1828 p_nxge_t nxgep = (p_nxge_t)arg2; 1829 p_nxge_ldg_t ldgp; 1830 uint8_t channel; 1831 npi_handle_t handle; 1832 rx_dma_ctl_stat_t cs; 1833 p_rx_rcr_ring_t rcr_ring; 1834 mblk_t *mp; 1835 1836 #ifdef NXGE_DEBUG 1837 rxdma_cfig1_t cfg; 1838 #endif 1839 1840 if (ldvp == NULL) { 1841 NXGE_DEBUG_MSG((NULL, INT_CTL, 1842 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1843 nxgep, ldvp)); 1844 1845 return (DDI_INTR_CLAIMED); 1846 } 1847 1848 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1849 nxgep = ldvp->nxgep; 1850 } 1851 1852 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1853 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1854 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1855 "<== nxge_rx_intr: interface not started or intialized")); 1856 return (DDI_INTR_CLAIMED); 1857 } 1858 1859 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1860 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1861 nxgep, ldvp)); 1862 1863 /* 1864 * This interrupt handler is for a specific 1865 * receive dma channel. 1866 */ 1867 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1868 1869 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1870 1871 /* 1872 * The RCR ring lock must be held when packets 1873 * are being processed and the hardware registers are 1874 * being read or written to prevent race condition 1875 * among the interrupt thread, the polling thread 1876 * (will cause fatal errors such as rcrincon bit set) 1877 * and the setting of the poll_flag. 1878 */ 1879 MUTEX_ENTER(&rcr_ring->lock); 1880 1881 /* 1882 * Get the control and status for this channel. 1883 */ 1884 channel = ldvp->channel; 1885 ldgp = ldvp->ldgp; 1886 1887 if (!isLDOMguest(nxgep)) { 1888 if (!nxgep->rx_channel_started[channel]) { 1889 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1890 "<== nxge_rx_intr: channel is not started")); 1891 MUTEX_EXIT(&rcr_ring->lock); 1892 return (DDI_INTR_CLAIMED); 1893 } 1894 } 1895 1896 ASSERT(rcr_ring->ldgp == ldgp); 1897 ASSERT(rcr_ring->ldvp == ldvp); 1898 1899 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1900 1901 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1902 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1903 channel, 1904 cs.value, 1905 cs.bits.hdw.rcrto, 1906 cs.bits.hdw.rcrthres)); 1907 1908 mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1909 1910 /* error events. */ 1911 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1912 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1913 } 1914 1915 /* 1916 * Enable the mailbox update interrupt if we want 1917 * to use mailbox. We probably don't need to use 1918 * mailbox as it only saves us one pio read. 1919 * Also write 1 to rcrthres and rcrto to clear 1920 * these two edge triggered bits. 1921 */ 1922 cs.value &= RX_DMA_CTL_STAT_WR1C; 1923 cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 1924 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1925 cs.value); 1926 1927 /* 1928 * If the polling mode is enabled, disable the interrupt. 
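	 * (While polling is in effect the ring is serviced through the
	 * MAC layer's poll path rather than this ISR; the logical group
	 * is re-armed by the else branch below once polling is turned
	 * off.)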
1929 */ 1930 if (rcr_ring->poll_flag) { 1931 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1932 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1933 "(disabling interrupts)", channel, ldgp, ldvp)); 1934 /* 1935 * Disarm this logical group if this is a single device 1936 * group. 1937 */ 1938 if (ldgp->nldvs == 1) { 1939 ldgimgm_t mgm; 1940 mgm.value = 0; 1941 mgm.bits.ldw.arm = 0; 1942 NXGE_REG_WR64(handle, 1943 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 1944 } 1945 } else { 1946 /* 1947 * Rearm this logical group if this is a single device 1948 * group. 1949 */ 1950 if (ldgp->nldvs == 1) { 1951 if (isLDOMguest(nxgep)) { 1952 nxge_hio_ldgimgn(nxgep, ldgp); 1953 } else { 1954 ldgimgm_t mgm; 1955 1956 mgm.value = 0; 1957 mgm.bits.ldw.arm = 1; 1958 mgm.bits.ldw.timer = ldgp->ldg_timer; 1959 1960 NXGE_REG_WR64(handle, 1961 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1962 mgm.value); 1963 } 1964 } 1965 1966 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1967 "==> nxge_rx_intr: rdc %d ldgp $%p " 1968 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1969 } 1970 MUTEX_EXIT(&rcr_ring->lock); 1971 1972 if (mp) { 1973 if (!isLDOMguest(nxgep)) 1974 mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 1975 rcr_ring->rcr_gen_num); 1976 #if defined(sun4v) 1977 else { /* isLDOMguest(nxgep) */ 1978 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1979 nxgep->nxge_hw_p->hio; 1980 nx_vio_fp_t *vio = &nhd->hio.vio; 1981 1982 if (vio->cb.vio_net_rx_cb) { 1983 (*vio->cb.vio_net_rx_cb) 1984 (nxgep->hio_vr->vhp, mp); 1985 } 1986 } 1987 #endif 1988 } 1989 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 1990 return (DDI_INTR_CLAIMED); 1991 } 1992 1993 /* 1994 * Process the packets received in the specified logical device 1995 * and pass up a chain of message blocks to the upper layer. 1996 * The RCR ring lock must be held before calling this function. 
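 * (The interrupt handler, nxge_rx_intr(), takes the RCR ring lock
 * before calling this function.)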
1997 */ 1998 static mblk_t * 1999 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 2000 { 2001 p_mblk_t mp; 2002 p_rx_rcr_ring_t rcrp; 2003 2004 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 2005 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 2006 2007 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2008 "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d " 2009 "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle)); 2010 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 2011 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2012 "<== nxge_rx_pkts_vring: no mp")); 2013 return (NULL); 2014 } 2015 2016 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 2017 mp)); 2018 2019 #ifdef NXGE_DEBUG 2020 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2021 "==> nxge_rx_pkts_vring:calling mac_rx " 2022 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 2023 "mac_handle $%p", 2024 mp->b_wptr - mp->b_rptr, 2025 mp, mp->b_cont, mp->b_next, 2026 rcrp, rcrp->rcr_mac_handle)); 2027 2028 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2029 "==> nxge_rx_pkts_vring: dump packets " 2030 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 2031 mp, 2032 mp->b_rptr, 2033 mp->b_wptr, 2034 nxge_dump_packet((char *)mp->b_rptr, 2035 mp->b_wptr - mp->b_rptr))); 2036 if (mp->b_cont) { 2037 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2038 "==> nxge_rx_pkts_vring: dump b_cont packets " 2039 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 2040 mp->b_cont, 2041 mp->b_cont->b_rptr, 2042 mp->b_cont->b_wptr, 2043 nxge_dump_packet((char *)mp->b_cont->b_rptr, 2044 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 2045 } 2046 if (mp->b_next) { 2047 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2048 "==> nxge_rx_pkts_vring: dump next packets " 2049 "(b_rptr $%p): %s", 2050 mp->b_next->b_rptr, 2051 nxge_dump_packet((char *)mp->b_next->b_rptr, 2052 mp->b_next->b_wptr - mp->b_next->b_rptr))); 2053 } 2054 #endif 2055 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2056 "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ", 2057 rcrp->rdc, rcrp->rcr_mac_handle)); 2058 2059 return (mp); 2060 } 2061 2062 2063 /* 2064 * This routine is the main packet receive processing function. 2065 * It gets the packet type, error code, and buffer related 2066 * information from the receive completion entry. 2067 * How many completion entries to process is based on the number of packets 2068 * queued by the hardware, a hardware maintained tail pointer 2069 * and a configurable receive packet count. 2070 * 2071 * A chain of message blocks will be created as result of processing 2072 * the completion entries. This chain of message blocks will be returned and 2073 * a hardware control status register will be updated with the number of 2074 * packets were removed from the hardware queue. 2075 * 2076 * The RCR ring lock is held when entering this function. 
2077 */ 2078 static mblk_t * 2079 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 2080 int bytes_to_pickup) 2081 { 2082 npi_handle_t handle; 2083 uint8_t channel; 2084 uint32_t comp_rd_index; 2085 p_rcr_entry_t rcr_desc_rd_head_p; 2086 p_rcr_entry_t rcr_desc_rd_head_pp; 2087 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 2088 uint16_t qlen, nrcr_read, npkt_read; 2089 uint32_t qlen_hw; 2090 boolean_t multi; 2091 rcrcfig_b_t rcr_cfg_b; 2092 int totallen = 0; 2093 #if defined(_BIG_ENDIAN) 2094 npi_status_t rs = NPI_SUCCESS; 2095 #endif 2096 2097 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 2098 "channel %d", rcr_p->rdc)); 2099 2100 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 2101 return (NULL); 2102 } 2103 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2104 channel = rcr_p->rdc; 2105 2106 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2107 "==> nxge_rx_pkts: START: rcr channel %d " 2108 "head_p $%p head_pp $%p index %d ", 2109 channel, rcr_p->rcr_desc_rd_head_p, 2110 rcr_p->rcr_desc_rd_head_pp, 2111 rcr_p->comp_rd_index)); 2112 2113 2114 #if !defined(_BIG_ENDIAN) 2115 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2116 #else 2117 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2118 if (rs != NPI_SUCCESS) { 2119 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2120 "channel %d, get qlen failed 0x%08x", 2121 channel, rs)); 2122 return (NULL); 2123 } 2124 #endif 2125 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2126 "qlen %d", channel, qlen)); 2127 2128 2129 2130 if (!qlen) { 2131 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2132 "==> nxge_rx_pkts:rcr channel %d " 2133 "qlen %d (no pkts)", channel, qlen)); 2134 2135 return (NULL); 2136 } 2137 2138 comp_rd_index = rcr_p->comp_rd_index; 2139 2140 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2141 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2142 nrcr_read = npkt_read = 0; 2143 2144 /* 2145 * Number of packets queued 2146 * (The jumbo or multi packet will be counted as only one 2147 * packets and it may take up more than one completion entry). 2148 */ 2149 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2150 qlen : nxge_max_rx_pkts; 2151 head_mp = NULL; 2152 tail_mp = &head_mp; 2153 nmp = mp_cont = NULL; 2154 multi = B_FALSE; 2155 2156 while (qlen_hw) { 2157 2158 #ifdef NXGE_DEBUG 2159 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2160 #endif 2161 /* 2162 * Process one completion ring entry. 
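 * A single frame may span several entries: the MULTI bit marks a
 * continuation, nmp carries the first buffer of a frame and mp_cont any
 * follow-on buffer, and the chaining cases below link whole frames with
 * b_next and continuation buffers with b_cont.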
2163 */ 2164 nxge_receive_packet(nxgep, 2165 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2166 2167 /* 2168 * message chaining modes 2169 */ 2170 if (nmp) { 2171 nmp->b_next = NULL; 2172 if (!multi && !mp_cont) { /* frame fits a partition */ 2173 *tail_mp = nmp; 2174 tail_mp = &nmp->b_next; 2175 totallen += MBLKL(nmp); 2176 nmp = NULL; 2177 } else if (multi && !mp_cont) { /* first segment */ 2178 *tail_mp = nmp; 2179 tail_mp = &nmp->b_cont; 2180 totallen += MBLKL(nmp); 2181 } else if (multi && mp_cont) { /* mid of multi segs */ 2182 *tail_mp = mp_cont; 2183 tail_mp = &mp_cont->b_cont; 2184 totallen += MBLKL(mp_cont); 2185 } else if (!multi && mp_cont) { /* last segment */ 2186 *tail_mp = mp_cont; 2187 tail_mp = &nmp->b_next; 2188 totallen += MBLKL(mp_cont); 2189 nmp = NULL; 2190 } 2191 } 2192 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2193 "==> nxge_rx_pkts: loop: rcr channel %d " 2194 "before updating: multi %d " 2195 "nrcr_read %d " 2196 "npk read %d " 2197 "head_pp $%p index %d ", 2198 channel, 2199 multi, 2200 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2201 comp_rd_index)); 2202 2203 if (!multi) { 2204 qlen_hw--; 2205 npkt_read++; 2206 } 2207 2208 /* 2209 * Update the next read entry. 2210 */ 2211 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2212 rcr_p->comp_wrap_mask); 2213 2214 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2215 rcr_p->rcr_desc_first_p, 2216 rcr_p->rcr_desc_last_p); 2217 2218 nrcr_read++; 2219 2220 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2221 "<== nxge_rx_pkts: (SAM, process one packet) " 2222 "nrcr_read %d", 2223 nrcr_read)); 2224 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2225 "==> nxge_rx_pkts: loop: rcr channel %d " 2226 "multi %d " 2227 "nrcr_read %d " 2228 "npk read %d " 2229 "head_pp $%p index %d ", 2230 channel, 2231 multi, 2232 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2233 comp_rd_index)); 2234 2235 if ((bytes_to_pickup != -1) && 2236 (totallen >= bytes_to_pickup)) { 2237 break; 2238 } 2239 2240 /* limit the number of packets for interrupt */ 2241 if (!(rcr_p->poll_flag)) { 2242 if (npkt_read == nxge_max_intr_pkts) { 2243 break; 2244 } 2245 } 2246 } 2247 2248 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2249 rcr_p->comp_rd_index = comp_rd_index; 2250 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2251 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2252 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2253 2254 rcr_p->intr_timeout = (nxgep->intr_timeout < 2255 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2256 nxgep->intr_timeout; 2257 2258 rcr_p->intr_threshold = (nxgep->intr_threshold < 2259 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2260 nxgep->intr_threshold; 2261 2262 rcr_cfg_b.value = 0x0ULL; 2263 rcr_cfg_b.bits.ldw.entout = 1; 2264 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2265 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2266 2267 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2268 channel, rcr_cfg_b.value); 2269 } 2270 2271 cs.bits.ldw.pktread = npkt_read; 2272 cs.bits.ldw.ptrread = nrcr_read; 2273 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2274 channel, cs.value); 2275 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2276 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2277 "head_pp $%p index %016llx ", 2278 channel, 2279 rcr_p->rcr_desc_rd_head_pp, 2280 rcr_p->comp_rd_index)); 2281 /* 2282 * Update RCR buffer pointer read and number of packets 2283 * read. 
2284 */ 2285 2286 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2287 "channel %d", rcr_p->rdc)); 2288 2289 return (head_mp); 2290 } 2291 2292 void 2293 nxge_receive_packet(p_nxge_t nxgep, 2294 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2295 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2296 { 2297 p_mblk_t nmp = NULL; 2298 uint64_t multi; 2299 uint64_t dcf_err; 2300 uint8_t channel; 2301 2302 boolean_t first_entry = B_TRUE; 2303 boolean_t is_tcp_udp = B_FALSE; 2304 boolean_t buffer_free = B_FALSE; 2305 boolean_t error_send_up = B_FALSE; 2306 uint8_t error_type; 2307 uint16_t l2_len; 2308 uint16_t skip_len; 2309 uint8_t pktbufsz_type; 2310 uint64_t rcr_entry; 2311 uint64_t *pkt_buf_addr_pp; 2312 uint64_t *pkt_buf_addr_p; 2313 uint32_t buf_offset; 2314 uint32_t bsize; 2315 uint32_t error_disp_cnt; 2316 uint32_t msg_index; 2317 p_rx_rbr_ring_t rx_rbr_p; 2318 p_rx_msg_t *rx_msg_ring_p; 2319 p_rx_msg_t rx_msg_p; 2320 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2321 nxge_status_t status = NXGE_OK; 2322 boolean_t is_valid = B_FALSE; 2323 p_nxge_rx_ring_stats_t rdc_stats; 2324 uint32_t bytes_read; 2325 uint64_t pkt_type; 2326 uint64_t frag; 2327 boolean_t pkt_too_long_err = B_FALSE; 2328 #ifdef NXGE_DEBUG 2329 int dump_len; 2330 #endif 2331 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2332 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2333 2334 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2335 2336 multi = (rcr_entry & RCR_MULTI_MASK); 2337 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2338 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2339 2340 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2341 frag = (rcr_entry & RCR_FRAG_MASK); 2342 2343 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2344 2345 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2346 RCR_PKTBUFSZ_SHIFT); 2347 #if defined(__i386) 2348 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2349 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2350 #else 2351 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2352 RCR_PKT_BUF_ADDR_SHIFT); 2353 #endif 2354 2355 channel = rcr_p->rdc; 2356 2357 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2358 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2359 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2360 "error_type 0x%x pkt_type 0x%x " 2361 "pktbufsz_type %d ", 2362 rcr_desc_rd_head_p, 2363 rcr_entry, pkt_buf_addr_pp, l2_len, 2364 multi, 2365 error_type, 2366 pkt_type, 2367 pktbufsz_type)); 2368 2369 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2370 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2371 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2372 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2373 rcr_entry, pkt_buf_addr_pp, l2_len, 2374 multi, 2375 error_type, 2376 pkt_type)); 2377 2378 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2379 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2380 "full pkt_buf_addr_pp $%p l2_len %d", 2381 rcr_entry, pkt_buf_addr_pp, l2_len)); 2382 2383 /* get the stats ptr */ 2384 rdc_stats = rcr_p->rdc_stats; 2385 2386 if (!l2_len) { 2387 2388 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2389 "<== nxge_receive_packet: failed: l2 length is 0.")); 2390 return; 2391 } 2392 2393 /* 2394 * Software workaround for BMAC hardware limitation that allows 2395 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2396 * instead of 0x2400 for jumbo. 
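 * Because the BMAC lets such frames through, an oversized frame is only
 * flagged here (pkt_too_long_err) and then counted as an input error
 * further down; it is freed rather than passed up the stack.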
2397 */ 2398 if (l2_len > nxgep->mac.maxframesize) { 2399 pkt_too_long_err = B_TRUE; 2400 } 2401 2402 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2403 l2_len -= ETHERFCSL; 2404 2405 /* shift 6 bits to get the full io address */ 2406 #if defined(__i386) 2407 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2408 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2409 #else 2410 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2411 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2412 #endif 2413 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2414 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2415 "full pkt_buf_addr_pp $%p l2_len %d", 2416 rcr_entry, pkt_buf_addr_pp, l2_len)); 2417 2418 rx_rbr_p = rcr_p->rx_rbr_p; 2419 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2420 2421 if (first_entry) { 2422 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2423 RXDMA_HDR_SIZE_DEFAULT); 2424 2425 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2426 "==> nxge_receive_packet: first entry 0x%016llx " 2427 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2428 rcr_entry, pkt_buf_addr_pp, l2_len, 2429 hdr_size)); 2430 } 2431 2432 MUTEX_ENTER(&rx_rbr_p->lock); 2433 2434 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2435 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2436 "full pkt_buf_addr_pp $%p l2_len %d", 2437 rcr_entry, pkt_buf_addr_pp, l2_len)); 2438 2439 /* 2440 * Packet buffer address in the completion entry points 2441 * to the starting buffer address (offset 0). 2442 * Use the starting buffer address to locate the corresponding 2443 * kernel address. 2444 */ 2445 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2446 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2447 &buf_offset, 2448 &msg_index); 2449 2450 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2451 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2452 "full pkt_buf_addr_pp $%p l2_len %d", 2453 rcr_entry, pkt_buf_addr_pp, l2_len)); 2454 2455 if (status != NXGE_OK) { 2456 MUTEX_EXIT(&rx_rbr_p->lock); 2457 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2458 "<== nxge_receive_packet: found vaddr failed %d", 2459 status)); 2460 return; 2461 } 2462 2463 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2464 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2465 "full pkt_buf_addr_pp $%p l2_len %d", 2466 rcr_entry, pkt_buf_addr_pp, l2_len)); 2467 2468 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2469 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2470 "full pkt_buf_addr_pp $%p l2_len %d", 2471 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2472 2473 rx_msg_p = rx_msg_ring_p[msg_index]; 2474 2475 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2476 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2477 "full pkt_buf_addr_pp $%p l2_len %d", 2478 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2479 2480 switch (pktbufsz_type) { 2481 case RCR_PKTBUFSZ_0: 2482 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2483 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2484 "==> nxge_receive_packet: 0 buf %d", bsize)); 2485 break; 2486 case RCR_PKTBUFSZ_1: 2487 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2488 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2489 "==> nxge_receive_packet: 1 buf %d", bsize)); 2490 break; 2491 case RCR_PKTBUFSZ_2: 2492 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2493 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2494 "==> nxge_receive_packet: 2 buf %d", bsize)); 2495 break; 2496 case RCR_SINGLE_BLOCK: 2497 bsize = rx_msg_p->block_size; 2498 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2499 "==> nxge_receive_packet: single %d", bsize)); 2500 2501 break; 2502 default: 2503 MUTEX_EXIT(&rx_rbr_p->lock); 2504 return; 2505 } 2506 2507 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2508 (buf_offset + sw_offset_bytes), 
2509 (hdr_size + l2_len), 2510 DDI_DMA_SYNC_FORCPU); 2511 2512 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2513 "==> nxge_receive_packet: after first dump:usage count")); 2514 2515 if (rx_msg_p->cur_usage_cnt == 0) { 2516 if (rx_rbr_p->rbr_use_bcopy) { 2517 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2518 if (rx_rbr_p->rbr_consumed < 2519 rx_rbr_p->rbr_threshold_hi) { 2520 if (rx_rbr_p->rbr_threshold_lo == 0 || 2521 ((rx_rbr_p->rbr_consumed >= 2522 rx_rbr_p->rbr_threshold_lo) && 2523 (rx_rbr_p->rbr_bufsize_type >= 2524 pktbufsz_type))) { 2525 rx_msg_p->rx_use_bcopy = B_TRUE; 2526 } 2527 } else { 2528 rx_msg_p->rx_use_bcopy = B_TRUE; 2529 } 2530 } 2531 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2532 "==> nxge_receive_packet: buf %d (new block) ", 2533 bsize)); 2534 2535 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2536 rx_msg_p->pkt_buf_size = bsize; 2537 rx_msg_p->cur_usage_cnt = 1; 2538 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2539 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2540 "==> nxge_receive_packet: buf %d " 2541 "(single block) ", 2542 bsize)); 2543 /* 2544 * Buffer can be reused once the free function 2545 * is called. 2546 */ 2547 rx_msg_p->max_usage_cnt = 1; 2548 buffer_free = B_TRUE; 2549 } else { 2550 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2551 if (rx_msg_p->max_usage_cnt == 1) { 2552 buffer_free = B_TRUE; 2553 } 2554 } 2555 } else { 2556 rx_msg_p->cur_usage_cnt++; 2557 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2558 buffer_free = B_TRUE; 2559 } 2560 } 2561 2562 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2563 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2564 msg_index, l2_len, 2565 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2566 2567 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2568 rdc_stats->ierrors++; 2569 if (dcf_err) { 2570 rdc_stats->dcf_err++; 2571 #ifdef NXGE_DEBUG 2572 if (!rdc_stats->dcf_err) { 2573 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2574 "nxge_receive_packet: channel %d dcf_err rcr" 2575 " 0x%llx", channel, rcr_entry)); 2576 } 2577 #endif 2578 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2579 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2580 } else if (pkt_too_long_err) { 2581 rdc_stats->pkt_too_long_err++; 2582 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2583 " channel %d packet length [%d] > " 2584 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2585 nxgep->mac.maxframesize)); 2586 } else { 2587 /* Update error stats */ 2588 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2589 rdc_stats->errlog.compl_err_type = error_type; 2590 2591 switch (error_type) { 2592 /* 2593 * Do not send FMA ereport for RCR_L2_ERROR and 2594 * RCR_L4_CSUM_ERROR because most likely they indicate 2595 * back pressure rather than HW failures. 2596 */ 2597 case RCR_L2_ERROR: 2598 rdc_stats->l2_err++; 2599 if (rdc_stats->l2_err < 2600 error_disp_cnt) { 2601 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2602 " nxge_receive_packet:" 2603 " channel %d RCR L2_ERROR", 2604 channel)); 2605 } 2606 break; 2607 case RCR_L4_CSUM_ERROR: 2608 error_send_up = B_TRUE; 2609 rdc_stats->l4_cksum_err++; 2610 if (rdc_stats->l4_cksum_err < 2611 error_disp_cnt) { 2612 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2613 " nxge_receive_packet:" 2614 " channel %d" 2615 " RCR L4_CSUM_ERROR", channel)); 2616 } 2617 break; 2618 /* 2619 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2620 * RCR_ZCP_SOFT_ERROR because they reflect the same 2621 * FFLP and ZCP errors that have been reported by 2622 * nxge_fflp.c and nxge_zcp.c. 
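 * They are still counted per channel below, and since error_send_up is
 * set for them the frame is passed up rather than dropped.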
*/ 2624 case RCR_FFLP_SOFT_ERROR: 2625 error_send_up = B_TRUE; 2626 rdc_stats->fflp_soft_err++; 2627 if (rdc_stats->fflp_soft_err < 2628 error_disp_cnt) { 2629 NXGE_ERROR_MSG((nxgep, 2630 NXGE_ERR_CTL, 2631 " nxge_receive_packet:" 2632 " channel %d" 2633 " RCR FFLP_SOFT_ERROR", channel)); 2634 } 2635 break; 2636 case RCR_ZCP_SOFT_ERROR: 2637 error_send_up = B_TRUE; 2638 rdc_stats->zcp_soft_err++; 2639 if (rdc_stats->zcp_soft_err < 2640 error_disp_cnt) 2641 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2642 " nxge_receive_packet: Channel %d" 2643 " RCR ZCP_SOFT_ERROR", channel)); 2644 break; 2645 default: 2646 rdc_stats->rcr_unknown_err++; 2647 if (rdc_stats->rcr_unknown_err 2648 < error_disp_cnt) { 2649 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2650 " nxge_receive_packet: Channel %d" 2651 " RCR entry 0x%llx error 0x%x", 2652 channel, rcr_entry, error_type)); 2653 } 2654 break; 2655 } 2656 } 2657 2658 /* 2659 * Update and repost buffer block if max usage 2660 * count is reached. 2661 */ 2662 if (error_send_up == B_FALSE) { 2663 atomic_inc_32(&rx_msg_p->ref_cnt); 2664 if (buffer_free == B_TRUE) { 2665 rx_msg_p->free = B_TRUE; 2666 } 2667 2668 MUTEX_EXIT(&rx_rbr_p->lock); 2669 nxge_freeb(rx_msg_p); 2670 return; 2671 } 2672 } 2673 2674 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2675 "==> nxge_receive_packet: DMA sync second ")); 2676 2677 bytes_read = rcr_p->rcvd_pkt_bytes; 2678 skip_len = sw_offset_bytes + hdr_size; 2679 if (!rx_msg_p->rx_use_bcopy) { 2680 /* 2681 * For loaned-up buffers, the driver reference count 2682 * is incremented first and then the free state is set. 2683 */ 2684 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2685 if (first_entry) { 2686 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2687 if (l2_len < bsize - skip_len) { 2688 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2689 } else { 2690 nmp->b_wptr = &nmp->b_rptr[bsize 2691 - skip_len]; 2692 } 2693 } else { 2694 if (l2_len - bytes_read < bsize) { 2695 nmp->b_wptr = 2696 &nmp->b_rptr[l2_len - bytes_read]; 2697 } else { 2698 nmp->b_wptr = &nmp->b_rptr[bsize]; 2699 } 2700 } 2701 } 2702 } else { 2703 if (first_entry) { 2704 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2705 l2_len < bsize - skip_len ? 2706 l2_len : bsize - skip_len); 2707 } else { 2708 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2709 l2_len - bytes_read < bsize ? 2710 l2_len - bytes_read : bsize); 2711 } 2712 } 2713 if (nmp != NULL) { 2714 if (first_entry) { 2715 /* 2716 * Jumbo packets may be received with more than one 2717 * buffer; increment ipackets for the first entry only. 2718 */ 2719 rdc_stats->ipackets++; 2720 2721 /* Update ibytes for kstat. */ 2722 rdc_stats->ibytes += skip_len 2723 + l2_len < bsize ? l2_len : bsize; 2724 /* 2725 * Update the number of bytes read so far for the 2726 * current frame. 2727 */ 2728 bytes_read = nmp->b_wptr - nmp->b_rptr; 2729 } else { 2730 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2731 l2_len - bytes_read : bsize; 2732 bytes_read += nmp->b_wptr - nmp->b_rptr; 2733 } 2734 2735 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2736 "==> nxge_receive_packet after dupb: " 2737 "rbr consumed %d " 2738 "pktbufsz_type %d " 2739 "nmp $%p rptr $%p wptr $%p " 2740 "buf_offset %d bsize %d l2_len %d skip_len %d", 2741 rx_rbr_p->rbr_consumed, 2742 pktbufsz_type, 2743 nmp, nmp->b_rptr, nmp->b_wptr, 2744 buf_offset, bsize, l2_len, skip_len)); 2745 } else { 2746 cmn_err(CE_WARN, "!nxge_receive_packet: " 2747 "update stats (error)"); 2748 atomic_inc_32(&rx_msg_p->ref_cnt); 2749 if (buffer_free == B_TRUE) { 2750 rx_msg_p->free = B_TRUE; 2751 } 2752 MUTEX_EXIT(&rx_rbr_p->lock); 2753 nxge_freeb(rx_msg_p); 2754 return; 2755 } 2756 2757 if (buffer_free == B_TRUE) { 2758 rx_msg_p->free = B_TRUE; 2759 } 2760 2761 is_valid = (nmp != NULL); 2762 2763 rcr_p->rcvd_pkt_bytes = bytes_read; 2764 2765 MUTEX_EXIT(&rx_rbr_p->lock); 2766 2767 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2768 atomic_inc_32(&rx_msg_p->ref_cnt); 2769 nxge_freeb(rx_msg_p); 2770 } 2771 2772 if (is_valid) { 2773 nmp->b_cont = NULL; 2774 if (first_entry) { 2775 *mp = nmp; 2776 *mp_cont = NULL; 2777 } else { 2778 *mp_cont = nmp; 2779 } 2780 } 2781 2782 /* 2783 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2784 * If a packet is not fragmented and no error bit is set, then 2785 * L4 checksum is OK. 2786 */ 2787 2788 if (is_valid && !multi) { 2789 /* 2790 * If the checksum flag nxge_cksum_offload 2791 * is 1, TCP and UDP packets can be sent 2792 * up with a good checksum. If the checksum flag 2793 * is set to 0, checksum reporting applies to 2794 * TCP packets only (workaround for a hardware bug). 2795 * If the checksum flag nxge_cksum_offload is 2796 * greater than 1, neither TCP nor UDP packets 2797 * have their hardware checksum results reported. 2798 */ 2799 if (nxge_cksum_offload == 1) { 2800 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2801 pkt_type == RCR_PKT_IS_UDP) ? 2802 B_TRUE: B_FALSE); 2803 } else if (!nxge_cksum_offload) { 2804 /* TCP checksum only. */ 2805 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2806 B_TRUE: B_FALSE); 2807 } 2808 2809 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2810 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2811 is_valid, multi, is_tcp_udp, frag, error_type)); 2812 2813 if (is_tcp_udp && !frag && !error_type) { 2814 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2815 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2816 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2817 "==> nxge_receive_packet: Full tcp/udp cksum " 2818 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2819 "error %d", 2820 is_valid, multi, is_tcp_udp, frag, error_type)); 2821 } 2822 } 2823 2824 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2825 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2826 2827 *multi_p = (multi == RCR_MULTI_MASK); 2828 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2829 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2830 *multi_p, nmp, *mp, *mp_cont)); 2831 } 2832 2833 /* 2834 * Enable polling for a ring. Interrupt for the ring is disabled when 2835 * the nxge interrupt comes (see nxge_rx_intr).
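 * poll_flag is always examined under the ring lock: while it is set,
 * nxge_rx_intr() leaves the logical group disarmed and the MAC layer
 * pulls packets through nxge_rx_poll() until nxge_disable_poll()
 * re-arms the interrupt.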
2836 */ 2837 int 2838 nxge_enable_poll(void *arg) 2839 { 2840 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2841 p_rx_rcr_ring_t ringp; 2842 p_nxge_t nxgep; 2843 p_nxge_ldg_t ldgp; 2844 uint32_t channel; 2845 2846 if (ring_handle == NULL) { 2847 return (0); 2848 } 2849 2850 nxgep = ring_handle->nxgep; 2851 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2852 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2853 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2854 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2855 ldgp = ringp->ldgp; 2856 if (ldgp == NULL) { 2857 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2858 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2859 ringp->rdc)); 2860 return (0); 2861 } 2862 2863 MUTEX_ENTER(&ringp->lock); 2864 /* enable polling */ 2865 if (ringp->poll_flag == 0) { 2866 ringp->poll_flag = 1; 2867 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2868 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2869 ringp->rdc)); 2870 } 2871 2872 MUTEX_EXIT(&ringp->lock); 2873 return (0); 2874 } 2875 /* 2876 * Disable polling for a ring and enable its interrupt. 2877 */ 2878 int 2879 nxge_disable_poll(void *arg) 2880 { 2881 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2882 p_rx_rcr_ring_t ringp; 2883 p_nxge_t nxgep; 2884 uint32_t channel; 2885 2886 if (ring_handle == NULL) { 2887 return (0); 2888 } 2889 2890 nxgep = ring_handle->nxgep; 2891 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2892 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2893 2894 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2895 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc, ringp->poll_flag)); 2896 2897 MUTEX_ENTER(&ringp->lock); 2898 2899 /* disable polling: enable interrupt */ 2900 if (ringp->poll_flag) { 2901 npi_handle_t handle; 2902 rx_dma_ctl_stat_t cs; 2903 uint8_t channel; 2904 p_nxge_ldg_t ldgp; 2905 2906 /* 2907 * Get the control and status for this channel. 2908 */ 2909 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2910 channel = ringp->rdc; 2911 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2912 channel, &cs.value); 2913 2914 /* 2915 * Enable mailbox update 2916 * Since packets were not read and the hardware uses 2917 * bits pktread and ptrread to update the queue 2918 * length, we need to set both bits to 0. 2919 */ 2920 cs.bits.ldw.pktread = 0; 2921 cs.bits.ldw.ptrread = 0; 2922 cs.bits.hdw.mex = 1; 2923 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2924 cs.value); 2925 2926 /* 2927 * Rearm this logical group if this is a single device 2928 * group. 2929 */ 2930 ldgp = ringp->ldgp; 2931 if (ldgp == NULL) { 2932 ringp->poll_flag = 0; 2933 MUTEX_EXIT(&ringp->lock); 2934 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2935 "==> nxge_disable_poll: no ldgp rdc %d " 2936 "(still set poll to 0)", ringp->rdc)); 2937 return (0); 2938 } 2939 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2940 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2941 ringp->rdc, ldgp)); 2942 if (ldgp->nldvs == 1) { 2943 ldgimgm_t mgm; 2944 mgm.value = 0; 2945 mgm.bits.ldw.arm = 1; 2946 mgm.bits.ldw.timer = ldgp->ldg_timer; 2947 NXGE_REG_WR64(handle, 2948 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 2949 } 2950 ringp->poll_flag = 0; 2951 } 2952 2953 MUTEX_EXIT(&ringp->lock); 2954 return (0); 2955 } 2956 2957 /* 2958 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
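 * The caller must already have switched the ring to polling mode via
 * nxge_enable_poll(); this routine reads the channel's control/status
 * register, hands the byte budget to nxge_rx_pkts(), and then handles
 * any error events that were latched while polling.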
2959 */ 2960 mblk_t * 2961 nxge_rx_poll(void *arg, int bytes_to_pickup) 2962 { 2963 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2964 p_rx_rcr_ring_t rcr_p; 2965 p_nxge_t nxgep; 2966 npi_handle_t handle; 2967 rx_dma_ctl_stat_t cs; 2968 mblk_t *mblk; 2969 p_nxge_ldv_t ldvp; 2970 uint32_t channel; 2971 2972 nxgep = ring_handle->nxgep; 2973 2974 /* 2975 * Get the control and status for this channel. 2976 */ 2977 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2978 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2979 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2980 MUTEX_ENTER(&rcr_p->lock); 2981 ASSERT(rcr_p->poll_flag == 1); 2982 2983 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2984 2985 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2986 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2987 rcr_p->rdc, rcr_p->poll_flag)); 2988 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2989 2990 ldvp = rcr_p->ldvp; 2991 /* error events. */ 2992 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2993 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2994 } 2995 2996 MUTEX_EXIT(&rcr_p->lock); 2997 2998 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2999 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 3000 return (mblk); 3001 } 3002 3003 3004 /*ARGSUSED*/ 3005 static nxge_status_t 3006 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 3007 { 3008 p_nxge_rx_ring_stats_t rdc_stats; 3009 npi_handle_t handle; 3010 npi_status_t rs; 3011 boolean_t rxchan_fatal = B_FALSE; 3012 boolean_t rxport_fatal = B_FALSE; 3013 uint8_t portn; 3014 nxge_status_t status = NXGE_OK; 3015 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 3016 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 3017 3018 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3019 portn = nxgep->mac.portnum; 3020 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 3021 3022 if (cs.bits.hdw.rbr_tmout) { 3023 rdc_stats->rx_rbr_tmout++; 3024 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3025 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 3026 rxchan_fatal = B_TRUE; 3027 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3028 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 3029 } 3030 if (cs.bits.hdw.rsp_cnt_err) { 3031 rdc_stats->rsp_cnt_err++; 3032 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3033 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 3034 rxchan_fatal = B_TRUE; 3035 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3036 "==> nxge_rx_err_evnts(channel %d): " 3037 "rsp_cnt_err", channel)); 3038 } 3039 if (cs.bits.hdw.byte_en_bus) { 3040 rdc_stats->byte_en_bus++; 3041 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3042 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 3043 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3044 "==> nxge_rx_err_evnts(channel %d): " 3045 "fatal error: byte_en_bus", channel)); 3046 rxchan_fatal = B_TRUE; 3047 } 3048 if (cs.bits.hdw.rsp_dat_err) { 3049 rdc_stats->rsp_dat_err++; 3050 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3051 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 3052 rxchan_fatal = B_TRUE; 3053 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3054 "==> nxge_rx_err_evnts(channel %d): " 3055 "fatal error: rsp_dat_err", channel)); 3056 } 3057 if (cs.bits.hdw.rcr_ack_err) { 3058 rdc_stats->rcr_ack_err++; 3059 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3060 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 3061 rxchan_fatal = B_TRUE; 3062 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3063 "==> nxge_rx_err_evnts(channel %d): " 3064 "fatal error: rcr_ack_err", channel)); 3065 } 3066 if (cs.bits.hdw.dc_fifo_err) { 3067 rdc_stats->dc_fifo_err++; 3068 
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3069 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 3070 /* This is not a fatal error! */ 3071 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3072 "==> nxge_rx_err_evnts(channel %d): " 3073 "dc_fifo_err", channel)); 3074 rxport_fatal = B_TRUE; 3075 } 3076 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 3077 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 3078 &rdc_stats->errlog.pre_par, 3079 &rdc_stats->errlog.sha_par)) 3080 != NPI_SUCCESS) { 3081 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3082 "==> nxge_rx_err_evnts(channel %d): " 3083 "rcr_sha_par: get perr", channel)); 3084 return (NXGE_ERROR | rs); 3085 } 3086 if (cs.bits.hdw.rcr_sha_par) { 3087 rdc_stats->rcr_sha_par++; 3088 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3089 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 3090 rxchan_fatal = B_TRUE; 3091 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3092 "==> nxge_rx_err_evnts(channel %d): " 3093 "fatal error: rcr_sha_par", channel)); 3094 } 3095 if (cs.bits.hdw.rbr_pre_par) { 3096 rdc_stats->rbr_pre_par++; 3097 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3098 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 3099 rxchan_fatal = B_TRUE; 3100 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3101 "==> nxge_rx_err_evnts(channel %d): " 3102 "fatal error: rbr_pre_par", channel)); 3103 } 3104 } 3105 /* 3106 * The Following 4 status bits are for information, the system 3107 * is running fine. There is no need to send FMA ereports or 3108 * log messages. 3109 */ 3110 if (cs.bits.hdw.port_drop_pkt) { 3111 rdc_stats->port_drop_pkt++; 3112 } 3113 if (cs.bits.hdw.wred_drop) { 3114 rdc_stats->wred_drop++; 3115 } 3116 if (cs.bits.hdw.rbr_pre_empty) { 3117 rdc_stats->rbr_pre_empty++; 3118 } 3119 if (cs.bits.hdw.rcr_shadow_full) { 3120 rdc_stats->rcr_shadow_full++; 3121 } 3122 if (cs.bits.hdw.config_err) { 3123 rdc_stats->config_err++; 3124 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3125 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3126 rxchan_fatal = B_TRUE; 3127 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3128 "==> nxge_rx_err_evnts(channel %d): " 3129 "config error", channel)); 3130 } 3131 if (cs.bits.hdw.rcrincon) { 3132 rdc_stats->rcrincon++; 3133 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3134 NXGE_FM_EREPORT_RDMC_RCRINCON); 3135 rxchan_fatal = B_TRUE; 3136 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3137 "==> nxge_rx_err_evnts(channel %d): " 3138 "fatal error: rcrincon error", channel)); 3139 } 3140 if (cs.bits.hdw.rcrfull) { 3141 rdc_stats->rcrfull++; 3142 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3143 NXGE_FM_EREPORT_RDMC_RCRFULL); 3144 rxchan_fatal = B_TRUE; 3145 if (rdc_stats->rcrfull < error_disp_cnt) 3146 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3147 "==> nxge_rx_err_evnts(channel %d): " 3148 "fatal error: rcrfull error", channel)); 3149 } 3150 if (cs.bits.hdw.rbr_empty) { 3151 /* 3152 * This bit is for information, there is no need 3153 * send FMA ereport or log a message. 
3154 */ 3155 rdc_stats->rbr_empty++; 3156 } 3157 if (cs.bits.hdw.rbrfull) { 3158 rdc_stats->rbrfull++; 3159 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3160 NXGE_FM_EREPORT_RDMC_RBRFULL); 3161 rxchan_fatal = B_TRUE; 3162 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3163 "==> nxge_rx_err_evnts(channel %d): " 3164 "fatal error: rbr_full error", channel)); 3165 } 3166 if (cs.bits.hdw.rbrlogpage) { 3167 rdc_stats->rbrlogpage++; 3168 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3169 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3170 rxchan_fatal = B_TRUE; 3171 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3172 "==> nxge_rx_err_evnts(channel %d): " 3173 "fatal error: rbr logical page error", channel)); 3174 } 3175 if (cs.bits.hdw.cfiglogpage) { 3176 rdc_stats->cfiglogpage++; 3177 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3178 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3179 rxchan_fatal = B_TRUE; 3180 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3181 "==> nxge_rx_err_evnts(channel %d): " 3182 "fatal error: cfig logical page error", channel)); 3183 } 3184 3185 if (rxport_fatal) { 3186 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3187 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3188 portn)); 3189 if (isLDOMguest(nxgep)) { 3190 status = NXGE_ERROR; 3191 } else { 3192 status = nxge_ipp_fatal_err_recover(nxgep); 3193 if (status == NXGE_OK) { 3194 FM_SERVICE_RESTORED(nxgep); 3195 } 3196 } 3197 } 3198 3199 if (rxchan_fatal) { 3200 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3201 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3202 channel)); 3203 if (isLDOMguest(nxgep)) { 3204 status = NXGE_ERROR; 3205 } else { 3206 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3207 if (status == NXGE_OK) { 3208 FM_SERVICE_RESTORED(nxgep); 3209 } 3210 } 3211 } 3212 3213 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3214 3215 return (status); 3216 } 3217 3218 /* 3219 * nxge_rdc_hvio_setup 3220 * 3221 * This code appears to setup some Hypervisor variables. 3222 * 3223 * Arguments: 3224 * nxgep 3225 * channel 3226 * 3227 * Notes: 3228 * What does NIU_LP_WORKAROUND mean? 
3229 * 3230 * NPI/NXGE function calls: 3231 * na 3232 * 3233 * Context: 3234 * Any domain 3235 */ 3236 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3237 static void 3238 nxge_rdc_hvio_setup( 3239 nxge_t *nxgep, int channel) 3240 { 3241 nxge_dma_common_t *dma_common; 3242 nxge_dma_common_t *dma_control; 3243 rx_rbr_ring_t *ring; 3244 3245 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3246 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3247 3248 ring->hv_set = B_FALSE; 3249 3250 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3251 dma_common->orig_ioaddr_pp; 3252 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3253 dma_common->orig_alength; 3254 3255 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3256 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3257 channel, ring->hv_rx_buf_base_ioaddr_pp, 3258 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3259 dma_common->orig_alength, dma_common->orig_alength)); 3260 3261 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3262 3263 ring->hv_rx_cntl_base_ioaddr_pp = 3264 (uint64_t)dma_control->orig_ioaddr_pp; 3265 ring->hv_rx_cntl_ioaddr_size = 3266 (uint64_t)dma_control->orig_alength; 3267 3268 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3269 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3270 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3271 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3272 dma_control->orig_alength, dma_control->orig_alength)); 3273 } 3274 #endif 3275 3276 /* 3277 * nxge_map_rxdma 3278 * 3279 * Map an RDC into our kernel space. 3280 * 3281 * Arguments: 3282 * nxgep 3283 * channel The channel to map. 3284 * 3285 * Notes: 3286 * 1. Allocate & initialise a memory pool, if necessary. 3287 * 2. Allocate however many receive buffers are required. 3288 * 3. Setup buffers, descriptors, and mailbox. 3289 * 3290 * NPI/NXGE function calls: 3291 * nxge_alloc_rx_mem_pool() 3292 * nxge_alloc_rbb() 3293 * nxge_map_rxdma_channel() 3294 * 3295 * Registers accessed: 3296 * 3297 * Context: 3298 * Any domain 3299 */ 3300 static nxge_status_t 3301 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3302 { 3303 nxge_dma_common_t **data; 3304 nxge_dma_common_t **control; 3305 rx_rbr_ring_t **rbr_ring; 3306 rx_rcr_ring_t **rcr_ring; 3307 rx_mbox_t **mailbox; 3308 uint32_t chunks; 3309 3310 nxge_status_t status; 3311 3312 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3313 3314 if (!nxgep->rx_buf_pool_p) { 3315 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3316 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3317 "<== nxge_map_rxdma: buf not allocated")); 3318 return (NXGE_ERROR); 3319 } 3320 } 3321 3322 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3323 return (NXGE_ERROR); 3324 3325 /* 3326 * Map descriptors from the buffer polls for each dma channel. 3327 */ 3328 3329 /* 3330 * Set up and prepare buffer blocks, descriptors 3331 * and mailbox. 
3332 */ 3333 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3334 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3335 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3336 3337 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3338 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3339 3340 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3341 3342 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3343 chunks, control, rcr_ring, mailbox); 3344 if (status != NXGE_OK) { 3345 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3346 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3347 "returned 0x%x", 3348 channel, status)); 3349 return (status); 3350 } 3351 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3352 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3353 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3354 &nxgep->statsp->rdc_stats[channel]; 3355 3356 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3357 if (!isLDOMguest(nxgep)) 3358 nxge_rdc_hvio_setup(nxgep, channel); 3359 #endif 3360 3361 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3362 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3363 3364 return (status); 3365 } 3366 3367 static void 3368 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3369 { 3370 rx_rbr_ring_t *rbr_ring; 3371 rx_rcr_ring_t *rcr_ring; 3372 rx_mbox_t *mailbox; 3373 3374 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3375 3376 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3377 !nxgep->rx_mbox_areas_p) 3378 return; 3379 3380 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3381 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3382 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3383 3384 if (!rbr_ring || !rcr_ring || !mailbox) 3385 return; 3386 3387 (void) nxge_unmap_rxdma_channel( 3388 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3389 3390 nxge_free_rxb(nxgep, channel); 3391 3392 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3393 } 3394 3395 nxge_status_t 3396 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3397 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3398 uint32_t num_chunks, 3399 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3400 p_rx_mbox_t *rx_mbox_p) 3401 { 3402 int status = NXGE_OK; 3403 3404 /* 3405 * Set up and prepare buffer blocks, descriptors 3406 * and mailbox. 3407 */ 3408 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3409 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3410 /* 3411 * Receive buffer blocks 3412 */ 3413 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3414 dma_buf_p, rbr_p, num_chunks); 3415 if (status != NXGE_OK) { 3416 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3417 "==> nxge_map_rxdma_channel (channel %d): " 3418 "map buffer failed 0x%x", channel, status)); 3419 goto nxge_map_rxdma_channel_exit; 3420 } 3421 3422 /* 3423 * Receive block ring, completion ring and mailbox. 
3424 */ 3425 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3426 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3427 if (status != NXGE_OK) { 3428 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3429 "==> nxge_map_rxdma_channel (channel %d): " 3430 "map config failed 0x%x", channel, status)); 3431 goto nxge_map_rxdma_channel_fail2; 3432 } 3433 3434 goto nxge_map_rxdma_channel_exit; 3435 3436 nxge_map_rxdma_channel_fail3: 3437 /* Free rbr, rcr */ 3438 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3439 "==> nxge_map_rxdma_channel: free rbr/rcr " 3440 "(status 0x%x channel %d)", 3441 status, channel)); 3442 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3443 *rcr_p, *rx_mbox_p); 3444 3445 nxge_map_rxdma_channel_fail2: 3446 /* Free buffer blocks */ 3447 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3448 "==> nxge_map_rxdma_channel: free rx buffers" 3449 "(nxgep 0x%x status 0x%x channel %d)", 3450 nxgep, status, channel)); 3451 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3452 3453 status = NXGE_ERROR; 3454 3455 nxge_map_rxdma_channel_exit: 3456 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3457 "<== nxge_map_rxdma_channel: " 3458 "(nxgep 0x%x status 0x%x channel %d)", 3459 nxgep, status, channel)); 3460 3461 return (status); 3462 } 3463 3464 /*ARGSUSED*/ 3465 static void 3466 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3467 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3468 { 3469 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3470 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3471 3472 /* 3473 * unmap receive block ring, completion ring and mailbox. 3474 */ 3475 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3476 rcr_p, rx_mbox_p); 3477 3478 /* unmap buffer blocks */ 3479 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3480 3481 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3482 } 3483 3484 /*ARGSUSED*/ 3485 static nxge_status_t 3486 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3487 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3488 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3489 { 3490 p_rx_rbr_ring_t rbrp; 3491 p_rx_rcr_ring_t rcrp; 3492 p_rx_mbox_t mboxp; 3493 p_nxge_dma_common_t cntl_dmap; 3494 p_nxge_dma_common_t dmap; 3495 p_rx_msg_t *rx_msg_ring; 3496 p_rx_msg_t rx_msg_p; 3497 p_rbr_cfig_a_t rcfga_p; 3498 p_rbr_cfig_b_t rcfgb_p; 3499 p_rcrcfig_a_t cfga_p; 3500 p_rcrcfig_b_t cfgb_p; 3501 p_rxdma_cfig1_t cfig1_p; 3502 p_rxdma_cfig2_t cfig2_p; 3503 p_rbr_kick_t kick_p; 3504 uint32_t dmaaddrp; 3505 uint32_t *rbr_vaddrp; 3506 uint32_t bkaddr; 3507 nxge_status_t status = NXGE_OK; 3508 int i; 3509 uint32_t nxge_port_rcr_size; 3510 3511 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3512 "==> nxge_map_rxdma_channel_cfg_ring")); 3513 3514 cntl_dmap = *dma_cntl_p; 3515 3516 /* Map in the receive block ring */ 3517 rbrp = *rbr_p; 3518 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3519 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3520 /* 3521 * Zero out buffer block ring descriptors. 
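 * Each descriptor is simply a 32-bit block address: the buffer's DMA
 * address shifted right by RBR_BKADDR_SHIFT, as written into
 * *rbr_vaddrp in the loop below.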
3522 */ 3523 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3524 3525 rcfga_p = &(rbrp->rbr_cfga); 3526 rcfgb_p = &(rbrp->rbr_cfgb); 3527 kick_p = &(rbrp->rbr_kick); 3528 rcfga_p->value = 0; 3529 rcfgb_p->value = 0; 3530 kick_p->value = 0; 3531 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3532 rcfga_p->value = (rbrp->rbr_addr & 3533 (RBR_CFIG_A_STDADDR_MASK | 3534 RBR_CFIG_A_STDADDR_BASE_MASK)); 3535 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3536 3537 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3538 rcfgb_p->bits.ldw.vld0 = 1; 3539 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3540 rcfgb_p->bits.ldw.vld1 = 1; 3541 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3542 rcfgb_p->bits.ldw.vld2 = 1; 3543 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3544 3545 /* 3546 * For each buffer block, enter receive block address to the ring. 3547 */ 3548 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3549 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3550 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3551 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3552 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3553 3554 rx_msg_ring = rbrp->rx_msg_ring; 3555 for (i = 0; i < rbrp->tnblocks; i++) { 3556 rx_msg_p = rx_msg_ring[i]; 3557 rx_msg_p->nxgep = nxgep; 3558 rx_msg_p->rx_rbr_p = rbrp; 3559 bkaddr = (uint32_t) 3560 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3561 >> RBR_BKADDR_SHIFT)); 3562 rx_msg_p->free = B_FALSE; 3563 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3564 3565 *rbr_vaddrp++ = bkaddr; 3566 } 3567 3568 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3569 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3570 3571 rbrp->rbr_rd_index = 0; 3572 3573 rbrp->rbr_consumed = 0; 3574 rbrp->rbr_use_bcopy = B_TRUE; 3575 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3576 /* 3577 * Do bcopy on packets greater than bcopy size once 3578 * the lo threshold is reached. 3579 * This lo threshold should be less than the hi threshold. 3580 * 3581 * Do bcopy on every packet once the hi threshold is reached. 
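 *
 * The thresholds below are expressed as a fraction of the posted block
 * count, roughly rbb_max * level / NXGE_RX_BCOPY_SCALE;
 * NXGE_RX_COPY_NONE disables bcopy entirely, while NXGE_RX_COPY_ALL
 * forces a threshold of 0, i.e. copy every packet.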
3582 */ 3583 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3584 /* default it to use hi */ 3585 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3586 } 3587 3588 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3589 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3590 } 3591 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3592 3593 switch (nxge_rx_threshold_hi) { 3594 default: 3595 case NXGE_RX_COPY_NONE: 3596 /* Do not do bcopy at all */ 3597 rbrp->rbr_use_bcopy = B_FALSE; 3598 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3599 break; 3600 3601 case NXGE_RX_COPY_1: 3602 case NXGE_RX_COPY_2: 3603 case NXGE_RX_COPY_3: 3604 case NXGE_RX_COPY_4: 3605 case NXGE_RX_COPY_5: 3606 case NXGE_RX_COPY_6: 3607 case NXGE_RX_COPY_7: 3608 rbrp->rbr_threshold_hi = 3609 rbrp->rbb_max * 3610 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3611 break; 3612 3613 case NXGE_RX_COPY_ALL: 3614 rbrp->rbr_threshold_hi = 0; 3615 break; 3616 } 3617 3618 switch (nxge_rx_threshold_lo) { 3619 default: 3620 case NXGE_RX_COPY_NONE: 3621 /* Do not do bcopy at all */ 3622 if (rbrp->rbr_use_bcopy) { 3623 rbrp->rbr_use_bcopy = B_FALSE; 3624 } 3625 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3626 break; 3627 3628 case NXGE_RX_COPY_1: 3629 case NXGE_RX_COPY_2: 3630 case NXGE_RX_COPY_3: 3631 case NXGE_RX_COPY_4: 3632 case NXGE_RX_COPY_5: 3633 case NXGE_RX_COPY_6: 3634 case NXGE_RX_COPY_7: 3635 rbrp->rbr_threshold_lo = 3636 rbrp->rbb_max * 3637 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3638 break; 3639 3640 case NXGE_RX_COPY_ALL: 3641 rbrp->rbr_threshold_lo = 0; 3642 break; 3643 } 3644 3645 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3646 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3647 "rbb_max %d " 3648 "rbrp->rbr_bufsize_type %d " 3649 "rbb_threshold_hi %d " 3650 "rbb_threshold_lo %d", 3651 dma_channel, 3652 rbrp->rbb_max, 3653 rbrp->rbr_bufsize_type, 3654 rbrp->rbr_threshold_hi, 3655 rbrp->rbr_threshold_lo)); 3656 3657 rbrp->page_valid.value = 0; 3658 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3659 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3660 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3661 rbrp->page_hdl.value = 0; 3662 3663 rbrp->page_valid.bits.ldw.page0 = 1; 3664 rbrp->page_valid.bits.ldw.page1 = 1; 3665 3666 /* Map in the receive completion ring */ 3667 rcrp = (p_rx_rcr_ring_t) 3668 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3669 rcrp->rdc = dma_channel; 3670 3671 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3672 rcrp->comp_size = nxge_port_rcr_size; 3673 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3674 3675 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3676 3677 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3678 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3679 sizeof (rcr_entry_t)); 3680 rcrp->comp_rd_index = 0; 3681 rcrp->comp_wt_index = 0; 3682 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3683 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3684 #if defined(__i386) 3685 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3686 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3687 #else 3688 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3689 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3690 #endif 3691 3692 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3693 (nxge_port_rcr_size - 1); 3694 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3695 (nxge_port_rcr_size - 1); 3696 3697 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3698 "==> nxge_map_rxdma_channel_cfg_ring: " 3699 "channel %d " 3700 "rbr_vaddrp $%p " 3701 "rcr_desc_rd_head_p $%p " 3702 "rcr_desc_rd_head_pp 
$%p " 3703 "rcr_desc_rd_last_p $%p " 3704 "rcr_desc_rd_last_pp $%p ", 3705 dma_channel, 3706 rbr_vaddrp, 3707 rcrp->rcr_desc_rd_head_p, 3708 rcrp->rcr_desc_rd_head_pp, 3709 rcrp->rcr_desc_last_p, 3710 rcrp->rcr_desc_last_pp)); 3711 3712 /* 3713 * Zero out buffer block ring descriptors. 3714 */ 3715 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3716 3717 rcrp->intr_timeout = (nxgep->intr_timeout < 3718 NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3719 nxgep->intr_timeout; 3720 3721 rcrp->intr_threshold = (nxgep->intr_threshold < 3722 NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 3723 nxgep->intr_threshold; 3724 3725 rcrp->full_hdr_flag = B_FALSE; 3726 rcrp->sw_priv_hdr_len = 0; 3727 3728 cfga_p = &(rcrp->rcr_cfga); 3729 cfgb_p = &(rcrp->rcr_cfgb); 3730 cfga_p->value = 0; 3731 cfgb_p->value = 0; 3732 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3733 cfga_p->value = (rcrp->rcr_addr & 3734 (RCRCFIG_A_STADDR_MASK | 3735 RCRCFIG_A_STADDR_BASE_MASK)); 3736 3737 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3738 RCRCFIG_A_LEN_SHIF); 3739 3740 /* 3741 * Timeout should be set based on the system clock divider. 3742 * A timeout value of 1 assumes that the 3743 * granularity (1000) is 3 microseconds running at 300MHz. 3744 */ 3745 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3746 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3747 cfgb_p->bits.ldw.entout = 1; 3748 3749 /* Map in the mailbox */ 3750 mboxp = (p_rx_mbox_t) 3751 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3752 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3753 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3754 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3755 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3756 cfig1_p->value = cfig2_p->value = 0; 3757 3758 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3759 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3760 "==> nxge_map_rxdma_channel_cfg_ring: " 3761 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3762 dma_channel, cfig1_p->value, cfig2_p->value, 3763 mboxp->mbox_addr)); 3764 3765 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3766 & 0xfff); 3767 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3768 3769 3770 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3771 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3772 RXDMA_CFIG2_MBADDR_L_MASK); 3773 3774 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3775 3776 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3777 "==> nxge_map_rxdma_channel_cfg_ring: " 3778 "channel %d damaddrp $%p " 3779 "cfg1 0x%016llx cfig2 0x%016llx", 3780 dma_channel, dmaaddrp, 3781 cfig1_p->value, cfig2_p->value)); 3782 3783 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3784 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3785 3786 rbrp->rx_rcr_p = rcrp; 3787 rcrp->rx_rbr_p = rbrp; 3788 *rcr_p = rcrp; 3789 *rx_mbox_p = mboxp; 3790 3791 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3792 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3793 3794 return (status); 3795 } 3796 3797 /*ARGSUSED*/ 3798 static void 3799 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3800 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3801 { 3802 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3803 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3804 rcr_p->rdc)); 3805 3806 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3807 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3808 3809 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3810 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3811 } 3812 3813 static nxge_status_t 3814 
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3815 p_nxge_dma_common_t *dma_buf_p, 3816 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3817 { 3818 p_rx_rbr_ring_t rbrp; 3819 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3820 p_rx_msg_t *rx_msg_ring; 3821 p_rx_msg_t rx_msg_p; 3822 p_mblk_t mblk_p; 3823 3824 rxring_info_t *ring_info; 3825 nxge_status_t status = NXGE_OK; 3826 int i, j, index; 3827 uint32_t size, bsize, nblocks, nmsgs; 3828 3829 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3830 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3831 channel)); 3832 3833 dma_bufp = tmp_bufp = *dma_buf_p; 3834 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3835 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3836 "chunks bufp 0x%016llx", 3837 channel, num_chunks, dma_bufp)); 3838 3839 nmsgs = 0; 3840 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3841 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3842 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3843 "bufp 0x%016llx nblocks %d nmsgs %d", 3844 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3845 nmsgs += tmp_bufp->nblocks; 3846 } 3847 if (!nmsgs) { 3848 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3849 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3850 "no msg blocks", 3851 channel)); 3852 status = NXGE_ERROR; 3853 goto nxge_map_rxdma_channel_buf_ring_exit; 3854 } 3855 3856 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3857 3858 size = nmsgs * sizeof (p_rx_msg_t); 3859 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3860 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3861 KM_SLEEP); 3862 3863 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3864 (void *)nxgep->interrupt_cookie); 3865 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3866 (void *)nxgep->interrupt_cookie); 3867 rbrp->rdc = channel; 3868 rbrp->num_blocks = num_chunks; 3869 rbrp->tnblocks = nmsgs; 3870 rbrp->rbb_max = nmsgs; 3871 rbrp->rbr_max_size = nmsgs; 3872 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3873 3874 /* 3875 * Buffer sizes suggested by NIU architect. 3876 * 256, 512 and 2K. 3877 */ 3878 3879 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3880 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3881 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3882 3883 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3884 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3885 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3886 3887 rbrp->block_size = nxgep->rx_default_block_size; 3888 3889 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3890 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3891 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3892 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3893 } else { 3894 if (rbrp->block_size >= 0x2000) { 3895 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3896 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3897 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3898 } else { 3899 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3900 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3901 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3902 } 3903 } 3904 3905 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3906 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3907 "actual rbr max %d rbb_max %d nmsgs %d " 3908 "rbrp->block_size %d default_block_size %d " 3909 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3910 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3911 rbrp->block_size, nxgep->rx_default_block_size, 3912 nxge_rbr_size, nxge_rbr_spare_size)); 3913 3914 /* Map in buffers from the buffer pool. 
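 * For each DMA chunk, ring_info->buffer[] records the chunk's DVMA base
 * address, kernel address, size and starting block index; this is the
 * lookup information that nxge_rxbuf_index_info_init() prepares so that
 * nxge_rxbuf_pp_to_vp() can later translate the packet address in an
 * RCR entry back to a kernel address and message index.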
*/ 3915 index = 0; 3916 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3917 bsize = dma_bufp->block_size; 3918 nblocks = dma_bufp->nblocks; 3919 #if defined(__i386) 3920 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3921 #else 3922 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3923 #endif 3924 ring_info->buffer[i].buf_index = i; 3925 ring_info->buffer[i].buf_size = dma_bufp->alength; 3926 ring_info->buffer[i].start_index = index; 3927 #if defined(__i386) 3928 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3929 #else 3930 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3931 #endif 3932 3933 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3934 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3935 "chunk %d" 3936 " nblocks %d chunk_size %x block_size 0x%x " 3937 "dma_bufp $%p", channel, i, 3938 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3939 dma_bufp)); 3940 3941 for (j = 0; j < nblocks; j++) { 3942 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3943 dma_bufp)) == NULL) { 3944 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3945 "allocb failed (index %d i %d j %d)", 3946 index, i, j)); 3947 goto nxge_map_rxdma_channel_buf_ring_fail1; 3948 } 3949 rx_msg_ring[index] = rx_msg_p; 3950 rx_msg_p->block_index = index; 3951 rx_msg_p->shifted_addr = (uint32_t) 3952 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3953 RBR_BKADDR_SHIFT)); 3954 3955 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3956 "index %d j %d rx_msg_p $%p mblk %p", 3957 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3958 3959 mblk_p = rx_msg_p->rx_mblk_p; 3960 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3961 3962 rbrp->rbr_ref_cnt++; 3963 index++; 3964 rx_msg_p->buf_dma.dma_channel = channel; 3965 } 3966 3967 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3968 if (dma_bufp->contig_alloc_type) { 3969 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3970 } 3971 3972 if (dma_bufp->kmem_alloc_type) { 3973 rbrp->rbr_alloc_type = KMEM_ALLOC; 3974 } 3975 3976 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3977 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3978 "chunk %d" 3979 " nblocks %d chunk_size %x block_size 0x%x " 3980 "dma_bufp $%p", 3981 channel, i, 3982 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3983 dma_bufp)); 3984 } 3985 if (i < rbrp->num_blocks) { 3986 goto nxge_map_rxdma_channel_buf_ring_fail1; 3987 } 3988 3989 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3990 "nxge_map_rxdma_channel_buf_ring: done buf init " 3991 "channel %d msg block entries %d", 3992 channel, index)); 3993 ring_info->block_size_mask = bsize - 1; 3994 rbrp->rx_msg_ring = rx_msg_ring; 3995 rbrp->dma_bufp = dma_buf_p; 3996 rbrp->ring_info = ring_info; 3997 3998 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3999 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4000 " nxge_map_rxdma_channel_buf_ring: " 4001 "channel %d done buf info init", channel)); 4002 4003 /* 4004 * Finally, permit nxge_freeb() to call nxge_post_page(). 
4005 */ 4006 rbrp->rbr_state = RBR_POSTING; 4007 4008 *rbr_p = rbrp; 4009 goto nxge_map_rxdma_channel_buf_ring_exit; 4010 4011 nxge_map_rxdma_channel_buf_ring_fail1: 4012 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4013 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 4014 channel, status)); 4015 4016 index--; 4017 for (; index >= 0; index--) { 4018 rx_msg_p = rx_msg_ring[index]; 4019 if (rx_msg_p != NULL) { 4020 freeb(rx_msg_p->rx_mblk_p); 4021 rx_msg_ring[index] = NULL; 4022 } 4023 } 4024 nxge_map_rxdma_channel_buf_ring_fail: 4025 MUTEX_DESTROY(&rbrp->post_lock); 4026 MUTEX_DESTROY(&rbrp->lock); 4027 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4028 KMEM_FREE(rx_msg_ring, size); 4029 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 4030 4031 status = NXGE_ERROR; 4032 4033 nxge_map_rxdma_channel_buf_ring_exit: 4034 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4035 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 4036 4037 return (status); 4038 } 4039 4040 /*ARGSUSED*/ 4041 static void 4042 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 4043 p_rx_rbr_ring_t rbr_p) 4044 { 4045 p_rx_msg_t *rx_msg_ring; 4046 p_rx_msg_t rx_msg_p; 4047 rxring_info_t *ring_info; 4048 int i; 4049 uint32_t size; 4050 #ifdef NXGE_DEBUG 4051 int num_chunks; 4052 #endif 4053 4054 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4055 "==> nxge_unmap_rxdma_channel_buf_ring")); 4056 if (rbr_p == NULL) { 4057 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4058 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 4059 return; 4060 } 4061 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4062 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 4063 rbr_p->rdc)); 4064 4065 rx_msg_ring = rbr_p->rx_msg_ring; 4066 ring_info = rbr_p->ring_info; 4067 4068 if (rx_msg_ring == NULL || ring_info == NULL) { 4069 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4070 "<== nxge_unmap_rxdma_channel_buf_ring: " 4071 "rx_msg_ring $%p ring_info $%p", 4072 rx_msg_p, ring_info)); 4073 return; 4074 } 4075 4076 #ifdef NXGE_DEBUG 4077 num_chunks = rbr_p->num_blocks; 4078 #endif 4079 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 4080 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4081 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 4082 "tnblocks %d (max %d) size ptrs %d ", 4083 rbr_p->rdc, num_chunks, 4084 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 4085 4086 for (i = 0; i < rbr_p->tnblocks; i++) { 4087 rx_msg_p = rx_msg_ring[i]; 4088 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4089 " nxge_unmap_rxdma_channel_buf_ring: " 4090 "rx_msg_p $%p", 4091 rx_msg_p)); 4092 if (rx_msg_p != NULL) { 4093 freeb(rx_msg_p->rx_mblk_p); 4094 rx_msg_ring[i] = NULL; 4095 } 4096 } 4097 4098 /* 4099 * We no longer may use the mutex <post_lock>. By setting 4100 * <rbr_state> to anything but POSTING, we prevent 4101 * nxge_post_page() from accessing a dead mutex. 4102 */ 4103 rbr_p->rbr_state = RBR_UNMAPPING; 4104 MUTEX_DESTROY(&rbr_p->post_lock); 4105 4106 MUTEX_DESTROY(&rbr_p->lock); 4107 4108 if (rbr_p->rbr_ref_cnt == 0) { 4109 /* 4110 * This is the normal state of affairs. 4111 * Need to free the following buffers: 4112 * - data buffers 4113 * - rx_msg ring 4114 * - ring_info 4115 * - rbr ring 4116 */ 4117 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4118 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4119 nxge_rxdma_databuf_free(rbr_p); 4120 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4121 KMEM_FREE(rx_msg_ring, size); 4122 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4123 } else { 4124 /* 4125 * Some of our buffers are still being used. 4126 * Therefore, tell nxge_freeb() this ring is 4127 * unmapped, so it may free <rbr_p> for us. 
4128 */ 4129 rbr_p->rbr_state = RBR_UNMAPPED; 4130 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4131 "unmap_rxdma_buf_ring: %d %s outstanding.", 4132 rbr_p->rbr_ref_cnt, 4133 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 4134 } 4135 4136 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4137 "<== nxge_unmap_rxdma_channel_buf_ring")); 4138 } 4139 4140 /* 4141 * nxge_rxdma_hw_start_common 4142 * 4143 * Arguments: 4144 * nxgep 4145 * 4146 * Notes: 4147 * 4148 * NPI/NXGE function calls: 4149 * nxge_init_fzc_rx_common(); 4150 * nxge_init_fzc_rxdma_port(); 4151 * 4152 * Registers accessed: 4153 * 4154 * Context: 4155 * Service domain 4156 */ 4157 static nxge_status_t 4158 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 4159 { 4160 nxge_status_t status = NXGE_OK; 4161 4162 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4163 4164 /* 4165 * Load the sharable parameters by writing to the 4166 * function zero control registers. These FZC registers 4167 * should be initialized only once for the entire chip. 4168 */ 4169 (void) nxge_init_fzc_rx_common(nxgep); 4170 4171 /* 4172 * Initialize the RXDMA port specific FZC control configurations. 4173 * These FZC registers are pertaining to each port. 4174 */ 4175 (void) nxge_init_fzc_rxdma_port(nxgep); 4176 4177 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4178 4179 return (status); 4180 } 4181 4182 static nxge_status_t 4183 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 4184 { 4185 int i, ndmas; 4186 p_rx_rbr_rings_t rx_rbr_rings; 4187 p_rx_rbr_ring_t *rbr_rings; 4188 p_rx_rcr_rings_t rx_rcr_rings; 4189 p_rx_rcr_ring_t *rcr_rings; 4190 p_rx_mbox_areas_t rx_mbox_areas_p; 4191 p_rx_mbox_t *rx_mbox_p; 4192 nxge_status_t status = NXGE_OK; 4193 4194 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 4195 4196 rx_rbr_rings = nxgep->rx_rbr_rings; 4197 rx_rcr_rings = nxgep->rx_rcr_rings; 4198 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4199 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4200 "<== nxge_rxdma_hw_start: NULL ring pointers")); 4201 return (NXGE_ERROR); 4202 } 4203 ndmas = rx_rbr_rings->ndmas; 4204 if (ndmas == 0) { 4205 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4206 "<== nxge_rxdma_hw_start: no dma channel allocated")); 4207 return (NXGE_ERROR); 4208 } 4209 4210 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4211 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 4212 4213 rbr_rings = rx_rbr_rings->rbr_rings; 4214 rcr_rings = rx_rcr_rings->rcr_rings; 4215 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 4216 if (rx_mbox_areas_p) { 4217 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 4218 } 4219 4220 i = channel; 4221 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4222 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 4223 ndmas, channel)); 4224 status = nxge_rxdma_start_channel(nxgep, channel, 4225 (p_rx_rbr_ring_t)rbr_rings[i], 4226 (p_rx_rcr_ring_t)rcr_rings[i], 4227 (p_rx_mbox_t)rx_mbox_p[i]); 4228 if (status != NXGE_OK) { 4229 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4230 "==> nxge_rxdma_hw_start: disable " 4231 "(status 0x%x channel %d)", status, channel)); 4232 return (status); 4233 } 4234 4235 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 4236 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4237 rx_rbr_rings, rx_rcr_rings)); 4238 4239 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4240 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 4241 4242 return (status); 4243 } 4244 4245 static void 4246 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 4247 { 4248 p_rx_rbr_rings_t rx_rbr_rings; 4249 p_rx_rcr_rings_t rx_rcr_rings; 4250 4251 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
nxge_rxdma_hw_stop")); 4252 4253 rx_rbr_rings = nxgep->rx_rbr_rings; 4254 rx_rcr_rings = nxgep->rx_rcr_rings; 4255 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4256 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4257 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 4258 return; 4259 } 4260 4261 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4262 "==> nxge_rxdma_hw_stop(channel %d)", 4263 channel)); 4264 (void) nxge_rxdma_stop_channel(nxgep, channel); 4265 4266 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 4267 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4268 rx_rbr_rings, rx_rcr_rings)); 4269 4270 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 4271 } 4272 4273 4274 static nxge_status_t 4275 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 4276 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 4277 4278 { 4279 npi_handle_t handle; 4280 npi_status_t rs = NPI_SUCCESS; 4281 rx_dma_ctl_stat_t cs; 4282 rx_dma_ent_msk_t ent_mask; 4283 nxge_status_t status = NXGE_OK; 4284 4285 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 4286 4287 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4288 4289 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 4290 "npi handle addr $%p acc $%p", 4291 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4292 4293 /* Reset RXDMA channel, but not if you're a guest. */ 4294 if (!isLDOMguest(nxgep)) { 4295 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4296 if (rs != NPI_SUCCESS) { 4297 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4298 "==> nxge_init_fzc_rdc: " 4299 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4300 channel, rs)); 4301 return (NXGE_ERROR | rs); 4302 } 4303 4304 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4305 "==> nxge_rxdma_start_channel: reset done: channel %d", 4306 channel)); 4307 } 4308 4309 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4310 if (isLDOMguest(nxgep)) 4311 (void) nxge_rdc_lp_conf(nxgep, channel); 4312 #endif 4313 4314 /* 4315 * Initialize the RXDMA channel specific FZC control 4316 * configurations. These FZC registers are pertaining 4317 * to each RX channel (logical pages). 4318 */ 4319 if (!isLDOMguest(nxgep)) { 4320 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4321 if (status != NXGE_OK) { 4322 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4323 "==> nxge_rxdma_start_channel: " 4324 "init fzc rxdma failed (0x%08x channel %d)", 4325 status, channel)); 4326 return (status); 4327 } 4328 4329 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4330 "==> nxge_rxdma_start_channel: fzc done")); 4331 } 4332 4333 /* Set up the interrupt event masks. 
*/ 4334 ent_mask.value = 0; 4335 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 4336 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4337 &ent_mask);
4338 if (rs != NPI_SUCCESS) { 4339 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4340 "==> nxge_rxdma_start_channel: " 4341 "init rxdma event masks failed " 4342 "(0x%08x channel %d)", 4343 rs, channel)); 4344 return (NXGE_ERROR | rs); 4345 } 4346
4347 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4348 "==> nxge_rxdma_start_channel: " 4349 "event done: channel %d (mask 0x%016llx)", 4350 channel, ent_mask.value)); 4351
4352 /* Initialize the receive DMA control and status register */ 4353 cs.value = 0; 4354 cs.bits.hdw.mex = 1; 4355 cs.bits.hdw.rcrthres = 1; 4356 cs.bits.hdw.rcrto = 1; 4357 cs.bits.hdw.rbr_empty = 1;
4358 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4359 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4360 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
4361 if (status != NXGE_OK) { 4362 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4363 "==> nxge_rxdma_start_channel: " 4364 "init rxdma control register failed (0x%08x channel %d)", 4365 status, channel)); 4366 return (status); 4367 } 4368
4369 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4370 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4371
4372 /* 4373 * Load the RXDMA descriptors, buffers and mailbox, 4374 * then initialize the receive DMA channel and 4375 * enable it. 4376 */
4377 status = nxge_enable_rxdma_channel(nxgep, 4378 channel, rbr_p, rcr_p, mbox_p); 4379
4380 if (status != NXGE_OK) { 4381 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4382 " nxge_rxdma_start_channel: " 4383 " enable rxdma failed (0x%08x channel %d)", 4384 status, channel)); 4385 return (status); 4386 } 4387
4388 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4389 "==> nxge_rxdma_start_channel: enabled channel %d", channel)); 4390
4391 if (isLDOMguest(nxgep)) { 4392 /* Add interrupt handler for this channel.
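In an LDOMs guest the receive interrupt is registered through the hybrid I/O framework (nxge_hio_intr_add()); a failure here is logged but does not abort the channel start.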
*/ 4393 if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4394 != NXGE_OK) { 4395 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4396 " nxge_rxdma_start_channel: " 4397 " nxge_hio_intr_add failed (0x%08x channel %d)", 4398 status, channel)); 4399 } 4400 } 4401 4402 ent_mask.value = 0; 4403 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4404 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4405 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4406 &ent_mask); 4407 if (rs != NPI_SUCCESS) { 4408 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4409 "==> nxge_rxdma_start_channel: " 4410 "init rxdma event masks failed (0x%08x channel %d)", 4411 status, channel)); 4412 return (NXGE_ERROR | rs); 4413 } 4414 4415 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4416 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4417 4418 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4419 4420 return (NXGE_OK); 4421 } 4422 4423 static nxge_status_t 4424 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4425 { 4426 npi_handle_t handle; 4427 npi_status_t rs = NPI_SUCCESS; 4428 rx_dma_ctl_stat_t cs; 4429 rx_dma_ent_msk_t ent_mask; 4430 nxge_status_t status = NXGE_OK; 4431 4432 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4433 4434 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4435 4436 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4437 "npi handle addr $%p acc $%p", 4438 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4439 4440 if (!isLDOMguest(nxgep)) { 4441 /* 4442 * Stop RxMAC = A.9.2.6 4443 */ 4444 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4445 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4446 "nxge_rxdma_stop_channel: " 4447 "Failed to disable RxMAC")); 4448 } 4449 4450 /* 4451 * Drain IPP Port = A.9.3.6 4452 */ 4453 (void) nxge_ipp_drain(nxgep); 4454 } 4455 4456 /* Reset RXDMA channel */ 4457 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4458 if (rs != NPI_SUCCESS) { 4459 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4460 " nxge_rxdma_stop_channel: " 4461 " reset rxdma failed (0x%08x channel %d)", 4462 rs, channel)); 4463 return (NXGE_ERROR | rs); 4464 } 4465 4466 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4467 "==> nxge_rxdma_stop_channel: reset done")); 4468 4469 /* Set up the interrupt event masks. */ 4470 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4471 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4472 &ent_mask); 4473 if (rs != NPI_SUCCESS) { 4474 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4475 "==> nxge_rxdma_stop_channel: " 4476 "set rxdma event masks failed (0x%08x channel %d)", 4477 rs, channel)); 4478 return (NXGE_ERROR | rs); 4479 } 4480 4481 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4482 "==> nxge_rxdma_stop_channel: event done")); 4483 4484 /* 4485 * Initialize the receive DMA control and status register 4486 */ 4487 cs.value = 0; 4488 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4489 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4490 " to default (all 0s) 0x%08x", cs.value)); 4491 if (status != NXGE_OK) { 4492 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4493 " nxge_rxdma_stop_channel: init rxdma" 4494 " control register failed (0x%08x channel %d", 4495 status, channel)); 4496 return (status); 4497 } 4498 4499 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4500 "==> nxge_rxdma_stop_channel: control done")); 4501 4502 /* 4503 * Make sure channel is disabled. 
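* The channel was reset above; nxge_disable_rxdma_channel() makes the disable explicit so the channel stays down until nxge_rxdma_start_channel() runs again.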
4504 */ 4505 status = nxge_disable_rxdma_channel(nxgep, channel); 4506 4507 if (status != NXGE_OK) { 4508 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4509 " nxge_rxdma_stop_channel: " 4510 " init enable rxdma failed (0x%08x channel %d)", 4511 status, channel)); 4512 return (status); 4513 } 4514 4515 if (!isLDOMguest(nxgep)) { 4516 /* 4517 * Enable RxMAC = A.9.2.10 4518 */ 4519 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4520 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4521 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4522 } 4523 } 4524 4525 NXGE_DEBUG_MSG((nxgep, 4526 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4527 4528 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4529 4530 return (NXGE_OK); 4531 } 4532 4533 nxge_status_t 4534 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4535 { 4536 npi_handle_t handle; 4537 p_nxge_rdc_sys_stats_t statsp; 4538 rx_ctl_dat_fifo_stat_t stat; 4539 uint32_t zcp_err_status; 4540 uint32_t ipp_err_status; 4541 nxge_status_t status = NXGE_OK; 4542 npi_status_t rs = NPI_SUCCESS; 4543 boolean_t my_err = B_FALSE; 4544 4545 handle = nxgep->npi_handle; 4546 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4547 4548 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4549 4550 if (rs != NPI_SUCCESS) 4551 return (NXGE_ERROR | rs); 4552 4553 if (stat.bits.ldw.id_mismatch) { 4554 statsp->id_mismatch++; 4555 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4556 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4557 /* Global fatal error encountered */ 4558 } 4559 4560 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4561 switch (nxgep->mac.portnum) { 4562 case 0: 4563 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4564 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4565 my_err = B_TRUE; 4566 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4567 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4568 } 4569 break; 4570 case 1: 4571 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4572 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4573 my_err = B_TRUE; 4574 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4575 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4576 } 4577 break; 4578 case 2: 4579 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4580 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4581 my_err = B_TRUE; 4582 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4583 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4584 } 4585 break; 4586 case 3: 4587 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4588 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4589 my_err = B_TRUE; 4590 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4591 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4592 } 4593 break; 4594 default: 4595 return (NXGE_ERROR); 4596 } 4597 } 4598 4599 if (my_err) { 4600 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4601 zcp_err_status); 4602 if (status != NXGE_OK) 4603 return (status); 4604 } 4605 4606 return (NXGE_OK); 4607 } 4608 4609 static nxge_status_t 4610 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4611 uint32_t zcp_status) 4612 { 4613 boolean_t rxport_fatal = B_FALSE; 4614 p_nxge_rdc_sys_stats_t statsp; 4615 nxge_status_t status = NXGE_OK; 4616 uint8_t portn; 4617 4618 portn = nxgep->mac.portnum; 4619 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4620 4621 if (ipp_status & (0x1 << portn)) { 4622 statsp->ipp_eop_err++; 4623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4624 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4625 rxport_fatal = B_TRUE; 4626 } 4627 4628 if (zcp_status & (0x1 << 
portn)) { 4629 statsp->zcp_eop_err++; 4630 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4631 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4632 rxport_fatal = B_TRUE; 4633 } 4634 4635 if (rxport_fatal) { 4636 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4637 " nxge_rxdma_handle_port_error: " 4638 " fatal error on Port #%d\n", 4639 portn)); 4640 status = nxge_rx_port_fatal_err_recover(nxgep); 4641 if (status == NXGE_OK) { 4642 FM_SERVICE_RESTORED(nxgep); 4643 } 4644 } 4645 4646 return (status); 4647 } 4648 4649 static nxge_status_t 4650 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4651 { 4652 npi_handle_t handle; 4653 npi_status_t rs = NPI_SUCCESS; 4654 nxge_status_t status = NXGE_OK; 4655 p_rx_rbr_ring_t rbrp; 4656 p_rx_rcr_ring_t rcrp; 4657 p_rx_mbox_t mboxp; 4658 rx_dma_ent_msk_t ent_mask; 4659 p_nxge_dma_common_t dmap; 4660 int ring_idx; 4661 uint32_t ref_cnt; 4662 p_rx_msg_t rx_msg_p; 4663 int i; 4664 uint32_t nxge_port_rcr_size; 4665 4666 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4667 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4668 "Recovering from RxDMAChannel#%d error...", channel)); 4669 4670 /* 4671 * Stop the dma channel waits for the stop done. 4672 * If the stop done bit is not set, then create 4673 * an error. 4674 */ 4675 4676 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4677 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4678 4679 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4680 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4681 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4682 4683 MUTEX_ENTER(&rcrp->lock); 4684 MUTEX_ENTER(&rbrp->lock); 4685 MUTEX_ENTER(&rbrp->post_lock); 4686 4687 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4688 4689 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4690 if (rs != NPI_SUCCESS) { 4691 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4692 "nxge_disable_rxdma_channel:failed")); 4693 goto fail; 4694 } 4695 4696 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4697 4698 /* Disable interrupt */ 4699 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4700 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4701 if (rs != NPI_SUCCESS) { 4702 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4703 "nxge_rxdma_stop_channel: " 4704 "set rxdma event masks failed (channel %d)", 4705 channel)); 4706 } 4707 4708 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4709 4710 /* Reset RXDMA channel */ 4711 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4712 if (rs != NPI_SUCCESS) { 4713 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4714 "nxge_rxdma_fatal_err_recover: " 4715 " reset rxdma failed (channel %d)", channel)); 4716 goto fail; 4717 } 4718 4719 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4720 4721 mboxp = 4722 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4723 4724 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4725 rbrp->rbr_rd_index = 0; 4726 4727 rcrp->comp_rd_index = 0; 4728 rcrp->comp_wt_index = 0; 4729 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4730 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4731 #if defined(__i386) 4732 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4733 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4734 #else 4735 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4736 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4737 #endif 4738 4739 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4740 (nxge_port_rcr_size - 1); 4741 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4742 (nxge_port_rcr_size - 1); 4743 
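/*
 * Zero the RCR descriptor area, then walk the RBR's rx_msg ring: buffers
 * that still carry extra references but have reached their maximum usage
 * count are marked free so they can be re-posted when the channel is
 * restarted below; any other outstanding buffer is reported.
 */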
4744 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4745 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4746 4747 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4748 4749 for (i = 0; i < rbrp->rbr_max_size; i++) { 4750 rx_msg_p = rbrp->rx_msg_ring[i]; 4751 ref_cnt = rx_msg_p->ref_cnt; 4752 if (ref_cnt != 1) { 4753 if (rx_msg_p->cur_usage_cnt != 4754 rx_msg_p->max_usage_cnt) { 4755 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4756 "buf[%d]: cur_usage_cnt = %d " 4757 "max_usage_cnt = %d\n", i, 4758 rx_msg_p->cur_usage_cnt, 4759 rx_msg_p->max_usage_cnt)); 4760 } else { 4761 /* Buffer can be re-posted */ 4762 rx_msg_p->free = B_TRUE; 4763 rx_msg_p->cur_usage_cnt = 0; 4764 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4765 rx_msg_p->pkt_buf_size = 0; 4766 } 4767 } 4768 } 4769 4770 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4771 4772 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4773 if (status != NXGE_OK) { 4774 goto fail; 4775 } 4776 4777 MUTEX_EXIT(&rbrp->post_lock); 4778 MUTEX_EXIT(&rbrp->lock); 4779 MUTEX_EXIT(&rcrp->lock); 4780 4781 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4782 "Recovery Successful, RxDMAChannel#%d Restored", 4783 channel)); 4784 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4785 4786 return (NXGE_OK); 4787 fail: 4788 MUTEX_EXIT(&rbrp->post_lock); 4789 MUTEX_EXIT(&rbrp->lock); 4790 MUTEX_EXIT(&rcrp->lock); 4791 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4792 4793 return (NXGE_ERROR | rs); 4794 } 4795 4796 nxge_status_t 4797 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4798 { 4799 nxge_grp_set_t *set = &nxgep->rx_set; 4800 nxge_status_t status = NXGE_OK; 4801 int rdc; 4802 4803 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4804 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4805 "Recovering from RxPort error...")); 4806 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4807 4808 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4809 goto fail; 4810 4811 NXGE_DELAY(1000); 4812 4813 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4814 4815 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4816 if ((1 << rdc) & set->owned.map) { 4817 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4818 != NXGE_OK) { 4819 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4820 "Could not recover channel %d", rdc)); 4821 } 4822 } 4823 } 4824 4825 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4826 4827 /* Reset IPP */ 4828 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4829 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4830 "nxge_rx_port_fatal_err_recover: " 4831 "Failed to reset IPP")); 4832 goto fail; 4833 } 4834 4835 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4836 4837 /* Reset RxMAC */ 4838 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4839 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4840 "nxge_rx_port_fatal_err_recover: " 4841 "Failed to reset RxMAC")); 4842 goto fail; 4843 } 4844 4845 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4846 4847 /* Re-Initialize IPP */ 4848 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4849 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4850 "nxge_rx_port_fatal_err_recover: " 4851 "Failed to init IPP")); 4852 goto fail; 4853 } 4854 4855 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4856 4857 /* Re-Initialize RxMAC */ 4858 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4859 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4860 "nxge_rx_port_fatal_err_recover: " 4861 "Failed to reset RxMAC")); 4862 goto fail; 4863 } 4864 4865 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4866 4867 /* 
Re-enable RxMAC */ 4868 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4869 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4870 "nxge_rx_port_fatal_err_recover: " 4871 "Failed to enable RxMAC")); 4872 goto fail; 4873 } 4874 4875 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4876 "Recovery Successful, RxPort Restored")); 4877 4878 return (NXGE_OK); 4879 fail: 4880 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4881 return (status); 4882 } 4883 4884 void 4885 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4886 { 4887 rx_dma_ctl_stat_t cs; 4888 rx_ctl_dat_fifo_stat_t cdfs; 4889 4890 switch (err_id) { 4891 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4892 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4893 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4894 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4895 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4896 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4897 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4898 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4899 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4900 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4901 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4902 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4903 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4904 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4905 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4906 chan, &cs.value); 4907 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4908 cs.bits.hdw.rcr_ack_err = 1; 4909 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4910 cs.bits.hdw.dc_fifo_err = 1; 4911 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4912 cs.bits.hdw.rcr_sha_par = 1; 4913 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4914 cs.bits.hdw.rbr_pre_par = 1; 4915 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4916 cs.bits.hdw.rbr_tmout = 1; 4917 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4918 cs.bits.hdw.rsp_cnt_err = 1; 4919 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4920 cs.bits.hdw.byte_en_bus = 1; 4921 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4922 cs.bits.hdw.rsp_dat_err = 1; 4923 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4924 cs.bits.hdw.config_err = 1; 4925 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4926 cs.bits.hdw.rcrincon = 1; 4927 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4928 cs.bits.hdw.rcrfull = 1; 4929 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4930 cs.bits.hdw.rbrfull = 1; 4931 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4932 cs.bits.hdw.rbrlogpage = 1; 4933 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4934 cs.bits.hdw.cfiglogpage = 1; 4935 #if defined(__i386) 4936 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4937 cs.value); 4938 #else 4939 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4940 cs.value); 4941 #endif 4942 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4943 chan, cs.value); 4944 break; 4945 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4946 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4947 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4948 cdfs.value = 0; 4949 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4950 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4951 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4952 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4953 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4954 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4955 #if defined(__i386) 4956 cmn_err(CE_NOTE, 4957 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4958 cdfs.value); 4959 #else 4960 cmn_err(CE_NOTE, 4961 "!Write 
0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4962 cdfs.value); 4963 #endif 4964 NXGE_REG_WR64(nxgep->npi_handle, 4965 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4966 break; 4967 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4968 break; 4969 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4970 break; 4971 } 4972 } 4973 4974 static void 4975 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4976 { 4977 rxring_info_t *ring_info; 4978 int index; 4979 uint32_t chunk_size; 4980 uint64_t kaddr; 4981 uint_t num_blocks; 4982 4983 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4984 4985 if (rbr_p == NULL) { 4986 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4987 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4988 return; 4989 } 4990 4991 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4992 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4993 "==> nxge_rxdma_databuf_free: DDI")); 4994 return; 4995 } 4996 4997 ring_info = rbr_p->ring_info; 4998 if (ring_info == NULL) { 4999 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5000 "==> nxge_rxdma_databuf_free: NULL ring info")); 5001 return; 5002 } 5003 num_blocks = rbr_p->num_blocks; 5004 for (index = 0; index < num_blocks; index++) { 5005 kaddr = ring_info->buffer[index].kaddr; 5006 chunk_size = ring_info->buffer[index].buf_size; 5007 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5008 "==> nxge_rxdma_databuf_free: free chunk %d " 5009 "kaddrp $%p chunk size %d", 5010 index, kaddr, chunk_size)); 5011 if (kaddr == NULL) continue; 5012 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 5013 ring_info->buffer[index].kaddr = NULL; 5014 } 5015 5016 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 5017 } 5018 5019 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5020 extern void contig_mem_free(void *, size_t); 5021 #endif 5022 5023 void 5024 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 5025 { 5026 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 5027 5028 if (kaddr == NULL || !buf_size) { 5029 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5030 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 5031 kaddr, buf_size)); 5032 return; 5033 } 5034 5035 switch (alloc_type) { 5036 case KMEM_ALLOC: 5037 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5038 "==> nxge_free_buf: freeing kmem $%p size %d", 5039 kaddr, buf_size)); 5040 #if defined(__i386) 5041 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 5042 #else 5043 KMEM_FREE((void *)kaddr, buf_size); 5044 #endif 5045 break; 5046 5047 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5048 case CONTIG_MEM_ALLOC: 5049 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5050 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 5051 kaddr, buf_size)); 5052 contig_mem_free((void *)kaddr, buf_size); 5053 break; 5054 #endif 5055 5056 default: 5057 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5058 "<== nxge_free_buf: unsupported alloc type %d", 5059 alloc_type)); 5060 return; 5061 } 5062 5063 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5064 } 5065
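/*
 * Illustrative sketch, not part of the driver build: the RBR packet-buffer
 * size selection performed in nxge_map_rxdma_channel_buf_ring(), restated
 * as a self-contained helper.  The type nxge_bufsz_sketch_t and the
 * function nxge_pick_rbr_bufsz_sketch() are invented names for this sketch;
 * only the byte values and the jumbo/block-size logic come from the code
 * above (256 bytes and 1K always; 2K normally, or 4K/8K when jumbo frames
 * are accepted, depending on the receive block size).
 */
#if 0
typedef struct nxge_bufsz_sketch {
	uint32_t bufsz0_bytes;	/* smallest packet buffer, always 256 bytes */
	uint32_t bufsz1_bytes;	/* middle packet buffer, always 1K */
	uint32_t bufsz2_bytes;	/* largest packet buffer: 2K, 4K or 8K */
} nxge_bufsz_sketch_t;

static void
nxge_pick_rbr_bufsz_sketch(uint32_t block_size, boolean_t jumbo,
    nxge_bufsz_sketch_t *sizes)
{
	sizes->bufsz0_bytes = 256;
	sizes->bufsz1_bytes = 1024;

	if (!jumbo) {
		/* Standard MTU: cap the largest buffer at 2K. */
		sizes->bufsz2_bytes = 2 * 1024;
	} else if (block_size >= 0x2000) {
		/* Jumbo frames with an 8K (or larger) receive block. */
		sizes->bufsz2_bytes = 8 * 1024;
	} else {
		/* Jumbo frames with a smaller receive block. */
		sizes->bufsz2_bytes = 4 * 1024;
	}
}
#endif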