/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * XXX: This is a tunable to limit the number of packets each interrupt
 * handles.  0 (default) means that each interrupt takes as many packets
 * as it finds.
 */
extern int nxge_max_intr_pkts;

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
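 *
 * In general these thresholds decide whether a received packet is
 * bcopy'd into a freshly allocated mblk or whether the DMA buffer
 * itself is loaned upstream (see the rbr_use_bcopy and rbr_consumed
 * handling in nxge_post_page() and nxge_freeb() below); copying more
 * aggressively keeps buffers in the RBR at the cost of extra CPU.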
71 */ 72 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 73 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 74 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 75 76 extern uint32_t nxge_cksum_offload; 77 78 static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 79 static void nxge_unmap_rxdma(p_nxge_t, int); 80 81 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 82 83 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 84 static void nxge_rxdma_hw_stop(p_nxge_t, int); 85 86 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 87 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 88 uint32_t, 89 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 90 p_rx_mbox_t *); 91 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 92 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 93 94 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 95 uint16_t, 96 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 97 p_rx_rcr_ring_t *, p_rx_mbox_t *); 98 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 99 p_rx_rcr_ring_t, p_rx_mbox_t); 100 101 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 102 uint16_t, 103 p_nxge_dma_common_t *, 104 p_rx_rbr_ring_t *, uint32_t); 105 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 106 p_rx_rbr_ring_t); 107 108 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 109 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 110 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 111 112 static mblk_t * 113 nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 114 115 static void nxge_receive_packet(p_nxge_t, 116 p_rx_rcr_ring_t, 117 p_rcr_entry_t, 118 boolean_t *, 119 mblk_t **, mblk_t **); 120 121 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 122 123 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 124 static void nxge_freeb(p_rx_msg_t); 125 static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 126 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 127 128 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 129 uint32_t, uint32_t); 130 131 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 132 p_rx_rbr_ring_t); 133 134 135 static nxge_status_t 136 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 137 138 nxge_status_t 139 nxge_rx_port_fatal_err_recover(p_nxge_t); 140 141 static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 142 143 nxge_status_t 144 nxge_init_rxdma_channels(p_nxge_t nxgep) 145 { 146 nxge_grp_set_t *set = &nxgep->rx_set; 147 int i, count, channel; 148 nxge_grp_t *group; 149 dc_map_t map; 150 int dev_gindex; 151 152 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 153 154 if (!isLDOMguest(nxgep)) { 155 if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 156 cmn_err(CE_NOTE, "hw_start_common"); 157 return (NXGE_ERROR); 158 } 159 } 160 161 /* 162 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 163 * We only have 8 hardware RDC tables, but we may have 164 * up to 16 logical (software-defined) groups of RDCS, 165 * if we make use of layer 3 & 4 hardware classification. 
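	 *
	 * For each logical group bit set in set->lg.map, the loop below
	 * translates the logical index into a device group index
	 * (dev_gindex = def_mac_rxdma_grpid + i), then walks that group's
	 * RDC map and binds every member channel with nxge_grp_dc_add().
	 * For example, with def_mac_rxdma_grpid == 2, logical group 1
	 * uses rdc_grps[3].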
166 */ 167 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 168 if ((1 << i) & set->lg.map) { 169 group = set->group[i]; 170 dev_gindex = 171 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 172 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 173 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 174 if ((1 << channel) & map) { 175 if ((nxge_grp_dc_add(nxgep, 176 group, VP_BOUND_RX, channel))) 177 goto init_rxdma_channels_exit; 178 } 179 } 180 } 181 if (++count == set->lg.count) 182 break; 183 } 184 185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 186 return (NXGE_OK); 187 188 init_rxdma_channels_exit: 189 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 190 if ((1 << i) & set->lg.map) { 191 group = set->group[i]; 192 dev_gindex = 193 nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 194 map = nxgep->pt_config.rdc_grps[dev_gindex].map; 195 for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 196 if ((1 << channel) & map) { 197 nxge_grp_dc_remove(nxgep, 198 VP_BOUND_RX, channel); 199 } 200 } 201 } 202 if (++count == set->lg.count) 203 break; 204 } 205 206 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 207 return (NXGE_ERROR); 208 } 209 210 nxge_status_t 211 nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 212 { 213 nxge_status_t status; 214 215 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 216 217 status = nxge_map_rxdma(nxge, channel); 218 if (status != NXGE_OK) { 219 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 220 "<== nxge_init_rxdma: status 0x%x", status)); 221 return (status); 222 } 223 224 status = nxge_rxdma_hw_start(nxge, channel); 225 if (status != NXGE_OK) { 226 nxge_unmap_rxdma(nxge, channel); 227 } 228 229 if (!nxge->statsp->rdc_ksp[channel]) 230 nxge_setup_rdc_kstats(nxge, channel); 231 232 NXGE_DEBUG_MSG((nxge, MEM2_CTL, 233 "<== nxge_init_rxdma_channel: status 0x%x", status)); 234 235 return (status); 236 } 237 238 void 239 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 240 { 241 nxge_grp_set_t *set = &nxgep->rx_set; 242 int rdc; 243 244 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 245 246 if (set->owned.map == 0) { 247 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 248 "nxge_uninit_rxdma_channels: no channels")); 249 return; 250 } 251 252 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 253 if ((1 << rdc) & set->owned.map) { 254 nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 255 } 256 } 257 258 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 259 } 260 261 void 262 nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 263 { 264 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 265 266 if (nxgep->statsp->rdc_ksp[channel]) { 267 kstat_delete(nxgep->statsp->rdc_ksp[channel]); 268 nxgep->statsp->rdc_ksp[channel] = 0; 269 } 270 271 nxge_rxdma_hw_stop(nxgep, channel); 272 nxge_unmap_rxdma(nxgep, channel); 273 274 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 275 } 276 277 nxge_status_t 278 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 279 { 280 npi_handle_t handle; 281 npi_status_t rs = NPI_SUCCESS; 282 nxge_status_t status = NXGE_OK; 283 284 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 285 286 handle = NXGE_DEV_NPI_HANDLE(nxgep); 287 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 288 289 if (rs != NPI_SUCCESS) { 290 status = NXGE_ERROR | rs; 291 } 292 293 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 294 295 return (status); 296 } 297 298 void 299 
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 300 { 301 nxge_grp_set_t *set = &nxgep->rx_set; 302 int rdc; 303 304 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 305 306 if (!isLDOMguest(nxgep)) { 307 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 308 (void) npi_rxdma_dump_fzc_regs(handle); 309 } 310 311 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 312 NXGE_DEBUG_MSG((nxgep, TX_CTL, 313 "nxge_rxdma_regs_dump_channels: " 314 "NULL ring pointer(s)")); 315 return; 316 } 317 318 if (set->owned.map == 0) { 319 NXGE_DEBUG_MSG((nxgep, RX_CTL, 320 "nxge_rxdma_regs_dump_channels: no channels")); 321 return; 322 } 323 324 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 325 if ((1 << rdc) & set->owned.map) { 326 rx_rbr_ring_t *ring = 327 nxgep->rx_rbr_rings->rbr_rings[rdc]; 328 if (ring) { 329 (void) nxge_dump_rxdma_channel(nxgep, rdc); 330 } 331 } 332 } 333 334 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 335 } 336 337 nxge_status_t 338 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 339 { 340 npi_handle_t handle; 341 npi_status_t rs = NPI_SUCCESS; 342 nxge_status_t status = NXGE_OK; 343 344 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 345 346 handle = NXGE_DEV_NPI_HANDLE(nxgep); 347 rs = npi_rxdma_dump_rdc_regs(handle, channel); 348 349 if (rs != NPI_SUCCESS) { 350 status = NXGE_ERROR | rs; 351 } 352 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 353 return (status); 354 } 355 356 nxge_status_t 357 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 358 p_rx_dma_ent_msk_t mask_p) 359 { 360 npi_handle_t handle; 361 npi_status_t rs = NPI_SUCCESS; 362 nxge_status_t status = NXGE_OK; 363 364 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 365 "<== nxge_init_rxdma_channel_event_mask")); 366 367 handle = NXGE_DEV_NPI_HANDLE(nxgep); 368 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 369 if (rs != NPI_SUCCESS) { 370 status = NXGE_ERROR | rs; 371 } 372 373 return (status); 374 } 375 376 nxge_status_t 377 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 378 p_rx_dma_ctl_stat_t cs_p) 379 { 380 npi_handle_t handle; 381 npi_status_t rs = NPI_SUCCESS; 382 nxge_status_t status = NXGE_OK; 383 384 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 385 "<== nxge_init_rxdma_channel_cntl_stat")); 386 387 handle = NXGE_DEV_NPI_HANDLE(nxgep); 388 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 389 390 if (rs != NPI_SUCCESS) { 391 status = NXGE_ERROR | rs; 392 } 393 394 return (status); 395 } 396 397 /* 398 * nxge_rxdma_cfg_rdcgrp_default_rdc 399 * 400 * Set the default RDC for an RDC Group (Table) 401 * 402 * Arguments: 403 * nxgep 404 * rdcgrp The group to modify 405 * rdc The new default RDC. 406 * 407 * Notes: 408 * 409 * NPI/NXGE function calls: 410 * npi_rxdma_cfg_rdc_table_default_rdc() 411 * 412 * Registers accessed: 413 * RDC_TBL_REG: FZC_ZCP + 0x10000 414 * 415 * Context: 416 * Service domain 417 */ 418 nxge_status_t 419 nxge_rxdma_cfg_rdcgrp_default_rdc( 420 p_nxge_t nxgep, 421 uint8_t rdcgrp, 422 uint8_t rdc) 423 { 424 npi_handle_t handle; 425 npi_status_t rs = NPI_SUCCESS; 426 p_nxge_dma_pt_cfg_t p_dma_cfgp; 427 p_nxge_rdc_grp_t rdc_grp_p; 428 uint8_t actual_rdcgrp, actual_rdc; 429 430 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 431 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 432 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 433 434 handle = NXGE_DEV_NPI_HANDLE(nxgep); 435 436 /* 437 * This has to be rewritten. Do we even allow this anymore? 
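	 *
	 * As written, it records <rdc> as the group's default in the soft
	 * state (rdc_grps[rdcgrp]), then converts both the group and the
	 * RDC to their hardware numbers with NXGE_ACTUAL_RDCGRP() and
	 * NXGE_ACTUAL_RDC() before programming the RDC table through
	 * npi_rxdma_cfg_rdc_table_default_rdc().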
438 */ 439 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 440 RDC_MAP_IN(rdc_grp_p->map, rdc); 441 rdc_grp_p->def_rdc = rdc; 442 443 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 444 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 445 446 rs = npi_rxdma_cfg_rdc_table_default_rdc( 447 handle, actual_rdcgrp, actual_rdc); 448 449 if (rs != NPI_SUCCESS) { 450 return (NXGE_ERROR | rs); 451 } 452 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 453 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 454 return (NXGE_OK); 455 } 456 457 nxge_status_t 458 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 459 { 460 npi_handle_t handle; 461 462 uint8_t actual_rdc; 463 npi_status_t rs = NPI_SUCCESS; 464 465 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 466 " ==> nxge_rxdma_cfg_port_default_rdc")); 467 468 handle = NXGE_DEV_NPI_HANDLE(nxgep); 469 actual_rdc = rdc; /* XXX Hack! */ 470 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 471 472 473 if (rs != NPI_SUCCESS) { 474 return (NXGE_ERROR | rs); 475 } 476 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 477 " <== nxge_rxdma_cfg_port_default_rdc")); 478 479 return (NXGE_OK); 480 } 481 482 nxge_status_t 483 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 484 uint16_t pkts) 485 { 486 npi_status_t rs = NPI_SUCCESS; 487 npi_handle_t handle; 488 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 489 " ==> nxge_rxdma_cfg_rcr_threshold")); 490 handle = NXGE_DEV_NPI_HANDLE(nxgep); 491 492 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 493 494 if (rs != NPI_SUCCESS) { 495 return (NXGE_ERROR | rs); 496 } 497 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 498 return (NXGE_OK); 499 } 500 501 nxge_status_t 502 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 503 uint16_t tout, uint8_t enable) 504 { 505 npi_status_t rs = NPI_SUCCESS; 506 npi_handle_t handle; 507 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 508 handle = NXGE_DEV_NPI_HANDLE(nxgep); 509 if (enable == 0) { 510 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 511 } else { 512 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 513 tout); 514 } 515 516 if (rs != NPI_SUCCESS) { 517 return (NXGE_ERROR | rs); 518 } 519 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 520 return (NXGE_OK); 521 } 522 523 nxge_status_t 524 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 525 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 526 { 527 npi_handle_t handle; 528 rdc_desc_cfg_t rdc_desc; 529 p_rcrcfig_b_t cfgb_p; 530 npi_status_t rs = NPI_SUCCESS; 531 532 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 533 handle = NXGE_DEV_NPI_HANDLE(nxgep); 534 /* 535 * Use configuration data composed at init time. 536 * Write to hardware the receive ring configurations. 
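	 *
	 * rdc_desc collects everything npi_rxdma_cfg_rdc_ring() needs in
	 * one call: the mailbox address, the RBR base/length and block
	 * (page) size, the three packet buffer sizes, the full-header
	 * flag and offset, and the RCR base/length together with its
	 * interrupt threshold and timeout.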
537 */ 538 rdc_desc.mbox_enable = 1; 539 rdc_desc.mbox_addr = mbox_p->mbox_addr; 540 NXGE_DEBUG_MSG((nxgep, RX_CTL, 541 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 542 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 543 544 rdc_desc.rbr_len = rbr_p->rbb_max; 545 rdc_desc.rbr_addr = rbr_p->rbr_addr; 546 547 switch (nxgep->rx_bksize_code) { 548 case RBR_BKSIZE_4K: 549 rdc_desc.page_size = SIZE_4KB; 550 break; 551 case RBR_BKSIZE_8K: 552 rdc_desc.page_size = SIZE_8KB; 553 break; 554 case RBR_BKSIZE_16K: 555 rdc_desc.page_size = SIZE_16KB; 556 break; 557 case RBR_BKSIZE_32K: 558 rdc_desc.page_size = SIZE_32KB; 559 break; 560 } 561 562 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 563 rdc_desc.valid0 = 1; 564 565 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 566 rdc_desc.valid1 = 1; 567 568 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 569 rdc_desc.valid2 = 1; 570 571 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 572 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 573 574 rdc_desc.rcr_len = rcr_p->comp_size; 575 rdc_desc.rcr_addr = rcr_p->rcr_addr; 576 577 cfgb_p = &(rcr_p->rcr_cfgb); 578 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 579 /* For now, disable this timeout in a guest domain. */ 580 if (isLDOMguest(nxgep)) { 581 rdc_desc.rcr_timeout = 0; 582 rdc_desc.rcr_timeout_enable = 0; 583 } else { 584 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 585 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 586 } 587 588 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 589 "rbr_len qlen %d pagesize code %d rcr_len %d", 590 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 591 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 592 "size 0 %d size 1 %d size 2 %d", 593 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 594 rbr_p->npi_pkt_buf_size2)); 595 596 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 597 if (rs != NPI_SUCCESS) { 598 return (NXGE_ERROR | rs); 599 } 600 601 /* 602 * Enable the timeout and threshold. 603 */ 604 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 605 rdc_desc.rcr_threshold); 606 if (rs != NPI_SUCCESS) { 607 return (NXGE_ERROR | rs); 608 } 609 610 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 611 rdc_desc.rcr_timeout); 612 if (rs != NPI_SUCCESS) { 613 return (NXGE_ERROR | rs); 614 } 615 616 /* Enable the DMA */ 617 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 618 if (rs != NPI_SUCCESS) { 619 return (NXGE_ERROR | rs); 620 } 621 622 /* Kick the DMA engine. 
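	 * The kick posts the initial count of RBR descriptors (rbb_max),
	 * and the RBR empty condition is cleared right after.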
*/ 623 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 624 /* Clear the rbr empty bit */ 625 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 626 627 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 628 629 return (NXGE_OK); 630 } 631 632 nxge_status_t 633 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 634 { 635 npi_handle_t handle; 636 npi_status_t rs = NPI_SUCCESS; 637 638 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 639 handle = NXGE_DEV_NPI_HANDLE(nxgep); 640 641 /* disable the DMA */ 642 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 643 if (rs != NPI_SUCCESS) { 644 NXGE_DEBUG_MSG((nxgep, RX_CTL, 645 "<== nxge_disable_rxdma_channel:failed (0x%x)", 646 rs)); 647 return (NXGE_ERROR | rs); 648 } 649 650 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 651 return (NXGE_OK); 652 } 653 654 nxge_status_t 655 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 656 { 657 npi_handle_t handle; 658 nxge_status_t status = NXGE_OK; 659 660 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 661 "<== nxge_init_rxdma_channel_rcrflush")); 662 663 handle = NXGE_DEV_NPI_HANDLE(nxgep); 664 npi_rxdma_rdc_rcr_flush(handle, channel); 665 666 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 667 "<== nxge_init_rxdma_channel_rcrflsh")); 668 return (status); 669 670 } 671 672 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 673 674 #define TO_LEFT -1 675 #define TO_RIGHT 1 676 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 677 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 678 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 679 #define NO_HINT 0xffffffff 680 681 /*ARGSUSED*/ 682 nxge_status_t 683 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 684 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 685 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 686 { 687 int bufsize; 688 uint64_t pktbuf_pp; 689 uint64_t dvma_addr; 690 rxring_info_t *ring_info; 691 int base_side, end_side; 692 int r_index, l_index, anchor_index; 693 int found, search_done; 694 uint32_t offset, chunk_size, block_size, page_size_mask; 695 uint32_t chunk_index, block_index, total_index; 696 int max_iterations, iteration; 697 rxbuf_index_info_t *bufinfo; 698 699 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 700 701 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 702 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 703 pkt_buf_addr_pp, 704 pktbufsz_type)); 705 #if defined(__i386) 706 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 707 #else 708 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 709 #endif 710 711 switch (pktbufsz_type) { 712 case 0: 713 bufsize = rbr_p->pkt_buf_size0; 714 break; 715 case 1: 716 bufsize = rbr_p->pkt_buf_size1; 717 break; 718 case 2: 719 bufsize = rbr_p->pkt_buf_size2; 720 break; 721 case RCR_SINGLE_BLOCK: 722 bufsize = 0; 723 anchor_index = 0; 724 break; 725 default: 726 return (NXGE_ERROR); 727 } 728 729 if (rbr_p->num_blocks == 1) { 730 anchor_index = 0; 731 ring_info = rbr_p->ring_info; 732 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 733 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 734 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 735 "buf_pp $%p btype %d anchor_index %d " 736 "bufinfo $%p", 737 pkt_buf_addr_pp, 738 pktbufsz_type, 739 anchor_index, 740 bufinfo)); 741 742 goto found_index; 743 } 744 745 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 746 "==> nxge_rxbuf_pp_to_vp: " 747 "buf_pp $%p btype %d anchor_index %d", 748 pkt_buf_addr_pp, 749 pktbufsz_type, 750 anchor_index)); 751 752 ring_info = rbr_p->ring_info; 753 found = B_FALSE; 754 bufinfo = (rxbuf_index_info_t 
	    *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would be a single block in use for a given
	 * buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, then reset the hint for this size.
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * The search uses a binary search algorithm.
		 * It assumes that the information is
		 * already sorted in increasing order,
		 * info[0] < info[1] < info[2] ....
< info[n-1] 804 * where n is the size of the information array 805 */ 806 r_index = rbr_p->num_blocks - 1; 807 l_index = 0; 808 search_done = B_FALSE; 809 anchor_index = MID_INDEX(r_index, l_index); 810 while (search_done == B_FALSE) { 811 if ((r_index == l_index) || 812 (iteration >= max_iterations)) 813 search_done = B_TRUE; 814 end_side = TO_RIGHT; /* to the right */ 815 base_side = TO_LEFT; /* to the left */ 816 /* read the DVMA address information and sort it */ 817 dvma_addr = bufinfo[anchor_index].dvma_addr; 818 chunk_size = bufinfo[anchor_index].buf_size; 819 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 820 "==> nxge_rxbuf_pp_to_vp: (searching)" 821 "buf_pp $%p btype %d " 822 "anchor_index %d chunk_size %d dvmaaddr $%p", 823 pkt_buf_addr_pp, 824 pktbufsz_type, 825 anchor_index, 826 chunk_size, 827 dvma_addr)); 828 829 if (pktbuf_pp >= dvma_addr) 830 base_side = TO_RIGHT; /* to the right */ 831 if (pktbuf_pp < (dvma_addr + chunk_size)) 832 end_side = TO_LEFT; /* to the left */ 833 834 switch (base_side + end_side) { 835 case IN_MIDDLE: 836 /* found */ 837 found = B_TRUE; 838 search_done = B_TRUE; 839 if ((pktbuf_pp + bufsize) < 840 (dvma_addr + chunk_size)) 841 ring_info->hint[pktbufsz_type] = 842 bufinfo[anchor_index].buf_index; 843 break; 844 case BOTH_RIGHT: 845 /* not found: go to the right */ 846 l_index = anchor_index + 1; 847 anchor_index = MID_INDEX(r_index, l_index); 848 break; 849 850 case BOTH_LEFT: 851 /* not found: go to the left */ 852 r_index = anchor_index - 1; 853 anchor_index = MID_INDEX(r_index, l_index); 854 break; 855 default: /* should not come here */ 856 return (NXGE_ERROR); 857 } 858 iteration++; 859 } 860 861 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 862 "==> nxge_rxbuf_pp_to_vp: (search done)" 863 "buf_pp $%p btype %d anchor_index %d", 864 pkt_buf_addr_pp, 865 pktbufsz_type, 866 anchor_index)); 867 } 868 869 if (found == B_FALSE) { 870 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 871 "==> nxge_rxbuf_pp_to_vp: (search failed)" 872 "buf_pp $%p btype %d anchor_index %d", 873 pkt_buf_addr_pp, 874 pktbufsz_type, 875 anchor_index)); 876 return (NXGE_ERROR); 877 } 878 879 found_index: 880 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 881 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 882 "buf_pp $%p btype %d bufsize %d anchor_index %d", 883 pkt_buf_addr_pp, 884 pktbufsz_type, 885 bufsize, 886 anchor_index)); 887 888 /* index of the first block in this chunk */ 889 chunk_index = bufinfo[anchor_index].start_index; 890 dvma_addr = bufinfo[anchor_index].dvma_addr; 891 page_size_mask = ring_info->block_size_mask; 892 893 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 894 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 895 "buf_pp $%p btype %d bufsize %d " 896 "anchor_index %d chunk_index %d dvma $%p", 897 pkt_buf_addr_pp, 898 pktbufsz_type, 899 bufsize, 900 anchor_index, 901 chunk_index, 902 dvma_addr)); 903 904 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 905 block_size = rbr_p->block_size; /* System block(page) size */ 906 907 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 908 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 909 "buf_pp $%p btype %d bufsize %d " 910 "anchor_index %d chunk_index %d dvma $%p " 911 "offset %d block_size %d", 912 pkt_buf_addr_pp, 913 pktbufsz_type, 914 bufsize, 915 anchor_index, 916 chunk_index, 917 dvma_addr, 918 offset, 919 block_size)); 920 921 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 922 923 block_index = (offset / block_size); /* index within chunk */ 924 total_index = chunk_index + block_index; 925 926 927 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 928 "==> nxge_rxbuf_pp_to_vp: " 929 
"total_index %d dvma_addr $%p " 930 "offset %d block_size %d " 931 "block_index %d ", 932 total_index, dvma_addr, 933 offset, block_size, 934 block_index)); 935 #if defined(__i386) 936 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 937 (uint32_t)offset); 938 #else 939 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 940 (uint64_t)offset); 941 #endif 942 943 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 944 "==> nxge_rxbuf_pp_to_vp: " 945 "total_index %d dvma_addr $%p " 946 "offset %d block_size %d " 947 "block_index %d " 948 "*pkt_buf_addr_p $%p", 949 total_index, dvma_addr, 950 offset, block_size, 951 block_index, 952 *pkt_buf_addr_p)); 953 954 955 *msg_index = total_index; 956 *bufoffset = (offset & page_size_mask); 957 958 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 959 "==> nxge_rxbuf_pp_to_vp: get msg index: " 960 "msg_index %d bufoffset_index %d", 961 *msg_index, 962 *bufoffset)); 963 964 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 965 966 return (NXGE_OK); 967 } 968 969 /* 970 * used by quick sort (qsort) function 971 * to perform comparison 972 */ 973 static int 974 nxge_sort_compare(const void *p1, const void *p2) 975 { 976 977 rxbuf_index_info_t *a, *b; 978 979 a = (rxbuf_index_info_t *)p1; 980 b = (rxbuf_index_info_t *)p2; 981 982 if (a->dvma_addr > b->dvma_addr) 983 return (1); 984 if (a->dvma_addr < b->dvma_addr) 985 return (-1); 986 return (0); 987 } 988 989 990 991 /* 992 * grabbed this sort implementation from common/syscall/avl.c 993 * 994 */ 995 /* 996 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 997 * v = Ptr to array/vector of objs 998 * n = # objs in the array 999 * s = size of each obj (must be multiples of a word size) 1000 * f = ptr to function to compare two objs 1001 * returns (-1 = less than, 0 = equal, 1 = greater than 1002 */ 1003 void 1004 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 1005 { 1006 int g, i, j, ii; 1007 unsigned int *p1, *p2; 1008 unsigned int tmp; 1009 1010 /* No work to do */ 1011 if (v == NULL || n <= 1) 1012 return; 1013 /* Sanity check on arguments */ 1014 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 1015 ASSERT(s > 0); 1016 1017 for (g = n / 2; g > 0; g /= 2) { 1018 for (i = g; i < n; i++) { 1019 for (j = i - g; j >= 0 && 1020 (*f)(v + j * s, v + (j + g) * s) == 1; 1021 j -= g) { 1022 p1 = (unsigned *)(v + j * s); 1023 p2 = (unsigned *)(v + (j + g) * s); 1024 for (ii = 0; ii < s / 4; ii++) { 1025 tmp = *p1; 1026 *p1++ = *p2; 1027 *p2++ = tmp; 1028 } 1029 } 1030 } 1031 } 1032 } 1033 1034 /* 1035 * Initialize data structures required for rxdma 1036 * buffer dvma->vmem address lookup 1037 */ 1038 /*ARGSUSED*/ 1039 static nxge_status_t 1040 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 1041 { 1042 1043 int index; 1044 rxring_info_t *ring_info; 1045 int max_iteration = 0, max_index = 0; 1046 1047 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 1048 1049 ring_info = rbrp->ring_info; 1050 ring_info->hint[0] = NO_HINT; 1051 ring_info->hint[1] = NO_HINT; 1052 ring_info->hint[2] = NO_HINT; 1053 max_index = rbrp->num_blocks; 1054 1055 /* read the DVMA address information and sort it */ 1056 /* do init of the information array */ 1057 1058 1059 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1060 " nxge_rxbuf_index_info_init Sort ptrs")); 1061 1062 /* sort the array */ 1063 nxge_ksort((void *)ring_info->buffer, max_index, 1064 sizeof (rxbuf_index_info_t), nxge_sort_compare); 1065 1066 1067 1068 for (index = 0; index < max_index; index++) { 1069 
NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1070 " nxge_rxbuf_index_info_init: sorted chunk %d " 1071 " ioaddr $%p kaddr $%p size %x", 1072 index, ring_info->buffer[index].dvma_addr, 1073 ring_info->buffer[index].kaddr, 1074 ring_info->buffer[index].buf_size)); 1075 } 1076 1077 max_iteration = 0; 1078 while (max_index >= (1ULL << max_iteration)) 1079 max_iteration++; 1080 ring_info->max_iterations = max_iteration + 1; 1081 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 1082 " nxge_rxbuf_index_info_init Find max iter %d", 1083 ring_info->max_iterations)); 1084 1085 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 1086 return (NXGE_OK); 1087 } 1088 1089 /* ARGSUSED */ 1090 void 1091 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 1092 { 1093 #ifdef NXGE_DEBUG 1094 1095 uint32_t bptr; 1096 uint64_t pp; 1097 1098 bptr = entry_p->bits.hdw.pkt_buf_addr; 1099 1100 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1101 "\trcr entry $%p " 1102 "\trcr entry 0x%0llx " 1103 "\trcr entry 0x%08x " 1104 "\trcr entry 0x%08x " 1105 "\tvalue 0x%0llx\n" 1106 "\tmulti = %d\n" 1107 "\tpkt_type = 0x%x\n" 1108 "\tzero_copy = %d\n" 1109 "\tnoport = %d\n" 1110 "\tpromis = %d\n" 1111 "\terror = 0x%04x\n" 1112 "\tdcf_err = 0x%01x\n" 1113 "\tl2_len = %d\n" 1114 "\tpktbufsize = %d\n" 1115 "\tpkt_buf_addr = $%p\n" 1116 "\tpkt_buf_addr (<< 6) = $%p\n", 1117 entry_p, 1118 *(int64_t *)entry_p, 1119 *(int32_t *)entry_p, 1120 *(int32_t *)((char *)entry_p + 32), 1121 entry_p->value, 1122 entry_p->bits.hdw.multi, 1123 entry_p->bits.hdw.pkt_type, 1124 entry_p->bits.hdw.zero_copy, 1125 entry_p->bits.hdw.noport, 1126 entry_p->bits.hdw.promis, 1127 entry_p->bits.hdw.error, 1128 entry_p->bits.hdw.dcf_err, 1129 entry_p->bits.hdw.l2_len, 1130 entry_p->bits.hdw.pktbufsz, 1131 bptr, 1132 entry_p->bits.ldw.pkt_buf_addr)); 1133 1134 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1135 RCR_PKT_BUF_ADDR_SHIFT; 1136 1137 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1138 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1139 #endif 1140 } 1141 1142 void 1143 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1144 { 1145 npi_handle_t handle; 1146 rbr_stat_t rbr_stat; 1147 addr44_t hd_addr; 1148 addr44_t tail_addr; 1149 uint16_t qlen; 1150 1151 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1152 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1153 1154 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1155 1156 /* RBR head */ 1157 hd_addr.addr = 0; 1158 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1159 #if defined(__i386) 1160 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1161 (void *)(uint32_t)hd_addr.addr); 1162 #else 1163 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1164 (void *)hd_addr.addr); 1165 #endif 1166 1167 /* RBR stats */ 1168 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1169 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1170 1171 /* RCR tail */ 1172 tail_addr.addr = 0; 1173 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1174 #if defined(__i386) 1175 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1176 (void *)(uint32_t)tail_addr.addr); 1177 #else 1178 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1179 (void *)tail_addr.addr); 1180 #endif 1181 1182 /* RCR qlen */ 1183 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1184 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1185 1186 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1187 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1188 } 1189 1190 nxge_status_t 1191 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1192 { 1193 nxge_grp_set_t 
*set = &nxgep->rx_set; 1194 nxge_status_t status; 1195 npi_status_t rs; 1196 int rdc; 1197 1198 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1199 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1200 1201 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1202 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1203 "<== nxge_rxdma_mode: not initialized")); 1204 return (NXGE_ERROR); 1205 } 1206 1207 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1208 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1209 "<== nxge_tx_port_fatal_err_recover: " 1210 "NULL ring pointer(s)")); 1211 return (NXGE_ERROR); 1212 } 1213 1214 if (set->owned.map == 0) { 1215 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1216 "nxge_rxdma_regs_dump_channels: no channels")); 1217 return (NULL); 1218 } 1219 1220 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1221 if ((1 << rdc) & set->owned.map) { 1222 rx_rbr_ring_t *ring = 1223 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1224 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 1225 if (ring) { 1226 if (enable) { 1227 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1228 "==> nxge_rxdma_hw_mode: " 1229 "channel %d (enable)", rdc)); 1230 rs = npi_rxdma_cfg_rdc_enable 1231 (handle, rdc); 1232 } else { 1233 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1234 "==> nxge_rxdma_hw_mode: " 1235 "channel %d disable)", rdc)); 1236 rs = npi_rxdma_cfg_rdc_disable 1237 (handle, rdc); 1238 } 1239 } 1240 } 1241 } 1242 1243 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1244 1245 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1246 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1247 1248 return (status); 1249 } 1250 1251 void 1252 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1253 { 1254 npi_handle_t handle; 1255 1256 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1257 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1258 1259 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1260 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1261 1262 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1263 } 1264 1265 void 1266 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1267 { 1268 npi_handle_t handle; 1269 1270 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1271 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1272 1273 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1274 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1275 1276 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1277 } 1278 1279 void 1280 nxge_hw_start_rx(p_nxge_t nxgep) 1281 { 1282 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1283 1284 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1285 (void) nxge_rx_mac_enable(nxgep); 1286 1287 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1288 } 1289 1290 /*ARGSUSED*/ 1291 void 1292 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1293 { 1294 nxge_grp_set_t *set = &nxgep->rx_set; 1295 int rdc; 1296 1297 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1298 1299 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1300 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1301 "<== nxge_tx_port_fatal_err_recover: " 1302 "NULL ring pointer(s)")); 1303 return; 1304 } 1305 1306 if (set->owned.map == 0) { 1307 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1308 "nxge_rxdma_regs_dump_channels: no channels")); 1309 return; 1310 } 1311 1312 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1313 if ((1 << rdc) & set->owned.map) { 1314 rx_rbr_ring_t *ring = 1315 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1316 if (ring) { 1317 nxge_rxdma_hw_stop(nxgep, rdc); 1318 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1319 "==> nxge_fixup_rxdma_rings: " 1320 "channel %d ring $%px", 1321 
				    rdc, ring));
				(void) nxge_rxdma_fixup_channel
				    (nxgep, rdc, rdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}

void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	int		i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
	i = nxge_rxdma_get_ring_index(nxgep, channel);
	if (i < 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_fix_channel: no entry found"));
		return;
	}

	nxge_rxdma_fixup_channel(nxgep, channel, i);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
}

void
nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
{
	int			ndmas;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p;
	p_nxge_dma_pool_t	dma_buf_poolp;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	p_nxge_dma_common_t	dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

	(void) nxge_rxdma_stop_channel(nxgep, channel);

	dma_buf_poolp = nxgep->rx_buf_pool_p;
	dma_cntl_poolp = nxgep->rx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: buf not allocated"));
		return;
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: no dma allocated"));
		return;
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

	/* Reinitialize the receive block and completion rings */
	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	status = nxge_rxdma_start_channel(nxgep, channel,
	    rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto nxge_rxdma_fixup_channel_fail;
	}

nxge_rxdma_fixup_channel_fail:
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

/*
 * Convert an absolute RDC number to a Receive Buffer Ring index.  That is,
 * map <channel> to an index into nxgep->rx_rbr_rings.
1427 * (device ring index -> port ring index) 1428 */ 1429 int 1430 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1431 { 1432 int i, ndmas; 1433 uint16_t rdc; 1434 p_rx_rbr_rings_t rx_rbr_rings; 1435 p_rx_rbr_ring_t *rbr_rings; 1436 1437 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1438 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1439 1440 rx_rbr_rings = nxgep->rx_rbr_rings; 1441 if (rx_rbr_rings == NULL) { 1442 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1443 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1444 return (-1); 1445 } 1446 ndmas = rx_rbr_rings->ndmas; 1447 if (!ndmas) { 1448 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1449 "<== nxge_rxdma_get_ring_index: no channel")); 1450 return (-1); 1451 } 1452 1453 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1454 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1455 1456 rbr_rings = rx_rbr_rings->rbr_rings; 1457 for (i = 0; i < ndmas; i++) { 1458 rdc = rbr_rings[i]->rdc; 1459 if (channel == rdc) { 1460 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1461 "==> nxge_rxdma_get_rbr_ring: channel %d " 1462 "(index %d) ring %d", channel, i, rbr_rings[i])); 1463 return (i); 1464 } 1465 } 1466 1467 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1468 "<== nxge_rxdma_get_rbr_ring_index: not found")); 1469 1470 return (-1); 1471 } 1472 1473 p_rx_rbr_ring_t 1474 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1475 { 1476 nxge_grp_set_t *set = &nxgep->rx_set; 1477 nxge_channel_t rdc; 1478 1479 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1480 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1481 1482 if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1483 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1484 "<== nxge_rxdma_get_rbr_ring: " 1485 "NULL ring pointer(s)")); 1486 return (NULL); 1487 } 1488 1489 if (set->owned.map == 0) { 1490 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1491 "<== nxge_rxdma_get_rbr_ring: no channels")); 1492 return (NULL); 1493 } 1494 1495 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1496 if ((1 << rdc) & set->owned.map) { 1497 rx_rbr_ring_t *ring = 1498 nxgep->rx_rbr_rings->rbr_rings[rdc]; 1499 if (ring) { 1500 if (channel == ring->rdc) { 1501 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1502 "==> nxge_rxdma_get_rbr_ring: " 1503 "channel %d ring $%p", rdc, ring)); 1504 return (ring); 1505 } 1506 } 1507 } 1508 } 1509 1510 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1511 "<== nxge_rxdma_get_rbr_ring: not found")); 1512 1513 return (NULL); 1514 } 1515 1516 p_rx_rcr_ring_t 1517 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1518 { 1519 nxge_grp_set_t *set = &nxgep->rx_set; 1520 nxge_channel_t rdc; 1521 1522 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1523 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1524 1525 if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1526 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1527 "<== nxge_rxdma_get_rcr_ring: " 1528 "NULL ring pointer(s)")); 1529 return (NULL); 1530 } 1531 1532 if (set->owned.map == 0) { 1533 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1534 "<== nxge_rxdma_get_rbr_ring: no channels")); 1535 return (NULL); 1536 } 1537 1538 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1539 if ((1 << rdc) & set->owned.map) { 1540 rx_rcr_ring_t *ring = 1541 nxgep->rx_rcr_rings->rcr_rings[rdc]; 1542 if (ring) { 1543 if (channel == ring->rdc) { 1544 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1545 "==> nxge_rxdma_get_rcr_ring: " 1546 "channel %d ring $%p", rdc, ring)); 1547 return (ring); 1548 } 1549 } 1550 } 1551 } 1552 1553 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1554 "<== nxge_rxdma_get_rcr_ring: not found")); 1555 1556 return (NULL); 1557 } 1558 1559 /* 1560 * Static functions start here. 
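 *
 * The routines that follow cover receive buffer allocation and reuse
 * (nxge_allocb, nxge_dupb, nxge_dupb_bcopy, nxge_post_page, nxge_freeb)
 * and the interrupt / packet processing path (nxge_rx_intr,
 * nxge_rx_pkts_vring, nxge_rx_pkts, nxge_receive_packet).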
1561 */ 1562 static p_rx_msg_t 1563 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1564 { 1565 p_rx_msg_t nxge_mp = NULL; 1566 p_nxge_dma_common_t dmamsg_p; 1567 uchar_t *buffer; 1568 1569 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1570 if (nxge_mp == NULL) { 1571 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1572 "Allocation of a rx msg failed.")); 1573 goto nxge_allocb_exit; 1574 } 1575 1576 nxge_mp->use_buf_pool = B_FALSE; 1577 if (dmabuf_p) { 1578 nxge_mp->use_buf_pool = B_TRUE; 1579 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1580 *dmamsg_p = *dmabuf_p; 1581 dmamsg_p->nblocks = 1; 1582 dmamsg_p->block_size = size; 1583 dmamsg_p->alength = size; 1584 buffer = (uchar_t *)dmabuf_p->kaddrp; 1585 1586 dmabuf_p->kaddrp = (void *) 1587 ((char *)dmabuf_p->kaddrp + size); 1588 dmabuf_p->ioaddr_pp = (void *) 1589 ((char *)dmabuf_p->ioaddr_pp + size); 1590 dmabuf_p->alength -= size; 1591 dmabuf_p->offset += size; 1592 dmabuf_p->dma_cookie.dmac_laddress += size; 1593 dmabuf_p->dma_cookie.dmac_size -= size; 1594 1595 } else { 1596 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1597 if (buffer == NULL) { 1598 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1599 "Allocation of a receive page failed.")); 1600 goto nxge_allocb_fail1; 1601 } 1602 } 1603 1604 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1605 if (nxge_mp->rx_mblk_p == NULL) { 1606 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1607 goto nxge_allocb_fail2; 1608 } 1609 1610 nxge_mp->buffer = buffer; 1611 nxge_mp->block_size = size; 1612 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1613 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1614 nxge_mp->ref_cnt = 1; 1615 nxge_mp->free = B_TRUE; 1616 nxge_mp->rx_use_bcopy = B_FALSE; 1617 1618 atomic_inc_32(&nxge_mblks_pending); 1619 1620 goto nxge_allocb_exit; 1621 1622 nxge_allocb_fail2: 1623 if (!nxge_mp->use_buf_pool) { 1624 KMEM_FREE(buffer, size); 1625 } 1626 1627 nxge_allocb_fail1: 1628 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1629 nxge_mp = NULL; 1630 1631 nxge_allocb_exit: 1632 return (nxge_mp); 1633 } 1634 1635 p_mblk_t 1636 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1637 { 1638 p_mblk_t mp; 1639 1640 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1641 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1642 "offset = 0x%08X " 1643 "size = 0x%08X", 1644 nxge_mp, offset, size)); 1645 1646 mp = desballoc(&nxge_mp->buffer[offset], size, 1647 0, &nxge_mp->freeb); 1648 if (mp == NULL) { 1649 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1650 goto nxge_dupb_exit; 1651 } 1652 atomic_inc_32(&nxge_mp->ref_cnt); 1653 1654 1655 nxge_dupb_exit: 1656 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1657 nxge_mp)); 1658 return (mp); 1659 } 1660 1661 p_mblk_t 1662 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1663 { 1664 p_mblk_t mp; 1665 uchar_t *dp; 1666 1667 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1668 if (mp == NULL) { 1669 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1670 goto nxge_dupb_bcopy_exit; 1671 } 1672 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1673 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1674 mp->b_wptr = dp + size; 1675 1676 nxge_dupb_bcopy_exit: 1677 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1678 nxge_mp)); 1679 return (mp); 1680 } 1681 1682 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1683 p_rx_msg_t rx_msg_p); 1684 1685 void 1686 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1687 { 1688 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, "==> nxge_post_page")); 1689 1690 /* Reuse this buffer */ 1691 rx_msg_p->free = B_FALSE; 1692 rx_msg_p->cur_usage_cnt = 0; 1693 rx_msg_p->max_usage_cnt = 0; 1694 rx_msg_p->pkt_buf_size = 0; 1695 1696 if (rx_rbr_p->rbr_use_bcopy) { 1697 rx_msg_p->rx_use_bcopy = B_FALSE; 1698 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1699 } 1700 1701 /* 1702 * Get the rbr header pointer and its offset index. 1703 */ 1704 MUTEX_ENTER(&rx_rbr_p->post_lock); 1705 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1706 rx_rbr_p->rbr_wrap_mask); 1707 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1708 MUTEX_EXIT(&rx_rbr_p->post_lock); 1709 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1710 rx_rbr_p->rdc, 1); 1711 1712 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1713 "<== nxge_post_page (channel %d post_next_index %d)", 1714 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1715 1716 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1717 } 1718 1719 void 1720 nxge_freeb(p_rx_msg_t rx_msg_p) 1721 { 1722 size_t size; 1723 uchar_t *buffer = NULL; 1724 int ref_cnt; 1725 boolean_t free_state = B_FALSE; 1726 1727 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1728 1729 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1730 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1731 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1732 rx_msg_p, nxge_mblks_pending)); 1733 1734 /* 1735 * First we need to get the free state, then 1736 * atomic decrement the reference count to prevent 1737 * the race condition with the interrupt thread that 1738 * is processing a loaned up buffer block. 1739 */ 1740 free_state = rx_msg_p->free; 1741 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1742 if (!ref_cnt) { 1743 atomic_dec_32(&nxge_mblks_pending); 1744 buffer = rx_msg_p->buffer; 1745 size = rx_msg_p->block_size; 1746 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1747 "will free: rx_msg_p = $%p (block pending %d)", 1748 rx_msg_p, nxge_mblks_pending)); 1749 1750 if (!rx_msg_p->use_buf_pool) { 1751 KMEM_FREE(buffer, size); 1752 } 1753 1754 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1755 1756 if (ring) { 1757 /* 1758 * Decrement the receive buffer ring's reference 1759 * count, too. 1760 */ 1761 atomic_dec_32(&ring->rbr_ref_cnt); 1762 1763 /* 1764 * Free the receive buffer ring, if 1765 * 1. all the receive buffers have been freed 1766 * 2. and we are in the proper state (that is, 1767 * we are not UNMAPPING). 1768 */ 1769 if (ring->rbr_ref_cnt == 0 && 1770 ring->rbr_state == RBR_UNMAPPED) { 1771 /* 1772 * Free receive data buffers, 1773 * buffer index information 1774 * (rxring_info) and 1775 * the message block ring. 1776 */ 1777 NXGE_DEBUG_MSG((NULL, RX_CTL, 1778 "nxge_freeb:rx_msg_p = $%p " 1779 "(block pending %d) free buffers", 1780 rx_msg_p, nxge_mblks_pending)); 1781 nxge_rxdma_databuf_free(ring); 1782 if (ring->ring_info) { 1783 KMEM_FREE(ring->ring_info, 1784 sizeof (rxring_info_t)); 1785 } 1786 1787 if (ring->rx_msg_ring) { 1788 KMEM_FREE(ring->rx_msg_ring, 1789 ring->tnblocks * 1790 sizeof (p_rx_msg_t)); 1791 } 1792 KMEM_FREE(ring, sizeof (*ring)); 1793 } 1794 } 1795 return; 1796 } 1797 1798 /* 1799 * Repost buffer. 
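	 *
	 * The buffer is reposted only when it was marked free, this drop
	 * leaves exactly one reference (ref_cnt == 1), and the ring is
	 * still in the RBR_POSTING state.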
1800 */ 1801 if (free_state && (ref_cnt == 1) && ring) { 1802 NXGE_DEBUG_MSG((NULL, RX_CTL, 1803 "nxge_freeb: post page $%p:", rx_msg_p)); 1804 if (ring->rbr_state == RBR_POSTING) 1805 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1806 } 1807 1808 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1809 } 1810 1811 uint_t 1812 nxge_rx_intr(void *arg1, void *arg2) 1813 { 1814 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1815 p_nxge_t nxgep = (p_nxge_t)arg2; 1816 p_nxge_ldg_t ldgp; 1817 uint8_t channel; 1818 npi_handle_t handle; 1819 rx_dma_ctl_stat_t cs; 1820 p_rx_rcr_ring_t rcr_ring; 1821 mblk_t *mp; 1822 1823 #ifdef NXGE_DEBUG 1824 rxdma_cfig1_t cfg; 1825 #endif 1826 1827 if (ldvp == NULL) { 1828 NXGE_DEBUG_MSG((NULL, INT_CTL, 1829 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1830 nxgep, ldvp)); 1831 1832 return (DDI_INTR_CLAIMED); 1833 } 1834 1835 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1836 nxgep = ldvp->nxgep; 1837 } 1838 1839 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 1840 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 1841 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1842 "<== nxge_rx_intr: interface not started or intialized")); 1843 return (DDI_INTR_CLAIMED); 1844 } 1845 1846 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1847 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1848 nxgep, ldvp)); 1849 1850 /* 1851 * This interrupt handler is for a specific 1852 * receive dma channel. 1853 */ 1854 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1855 1856 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1857 1858 /* 1859 * The RCR ring lock must be held when packets 1860 * are being processed and the hardware registers are 1861 * being read or written to prevent race condition 1862 * among the interrupt thread, the polling thread 1863 * (will cause fatal errors such as rcrincon bit set) 1864 * and the setting of the poll_flag. 1865 */ 1866 MUTEX_ENTER(&rcr_ring->lock); 1867 1868 /* 1869 * Get the control and status for this channel. 1870 */ 1871 channel = ldvp->channel; 1872 ldgp = ldvp->ldgp; 1873 1874 if (!isLDOMguest(nxgep)) { 1875 if (!nxgep->rx_channel_started[channel]) { 1876 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1877 "<== nxge_rx_intr: channel is not started")); 1878 MUTEX_EXIT(&rcr_ring->lock); 1879 return (DDI_INTR_CLAIMED); 1880 } 1881 } 1882 1883 ASSERT(rcr_ring->ldgp == ldgp); 1884 ASSERT(rcr_ring->ldvp == ldvp); 1885 1886 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1887 1888 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1889 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1890 channel, 1891 cs.value, 1892 cs.bits.hdw.rcrto, 1893 cs.bits.hdw.rcrthres)); 1894 1895 mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 1896 1897 /* error events. */ 1898 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1899 (void) nxge_rx_err_evnts(nxgep, channel, cs); 1900 } 1901 1902 /* 1903 * Enable the mailbox update interrupt if we want 1904 * to use mailbox. We probably don't need to use 1905 * mailbox as it only saves us one pio read. 1906 * Also write 1 to rcrthres and rcrto to clear 1907 * these two edge triggered bits. 1908 */ 1909 cs.value &= RX_DMA_CTL_STAT_WR1C; 1910 cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 1911 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1912 cs.value); 1913 1914 /* 1915 * If the polling mode is enabled, disable the interrupt. 
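	 *
	 * Only a single-device logical group is actually disarmed here;
	 * with polling enabled, further packets are picked up by the
	 * poll path rather than by this ISR.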
1916 */ 1917 if (rcr_ring->poll_flag) { 1918 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1919 "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1920 "(disabling interrupts)", channel, ldgp, ldvp)); 1921 /* 1922 * Disarm this logical group if this is a single device 1923 * group. 1924 */ 1925 if (ldgp->nldvs == 1) { 1926 ldgimgm_t mgm; 1927 mgm.value = 0; 1928 mgm.bits.ldw.arm = 0; 1929 NXGE_REG_WR64(handle, 1930 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 1931 } 1932 } else { 1933 /* 1934 * Rearm this logical group if this is a single device group. 1935 */ 1936 if (ldgp->nldvs == 1) { 1937 if (isLDOMguest(nxgep)) { 1938 nxge_hio_ldgimgn(nxgep, ldgp); 1939 } else { 1940 ldgimgm_t mgm; 1941 1942 mgm.value = 0; 1943 mgm.bits.ldw.arm = 1; 1944 mgm.bits.ldw.timer = ldgp->ldg_timer; 1945 1946 NXGE_REG_WR64(handle, 1947 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1948 mgm.value); 1949 } 1950 } 1951 1952 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1953 "==> nxge_rx_intr: rdc %d ldgp $%p " 1954 "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1955 } 1956 MUTEX_EXIT(&rcr_ring->lock); 1957 1958 if (mp) { 1959 if (!isLDOMguest(nxgep)) 1960 mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 1961 rcr_ring->rcr_gen_num); 1962 #if defined(sun4v) 1963 else { /* isLDOMguest(nxgep) */ 1964 nxge_hio_data_t *nhd = (nxge_hio_data_t *) 1965 nxgep->nxge_hw_p->hio; 1966 nx_vio_fp_t *vio = &nhd->hio.vio; 1967 1968 if (vio->cb.vio_net_rx_cb) { 1969 (*vio->cb.vio_net_rx_cb) 1970 (nxgep->hio_vr->vhp, mp); 1971 } 1972 } 1973 #endif 1974 } 1975 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 1976 return (DDI_INTR_CLAIMED); 1977 } 1978 1979 /* 1980 * Process the packets received in the specified logical device 1981 * and pass up a chain of message blocks to the upper layer. 1982 * The RCR ring lock must be held before calling this function. 
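 * It is a thin wrapper: it looks up the RCR ring for the given vring
 * index and calls nxge_rx_pkts() with no byte budget
 * (bytes_to_pickup == -1).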
1983 */ 1984 static mblk_t * 1985 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 1986 { 1987 p_mblk_t mp; 1988 p_rx_rcr_ring_t rcrp; 1989 1990 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1991 rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 1992 1993 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1994 "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d " 1995 "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle)); 1996 if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 1997 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1998 "<== nxge_rx_pkts_vring: no mp")); 1999 return (NULL); 2000 } 2001 2002 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 2003 mp)); 2004 2005 #ifdef NXGE_DEBUG 2006 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2007 "==> nxge_rx_pkts_vring:calling mac_rx " 2008 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 2009 "mac_handle $%p", 2010 mp->b_wptr - mp->b_rptr, 2011 mp, mp->b_cont, mp->b_next, 2012 rcrp, rcrp->rcr_mac_handle)); 2013 2014 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2015 "==> nxge_rx_pkts_vring: dump packets " 2016 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 2017 mp, 2018 mp->b_rptr, 2019 mp->b_wptr, 2020 nxge_dump_packet((char *)mp->b_rptr, 2021 mp->b_wptr - mp->b_rptr))); 2022 if (mp->b_cont) { 2023 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2024 "==> nxge_rx_pkts_vring: dump b_cont packets " 2025 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 2026 mp->b_cont, 2027 mp->b_cont->b_rptr, 2028 mp->b_cont->b_wptr, 2029 nxge_dump_packet((char *)mp->b_cont->b_rptr, 2030 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 2031 } 2032 if (mp->b_next) { 2033 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2034 "==> nxge_rx_pkts_vring: dump next packets " 2035 "(b_rptr $%p): %s", 2036 mp->b_next->b_rptr, 2037 nxge_dump_packet((char *)mp->b_next->b_rptr, 2038 mp->b_next->b_wptr - mp->b_next->b_rptr))); 2039 } 2040 #endif 2041 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2042 "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ", 2043 rcrp->rdc, rcrp->rcr_mac_handle)); 2044 2045 return (mp); 2046 } 2047 2048 2049 /* 2050 * This routine is the main packet receive processing function. 2051 * It gets the packet type, error code, and buffer related 2052 * information from the receive completion entry. 2053 * How many completion entries to process is based on the number of packets 2054 * queued by the hardware, a hardware maintained tail pointer 2055 * and a configurable receive packet count. 2056 * 2057 * A chain of message blocks will be created as result of processing 2058 * the completion entries. This chain of message blocks will be returned and 2059 * a hardware control status register will be updated with the number of 2060 * packets were removed from the hardware queue. 2061 * 2062 * The RCR ring lock is held when entering this function. 
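 *
 * The processing loop stops when the hardware queue length (capped at
 * nxge_max_rx_pkts) is exhausted, when a positive bytes_to_pickup
 * budget has been met (polling mode), or when nxge_max_intr_pkts
 * packets have been taken in interrupt mode.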
2063 */ 2064 static mblk_t * 2065 nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 2066 int bytes_to_pickup) 2067 { 2068 npi_handle_t handle; 2069 uint8_t channel; 2070 uint32_t comp_rd_index; 2071 p_rcr_entry_t rcr_desc_rd_head_p; 2072 p_rcr_entry_t rcr_desc_rd_head_pp; 2073 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 2074 uint16_t qlen, nrcr_read, npkt_read; 2075 uint32_t qlen_hw; 2076 boolean_t multi; 2077 rcrcfig_b_t rcr_cfg_b; 2078 int totallen = 0; 2079 #if defined(_BIG_ENDIAN) 2080 npi_status_t rs = NPI_SUCCESS; 2081 #endif 2082 2083 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 2084 "channel %d", rcr_p->rdc)); 2085 2086 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 2087 return (NULL); 2088 } 2089 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2090 channel = rcr_p->rdc; 2091 2092 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2093 "==> nxge_rx_pkts: START: rcr channel %d " 2094 "head_p $%p head_pp $%p index %d ", 2095 channel, rcr_p->rcr_desc_rd_head_p, 2096 rcr_p->rcr_desc_rd_head_pp, 2097 rcr_p->comp_rd_index)); 2098 2099 2100 #if !defined(_BIG_ENDIAN) 2101 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 2102 #else 2103 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 2104 if (rs != NPI_SUCCESS) { 2105 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 2106 "channel %d, get qlen failed 0x%08x", 2107 channel, rs)); 2108 return (NULL); 2109 } 2110 #endif 2111 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 2112 "qlen %d", channel, qlen)); 2113 2114 2115 2116 if (!qlen) { 2117 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2118 "==> nxge_rx_pkts:rcr channel %d " 2119 "qlen %d (no pkts)", channel, qlen)); 2120 2121 return (NULL); 2122 } 2123 2124 comp_rd_index = rcr_p->comp_rd_index; 2125 2126 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 2127 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 2128 nrcr_read = npkt_read = 0; 2129 2130 /* 2131 * Number of packets queued 2132 * (The jumbo or multi packet will be counted as only one 2133 * packets and it may take up more than one completion entry). 2134 */ 2135 qlen_hw = (qlen < nxge_max_rx_pkts) ? 2136 qlen : nxge_max_rx_pkts; 2137 head_mp = NULL; 2138 tail_mp = &head_mp; 2139 nmp = mp_cont = NULL; 2140 multi = B_FALSE; 2141 2142 while (qlen_hw) { 2143 2144 #ifdef NXGE_DEBUG 2145 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 2146 #endif 2147 /* 2148 * Process one completion ring entry. 
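 * A single frame may span several completion entries (jumbo or
 * multi-buffer frames); nxge_receive_packet() reports this through the
 * multi flag and returns either a new head mblk (nmp) or a continuation
 * mblk (mp_cont) for the chaining logic below.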
2149 */ 2150 nxge_receive_packet(nxgep, 2151 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 2152 2153 /* 2154 * message chaining modes 2155 */ 2156 if (nmp) { 2157 nmp->b_next = NULL; 2158 if (!multi && !mp_cont) { /* frame fits a partition */ 2159 *tail_mp = nmp; 2160 tail_mp = &nmp->b_next; 2161 totallen += MBLKL(nmp); 2162 nmp = NULL; 2163 } else if (multi && !mp_cont) { /* first segment */ 2164 *tail_mp = nmp; 2165 tail_mp = &nmp->b_cont; 2166 totallen += MBLKL(nmp); 2167 } else if (multi && mp_cont) { /* mid of multi segs */ 2168 *tail_mp = mp_cont; 2169 tail_mp = &mp_cont->b_cont; 2170 totallen += MBLKL(mp_cont); 2171 } else if (!multi && mp_cont) { /* last segment */ 2172 *tail_mp = mp_cont; 2173 tail_mp = &nmp->b_next; 2174 totallen += MBLKL(mp_cont); 2175 nmp = NULL; 2176 } 2177 } 2178 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2179 "==> nxge_rx_pkts: loop: rcr channel %d " 2180 "before updating: multi %d " 2181 "nrcr_read %d " 2182 "npk read %d " 2183 "head_pp $%p index %d ", 2184 channel, 2185 multi, 2186 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2187 comp_rd_index)); 2188 2189 if (!multi) { 2190 qlen_hw--; 2191 npkt_read++; 2192 } 2193 2194 /* 2195 * Update the next read entry. 2196 */ 2197 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2198 rcr_p->comp_wrap_mask); 2199 2200 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2201 rcr_p->rcr_desc_first_p, 2202 rcr_p->rcr_desc_last_p); 2203 2204 nrcr_read++; 2205 2206 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2207 "<== nxge_rx_pkts: (SAM, process one packet) " 2208 "nrcr_read %d", 2209 nrcr_read)); 2210 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2211 "==> nxge_rx_pkts: loop: rcr channel %d " 2212 "multi %d " 2213 "nrcr_read %d " 2214 "npk read %d " 2215 "head_pp $%p index %d ", 2216 channel, 2217 multi, 2218 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2219 comp_rd_index)); 2220 2221 if ((bytes_to_pickup != -1) && 2222 (totallen >= bytes_to_pickup)) { 2223 break; 2224 } 2225 2226 /* limit the number of packets for interrupt */ 2227 if (!(rcr_p->poll_flag)) { 2228 if (npkt_read == nxge_max_intr_pkts) { 2229 break; 2230 } 2231 } 2232 } 2233 2234 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2235 rcr_p->comp_rd_index = comp_rd_index; 2236 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2237 2238 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2239 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2240 rcr_p->intr_timeout = nxgep->intr_timeout; 2241 rcr_p->intr_threshold = nxgep->intr_threshold; 2242 rcr_cfg_b.value = 0x0ULL; 2243 if (rcr_p->intr_timeout) 2244 rcr_cfg_b.bits.ldw.entout = 1; 2245 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2246 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2247 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2248 channel, rcr_cfg_b.value); 2249 } 2250 2251 cs.bits.ldw.pktread = npkt_read; 2252 cs.bits.ldw.ptrread = nrcr_read; 2253 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2254 channel, cs.value); 2255 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2256 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2257 "head_pp $%p index %016llx ", 2258 channel, 2259 rcr_p->rcr_desc_rd_head_pp, 2260 rcr_p->comp_rd_index)); 2261 /* 2262 * Update RCR buffer pointer read and number of packets 2263 * read. 
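 * (The pktread and ptrread counts were written to RX_DMA_CTL_STAT above;
 * the hardware uses those fields to update its queue length, effectively
 * releasing the completion entries consumed by this pass.)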
2264 */ 2265 2266 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2267 "channel %d", rcr_p->rdc)); 2268 2269 return (head_mp); 2270 } 2271 2272 void 2273 nxge_receive_packet(p_nxge_t nxgep, 2274 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2275 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2276 { 2277 p_mblk_t nmp = NULL; 2278 uint64_t multi; 2279 uint64_t dcf_err; 2280 uint8_t channel; 2281 2282 boolean_t first_entry = B_TRUE; 2283 boolean_t is_tcp_udp = B_FALSE; 2284 boolean_t buffer_free = B_FALSE; 2285 boolean_t error_send_up = B_FALSE; 2286 uint8_t error_type; 2287 uint16_t l2_len; 2288 uint16_t skip_len; 2289 uint8_t pktbufsz_type; 2290 uint64_t rcr_entry; 2291 uint64_t *pkt_buf_addr_pp; 2292 uint64_t *pkt_buf_addr_p; 2293 uint32_t buf_offset; 2294 uint32_t bsize; 2295 uint32_t error_disp_cnt; 2296 uint32_t msg_index; 2297 p_rx_rbr_ring_t rx_rbr_p; 2298 p_rx_msg_t *rx_msg_ring_p; 2299 p_rx_msg_t rx_msg_p; 2300 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2301 nxge_status_t status = NXGE_OK; 2302 boolean_t is_valid = B_FALSE; 2303 p_nxge_rx_ring_stats_t rdc_stats; 2304 uint32_t bytes_read; 2305 uint64_t pkt_type; 2306 uint64_t frag; 2307 boolean_t pkt_too_long_err = B_FALSE; 2308 #ifdef NXGE_DEBUG 2309 int dump_len; 2310 #endif 2311 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2312 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2313 2314 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2315 2316 multi = (rcr_entry & RCR_MULTI_MASK); 2317 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2318 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2319 2320 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2321 frag = (rcr_entry & RCR_FRAG_MASK); 2322 2323 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2324 2325 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2326 RCR_PKTBUFSZ_SHIFT); 2327 #if defined(__i386) 2328 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2329 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2330 #else 2331 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2332 RCR_PKT_BUF_ADDR_SHIFT); 2333 #endif 2334 2335 channel = rcr_p->rdc; 2336 2337 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2338 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2339 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2340 "error_type 0x%x pkt_type 0x%x " 2341 "pktbufsz_type %d ", 2342 rcr_desc_rd_head_p, 2343 rcr_entry, pkt_buf_addr_pp, l2_len, 2344 multi, 2345 error_type, 2346 pkt_type, 2347 pktbufsz_type)); 2348 2349 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2350 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2351 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2352 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2353 rcr_entry, pkt_buf_addr_pp, l2_len, 2354 multi, 2355 error_type, 2356 pkt_type)); 2357 2358 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2359 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2360 "full pkt_buf_addr_pp $%p l2_len %d", 2361 rcr_entry, pkt_buf_addr_pp, l2_len)); 2362 2363 /* get the stats ptr */ 2364 rdc_stats = rcr_p->rdc_stats; 2365 2366 if (!l2_len) { 2367 2368 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2369 "<== nxge_receive_packet: failed: l2 length is 0.")); 2370 return; 2371 } 2372 2373 /* 2374 * Software workaround for BMAC hardware limitation that allows 2375 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2376 * instead of 0x2400 for jumbo. 
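 * A frame whose L2 length exceeds mac.maxframesize is therefore flagged
 * here (pkt_too_long_err) and accounted as an input error further down,
 * since the BMAC itself will have let it through.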
2377 */ 2378 if (l2_len > nxgep->mac.maxframesize) { 2379 pkt_too_long_err = B_TRUE; 2380 } 2381 2382 /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 2383 l2_len -= ETHERFCSL; 2384 2385 /* shift 6 bits to get the full io address */ 2386 #if defined(__i386) 2387 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2388 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2389 #else 2390 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2391 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2392 #endif 2393 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2394 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2395 "full pkt_buf_addr_pp $%p l2_len %d", 2396 rcr_entry, pkt_buf_addr_pp, l2_len)); 2397 2398 rx_rbr_p = rcr_p->rx_rbr_p; 2399 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2400 2401 if (first_entry) { 2402 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2403 RXDMA_HDR_SIZE_DEFAULT); 2404 2405 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2406 "==> nxge_receive_packet: first entry 0x%016llx " 2407 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2408 rcr_entry, pkt_buf_addr_pp, l2_len, 2409 hdr_size)); 2410 } 2411 2412 MUTEX_ENTER(&rx_rbr_p->lock); 2413 2414 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2415 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2416 "full pkt_buf_addr_pp $%p l2_len %d", 2417 rcr_entry, pkt_buf_addr_pp, l2_len)); 2418 2419 /* 2420 * Packet buffer address in the completion entry points 2421 * to the starting buffer address (offset 0). 2422 * Use the starting buffer address to locate the corresponding 2423 * kernel address. 2424 */ 2425 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2426 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2427 &buf_offset, 2428 &msg_index); 2429 2430 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2431 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2432 "full pkt_buf_addr_pp $%p l2_len %d", 2433 rcr_entry, pkt_buf_addr_pp, l2_len)); 2434 2435 if (status != NXGE_OK) { 2436 MUTEX_EXIT(&rx_rbr_p->lock); 2437 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2438 "<== nxge_receive_packet: found vaddr failed %d", 2439 status)); 2440 return; 2441 } 2442 2443 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2444 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2445 "full pkt_buf_addr_pp $%p l2_len %d", 2446 rcr_entry, pkt_buf_addr_pp, l2_len)); 2447 2448 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2449 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2450 "full pkt_buf_addr_pp $%p l2_len %d", 2451 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2452 2453 rx_msg_p = rx_msg_ring_p[msg_index]; 2454 2455 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2456 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2457 "full pkt_buf_addr_pp $%p l2_len %d", 2458 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2459 2460 switch (pktbufsz_type) { 2461 case RCR_PKTBUFSZ_0: 2462 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2463 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2464 "==> nxge_receive_packet: 0 buf %d", bsize)); 2465 break; 2466 case RCR_PKTBUFSZ_1: 2467 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2468 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2469 "==> nxge_receive_packet: 1 buf %d", bsize)); 2470 break; 2471 case RCR_PKTBUFSZ_2: 2472 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2473 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2474 "==> nxge_receive_packet: 2 buf %d", bsize)); 2475 break; 2476 case RCR_SINGLE_BLOCK: 2477 bsize = rx_msg_p->block_size; 2478 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2479 "==> nxge_receive_packet: single %d", bsize)); 2480 2481 break; 2482 default: 2483 MUTEX_EXIT(&rx_rbr_p->lock); 2484 return; 2485 } 2486 2487 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2488 (buf_offset + sw_offset_bytes), 
2489 (hdr_size + l2_len), 2490 DDI_DMA_SYNC_FORCPU); 2491 2492 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2493 "==> nxge_receive_packet: after first dump:usage count")); 2494 2495 if (rx_msg_p->cur_usage_cnt == 0) { 2496 if (rx_rbr_p->rbr_use_bcopy) { 2497 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2498 if (rx_rbr_p->rbr_consumed < 2499 rx_rbr_p->rbr_threshold_hi) { 2500 if (rx_rbr_p->rbr_threshold_lo == 0 || 2501 ((rx_rbr_p->rbr_consumed >= 2502 rx_rbr_p->rbr_threshold_lo) && 2503 (rx_rbr_p->rbr_bufsize_type >= 2504 pktbufsz_type))) { 2505 rx_msg_p->rx_use_bcopy = B_TRUE; 2506 } 2507 } else { 2508 rx_msg_p->rx_use_bcopy = B_TRUE; 2509 } 2510 } 2511 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2512 "==> nxge_receive_packet: buf %d (new block) ", 2513 bsize)); 2514 2515 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2516 rx_msg_p->pkt_buf_size = bsize; 2517 rx_msg_p->cur_usage_cnt = 1; 2518 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2519 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2520 "==> nxge_receive_packet: buf %d " 2521 "(single block) ", 2522 bsize)); 2523 /* 2524 * Buffer can be reused once the free function 2525 * is called. 2526 */ 2527 rx_msg_p->max_usage_cnt = 1; 2528 buffer_free = B_TRUE; 2529 } else { 2530 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2531 if (rx_msg_p->max_usage_cnt == 1) { 2532 buffer_free = B_TRUE; 2533 } 2534 } 2535 } else { 2536 rx_msg_p->cur_usage_cnt++; 2537 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2538 buffer_free = B_TRUE; 2539 } 2540 } 2541 2542 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2543 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2544 msg_index, l2_len, 2545 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2546 2547 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2548 rdc_stats->ierrors++; 2549 if (dcf_err) { 2550 rdc_stats->dcf_err++; 2551 #ifdef NXGE_DEBUG 2552 if (!rdc_stats->dcf_err) { 2553 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2554 "nxge_receive_packet: channel %d dcf_err rcr" 2555 " 0x%llx", channel, rcr_entry)); 2556 } 2557 #endif 2558 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2559 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2560 } else if (pkt_too_long_err) { 2561 rdc_stats->pkt_too_long_err++; 2562 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2563 " channel %d packet length [%d] > " 2564 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2565 nxgep->mac.maxframesize)); 2566 } else { 2567 /* Update error stats */ 2568 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2569 rdc_stats->errlog.compl_err_type = error_type; 2570 2571 switch (error_type) { 2572 /* 2573 * Do not send FMA ereport for RCR_L2_ERROR and 2574 * RCR_L4_CSUM_ERROR because most likely they indicate 2575 * back pressure rather than HW failures. 2576 */ 2577 case RCR_L2_ERROR: 2578 rdc_stats->l2_err++; 2579 if (rdc_stats->l2_err < 2580 error_disp_cnt) { 2581 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2582 " nxge_receive_packet:" 2583 " channel %d RCR L2_ERROR", 2584 channel)); 2585 } 2586 break; 2587 case RCR_L4_CSUM_ERROR: 2588 error_send_up = B_TRUE; 2589 rdc_stats->l4_cksum_err++; 2590 if (rdc_stats->l4_cksum_err < 2591 error_disp_cnt) { 2592 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2593 " nxge_receive_packet:" 2594 " channel %d" 2595 " RCR L4_CSUM_ERROR", channel)); 2596 } 2597 break; 2598 /* 2599 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2600 * RCR_ZCP_SOFT_ERROR because they reflect the same 2601 * FFLP and ZCP errors that have been reported by 2602 * nxge_fflp.c and nxge_zcp.c. 
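 * Both soft-error cases also set error_send_up, so the packet itself is
 * still passed up the stack; only the per-channel error counters are
 * updated here.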
2603 */ 2604 case RCR_FFLP_SOFT_ERROR: 2605 error_send_up = B_TRUE; 2606 rdc_stats->fflp_soft_err++; 2607 if (rdc_stats->fflp_soft_err < 2608 error_disp_cnt) { 2609 NXGE_ERROR_MSG((nxgep, 2610 NXGE_ERR_CTL, 2611 " nxge_receive_packet:" 2612 " channel %d" 2613 " RCR FFLP_SOFT_ERROR", channel)); 2614 } 2615 break; 2616 case RCR_ZCP_SOFT_ERROR: 2617 error_send_up = B_TRUE; 2618 rdc_stats->fflp_soft_err++; 2619 if (rdc_stats->zcp_soft_err < 2620 error_disp_cnt) 2621 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2622 " nxge_receive_packet: Channel %d" 2623 " RCR ZCP_SOFT_ERROR", channel)); 2624 break; 2625 default: 2626 rdc_stats->rcr_unknown_err++; 2627 if (rdc_stats->rcr_unknown_err 2628 < error_disp_cnt) { 2629 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2630 " nxge_receive_packet: Channel %d" 2631 " RCR entry 0x%llx error 0x%x", 2632 rcr_entry, channel, error_type)); 2633 } 2634 break; 2635 } 2636 } 2637 2638 /* 2639 * Update and repost buffer block if max usage 2640 * count is reached. 2641 */ 2642 if (error_send_up == B_FALSE) { 2643 atomic_inc_32(&rx_msg_p->ref_cnt); 2644 if (buffer_free == B_TRUE) { 2645 rx_msg_p->free = B_TRUE; 2646 } 2647 2648 MUTEX_EXIT(&rx_rbr_p->lock); 2649 nxge_freeb(rx_msg_p); 2650 return; 2651 } 2652 } 2653 2654 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2655 "==> nxge_receive_packet: DMA sync second ")); 2656 2657 bytes_read = rcr_p->rcvd_pkt_bytes; 2658 skip_len = sw_offset_bytes + hdr_size; 2659 if (!rx_msg_p->rx_use_bcopy) { 2660 /* 2661 * For loaned up buffers, the driver reference count 2662 * will be incremented first and then the free state. 2663 */ 2664 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2665 if (first_entry) { 2666 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2667 if (l2_len < bsize - skip_len) { 2668 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2669 } else { 2670 nmp->b_wptr = &nmp->b_rptr[bsize 2671 - skip_len]; 2672 } 2673 } else { 2674 if (l2_len - bytes_read < bsize) { 2675 nmp->b_wptr = 2676 &nmp->b_rptr[l2_len - bytes_read]; 2677 } else { 2678 nmp->b_wptr = &nmp->b_rptr[bsize]; 2679 } 2680 } 2681 } 2682 } else { 2683 if (first_entry) { 2684 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2685 l2_len < bsize - skip_len ? 2686 l2_len : bsize - skip_len); 2687 } else { 2688 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2689 l2_len - bytes_read < bsize ? 2690 l2_len - bytes_read : bsize); 2691 } 2692 } 2693 if (nmp != NULL) { 2694 if (first_entry) { 2695 /* 2696 * Jumbo packets may be received with more than one 2697 * buffer, increment ipackets for the first entry only. 2698 */ 2699 rdc_stats->ipackets++; 2700 2701 /* Update ibytes for kstat. */ 2702 rdc_stats->ibytes += skip_len 2703 + l2_len < bsize ? l2_len : bsize; 2704 /* 2705 * Update the number of bytes read so far for the 2706 * current frame. 2707 */ 2708 bytes_read = nmp->b_wptr - nmp->b_rptr; 2709 } else { 2710 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 
2711 l2_len - bytes_read : bsize; 2712 bytes_read += nmp->b_wptr - nmp->b_rptr; 2713 } 2714 2715 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2716 "==> nxge_receive_packet after dupb: " 2717 "rbr consumed %d " 2718 "pktbufsz_type %d " 2719 "nmp $%p rptr $%p wptr $%p " 2720 "buf_offset %d bzise %d l2_len %d skip_len %d", 2721 rx_rbr_p->rbr_consumed, 2722 pktbufsz_type, 2723 nmp, nmp->b_rptr, nmp->b_wptr, 2724 buf_offset, bsize, l2_len, skip_len)); 2725 } else { 2726 cmn_err(CE_WARN, "!nxge_receive_packet: " 2727 "update stats (error)"); 2728 atomic_inc_32(&rx_msg_p->ref_cnt); 2729 if (buffer_free == B_TRUE) { 2730 rx_msg_p->free = B_TRUE; 2731 } 2732 MUTEX_EXIT(&rx_rbr_p->lock); 2733 nxge_freeb(rx_msg_p); 2734 return; 2735 } 2736 2737 if (buffer_free == B_TRUE) { 2738 rx_msg_p->free = B_TRUE; 2739 } 2740 2741 is_valid = (nmp != NULL); 2742 2743 rcr_p->rcvd_pkt_bytes = bytes_read; 2744 2745 MUTEX_EXIT(&rx_rbr_p->lock); 2746 2747 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2748 atomic_inc_32(&rx_msg_p->ref_cnt); 2749 nxge_freeb(rx_msg_p); 2750 } 2751 2752 if (is_valid) { 2753 nmp->b_cont = NULL; 2754 if (first_entry) { 2755 *mp = nmp; 2756 *mp_cont = NULL; 2757 } else { 2758 *mp_cont = nmp; 2759 } 2760 } 2761 2762 /* 2763 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 2764 * If a packet is not fragmented and no error bit is set, then 2765 * L4 checksum is OK. 2766 */ 2767 2768 if (is_valid && !multi) { 2769 /* 2770 * If the checksum flag nxge_chksum_offload 2771 * is 1, TCP and UDP packets can be sent 2772 * up with good checksum. If the checksum flag 2773 * is set to 0, checksum reporting will apply to 2774 * TCP packets only (workaround for a hardware bug). 2775 * If the checksum flag nxge_cksum_offload is 2776 * greater than 1, both TCP and UDP packets 2777 * will not be reported its hardware checksum results. 2778 */ 2779 if (nxge_cksum_offload == 1) { 2780 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2781 pkt_type == RCR_PKT_IS_UDP) ? 2782 B_TRUE: B_FALSE); 2783 } else if (!nxge_cksum_offload) { 2784 /* TCP checksum only. */ 2785 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 2786 B_TRUE: B_FALSE); 2787 } 2788 2789 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2790 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2791 is_valid, multi, is_tcp_udp, frag, error_type)); 2792 2793 if (is_tcp_udp && !frag && !error_type) { 2794 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2795 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2796 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2797 "==> nxge_receive_packet: Full tcp/udp cksum " 2798 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2799 "error %d", 2800 is_valid, multi, is_tcp_udp, frag, error_type)); 2801 } 2802 } 2803 2804 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2805 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2806 2807 *multi_p = (multi == RCR_MULTI_MASK); 2808 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2809 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2810 *multi_p, nmp, *mp, *mp_cont)); 2811 } 2812 2813 /* 2814 * Enable polling for a ring. Interrupt for the ring is disabled when 2815 * the nxge interrupt comes (see nxge_rx_intr). 
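 * This routine only sets poll_flag under the ring lock; the logical
 * group interrupt is actually disarmed the next time nxge_rx_intr()
 * runs and observes the flag, and nxge_disable_poll() later re-arms it.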
2816 */ 2817 int 2818 nxge_enable_poll(void *arg) 2819 { 2820 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2821 p_rx_rcr_ring_t ringp; 2822 p_nxge_t nxgep; 2823 p_nxge_ldg_t ldgp; 2824 uint32_t channel; 2825 2826 if (ring_handle == NULL) { 2827 return (0); 2828 } 2829 2830 nxgep = ring_handle->nxgep; 2831 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2832 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2833 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2834 "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2835 ldgp = ringp->ldgp; 2836 if (ldgp == NULL) { 2837 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2838 "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2839 ringp->rdc)); 2840 return (0); 2841 } 2842 2843 MUTEX_ENTER(&ringp->lock); 2844 /* enable polling */ 2845 if (ringp->poll_flag == 0) { 2846 ringp->poll_flag = 1; 2847 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2848 "==> nxge_enable_poll: rdc %d set poll flag to 1", 2849 ringp->rdc)); 2850 } 2851 2852 MUTEX_EXIT(&ringp->lock); 2853 return (0); 2854 } 2855 /* 2856 * Disable polling for a ring and enable its interrupt. 2857 */ 2858 int 2859 nxge_disable_poll(void *arg) 2860 { 2861 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2862 p_rx_rcr_ring_t ringp; 2863 p_nxge_t nxgep; 2864 uint32_t channel; 2865 2866 if (ring_handle == NULL) { 2867 return (0); 2868 } 2869 2870 nxgep = ring_handle->nxgep; 2871 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2872 ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2873 2874 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2875 "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 2876 2877 MUTEX_ENTER(&ringp->lock); 2878 2879 /* disable polling: enable interrupt */ 2880 if (ringp->poll_flag) { 2881 npi_handle_t handle; 2882 rx_dma_ctl_stat_t cs; 2883 uint8_t channel; 2884 p_nxge_ldg_t ldgp; 2885 2886 /* 2887 * Get the control and status for this channel. 2888 */ 2889 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2890 channel = ringp->rdc; 2891 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 2892 channel, &cs.value); 2893 2894 /* 2895 * Enable mailbox update 2896 * Since packets were not read and the hardware uses 2897 * bits pktread and ptrread to update the queue 2898 * length, we need to set both bits to 0. 2899 */ 2900 cs.bits.ldw.pktread = 0; 2901 cs.bits.ldw.ptrread = 0; 2902 cs.bits.hdw.mex = 1; 2903 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 2904 cs.value); 2905 2906 /* 2907 * Rearm this logical group if this is a single device 2908 * group. 2909 */ 2910 ldgp = ringp->ldgp; 2911 if (ldgp == NULL) { 2912 ringp->poll_flag = 0; 2913 MUTEX_EXIT(&ringp->lock); 2914 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2915 "==> nxge_disable_poll: no ldgp rdc %d " 2916 "(still set poll to 0", ringp->rdc)); 2917 return (0); 2918 } 2919 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2920 "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2921 ringp->rdc, ldgp)); 2922 if (ldgp->nldvs == 1) { 2923 ldgimgm_t mgm; 2924 mgm.value = 0; 2925 mgm.bits.ldw.arm = 1; 2926 mgm.bits.ldw.timer = ldgp->ldg_timer; 2927 NXGE_REG_WR64(handle, 2928 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 2929 } 2930 ringp->poll_flag = 0; 2931 } 2932 2933 MUTEX_EXIT(&ringp->lock); 2934 return (0); 2935 } 2936 2937 /* 2938 * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
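 * Called from the MAC layer's ring polling path while the ring is in
 * polling mode (poll_flag asserted, see the ASSERT below). Returns a
 * chain of mblks limited to roughly bytes_to_pickup bytes; any error
 * events latched in the control/status register are handled before
 * returning.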
2939 */ 2940 mblk_t * 2941 nxge_rx_poll(void *arg, int bytes_to_pickup) 2942 { 2943 p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2944 p_rx_rcr_ring_t rcr_p; 2945 p_nxge_t nxgep; 2946 npi_handle_t handle; 2947 rx_dma_ctl_stat_t cs; 2948 mblk_t *mblk; 2949 p_nxge_ldv_t ldvp; 2950 uint32_t channel; 2951 2952 nxgep = ring_handle->nxgep; 2953 2954 /* 2955 * Get the control and status for this channel. 2956 */ 2957 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2958 channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2959 rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2960 MUTEX_ENTER(&rcr_p->lock); 2961 ASSERT(rcr_p->poll_flag == 1); 2962 2963 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2964 2965 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2966 "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2967 rcr_p->rdc, rcr_p->poll_flag)); 2968 mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2969 2970 ldvp = rcr_p->ldvp; 2971 /* error events. */ 2972 if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2973 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2974 } 2975 2976 MUTEX_EXIT(&rcr_p->lock); 2977 2978 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2979 "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 2980 return (mblk); 2981 } 2982 2983 2984 /*ARGSUSED*/ 2985 static nxge_status_t 2986 nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 2987 { 2988 p_nxge_rx_ring_stats_t rdc_stats; 2989 npi_handle_t handle; 2990 npi_status_t rs; 2991 boolean_t rxchan_fatal = B_FALSE; 2992 boolean_t rxport_fatal = B_FALSE; 2993 uint8_t portn; 2994 nxge_status_t status = NXGE_OK; 2995 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2996 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2997 2998 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2999 portn = nxgep->mac.portnum; 3000 rdc_stats = &nxgep->statsp->rdc_stats[channel]; 3001 3002 if (cs.bits.hdw.rbr_tmout) { 3003 rdc_stats->rx_rbr_tmout++; 3004 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3005 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 3006 rxchan_fatal = B_TRUE; 3007 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3008 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 3009 } 3010 if (cs.bits.hdw.rsp_cnt_err) { 3011 rdc_stats->rsp_cnt_err++; 3012 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3013 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 3014 rxchan_fatal = B_TRUE; 3015 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3016 "==> nxge_rx_err_evnts(channel %d): " 3017 "rsp_cnt_err", channel)); 3018 } 3019 if (cs.bits.hdw.byte_en_bus) { 3020 rdc_stats->byte_en_bus++; 3021 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3022 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 3023 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3024 "==> nxge_rx_err_evnts(channel %d): " 3025 "fatal error: byte_en_bus", channel)); 3026 rxchan_fatal = B_TRUE; 3027 } 3028 if (cs.bits.hdw.rsp_dat_err) { 3029 rdc_stats->rsp_dat_err++; 3030 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3031 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 3032 rxchan_fatal = B_TRUE; 3033 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3034 "==> nxge_rx_err_evnts(channel %d): " 3035 "fatal error: rsp_dat_err", channel)); 3036 } 3037 if (cs.bits.hdw.rcr_ack_err) { 3038 rdc_stats->rcr_ack_err++; 3039 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3040 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 3041 rxchan_fatal = B_TRUE; 3042 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3043 "==> nxge_rx_err_evnts(channel %d): " 3044 "fatal error: rcr_ack_err", channel)); 3045 } 3046 if (cs.bits.hdw.dc_fifo_err) { 3047 rdc_stats->dc_fifo_err++; 3048 
NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3049 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 3050 /* This is not a fatal error! */ 3051 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3052 "==> nxge_rx_err_evnts(channel %d): " 3053 "dc_fifo_err", channel)); 3054 rxport_fatal = B_TRUE; 3055 } 3056 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 3057 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 3058 &rdc_stats->errlog.pre_par, 3059 &rdc_stats->errlog.sha_par)) 3060 != NPI_SUCCESS) { 3061 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3062 "==> nxge_rx_err_evnts(channel %d): " 3063 "rcr_sha_par: get perr", channel)); 3064 return (NXGE_ERROR | rs); 3065 } 3066 if (cs.bits.hdw.rcr_sha_par) { 3067 rdc_stats->rcr_sha_par++; 3068 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3069 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 3070 rxchan_fatal = B_TRUE; 3071 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3072 "==> nxge_rx_err_evnts(channel %d): " 3073 "fatal error: rcr_sha_par", channel)); 3074 } 3075 if (cs.bits.hdw.rbr_pre_par) { 3076 rdc_stats->rbr_pre_par++; 3077 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3078 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 3079 rxchan_fatal = B_TRUE; 3080 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3081 "==> nxge_rx_err_evnts(channel %d): " 3082 "fatal error: rbr_pre_par", channel)); 3083 } 3084 } 3085 /* 3086 * The Following 4 status bits are for information, the system 3087 * is running fine. There is no need to send FMA ereports or 3088 * log messages. 3089 */ 3090 if (cs.bits.hdw.port_drop_pkt) { 3091 rdc_stats->port_drop_pkt++; 3092 } 3093 if (cs.bits.hdw.wred_drop) { 3094 rdc_stats->wred_drop++; 3095 } 3096 if (cs.bits.hdw.rbr_pre_empty) { 3097 rdc_stats->rbr_pre_empty++; 3098 } 3099 if (cs.bits.hdw.rcr_shadow_full) { 3100 rdc_stats->rcr_shadow_full++; 3101 } 3102 if (cs.bits.hdw.config_err) { 3103 rdc_stats->config_err++; 3104 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3105 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 3106 rxchan_fatal = B_TRUE; 3107 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3108 "==> nxge_rx_err_evnts(channel %d): " 3109 "config error", channel)); 3110 } 3111 if (cs.bits.hdw.rcrincon) { 3112 rdc_stats->rcrincon++; 3113 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3114 NXGE_FM_EREPORT_RDMC_RCRINCON); 3115 rxchan_fatal = B_TRUE; 3116 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3117 "==> nxge_rx_err_evnts(channel %d): " 3118 "fatal error: rcrincon error", channel)); 3119 } 3120 if (cs.bits.hdw.rcrfull) { 3121 rdc_stats->rcrfull++; 3122 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3123 NXGE_FM_EREPORT_RDMC_RCRFULL); 3124 rxchan_fatal = B_TRUE; 3125 if (rdc_stats->rcrfull < error_disp_cnt) 3126 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3127 "==> nxge_rx_err_evnts(channel %d): " 3128 "fatal error: rcrfull error", channel)); 3129 } 3130 if (cs.bits.hdw.rbr_empty) { 3131 /* 3132 * This bit is for information, there is no need 3133 * send FMA ereport or log a message. 
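 * rbr_empty indicates the receive block ring ran out of posted buffers;
 * in this path the condition is only counted in the channel kstats.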
3134 */ 3135 rdc_stats->rbr_empty++; 3136 } 3137 if (cs.bits.hdw.rbrfull) { 3138 rdc_stats->rbrfull++; 3139 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3140 NXGE_FM_EREPORT_RDMC_RBRFULL); 3141 rxchan_fatal = B_TRUE; 3142 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3143 "==> nxge_rx_err_evnts(channel %d): " 3144 "fatal error: rbr_full error", channel)); 3145 } 3146 if (cs.bits.hdw.rbrlogpage) { 3147 rdc_stats->rbrlogpage++; 3148 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3149 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 3150 rxchan_fatal = B_TRUE; 3151 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3152 "==> nxge_rx_err_evnts(channel %d): " 3153 "fatal error: rbr logical page error", channel)); 3154 } 3155 if (cs.bits.hdw.cfiglogpage) { 3156 rdc_stats->cfiglogpage++; 3157 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 3158 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 3159 rxchan_fatal = B_TRUE; 3160 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3161 "==> nxge_rx_err_evnts(channel %d): " 3162 "fatal error: cfig logical page error", channel)); 3163 } 3164 3165 if (rxport_fatal) { 3166 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3167 " nxge_rx_err_evnts: fatal error on Port #%d\n", 3168 portn)); 3169 if (isLDOMguest(nxgep)) { 3170 status = NXGE_ERROR; 3171 } else { 3172 status = nxge_ipp_fatal_err_recover(nxgep); 3173 if (status == NXGE_OK) { 3174 FM_SERVICE_RESTORED(nxgep); 3175 } 3176 } 3177 } 3178 3179 if (rxchan_fatal) { 3180 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3181 " nxge_rx_err_evnts: fatal error on Channel #%d\n", 3182 channel)); 3183 if (isLDOMguest(nxgep)) { 3184 status = NXGE_ERROR; 3185 } else { 3186 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 3187 if (status == NXGE_OK) { 3188 FM_SERVICE_RESTORED(nxgep); 3189 } 3190 } 3191 } 3192 3193 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 3194 3195 return (status); 3196 } 3197 3198 /* 3199 * nxge_rdc_hvio_setup 3200 * 3201 * This code appears to setup some Hypervisor variables. 3202 * 3203 * Arguments: 3204 * nxgep 3205 * channel 3206 * 3207 * Notes: 3208 * What does NIU_LP_WORKAROUND mean? 
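 * (NIU_LP_WORKAROUND appears to gate this sun4v-only logical-page
 * setup: the original I/O addresses and sizes of the data and control
 * DMA areas are stashed in the RBR ring so they can later be handed to
 * the hypervisor when the logical pages are configured.)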
3209 * 3210 * NPI/NXGE function calls: 3211 * na 3212 * 3213 * Context: 3214 * Any domain 3215 */ 3216 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3217 static void 3218 nxge_rdc_hvio_setup( 3219 nxge_t *nxgep, int channel) 3220 { 3221 nxge_dma_common_t *dma_common; 3222 nxge_dma_common_t *dma_control; 3223 rx_rbr_ring_t *ring; 3224 3225 ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3226 dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3227 3228 ring->hv_set = B_FALSE; 3229 3230 ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3231 dma_common->orig_ioaddr_pp; 3232 ring->hv_rx_buf_ioaddr_size = (uint64_t) 3233 dma_common->orig_alength; 3234 3235 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3236 "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3237 channel, ring->hv_rx_buf_base_ioaddr_pp, 3238 dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3239 dma_common->orig_alength, dma_common->orig_alength)); 3240 3241 dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3242 3243 ring->hv_rx_cntl_base_ioaddr_pp = 3244 (uint64_t)dma_control->orig_ioaddr_pp; 3245 ring->hv_rx_cntl_ioaddr_size = 3246 (uint64_t)dma_control->orig_alength; 3247 3248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3249 "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3250 channel, ring->hv_rx_cntl_base_ioaddr_pp, 3251 dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3252 dma_control->orig_alength, dma_control->orig_alength)); 3253 } 3254 #endif 3255 3256 /* 3257 * nxge_map_rxdma 3258 * 3259 * Map an RDC into our kernel space. 3260 * 3261 * Arguments: 3262 * nxgep 3263 * channel The channel to map. 3264 * 3265 * Notes: 3266 * 1. Allocate & initialise a memory pool, if necessary. 3267 * 2. Allocate however many receive buffers are required. 3268 * 3. Setup buffers, descriptors, and mailbox. 3269 * 3270 * NPI/NXGE function calls: 3271 * nxge_alloc_rx_mem_pool() 3272 * nxge_alloc_rbb() 3273 * nxge_map_rxdma_channel() 3274 * 3275 * Registers accessed: 3276 * 3277 * Context: 3278 * Any domain 3279 */ 3280 static nxge_status_t 3281 nxge_map_rxdma(p_nxge_t nxgep, int channel) 3282 { 3283 nxge_dma_common_t **data; 3284 nxge_dma_common_t **control; 3285 rx_rbr_ring_t **rbr_ring; 3286 rx_rcr_ring_t **rcr_ring; 3287 rx_mbox_t **mailbox; 3288 uint32_t chunks; 3289 3290 nxge_status_t status; 3291 3292 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 3293 3294 if (!nxgep->rx_buf_pool_p) { 3295 if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 3296 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3297 "<== nxge_map_rxdma: buf not allocated")); 3298 return (NXGE_ERROR); 3299 } 3300 } 3301 3302 if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 3303 return (NXGE_ERROR); 3304 3305 /* 3306 * Timeout should be set based on the system clock divider. 3307 * The following timeout value of 1 assumes that the 3308 * granularity (1000) is 3 microseconds running at 300MHz. 3309 */ 3310 3311 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 3312 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 3313 3314 /* 3315 * Map descriptors from the buffer polls for each dma channel. 3316 */ 3317 3318 /* 3319 * Set up and prepare buffer blocks, descriptors 3320 * and mailbox. 
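 * Three per-channel resources are wired up here: the data buffer pool
 * chunks feed the receive block ring (RBR), and the control pool
 * provides the space for the completion ring (RCR) and the mailbox.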
3321 */ 3322 data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3323 rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 3324 chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 3325 3326 control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3327 rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 3328 3329 mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3330 3331 status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 3332 chunks, control, rcr_ring, mailbox); 3333 if (status != NXGE_OK) { 3334 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3335 "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 3336 "returned 0x%x", 3337 channel, status)); 3338 return (status); 3339 } 3340 nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 3341 nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 3342 nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 3343 &nxgep->statsp->rdc_stats[channel]; 3344 3345 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3346 if (!isLDOMguest(nxgep)) 3347 nxge_rdc_hvio_setup(nxgep, channel); 3348 #endif 3349 3350 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3351 "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 3352 3353 return (status); 3354 } 3355 3356 static void 3357 nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 3358 { 3359 rx_rbr_ring_t *rbr_ring; 3360 rx_rcr_ring_t *rcr_ring; 3361 rx_mbox_t *mailbox; 3362 3363 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 3364 3365 if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3366 !nxgep->rx_mbox_areas_p) 3367 return; 3368 3369 rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3370 rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3371 mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3372 3373 if (!rbr_ring || !rcr_ring || !mailbox) 3374 return; 3375 3376 (void) nxge_unmap_rxdma_channel( 3377 nxgep, channel, rbr_ring, rcr_ring, mailbox); 3378 3379 nxge_free_rxb(nxgep, channel); 3380 3381 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 3382 } 3383 3384 nxge_status_t 3385 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3386 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3387 uint32_t num_chunks, 3388 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3389 p_rx_mbox_t *rx_mbox_p) 3390 { 3391 int status = NXGE_OK; 3392 3393 /* 3394 * Set up and prepare buffer blocks, descriptors 3395 * and mailbox. 3396 */ 3397 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3398 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3399 /* 3400 * Receive buffer blocks 3401 */ 3402 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3403 dma_buf_p, rbr_p, num_chunks); 3404 if (status != NXGE_OK) { 3405 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3406 "==> nxge_map_rxdma_channel (channel %d): " 3407 "map buffer failed 0x%x", channel, status)); 3408 goto nxge_map_rxdma_channel_exit; 3409 } 3410 3411 /* 3412 * Receive block ring, completion ring and mailbox. 
3413 */ 3414 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3415 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3416 if (status != NXGE_OK) { 3417 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3418 "==> nxge_map_rxdma_channel (channel %d): " 3419 "map config failed 0x%x", channel, status)); 3420 goto nxge_map_rxdma_channel_fail2; 3421 } 3422 3423 goto nxge_map_rxdma_channel_exit; 3424 3425 nxge_map_rxdma_channel_fail3: 3426 /* Free rbr, rcr */ 3427 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3428 "==> nxge_map_rxdma_channel: free rbr/rcr " 3429 "(status 0x%x channel %d)", 3430 status, channel)); 3431 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3432 *rcr_p, *rx_mbox_p); 3433 3434 nxge_map_rxdma_channel_fail2: 3435 /* Free buffer blocks */ 3436 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3437 "==> nxge_map_rxdma_channel: free rx buffers" 3438 "(nxgep 0x%x status 0x%x channel %d)", 3439 nxgep, status, channel)); 3440 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3441 3442 status = NXGE_ERROR; 3443 3444 nxge_map_rxdma_channel_exit: 3445 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3446 "<== nxge_map_rxdma_channel: " 3447 "(nxgep 0x%x status 0x%x channel %d)", 3448 nxgep, status, channel)); 3449 3450 return (status); 3451 } 3452 3453 /*ARGSUSED*/ 3454 static void 3455 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3456 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3457 { 3458 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3459 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3460 3461 /* 3462 * unmap receive block ring, completion ring and mailbox. 3463 */ 3464 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3465 rcr_p, rx_mbox_p); 3466 3467 /* unmap buffer blocks */ 3468 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3469 3470 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3471 } 3472 3473 /*ARGSUSED*/ 3474 static nxge_status_t 3475 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3476 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3477 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3478 { 3479 p_rx_rbr_ring_t rbrp; 3480 p_rx_rcr_ring_t rcrp; 3481 p_rx_mbox_t mboxp; 3482 p_nxge_dma_common_t cntl_dmap; 3483 p_nxge_dma_common_t dmap; 3484 p_rx_msg_t *rx_msg_ring; 3485 p_rx_msg_t rx_msg_p; 3486 p_rbr_cfig_a_t rcfga_p; 3487 p_rbr_cfig_b_t rcfgb_p; 3488 p_rcrcfig_a_t cfga_p; 3489 p_rcrcfig_b_t cfgb_p; 3490 p_rxdma_cfig1_t cfig1_p; 3491 p_rxdma_cfig2_t cfig2_p; 3492 p_rbr_kick_t kick_p; 3493 uint32_t dmaaddrp; 3494 uint32_t *rbr_vaddrp; 3495 uint32_t bkaddr; 3496 nxge_status_t status = NXGE_OK; 3497 int i; 3498 uint32_t nxge_port_rcr_size; 3499 3500 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3501 "==> nxge_map_rxdma_channel_cfg_ring")); 3502 3503 cntl_dmap = *dma_cntl_p; 3504 3505 /* Map in the receive block ring */ 3506 rbrp = *rbr_p; 3507 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3508 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3509 /* 3510 * Zero out buffer block ring descriptors. 
3511 */ 3512 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3513 3514 rcfga_p = &(rbrp->rbr_cfga); 3515 rcfgb_p = &(rbrp->rbr_cfgb); 3516 kick_p = &(rbrp->rbr_kick); 3517 rcfga_p->value = 0; 3518 rcfgb_p->value = 0; 3519 kick_p->value = 0; 3520 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3521 rcfga_p->value = (rbrp->rbr_addr & 3522 (RBR_CFIG_A_STDADDR_MASK | 3523 RBR_CFIG_A_STDADDR_BASE_MASK)); 3524 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3525 3526 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3527 rcfgb_p->bits.ldw.vld0 = 1; 3528 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3529 rcfgb_p->bits.ldw.vld1 = 1; 3530 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3531 rcfgb_p->bits.ldw.vld2 = 1; 3532 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3533 3534 /* 3535 * For each buffer block, enter receive block address to the ring. 3536 */ 3537 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3538 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3539 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3540 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3541 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3542 3543 rx_msg_ring = rbrp->rx_msg_ring; 3544 for (i = 0; i < rbrp->tnblocks; i++) { 3545 rx_msg_p = rx_msg_ring[i]; 3546 rx_msg_p->nxgep = nxgep; 3547 rx_msg_p->rx_rbr_p = rbrp; 3548 bkaddr = (uint32_t) 3549 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3550 >> RBR_BKADDR_SHIFT)); 3551 rx_msg_p->free = B_FALSE; 3552 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3553 3554 *rbr_vaddrp++ = bkaddr; 3555 } 3556 3557 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3558 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3559 3560 rbrp->rbr_rd_index = 0; 3561 3562 rbrp->rbr_consumed = 0; 3563 rbrp->rbr_use_bcopy = B_TRUE; 3564 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3565 /* 3566 * Do bcopy on packets greater than bcopy size once 3567 * the lo threshold is reached. 3568 * This lo threshold should be less than the hi threshold. 3569 * 3570 * Do bcopy on every packet once the hi threshold is reached. 
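 * The thresholds are expressed as a fraction of rbb_max: the
 * NXGE_RX_COPY_1 .. NXGE_RX_COPY_7 settings compute the threshold as
 * rbb_max * threshold / NXGE_RX_BCOPY_SCALE consumed buffers,
 * NXGE_RX_COPY_NONE disables bcopy entirely (threshold = rbb_max), and
 * NXGE_RX_COPY_ALL forces bcopy for every packet (threshold 0).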
3571 */ 3572 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3573 /* default it to use hi */ 3574 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3575 } 3576 3577 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3578 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3579 } 3580 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3581 3582 switch (nxge_rx_threshold_hi) { 3583 default: 3584 case NXGE_RX_COPY_NONE: 3585 /* Do not do bcopy at all */ 3586 rbrp->rbr_use_bcopy = B_FALSE; 3587 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3588 break; 3589 3590 case NXGE_RX_COPY_1: 3591 case NXGE_RX_COPY_2: 3592 case NXGE_RX_COPY_3: 3593 case NXGE_RX_COPY_4: 3594 case NXGE_RX_COPY_5: 3595 case NXGE_RX_COPY_6: 3596 case NXGE_RX_COPY_7: 3597 rbrp->rbr_threshold_hi = 3598 rbrp->rbb_max * 3599 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3600 break; 3601 3602 case NXGE_RX_COPY_ALL: 3603 rbrp->rbr_threshold_hi = 0; 3604 break; 3605 } 3606 3607 switch (nxge_rx_threshold_lo) { 3608 default: 3609 case NXGE_RX_COPY_NONE: 3610 /* Do not do bcopy at all */ 3611 if (rbrp->rbr_use_bcopy) { 3612 rbrp->rbr_use_bcopy = B_FALSE; 3613 } 3614 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3615 break; 3616 3617 case NXGE_RX_COPY_1: 3618 case NXGE_RX_COPY_2: 3619 case NXGE_RX_COPY_3: 3620 case NXGE_RX_COPY_4: 3621 case NXGE_RX_COPY_5: 3622 case NXGE_RX_COPY_6: 3623 case NXGE_RX_COPY_7: 3624 rbrp->rbr_threshold_lo = 3625 rbrp->rbb_max * 3626 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3627 break; 3628 3629 case NXGE_RX_COPY_ALL: 3630 rbrp->rbr_threshold_lo = 0; 3631 break; 3632 } 3633 3634 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3635 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3636 "rbb_max %d " 3637 "rbrp->rbr_bufsize_type %d " 3638 "rbb_threshold_hi %d " 3639 "rbb_threshold_lo %d", 3640 dma_channel, 3641 rbrp->rbb_max, 3642 rbrp->rbr_bufsize_type, 3643 rbrp->rbr_threshold_hi, 3644 rbrp->rbr_threshold_lo)); 3645 3646 rbrp->page_valid.value = 0; 3647 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3648 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3649 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3650 rbrp->page_hdl.value = 0; 3651 3652 rbrp->page_valid.bits.ldw.page0 = 1; 3653 rbrp->page_valid.bits.ldw.page1 = 1; 3654 3655 /* Map in the receive completion ring */ 3656 rcrp = (p_rx_rcr_ring_t) 3657 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3658 rcrp->rdc = dma_channel; 3659 3660 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3661 rcrp->comp_size = nxge_port_rcr_size; 3662 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3663 3664 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3665 3666 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3667 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3668 sizeof (rcr_entry_t)); 3669 rcrp->comp_rd_index = 0; 3670 rcrp->comp_wt_index = 0; 3671 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3672 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3673 #if defined(__i386) 3674 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3675 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3676 #else 3677 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3678 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3679 #endif 3680 3681 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3682 (nxge_port_rcr_size - 1); 3683 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3684 (nxge_port_rcr_size - 1); 3685 3686 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3687 "==> nxge_map_rxdma_channel_cfg_ring: " 3688 "channel %d " 3689 "rbr_vaddrp $%p " 3690 "rcr_desc_rd_head_p $%p " 3691 "rcr_desc_rd_head_pp 
$%p " 3692 "rcr_desc_rd_last_p $%p " 3693 "rcr_desc_rd_last_pp $%p ", 3694 dma_channel, 3695 rbr_vaddrp, 3696 rcrp->rcr_desc_rd_head_p, 3697 rcrp->rcr_desc_rd_head_pp, 3698 rcrp->rcr_desc_last_p, 3699 rcrp->rcr_desc_last_pp)); 3700 3701 /* 3702 * Zero out buffer block ring descriptors. 3703 */ 3704 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3705 rcrp->intr_timeout = nxgep->intr_timeout; 3706 rcrp->intr_threshold = nxgep->intr_threshold; 3707 rcrp->full_hdr_flag = B_FALSE; 3708 rcrp->sw_priv_hdr_len = 0; 3709 3710 cfga_p = &(rcrp->rcr_cfga); 3711 cfgb_p = &(rcrp->rcr_cfgb); 3712 cfga_p->value = 0; 3713 cfgb_p->value = 0; 3714 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3715 cfga_p->value = (rcrp->rcr_addr & 3716 (RCRCFIG_A_STADDR_MASK | 3717 RCRCFIG_A_STADDR_BASE_MASK)); 3718 3719 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3720 RCRCFIG_A_LEN_SHIF); 3721 3722 /* 3723 * Timeout should be set based on the system clock divider. 3724 * The following timeout value of 1 assumes that the 3725 * granularity (1000) is 3 microseconds running at 300MHz. 3726 */ 3727 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3728 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3729 cfgb_p->bits.ldw.entout = 1; 3730 3731 /* Map in the mailbox */ 3732 mboxp = (p_rx_mbox_t) 3733 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3734 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3735 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3736 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3737 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3738 cfig1_p->value = cfig2_p->value = 0; 3739 3740 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3741 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3742 "==> nxge_map_rxdma_channel_cfg_ring: " 3743 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3744 dma_channel, cfig1_p->value, cfig2_p->value, 3745 mboxp->mbox_addr)); 3746 3747 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3748 & 0xfff); 3749 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3750 3751 3752 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3753 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3754 RXDMA_CFIG2_MBADDR_L_MASK); 3755 3756 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3757 3758 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3759 "==> nxge_map_rxdma_channel_cfg_ring: " 3760 "channel %d damaddrp $%p " 3761 "cfg1 0x%016llx cfig2 0x%016llx", 3762 dma_channel, dmaaddrp, 3763 cfig1_p->value, cfig2_p->value)); 3764 3765 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3766 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3767 3768 rbrp->rx_rcr_p = rcrp; 3769 rcrp->rx_rbr_p = rbrp; 3770 *rcr_p = rcrp; 3771 *rx_mbox_p = mboxp; 3772 3773 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3774 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3775 3776 return (status); 3777 } 3778 3779 /*ARGSUSED*/ 3780 static void 3781 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3782 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3783 { 3784 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3785 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3786 rcr_p->rdc)); 3787 3788 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3789 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3790 3791 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3792 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3793 } 3794 3795 static nxge_status_t 3796 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3797 p_nxge_dma_common_t *dma_buf_p, 3798 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3799 { 3800 p_rx_rbr_ring_t rbrp; 3801 
p_nxge_dma_common_t dma_bufp, tmp_bufp; 3802 p_rx_msg_t *rx_msg_ring; 3803 p_rx_msg_t rx_msg_p; 3804 p_mblk_t mblk_p; 3805 3806 rxring_info_t *ring_info; 3807 nxge_status_t status = NXGE_OK; 3808 int i, j, index; 3809 uint32_t size, bsize, nblocks, nmsgs; 3810 3811 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3812 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3813 channel)); 3814 3815 dma_bufp = tmp_bufp = *dma_buf_p; 3816 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3817 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3818 "chunks bufp 0x%016llx", 3819 channel, num_chunks, dma_bufp)); 3820 3821 nmsgs = 0; 3822 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3823 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3824 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3825 "bufp 0x%016llx nblocks %d nmsgs %d", 3826 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3827 nmsgs += tmp_bufp->nblocks; 3828 } 3829 if (!nmsgs) { 3830 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3831 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3832 "no msg blocks", 3833 channel)); 3834 status = NXGE_ERROR; 3835 goto nxge_map_rxdma_channel_buf_ring_exit; 3836 } 3837 3838 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3839 3840 size = nmsgs * sizeof (p_rx_msg_t); 3841 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3842 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3843 KM_SLEEP); 3844 3845 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3846 (void *)nxgep->interrupt_cookie); 3847 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3848 (void *)nxgep->interrupt_cookie); 3849 rbrp->rdc = channel; 3850 rbrp->num_blocks = num_chunks; 3851 rbrp->tnblocks = nmsgs; 3852 rbrp->rbb_max = nmsgs; 3853 rbrp->rbr_max_size = nmsgs; 3854 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3855 3856 /* 3857 * Buffer sizes suggested by NIU architect. 3858 * 256, 512 and 2K. 3859 */ 3860 3861 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3862 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3863 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3864 3865 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3866 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3867 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3868 3869 rbrp->block_size = nxgep->rx_default_block_size; 3870 3871 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3872 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3873 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3874 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3875 } else { 3876 if (rbrp->block_size >= 0x2000) { 3877 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3878 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3879 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3880 } else { 3881 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3882 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3883 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3884 } 3885 } 3886 3887 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3888 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3889 "actual rbr max %d rbb_max %d nmsgs %d " 3890 "rbrp->block_size %d default_block_size %d " 3891 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3892 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3893 rbrp->block_size, nxgep->rx_default_block_size, 3894 nxge_rbr_size, nxge_rbr_spare_size)); 3895 3896 /* Map in buffers from the buffer pool. 
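 * Each DMA chunk contributes dma_bufp->nblocks receive blocks. For
 * every block an rx_msg_t (with its attached mblk) is allocated via
 * nxge_allocb(), and ring_info->buffer[] records the chunk's DVMA and
 * kernel base addresses so packet buffer addresses reported in the RCR
 * can later be translated back to kernel virtual addresses (see
 * nxge_rxbuf_pp_to_vp()).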
*/ 3897 index = 0; 3898 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3899 bsize = dma_bufp->block_size; 3900 nblocks = dma_bufp->nblocks; 3901 #if defined(__i386) 3902 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3903 #else 3904 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3905 #endif 3906 ring_info->buffer[i].buf_index = i; 3907 ring_info->buffer[i].buf_size = dma_bufp->alength; 3908 ring_info->buffer[i].start_index = index; 3909 #if defined(__i386) 3910 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3911 #else 3912 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3913 #endif 3914 3915 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3916 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3917 "chunk %d" 3918 " nblocks %d chunk_size %x block_size 0x%x " 3919 "dma_bufp $%p", channel, i, 3920 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3921 dma_bufp)); 3922 3923 for (j = 0; j < nblocks; j++) { 3924 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3925 dma_bufp)) == NULL) { 3926 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3927 "allocb failed (index %d i %d j %d)", 3928 index, i, j)); 3929 goto nxge_map_rxdma_channel_buf_ring_fail1; 3930 } 3931 rx_msg_ring[index] = rx_msg_p; 3932 rx_msg_p->block_index = index; 3933 rx_msg_p->shifted_addr = (uint32_t) 3934 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3935 RBR_BKADDR_SHIFT)); 3936 3937 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3938 "index %d j %d rx_msg_p $%p mblk %p", 3939 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3940 3941 mblk_p = rx_msg_p->rx_mblk_p; 3942 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3943 3944 rbrp->rbr_ref_cnt++; 3945 index++; 3946 rx_msg_p->buf_dma.dma_channel = channel; 3947 } 3948 3949 rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3950 if (dma_bufp->contig_alloc_type) { 3951 rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3952 } 3953 3954 if (dma_bufp->kmem_alloc_type) { 3955 rbrp->rbr_alloc_type = KMEM_ALLOC; 3956 } 3957 3958 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3959 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3960 "chunk %d" 3961 " nblocks %d chunk_size %x block_size 0x%x " 3962 "dma_bufp $%p", 3963 channel, i, 3964 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3965 dma_bufp)); 3966 } 3967 if (i < rbrp->num_blocks) { 3968 goto nxge_map_rxdma_channel_buf_ring_fail1; 3969 } 3970 3971 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3972 "nxge_map_rxdma_channel_buf_ring: done buf init " 3973 "channel %d msg block entries %d", 3974 channel, index)); 3975 ring_info->block_size_mask = bsize - 1; 3976 rbrp->rx_msg_ring = rx_msg_ring; 3977 rbrp->dma_bufp = dma_buf_p; 3978 rbrp->ring_info = ring_info; 3979 3980 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3981 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3982 " nxge_map_rxdma_channel_buf_ring: " 3983 "channel %d done buf info init", channel)); 3984 3985 /* 3986 * Finally, permit nxge_freeb() to call nxge_post_page(). 
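 * Moving rbr_state to RBR_POSTING is what allows buffers returned by
 * nxge_freeb() to be reposted to the hardware RBR from this point on.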
3987 */ 3988 rbrp->rbr_state = RBR_POSTING; 3989 3990 *rbr_p = rbrp; 3991 goto nxge_map_rxdma_channel_buf_ring_exit; 3992 3993 nxge_map_rxdma_channel_buf_ring_fail1: 3994 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3995 " nxge_map_rxdma_channel_buf_ring: failed channel %d (status 0x%x)", 3996 channel, status)); 3997 3998 index--; 3999 for (; index >= 0; index--) { 4000 rx_msg_p = rx_msg_ring[index]; 4001 if (rx_msg_p != NULL) { 4002 freeb(rx_msg_p->rx_mblk_p); 4003 rx_msg_ring[index] = NULL; 4004 } 4005 } 4006 nxge_map_rxdma_channel_buf_ring_fail: 4007 MUTEX_DESTROY(&rbrp->post_lock); 4008 MUTEX_DESTROY(&rbrp->lock); 4009 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4010 KMEM_FREE(rx_msg_ring, size); 4011 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 4012 4013 status = NXGE_ERROR; 4014 4015 nxge_map_rxdma_channel_buf_ring_exit: 4016 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4017 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 4018 4019 return (status); 4020 } 4021 4022 /*ARGSUSED*/ 4023 static void 4024 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 4025 p_rx_rbr_ring_t rbr_p) 4026 { 4027 p_rx_msg_t *rx_msg_ring; 4028 p_rx_msg_t rx_msg_p; 4029 rxring_info_t *ring_info; 4030 int i; 4031 uint32_t size; 4032 #ifdef NXGE_DEBUG 4033 int num_chunks; 4034 #endif 4035 4036 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4037 "==> nxge_unmap_rxdma_channel_buf_ring")); 4038 if (rbr_p == NULL) { 4039 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4040 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 4041 return; 4042 } 4043 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4044 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 4045 rbr_p->rdc)); 4046 4047 rx_msg_ring = rbr_p->rx_msg_ring; 4048 ring_info = rbr_p->ring_info; 4049 4050 if (rx_msg_ring == NULL || ring_info == NULL) { 4051 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4052 "<== nxge_unmap_rxdma_channel_buf_ring: " 4053 "rx_msg_ring $%p ring_info $%p", 4054 rx_msg_ring, ring_info)); 4055 return; 4056 } 4057 4058 #ifdef NXGE_DEBUG 4059 num_chunks = rbr_p->num_blocks; 4060 #endif 4061 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 4062 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4063 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 4064 "tnblocks %d (max %d) size ptrs %d ", 4065 rbr_p->rdc, num_chunks, 4066 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 4067 4068 for (i = 0; i < rbr_p->tnblocks; i++) { 4069 rx_msg_p = rx_msg_ring[i]; 4070 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4071 " nxge_unmap_rxdma_channel_buf_ring: " 4072 "rx_msg_p $%p", 4073 rx_msg_p)); 4074 if (rx_msg_p != NULL) { 4075 freeb(rx_msg_p->rx_mblk_p); 4076 rx_msg_ring[i] = NULL; 4077 } 4078 } 4079 4080 /* 4081 * We may no longer use the mutex <post_lock>. By setting 4082 * <rbr_state> to anything but POSTING, we prevent 4083 * nxge_post_page() from accessing a dead mutex. 4084 */ 4085 rbr_p->rbr_state = RBR_UNMAPPING; 4086 MUTEX_DESTROY(&rbr_p->post_lock); 4087 4088 MUTEX_DESTROY(&rbr_p->lock); 4089 4090 if (rbr_p->rbr_ref_cnt == 0) { 4091 /* 4092 * This is the normal state of affairs. 4093 * Need to free the following buffers: 4094 * - data buffers 4095 * - rx_msg ring 4096 * - ring_info 4097 * - rbr ring 4098 */ 4099 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4100 "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4101 nxge_rxdma_databuf_free(rbr_p); 4102 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4103 KMEM_FREE(rx_msg_ring, size); 4104 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4105 } else { 4106 /* 4107 * Some of our buffers are still being used. 4108 * Therefore, tell nxge_freeb() this ring is 4109 * unmapped, so it may free <rbr_p> for us.
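 * (rbr_ref_cnt was bumped once per buffer in the mapping loop above and
 * is presumably dropped by nxge_freeb(); once the ring is marked
 * RBR_UNMAPPED, the final nxge_freeb() call is expected to release
 * <rbr_p> itself, which is why it is deliberately not freed here.)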
4110 */ 4111 rbr_p->rbr_state = RBR_UNMAPPED; 4112 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4113 "unmap_rxdma_buf_ring: %d %s outstanding.", 4114 rbr_p->rbr_ref_cnt, 4115 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs")); 4116 } 4117 4118 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4119 "<== nxge_unmap_rxdma_channel_buf_ring")); 4120 } 4121 4122 /* 4123 * nxge_rxdma_hw_start_common 4124 * 4125 * Arguments: 4126 * nxgep 4127 * 4128 * Notes: 4129 * 4130 * NPI/NXGE function calls: 4131 * nxge_init_fzc_rx_common(); 4132 * nxge_init_fzc_rxdma_port(); 4133 * 4134 * Registers accessed: 4135 * 4136 * Context: 4137 * Service domain 4138 */ 4139 static nxge_status_t 4140 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 4141 { 4142 nxge_status_t status = NXGE_OK; 4143 4144 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4145 4146 /* 4147 * Load the sharable parameters by writing to the 4148 * function zero control registers. These FZC registers 4149 * should be initialized only once for the entire chip. 4150 */ 4151 (void) nxge_init_fzc_rx_common(nxgep); 4152 4153 /* 4154 * Initialize the RXDMA port specific FZC control configurations. 4155 * These FZC registers are pertaining to each port. 4156 */ 4157 (void) nxge_init_fzc_rxdma_port(nxgep); 4158 4159 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 4160 4161 return (status); 4162 } 4163 4164 static nxge_status_t 4165 nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 4166 { 4167 int i, ndmas; 4168 p_rx_rbr_rings_t rx_rbr_rings; 4169 p_rx_rbr_ring_t *rbr_rings; 4170 p_rx_rcr_rings_t rx_rcr_rings; 4171 p_rx_rcr_ring_t *rcr_rings; 4172 p_rx_mbox_areas_t rx_mbox_areas_p; 4173 p_rx_mbox_t *rx_mbox_p; 4174 nxge_status_t status = NXGE_OK; 4175 4176 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 4177 4178 rx_rbr_rings = nxgep->rx_rbr_rings; 4179 rx_rcr_rings = nxgep->rx_rcr_rings; 4180 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4181 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4182 "<== nxge_rxdma_hw_start: NULL ring pointers")); 4183 return (NXGE_ERROR); 4184 } 4185 ndmas = rx_rbr_rings->ndmas; 4186 if (ndmas == 0) { 4187 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4188 "<== nxge_rxdma_hw_start: no dma channel allocated")); 4189 return (NXGE_ERROR); 4190 } 4191 4192 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4193 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 4194 4195 rbr_rings = rx_rbr_rings->rbr_rings; 4196 rcr_rings = rx_rcr_rings->rcr_rings; 4197 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 4198 if (rx_mbox_areas_p) { 4199 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 4200 } 4201 4202 i = channel; 4203 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4204 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 4205 ndmas, channel)); 4206 status = nxge_rxdma_start_channel(nxgep, channel, 4207 (p_rx_rbr_ring_t)rbr_rings[i], 4208 (p_rx_rcr_ring_t)rcr_rings[i], 4209 (p_rx_mbox_t)rx_mbox_p[i]); 4210 if (status != NXGE_OK) { 4211 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4212 "==> nxge_rxdma_hw_start: disable " 4213 "(status 0x%x channel %d)", status, channel)); 4214 return (status); 4215 } 4216 4217 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 4218 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4219 rx_rbr_rings, rx_rcr_rings)); 4220 4221 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4222 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 4223 4224 return (status); 4225 } 4226 4227 static void 4228 nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 4229 { 4230 p_rx_rbr_rings_t rx_rbr_rings; 4231 p_rx_rcr_rings_t rx_rcr_rings; 4232 4233 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> 
nxge_rxdma_hw_stop")); 4234 4235 rx_rbr_rings = nxgep->rx_rbr_rings; 4236 rx_rcr_rings = nxgep->rx_rcr_rings; 4237 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 4238 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4239 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 4240 return; 4241 } 4242 4243 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4244 "==> nxge_rxdma_hw_stop(channel %d)", 4245 channel)); 4246 (void) nxge_rxdma_stop_channel(nxgep, channel); 4247 4248 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 4249 "rx_rbr_rings 0x%016llx rings 0x%016llx", 4250 rx_rbr_rings, rx_rcr_rings)); 4251 4252 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 4253 } 4254 4255 4256 static nxge_status_t 4257 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 4258 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 4259 4260 { 4261 npi_handle_t handle; 4262 npi_status_t rs = NPI_SUCCESS; 4263 rx_dma_ctl_stat_t cs; 4264 rx_dma_ent_msk_t ent_mask; 4265 nxge_status_t status = NXGE_OK; 4266 4267 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 4268 4269 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4270 4271 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 4272 "npi handle addr $%p acc $%p", 4273 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4274 4275 /* Reset RXDMA channel, but not if you're a guest. */ 4276 if (!isLDOMguest(nxgep)) { 4277 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4278 if (rs != NPI_SUCCESS) { 4279 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4280 "==> nxge_init_fzc_rdc: " 4281 "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4282 channel, rs)); 4283 return (NXGE_ERROR | rs); 4284 } 4285 4286 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4287 "==> nxge_rxdma_start_channel: reset done: channel %d", 4288 channel)); 4289 } 4290 4291 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4292 if (isLDOMguest(nxgep)) 4293 (void) nxge_rdc_lp_conf(nxgep, channel); 4294 #endif 4295 4296 /* 4297 * Initialize the RXDMA channel specific FZC control 4298 * configurations. These FZC registers are pertaining 4299 * to each RX channel (logical pages). 4300 */ 4301 if (!isLDOMguest(nxgep)) { 4302 status = nxge_init_fzc_rxdma_channel(nxgep, channel); 4303 if (status != NXGE_OK) { 4304 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4305 "==> nxge_rxdma_start_channel: " 4306 "init fzc rxdma failed (0x%08x channel %d)", 4307 status, channel)); 4308 return (status); 4309 } 4310 4311 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4312 "==> nxge_rxdma_start_channel: fzc done")); 4313 } 4314 4315 /* Set up the interrupt event masks. 
*/ 4316 ent_mask.value = 0; 4317 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 4318 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4319 &ent_mask); 4320 if (rs != NPI_SUCCESS) { 4321 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4322 "==> nxge_rxdma_start_channel: " 4323 "init rxdma event masks failed " 4324 "(0x%08x channel %d)", 4325 rs, channel)); 4326 return (NXGE_ERROR | rs); 4327 } 4328 4329 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4330 "==> nxge_rxdma_start_channel: " 4331 "event done: channel %d (mask 0x%016llx)", 4332 channel, ent_mask.value)); 4333 4334 /* Initialize the receive DMA control and status register */ 4335 cs.value = 0; 4336 cs.bits.hdw.mex = 1; 4337 cs.bits.hdw.rcrthres = 1; 4338 cs.bits.hdw.rcrto = 1; 4339 cs.bits.hdw.rbr_empty = 1; 4340 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4341 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4342 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value)); 4343 if (status != NXGE_OK) { 4344 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4345 "==> nxge_rxdma_start_channel: " 4346 "init rxdma control register failed (0x%08x channel %d)", 4347 status, channel)); 4348 return (status); 4349 } 4350 4351 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4352 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4353 4354 /* 4355 * Load RXDMA descriptors, buffers, mailbox, 4356 * initialize the receive DMA channels and 4357 * enable each DMA channel. 4358 */ 4359 status = nxge_enable_rxdma_channel(nxgep, 4360 channel, rbr_p, rcr_p, mbox_p); 4361 4362 if (status != NXGE_OK) { 4363 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4364 " nxge_rxdma_start_channel: " 4365 " enable rxdma failed (0x%08x channel %d)", 4366 status, channel)); 4367 return (status); 4368 } 4369 4370 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4371 "==> nxge_rxdma_start_channel: enabled channel %d", channel)); 4372 4373 if (isLDOMguest(nxgep)) { 4374 /* Add interrupt handler for this channel.
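 * In an LDOMs guest the channel's receive interrupt is plumbed through
 * the hybrid I/O framework (nxge_hio_intr_add() with VP_BOUND_RX) rather
 * than through the service-domain setup used above.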
*/ 4375 if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 4376 != NXGE_OK) { 4377 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4378 " nxge_rxdma_start_channel: " 4379 " nxge_hio_intr_add failed (0x%08x channel %d)", 4380 status, channel)); 4381 } 4382 } 4383 4384 ent_mask.value = 0; 4385 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4386 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4387 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4388 &ent_mask); 4389 if (rs != NPI_SUCCESS) { 4390 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4391 "==> nxge_rxdma_start_channel: " 4392 "init rxdma event masks failed (0x%08x channel %d)", 4393 status, channel)); 4394 return (NXGE_ERROR | rs); 4395 } 4396 4397 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4398 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4399 4400 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4401 4402 return (NXGE_OK); 4403 } 4404 4405 static nxge_status_t 4406 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4407 { 4408 npi_handle_t handle; 4409 npi_status_t rs = NPI_SUCCESS; 4410 rx_dma_ctl_stat_t cs; 4411 rx_dma_ent_msk_t ent_mask; 4412 nxge_status_t status = NXGE_OK; 4413 4414 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4415 4416 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4417 4418 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4419 "npi handle addr $%p acc $%p", 4420 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4421 4422 if (!isLDOMguest(nxgep)) { 4423 /* 4424 * Stop RxMAC = A.9.2.6 4425 */ 4426 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4427 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4428 "nxge_rxdma_stop_channel: " 4429 "Failed to disable RxMAC")); 4430 } 4431 4432 /* 4433 * Drain IPP Port = A.9.3.6 4434 */ 4435 (void) nxge_ipp_drain(nxgep); 4436 } 4437 4438 /* Reset RXDMA channel */ 4439 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4440 if (rs != NPI_SUCCESS) { 4441 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4442 " nxge_rxdma_stop_channel: " 4443 " reset rxdma failed (0x%08x channel %d)", 4444 rs, channel)); 4445 return (NXGE_ERROR | rs); 4446 } 4447 4448 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4449 "==> nxge_rxdma_stop_channel: reset done")); 4450 4451 /* Set up the interrupt event masks. */ 4452 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4453 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4454 &ent_mask); 4455 if (rs != NPI_SUCCESS) { 4456 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4457 "==> nxge_rxdma_stop_channel: " 4458 "set rxdma event masks failed (0x%08x channel %d)", 4459 rs, channel)); 4460 return (NXGE_ERROR | rs); 4461 } 4462 4463 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4464 "==> nxge_rxdma_stop_channel: event done")); 4465 4466 /* 4467 * Initialize the receive DMA control and status register 4468 */ 4469 cs.value = 0; 4470 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4471 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4472 " to default (all 0s) 0x%08x", cs.value)); 4473 if (status != NXGE_OK) { 4474 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4475 " nxge_rxdma_stop_channel: init rxdma" 4476 " control register failed (0x%08x channel %d", 4477 status, channel)); 4478 return (status); 4479 } 4480 4481 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4482 "==> nxge_rxdma_stop_channel: control done")); 4483 4484 /* 4485 * Make sure channel is disabled. 
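 * By this point the channel has been reset, every event has been masked
 * (RX_DMA_ENT_MSK_ALL) and the control/status register cleared; the
 * explicit disable below is the final quiescing step before the RxMAC is
 * re-enabled, presumably so that the port's other channels can keep
 * receiving.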
4486 */ 4487 status = nxge_disable_rxdma_channel(nxgep, channel); 4488 4489 if (status != NXGE_OK) { 4490 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4491 " nxge_rxdma_stop_channel: " 4492 " init enable rxdma failed (0x%08x channel %d)", 4493 status, channel)); 4494 return (status); 4495 } 4496 4497 if (!isLDOMguest(nxgep)) { 4498 /* 4499 * Enable RxMAC = A.9.2.10 4500 */ 4501 if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4502 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4503 "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4504 } 4505 } 4506 4507 NXGE_DEBUG_MSG((nxgep, 4508 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4509 4510 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4511 4512 return (NXGE_OK); 4513 } 4514 4515 nxge_status_t 4516 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4517 { 4518 npi_handle_t handle; 4519 p_nxge_rdc_sys_stats_t statsp; 4520 rx_ctl_dat_fifo_stat_t stat; 4521 uint32_t zcp_err_status; 4522 uint32_t ipp_err_status; 4523 nxge_status_t status = NXGE_OK; 4524 npi_status_t rs = NPI_SUCCESS; 4525 boolean_t my_err = B_FALSE; 4526 4527 handle = nxgep->npi_handle; 4528 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4529 4530 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4531 4532 if (rs != NPI_SUCCESS) 4533 return (NXGE_ERROR | rs); 4534 4535 if (stat.bits.ldw.id_mismatch) { 4536 statsp->id_mismatch++; 4537 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4538 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4539 /* Global fatal error encountered */ 4540 } 4541 4542 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4543 switch (nxgep->mac.portnum) { 4544 case 0: 4545 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4546 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4547 my_err = B_TRUE; 4548 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4549 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4550 } 4551 break; 4552 case 1: 4553 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4554 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4555 my_err = B_TRUE; 4556 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4557 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4558 } 4559 break; 4560 case 2: 4561 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4562 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4563 my_err = B_TRUE; 4564 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4565 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4566 } 4567 break; 4568 case 3: 4569 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4570 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4571 my_err = B_TRUE; 4572 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4573 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4574 } 4575 break; 4576 default: 4577 return (NXGE_ERROR); 4578 } 4579 } 4580 4581 if (my_err) { 4582 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4583 zcp_err_status); 4584 if (status != NXGE_OK) 4585 return (status); 4586 } 4587 4588 return (NXGE_OK); 4589 } 4590 4591 static nxge_status_t 4592 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4593 uint32_t zcp_status) 4594 { 4595 boolean_t rxport_fatal = B_FALSE; 4596 p_nxge_rdc_sys_stats_t statsp; 4597 nxge_status_t status = NXGE_OK; 4598 uint8_t portn; 4599 4600 portn = nxgep->mac.portnum; 4601 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4602 4603 if (ipp_status & (0x1 << portn)) { 4604 statsp->ipp_eop_err++; 4605 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4606 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4607 rxport_fatal = B_TRUE; 4608 } 4609 4610 if (zcp_status & (0x1 << 
portn)) { 4611 statsp->zcp_eop_err++; 4612 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4613 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4614 rxport_fatal = B_TRUE; 4615 } 4616 4617 if (rxport_fatal) { 4618 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4619 " nxge_rxdma_handle_port_error: " 4620 " fatal error on Port #%d\n", 4621 portn)); 4622 status = nxge_rx_port_fatal_err_recover(nxgep); 4623 if (status == NXGE_OK) { 4624 FM_SERVICE_RESTORED(nxgep); 4625 } 4626 } 4627 4628 return (status); 4629 } 4630 4631 static nxge_status_t 4632 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4633 { 4634 npi_handle_t handle; 4635 npi_status_t rs = NPI_SUCCESS; 4636 nxge_status_t status = NXGE_OK; 4637 p_rx_rbr_ring_t rbrp; 4638 p_rx_rcr_ring_t rcrp; 4639 p_rx_mbox_t mboxp; 4640 rx_dma_ent_msk_t ent_mask; 4641 p_nxge_dma_common_t dmap; 4642 int ring_idx; 4643 uint32_t ref_cnt; 4644 p_rx_msg_t rx_msg_p; 4645 int i; 4646 uint32_t nxge_port_rcr_size; 4647 4648 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4649 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4650 "Recovering from RxDMAChannel#%d error...", channel)); 4651 4652 /* 4653 * Stop the dma channel waits for the stop done. 4654 * If the stop done bit is not set, then create 4655 * an error. 4656 */ 4657 4658 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4659 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4660 4661 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4662 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4663 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4664 4665 MUTEX_ENTER(&rcrp->lock); 4666 MUTEX_ENTER(&rbrp->lock); 4667 MUTEX_ENTER(&rbrp->post_lock); 4668 4669 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4670 4671 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4672 if (rs != NPI_SUCCESS) { 4673 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4674 "nxge_disable_rxdma_channel:failed")); 4675 goto fail; 4676 } 4677 4678 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4679 4680 /* Disable interrupt */ 4681 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4682 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4683 if (rs != NPI_SUCCESS) { 4684 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4685 "nxge_rxdma_stop_channel: " 4686 "set rxdma event masks failed (channel %d)", 4687 channel)); 4688 } 4689 4690 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4691 4692 /* Reset RXDMA channel */ 4693 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4694 if (rs != NPI_SUCCESS) { 4695 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4696 "nxge_rxdma_fatal_err_recover: " 4697 " reset rxdma failed (channel %d)", channel)); 4698 goto fail; 4699 } 4700 4701 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4702 4703 mboxp = 4704 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4705 4706 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4707 rbrp->rbr_rd_index = 0; 4708 4709 rcrp->comp_rd_index = 0; 4710 rcrp->comp_wt_index = 0; 4711 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4712 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4713 #if defined(__i386) 4714 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4715 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4716 #else 4717 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4718 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4719 #endif 4720 4721 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4722 (nxge_port_rcr_size - 1); 4723 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4724 (nxge_port_rcr_size - 1); 4725 
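	/*
	 * The RBR/RCR indexes and descriptor head pointers above are reset
	 * to their initial values and the RCR descriptor area is zeroed
	 * below, so the re-started channel begins with an empty completion
	 * ring.
	 */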
4726 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4727 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4728 4729 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4730 4731 for (i = 0; i < rbrp->rbr_max_size; i++) { 4732 rx_msg_p = rbrp->rx_msg_ring[i]; 4733 ref_cnt = rx_msg_p->ref_cnt; 4734 if (ref_cnt != 1) { 4735 if (rx_msg_p->cur_usage_cnt != 4736 rx_msg_p->max_usage_cnt) { 4737 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4738 "buf[%d]: cur_usage_cnt = %d " 4739 "max_usage_cnt = %d\n", i, 4740 rx_msg_p->cur_usage_cnt, 4741 rx_msg_p->max_usage_cnt)); 4742 } else { 4743 /* Buffer can be re-posted */ 4744 rx_msg_p->free = B_TRUE; 4745 rx_msg_p->cur_usage_cnt = 0; 4746 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4747 rx_msg_p->pkt_buf_size = 0; 4748 } 4749 } 4750 } 4751 4752 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4753 4754 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4755 if (status != NXGE_OK) { 4756 goto fail; 4757 } 4758 4759 MUTEX_EXIT(&rbrp->post_lock); 4760 MUTEX_EXIT(&rbrp->lock); 4761 MUTEX_EXIT(&rcrp->lock); 4762 4763 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4764 "Recovery Successful, RxDMAChannel#%d Restored", 4765 channel)); 4766 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4767 4768 return (NXGE_OK); 4769 fail: 4770 MUTEX_EXIT(&rbrp->post_lock); 4771 MUTEX_EXIT(&rbrp->lock); 4772 MUTEX_EXIT(&rcrp->lock); 4773 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4774 4775 return (NXGE_ERROR | rs); 4776 } 4777 4778 nxge_status_t 4779 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4780 { 4781 nxge_grp_set_t *set = &nxgep->rx_set; 4782 nxge_status_t status = NXGE_OK; 4783 int rdc; 4784 4785 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4786 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4787 "Recovering from RxPort error...")); 4788 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 4789 4790 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4791 goto fail; 4792 4793 NXGE_DELAY(1000); 4794 4795 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 4796 4797 for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4798 if ((1 << rdc) & set->owned.map) { 4799 if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 4800 != NXGE_OK) { 4801 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4802 "Could not recover channel %d", rdc)); 4803 } 4804 } 4805 } 4806 4807 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 4808 4809 /* Reset IPP */ 4810 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4811 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4812 "nxge_rx_port_fatal_err_recover: " 4813 "Failed to reset IPP")); 4814 goto fail; 4815 } 4816 4817 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4818 4819 /* Reset RxMAC */ 4820 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4821 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4822 "nxge_rx_port_fatal_err_recover: " 4823 "Failed to reset RxMAC")); 4824 goto fail; 4825 } 4826 4827 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4828 4829 /* Re-Initialize IPP */ 4830 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4831 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4832 "nxge_rx_port_fatal_err_recover: " 4833 "Failed to init IPP")); 4834 goto fail; 4835 } 4836 4837 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4838 4839 /* Re-Initialize RxMAC */ 4840 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4841 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4842 "nxge_rx_port_fatal_err_recover: " 4843 "Failed to reset RxMAC")); 4844 goto fail; 4845 } 4846 4847 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4848 4849 /* 
Re-enable RxMAC */ 4850 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4851 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4852 "nxge_rx_port_fatal_err_recover: " 4853 "Failed to enable RxMAC")); 4854 goto fail; 4855 } 4856 4857 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4858 "Recovery Successful, RxPort Restored")); 4859 4860 return (NXGE_OK); 4861 fail: 4862 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4863 return (status); 4864 } 4865 4866 void 4867 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4868 { 4869 rx_dma_ctl_stat_t cs; 4870 rx_ctl_dat_fifo_stat_t cdfs; 4871 4872 switch (err_id) { 4873 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4874 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4875 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4876 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4877 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4878 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4879 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4880 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4881 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4882 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4883 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4884 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4885 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4886 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4887 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4888 chan, &cs.value); 4889 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4890 cs.bits.hdw.rcr_ack_err = 1; 4891 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4892 cs.bits.hdw.dc_fifo_err = 1; 4893 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4894 cs.bits.hdw.rcr_sha_par = 1; 4895 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4896 cs.bits.hdw.rbr_pre_par = 1; 4897 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4898 cs.bits.hdw.rbr_tmout = 1; 4899 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4900 cs.bits.hdw.rsp_cnt_err = 1; 4901 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4902 cs.bits.hdw.byte_en_bus = 1; 4903 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4904 cs.bits.hdw.rsp_dat_err = 1; 4905 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4906 cs.bits.hdw.config_err = 1; 4907 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4908 cs.bits.hdw.rcrincon = 1; 4909 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4910 cs.bits.hdw.rcrfull = 1; 4911 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4912 cs.bits.hdw.rbrfull = 1; 4913 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4914 cs.bits.hdw.rbrlogpage = 1; 4915 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4916 cs.bits.hdw.cfiglogpage = 1; 4917 #if defined(__i386) 4918 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4919 cs.value); 4920 #else 4921 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4922 cs.value); 4923 #endif 4924 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4925 chan, cs.value); 4926 break; 4927 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4928 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4929 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4930 cdfs.value = 0; 4931 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4932 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4933 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4934 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4935 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4936 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4937 #if defined(__i386) 4938 cmn_err(CE_NOTE, 4939 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4940 cdfs.value); 4941 #else 4942 cmn_err(CE_NOTE, 4943 "!Write 
0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4944 cdfs.value); 4945 #endif 4946 NXGE_REG_WR64(nxgep->npi_handle, 4947 RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 4948 break; 4949 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4950 break; 4951 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4952 break; 4953 } 4954 } 4955 4956 static void 4957 nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4958 { 4959 rxring_info_t *ring_info; 4960 int index; 4961 uint32_t chunk_size; 4962 uint64_t kaddr; 4963 uint_t num_blocks; 4964 4965 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4966 4967 if (rbr_p == NULL) { 4968 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4969 "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4970 return; 4971 } 4972 4973 if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4974 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4975 "==> nxge_rxdma_databuf_free: DDI")); 4976 return; 4977 } 4978 4979 ring_info = rbr_p->ring_info; 4980 if (ring_info == NULL) { 4981 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4982 "==> nxge_rxdma_databuf_free: NULL ring info")); 4983 return; 4984 } 4985 num_blocks = rbr_p->num_blocks; 4986 for (index = 0; index < num_blocks; index++) { 4987 kaddr = ring_info->buffer[index].kaddr; 4988 chunk_size = ring_info->buffer[index].buf_size; 4989 NXGE_DEBUG_MSG((NULL, DMA_CTL, 4990 "==> nxge_rxdma_databuf_free: free chunk %d " 4991 "kaddrp $%p chunk size %d", 4992 index, kaddr, chunk_size)); 4993 if (kaddr == NULL) continue; 4994 nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4995 ring_info->buffer[index].kaddr = NULL; 4996 } 4997 4998 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4999 } 5000 5001 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5002 extern void contig_mem_free(void *, size_t); 5003 #endif 5004 5005 void 5006 nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 5007 { 5008 NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 5009 5010 if (kaddr == NULL || !buf_size) { 5011 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5012 "==> nxge_free_buf: invalid kaddr $%p size to free %d", 5013 kaddr, buf_size)); 5014 return; 5015 } 5016 5017 switch (alloc_type) { 5018 case KMEM_ALLOC: 5019 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5020 "==> nxge_free_buf: freeing kmem $%p size %d", 5021 kaddr, buf_size)); 5022 #if defined(__i386) 5023 KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 5024 #else 5025 KMEM_FREE((void *)kaddr, buf_size); 5026 #endif 5027 break; 5028 5029 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 5030 case CONTIG_MEM_ALLOC: 5031 NXGE_DEBUG_MSG((NULL, DMA_CTL, 5032 "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 5033 kaddr, buf_size)); 5034 contig_mem_free((void *)kaddr, buf_size); 5035 break; 5036 #endif 5037 5038 default: 5039 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 5040 "<== nxge_free_buf: unsupported alloc type %d", 5041 alloc_type)); 5042 return; 5043 } 5044 5045 NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5046 } 5047