/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
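 *
 * Note: as suggested by the names and by nxge_dupb()/nxge_dupb_bcopy()
 * later in this file, these tunables select between loaning a DMA
 * buffer up the stack (desballoc) and copying the packet into a fresh
 * mblk (bcopy), depending on how heavily the receive ring is currently
 * consumed.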
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

static nxge_status_t nxge_map_rxdma(p_nxge_t);
static void nxge_unmap_rxdma(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
static void nxge_rxdma_hw_stop_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t);
static void nxge_rxdma_hw_stop(p_nxge_t);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

mblk_t *
nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t,
    p_rx_rcr_ring_t *, rx_dma_ctl_stat_t);

static void nxge_receive_packet(p_nxge_t,
    p_rx_rcr_ring_t,
    p_rcr_entry_t,
    boolean_t *,
    mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static void nxge_rx_pkts_vring(p_nxge_t, uint_t,
    p_nxge_ldv_t, rx_dma_ctl_stat_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t,
    p_nxge_ldv_t, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);


static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

	status = nxge_map_rxdma(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_init_rxdma: status 0x%x", status));
		return (status);
	}

	status = nxge_rxdma_hw_start_common(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxgep);
		/* Do not try to start channels on rings just unmapped. */
		return (status);
	}

	status = nxge_rxdma_hw_start(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxgep);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_init_rxdma_channels: status 0x%x", status));

	return (status);
}

void
nxge_uninit_rxdma_channels(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));

	nxge_rxdma_hw_stop(nxgep);
	nxge_rxdma_hw_stop_common(nxgep);
	nxge_unmap_rxdma(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_uninit_rxdma_channels"));
}

nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
176 { 177 npi_handle_t handle; 178 npi_status_t rs = NPI_SUCCESS; 179 nxge_status_t status = NXGE_OK; 180 181 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 182 183 handle = NXGE_DEV_NPI_HANDLE(nxgep); 184 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 185 186 if (rs != NPI_SUCCESS) { 187 status = NXGE_ERROR | rs; 188 } 189 190 return (status); 191 } 192 193 void 194 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 195 { 196 int i, ndmas; 197 uint16_t channel; 198 p_rx_rbr_rings_t rx_rbr_rings; 199 p_rx_rbr_ring_t *rbr_rings; 200 npi_handle_t handle; 201 202 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 203 204 handle = NXGE_DEV_NPI_HANDLE(nxgep); 205 (void) npi_rxdma_dump_fzc_regs(handle); 206 207 rx_rbr_rings = nxgep->rx_rbr_rings; 208 if (rx_rbr_rings == NULL) { 209 NXGE_DEBUG_MSG((nxgep, RX_CTL, 210 "<== nxge_rxdma_regs_dump_channels: " 211 "NULL ring pointer")); 212 return; 213 } 214 if (rx_rbr_rings->rbr_rings == NULL) { 215 NXGE_DEBUG_MSG((nxgep, RX_CTL, 216 "<== nxge_rxdma_regs_dump_channels: " 217 " NULL rbr rings pointer")); 218 return; 219 } 220 221 ndmas = rx_rbr_rings->ndmas; 222 if (!ndmas) { 223 NXGE_DEBUG_MSG((nxgep, RX_CTL, 224 "<== nxge_rxdma_regs_dump_channels: no channel")); 225 return; 226 } 227 228 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 229 "==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas)); 230 231 rbr_rings = rx_rbr_rings->rbr_rings; 232 for (i = 0; i < ndmas; i++) { 233 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 234 continue; 235 } 236 channel = rbr_rings[i]->rdc; 237 (void) nxge_dump_rxdma_channel(nxgep, channel); 238 } 239 240 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 241 242 } 243 244 nxge_status_t 245 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 246 { 247 npi_handle_t handle; 248 npi_status_t rs = NPI_SUCCESS; 249 nxge_status_t status = NXGE_OK; 250 251 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 252 253 handle = NXGE_DEV_NPI_HANDLE(nxgep); 254 rs = npi_rxdma_dump_rdc_regs(handle, channel); 255 256 if (rs != NPI_SUCCESS) { 257 status = NXGE_ERROR | rs; 258 } 259 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 260 return (status); 261 } 262 263 nxge_status_t 264 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 265 p_rx_dma_ent_msk_t mask_p) 266 { 267 npi_handle_t handle; 268 npi_status_t rs = NPI_SUCCESS; 269 nxge_status_t status = NXGE_OK; 270 271 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 272 "<== nxge_init_rxdma_channel_event_mask")); 273 274 handle = NXGE_DEV_NPI_HANDLE(nxgep); 275 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 276 if (rs != NPI_SUCCESS) { 277 status = NXGE_ERROR | rs; 278 } 279 280 return (status); 281 } 282 283 nxge_status_t 284 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 285 p_rx_dma_ctl_stat_t cs_p) 286 { 287 npi_handle_t handle; 288 npi_status_t rs = NPI_SUCCESS; 289 nxge_status_t status = NXGE_OK; 290 291 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 292 "<== nxge_init_rxdma_channel_cntl_stat")); 293 294 handle = NXGE_DEV_NPI_HANDLE(nxgep); 295 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 296 297 if (rs != NPI_SUCCESS) { 298 status = NXGE_ERROR | rs; 299 } 300 301 return (status); 302 } 303 304 nxge_status_t 305 nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp, 306 uint8_t rdc) 307 { 308 npi_handle_t handle; 309 npi_status_t rs = NPI_SUCCESS; 310 p_nxge_dma_pt_cfg_t p_dma_cfgp; 311 p_nxge_rdc_grp_t rdc_grp_p; 312 uint8_t actual_rdcgrp, actual_rdc; 313 314 
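	/*
	 * This routine records "rdc" as the default channel of the RDC
	 * group and programs it into hardware.  NXGE_ACTUAL_RDCGRP() and
	 * NXGE_ACTUAL_RDC() (defined at the top of this file) convert the
	 * per-port logical group/channel numbers into absolute hardware
	 * indices by adding the partition's start_rdc_grpid/start_rdc
	 * offsets; e.g. logical group 0 with a start_rdc_grpid of 4 maps
	 * to hardware group 4.
	 */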
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 315 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 316 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 317 318 handle = NXGE_DEV_NPI_HANDLE(nxgep); 319 320 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 321 rdc_grp_p->rdc[0] = rdc; 322 323 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 324 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 325 326 rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp, 327 actual_rdc); 328 329 if (rs != NPI_SUCCESS) { 330 return (NXGE_ERROR | rs); 331 } 332 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 333 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 334 return (NXGE_OK); 335 } 336 337 nxge_status_t 338 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 339 { 340 npi_handle_t handle; 341 342 uint8_t actual_rdc; 343 npi_status_t rs = NPI_SUCCESS; 344 345 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 346 " ==> nxge_rxdma_cfg_port_default_rdc")); 347 348 handle = NXGE_DEV_NPI_HANDLE(nxgep); 349 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 350 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 351 352 353 if (rs != NPI_SUCCESS) { 354 return (NXGE_ERROR | rs); 355 } 356 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 357 " <== nxge_rxdma_cfg_port_default_rdc")); 358 359 return (NXGE_OK); 360 } 361 362 nxge_status_t 363 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 364 uint16_t pkts) 365 { 366 npi_status_t rs = NPI_SUCCESS; 367 npi_handle_t handle; 368 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 369 " ==> nxge_rxdma_cfg_rcr_threshold")); 370 handle = NXGE_DEV_NPI_HANDLE(nxgep); 371 372 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 373 374 if (rs != NPI_SUCCESS) { 375 return (NXGE_ERROR | rs); 376 } 377 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 378 return (NXGE_OK); 379 } 380 381 nxge_status_t 382 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 383 uint16_t tout, uint8_t enable) 384 { 385 npi_status_t rs = NPI_SUCCESS; 386 npi_handle_t handle; 387 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 388 handle = NXGE_DEV_NPI_HANDLE(nxgep); 389 if (enable == 0) { 390 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 391 } else { 392 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 393 tout); 394 } 395 396 if (rs != NPI_SUCCESS) { 397 return (NXGE_ERROR | rs); 398 } 399 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 400 return (NXGE_OK); 401 } 402 403 nxge_status_t 404 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 405 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 406 { 407 npi_handle_t handle; 408 rdc_desc_cfg_t rdc_desc; 409 p_rcrcfig_b_t cfgb_p; 410 npi_status_t rs = NPI_SUCCESS; 411 412 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 413 handle = NXGE_DEV_NPI_HANDLE(nxgep); 414 /* 415 * Use configuration data composed at init time. 416 * Write to hardware the receive ring configurations. 
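	 * The rdc_desc_cfg_t filled in below carries everything the RDC
	 * needs: the mailbox address, the RBR base address/length with the
	 * page size code and the three packet buffer sizes, the RCR base
	 * address/length, and the interrupt threshold/timeout taken from
	 * the precomputed RCR configuration (rcr_cfgb).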
417 */ 418 rdc_desc.mbox_enable = 1; 419 rdc_desc.mbox_addr = mbox_p->mbox_addr; 420 NXGE_DEBUG_MSG((nxgep, RX_CTL, 421 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 422 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 423 424 rdc_desc.rbr_len = rbr_p->rbb_max; 425 rdc_desc.rbr_addr = rbr_p->rbr_addr; 426 427 switch (nxgep->rx_bksize_code) { 428 case RBR_BKSIZE_4K: 429 rdc_desc.page_size = SIZE_4KB; 430 break; 431 case RBR_BKSIZE_8K: 432 rdc_desc.page_size = SIZE_8KB; 433 break; 434 case RBR_BKSIZE_16K: 435 rdc_desc.page_size = SIZE_16KB; 436 break; 437 case RBR_BKSIZE_32K: 438 rdc_desc.page_size = SIZE_32KB; 439 break; 440 } 441 442 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 443 rdc_desc.valid0 = 1; 444 445 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 446 rdc_desc.valid1 = 1; 447 448 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 449 rdc_desc.valid2 = 1; 450 451 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 452 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 453 454 rdc_desc.rcr_len = rcr_p->comp_size; 455 rdc_desc.rcr_addr = rcr_p->rcr_addr; 456 457 cfgb_p = &(rcr_p->rcr_cfgb); 458 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 459 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 460 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 461 462 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 463 "rbr_len qlen %d pagesize code %d rcr_len %d", 464 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 465 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 466 "size 0 %d size 1 %d size 2 %d", 467 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 468 rbr_p->npi_pkt_buf_size2)); 469 470 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 471 if (rs != NPI_SUCCESS) { 472 return (NXGE_ERROR | rs); 473 } 474 475 /* 476 * Enable the timeout and threshold. 477 */ 478 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 479 rdc_desc.rcr_threshold); 480 if (rs != NPI_SUCCESS) { 481 return (NXGE_ERROR | rs); 482 } 483 484 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 485 rdc_desc.rcr_timeout); 486 if (rs != NPI_SUCCESS) { 487 return (NXGE_ERROR | rs); 488 } 489 490 /* Enable the DMA */ 491 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 492 if (rs != NPI_SUCCESS) { 493 return (NXGE_ERROR | rs); 494 } 495 496 /* Kick the DMA engine. 
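	 * The kick advertises to the RDC how many RBR descriptors were
	 * just posted (the full rbb_max here, since the ring was filled
	 * during initialization); nxge_post_page() later kicks a single
	 * descriptor at a time as buffers are returned to the ring.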
*/ 497 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 498 /* Clear the rbr empty bit */ 499 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 500 501 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 502 503 return (NXGE_OK); 504 } 505 506 nxge_status_t 507 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 508 { 509 npi_handle_t handle; 510 npi_status_t rs = NPI_SUCCESS; 511 512 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 513 handle = NXGE_DEV_NPI_HANDLE(nxgep); 514 515 /* disable the DMA */ 516 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 517 if (rs != NPI_SUCCESS) { 518 NXGE_DEBUG_MSG((nxgep, RX_CTL, 519 "<== nxge_disable_rxdma_channel:failed (0x%x)", 520 rs)); 521 return (NXGE_ERROR | rs); 522 } 523 524 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 525 return (NXGE_OK); 526 } 527 528 nxge_status_t 529 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 530 { 531 npi_handle_t handle; 532 nxge_status_t status = NXGE_OK; 533 534 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 535 "<== nxge_init_rxdma_channel_rcrflush")); 536 537 handle = NXGE_DEV_NPI_HANDLE(nxgep); 538 npi_rxdma_rdc_rcr_flush(handle, channel); 539 540 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 541 "<== nxge_init_rxdma_channel_rcrflsh")); 542 return (status); 543 544 } 545 546 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 547 548 #define TO_LEFT -1 549 #define TO_RIGHT 1 550 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 551 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 552 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 553 #define NO_HINT 0xffffffff 554 555 /*ARGSUSED*/ 556 nxge_status_t 557 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 558 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 559 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 560 { 561 int bufsize; 562 uint64_t pktbuf_pp; 563 uint64_t dvma_addr; 564 rxring_info_t *ring_info; 565 int base_side, end_side; 566 int r_index, l_index, anchor_index; 567 int found, search_done; 568 uint32_t offset, chunk_size, block_size, page_size_mask; 569 uint32_t chunk_index, block_index, total_index; 570 int max_iterations, iteration; 571 rxbuf_index_info_t *bufinfo; 572 573 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 574 575 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 576 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 577 pkt_buf_addr_pp, 578 pktbufsz_type)); 579 #if defined(__i386) 580 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 581 #else 582 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 583 #endif 584 585 switch (pktbufsz_type) { 586 case 0: 587 bufsize = rbr_p->pkt_buf_size0; 588 break; 589 case 1: 590 bufsize = rbr_p->pkt_buf_size1; 591 break; 592 case 2: 593 bufsize = rbr_p->pkt_buf_size2; 594 break; 595 case RCR_SINGLE_BLOCK: 596 bufsize = 0; 597 anchor_index = 0; 598 break; 599 default: 600 return (NXGE_ERROR); 601 } 602 603 if (rbr_p->num_blocks == 1) { 604 anchor_index = 0; 605 ring_info = rbr_p->ring_info; 606 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 607 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 608 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 609 "buf_pp $%p btype %d anchor_index %d " 610 "bufinfo $%p", 611 pkt_buf_addr_pp, 612 pktbufsz_type, 613 anchor_index, 614 bufinfo)); 615 616 goto found_index; 617 } 618 619 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 620 "==> nxge_rxbuf_pp_to_vp: " 621 "buf_pp $%p btype %d anchor_index %d", 622 pkt_buf_addr_pp, 623 pktbufsz_type, 624 anchor_index)); 625 626 ring_info = rbr_p->ring_info; 627 found = B_FALSE; 628 bufinfo = (rxbuf_index_info_t 
	    *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would be a single block in use for a given
	 * buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, then reset the hint for this size.
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * The search algorithm uses a binary tree search
		 * algorithm. It assumes that the information is
		 * already sorted in increasing order
		 * info[0] < info[1] < info[2] ....
< info[n-1] 678 * where n is the size of the information array 679 */ 680 r_index = rbr_p->num_blocks - 1; 681 l_index = 0; 682 search_done = B_FALSE; 683 anchor_index = MID_INDEX(r_index, l_index); 684 while (search_done == B_FALSE) { 685 if ((r_index == l_index) || 686 (iteration >= max_iterations)) 687 search_done = B_TRUE; 688 end_side = TO_RIGHT; /* to the right */ 689 base_side = TO_LEFT; /* to the left */ 690 /* read the DVMA address information and sort it */ 691 dvma_addr = bufinfo[anchor_index].dvma_addr; 692 chunk_size = bufinfo[anchor_index].buf_size; 693 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 694 "==> nxge_rxbuf_pp_to_vp: (searching)" 695 "buf_pp $%p btype %d " 696 "anchor_index %d chunk_size %d dvmaaddr $%p", 697 pkt_buf_addr_pp, 698 pktbufsz_type, 699 anchor_index, 700 chunk_size, 701 dvma_addr)); 702 703 if (pktbuf_pp >= dvma_addr) 704 base_side = TO_RIGHT; /* to the right */ 705 if (pktbuf_pp < (dvma_addr + chunk_size)) 706 end_side = TO_LEFT; /* to the left */ 707 708 switch (base_side + end_side) { 709 case IN_MIDDLE: 710 /* found */ 711 found = B_TRUE; 712 search_done = B_TRUE; 713 if ((pktbuf_pp + bufsize) < 714 (dvma_addr + chunk_size)) 715 ring_info->hint[pktbufsz_type] = 716 bufinfo[anchor_index].buf_index; 717 break; 718 case BOTH_RIGHT: 719 /* not found: go to the right */ 720 l_index = anchor_index + 1; 721 anchor_index = 722 MID_INDEX(r_index, l_index); 723 break; 724 725 case BOTH_LEFT: 726 /* not found: go to the left */ 727 r_index = anchor_index - 1; 728 anchor_index = MID_INDEX(r_index, 729 l_index); 730 break; 731 default: /* should not come here */ 732 return (NXGE_ERROR); 733 } 734 iteration++; 735 } 736 737 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 738 "==> nxge_rxbuf_pp_to_vp: (search done)" 739 "buf_pp $%p btype %d anchor_index %d", 740 pkt_buf_addr_pp, 741 pktbufsz_type, 742 anchor_index)); 743 } 744 745 if (found == B_FALSE) { 746 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 747 "==> nxge_rxbuf_pp_to_vp: (search failed)" 748 "buf_pp $%p btype %d anchor_index %d", 749 pkt_buf_addr_pp, 750 pktbufsz_type, 751 anchor_index)); 752 return (NXGE_ERROR); 753 } 754 755 found_index: 756 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 757 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 758 "buf_pp $%p btype %d bufsize %d anchor_index %d", 759 pkt_buf_addr_pp, 760 pktbufsz_type, 761 bufsize, 762 anchor_index)); 763 764 /* index of the first block in this chunk */ 765 chunk_index = bufinfo[anchor_index].start_index; 766 dvma_addr = bufinfo[anchor_index].dvma_addr; 767 page_size_mask = ring_info->block_size_mask; 768 769 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 770 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 771 "buf_pp $%p btype %d bufsize %d " 772 "anchor_index %d chunk_index %d dvma $%p", 773 pkt_buf_addr_pp, 774 pktbufsz_type, 775 bufsize, 776 anchor_index, 777 chunk_index, 778 dvma_addr)); 779 780 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 781 block_size = rbr_p->block_size; /* System block(page) size */ 782 783 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 784 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 785 "buf_pp $%p btype %d bufsize %d " 786 "anchor_index %d chunk_index %d dvma $%p " 787 "offset %d block_size %d", 788 pkt_buf_addr_pp, 789 pktbufsz_type, 790 bufsize, 791 anchor_index, 792 chunk_index, 793 dvma_addr, 794 offset, 795 block_size)); 796 797 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 798 799 block_index = (offset / block_size); /* index within chunk */ 800 total_index = chunk_index + block_index; 801 802 803 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 804 "==> nxge_rxbuf_pp_to_vp: " 805 
"total_index %d dvma_addr $%p " 806 "offset %d block_size %d " 807 "block_index %d ", 808 total_index, dvma_addr, 809 offset, block_size, 810 block_index)); 811 #if defined(__i386) 812 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 813 (uint32_t)offset); 814 #else 815 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 816 (uint64_t)offset); 817 #endif 818 819 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 820 "==> nxge_rxbuf_pp_to_vp: " 821 "total_index %d dvma_addr $%p " 822 "offset %d block_size %d " 823 "block_index %d " 824 "*pkt_buf_addr_p $%p", 825 total_index, dvma_addr, 826 offset, block_size, 827 block_index, 828 *pkt_buf_addr_p)); 829 830 831 *msg_index = total_index; 832 *bufoffset = (offset & page_size_mask); 833 834 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 835 "==> nxge_rxbuf_pp_to_vp: get msg index: " 836 "msg_index %d bufoffset_index %d", 837 *msg_index, 838 *bufoffset)); 839 840 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 841 842 return (NXGE_OK); 843 } 844 845 /* 846 * used by quick sort (qsort) function 847 * to perform comparison 848 */ 849 static int 850 nxge_sort_compare(const void *p1, const void *p2) 851 { 852 853 rxbuf_index_info_t *a, *b; 854 855 a = (rxbuf_index_info_t *)p1; 856 b = (rxbuf_index_info_t *)p2; 857 858 if (a->dvma_addr > b->dvma_addr) 859 return (1); 860 if (a->dvma_addr < b->dvma_addr) 861 return (-1); 862 return (0); 863 } 864 865 866 867 /* 868 * grabbed this sort implementation from common/syscall/avl.c 869 * 870 */ 871 /* 872 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 873 * v = Ptr to array/vector of objs 874 * n = # objs in the array 875 * s = size of each obj (must be multiples of a word size) 876 * f = ptr to function to compare two objs 877 * returns (-1 = less than, 0 = equal, 1 = greater than 878 */ 879 void 880 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 881 { 882 int g, i, j, ii; 883 unsigned int *p1, *p2; 884 unsigned int tmp; 885 886 /* No work to do */ 887 if (v == NULL || n <= 1) 888 return; 889 /* Sanity check on arguments */ 890 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 891 ASSERT(s > 0); 892 893 for (g = n / 2; g > 0; g /= 2) { 894 for (i = g; i < n; i++) { 895 for (j = i - g; j >= 0 && 896 (*f)(v + j * s, v + (j + g) * s) == 1; 897 j -= g) { 898 p1 = (unsigned *)(v + j * s); 899 p2 = (unsigned *)(v + (j + g) * s); 900 for (ii = 0; ii < s / 4; ii++) { 901 tmp = *p1; 902 *p1++ = *p2; 903 *p2++ = tmp; 904 } 905 } 906 } 907 } 908 } 909 910 /* 911 * Initialize data structures required for rxdma 912 * buffer dvma->vmem address lookup 913 */ 914 /*ARGSUSED*/ 915 static nxge_status_t 916 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 917 { 918 919 int index; 920 rxring_info_t *ring_info; 921 int max_iteration = 0, max_index = 0; 922 923 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 924 925 ring_info = rbrp->ring_info; 926 ring_info->hint[0] = NO_HINT; 927 ring_info->hint[1] = NO_HINT; 928 ring_info->hint[2] = NO_HINT; 929 max_index = rbrp->num_blocks; 930 931 /* read the DVMA address information and sort it */ 932 /* do init of the information array */ 933 934 935 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 936 " nxge_rxbuf_index_info_init Sort ptrs")); 937 938 /* sort the array */ 939 nxge_ksort((void *)ring_info->buffer, max_index, 940 sizeof (rxbuf_index_info_t), nxge_sort_compare); 941 942 943 944 for (index = 0; index < max_index; index++) { 945 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 946 " nxge_rxbuf_index_info_init: sorted chunk %d " 
947 " ioaddr $%p kaddr $%p size %x", 948 index, ring_info->buffer[index].dvma_addr, 949 ring_info->buffer[index].kaddr, 950 ring_info->buffer[index].buf_size)); 951 } 952 953 max_iteration = 0; 954 while (max_index >= (1ULL << max_iteration)) 955 max_iteration++; 956 ring_info->max_iterations = max_iteration + 1; 957 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 958 " nxge_rxbuf_index_info_init Find max iter %d", 959 ring_info->max_iterations)); 960 961 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 962 return (NXGE_OK); 963 } 964 965 /* ARGSUSED */ 966 void 967 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 968 { 969 #ifdef NXGE_DEBUG 970 971 uint32_t bptr; 972 uint64_t pp; 973 974 bptr = entry_p->bits.hdw.pkt_buf_addr; 975 976 NXGE_DEBUG_MSG((nxgep, RX_CTL, 977 "\trcr entry $%p " 978 "\trcr entry 0x%0llx " 979 "\trcr entry 0x%08x " 980 "\trcr entry 0x%08x " 981 "\tvalue 0x%0llx\n" 982 "\tmulti = %d\n" 983 "\tpkt_type = 0x%x\n" 984 "\tzero_copy = %d\n" 985 "\tnoport = %d\n" 986 "\tpromis = %d\n" 987 "\terror = 0x%04x\n" 988 "\tdcf_err = 0x%01x\n" 989 "\tl2_len = %d\n" 990 "\tpktbufsize = %d\n" 991 "\tpkt_buf_addr = $%p\n" 992 "\tpkt_buf_addr (<< 6) = $%p\n", 993 entry_p, 994 *(int64_t *)entry_p, 995 *(int32_t *)entry_p, 996 *(int32_t *)((char *)entry_p + 32), 997 entry_p->value, 998 entry_p->bits.hdw.multi, 999 entry_p->bits.hdw.pkt_type, 1000 entry_p->bits.hdw.zero_copy, 1001 entry_p->bits.hdw.noport, 1002 entry_p->bits.hdw.promis, 1003 entry_p->bits.hdw.error, 1004 entry_p->bits.hdw.dcf_err, 1005 entry_p->bits.hdw.l2_len, 1006 entry_p->bits.hdw.pktbufsz, 1007 bptr, 1008 entry_p->bits.ldw.pkt_buf_addr)); 1009 1010 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1011 RCR_PKT_BUF_ADDR_SHIFT; 1012 1013 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1014 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1015 #endif 1016 } 1017 1018 void 1019 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1020 { 1021 npi_handle_t handle; 1022 rbr_stat_t rbr_stat; 1023 addr44_t hd_addr; 1024 addr44_t tail_addr; 1025 uint16_t qlen; 1026 1027 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1028 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1029 1030 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1031 1032 /* RBR head */ 1033 hd_addr.addr = 0; 1034 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1035 #if defined(__i386) 1036 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1037 (void *)(uint32_t)hd_addr.addr); 1038 #else 1039 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1040 (void *)hd_addr.addr); 1041 #endif 1042 1043 /* RBR stats */ 1044 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1045 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1046 1047 /* RCR tail */ 1048 tail_addr.addr = 0; 1049 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1050 #if defined(__i386) 1051 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1052 (void *)(uint32_t)tail_addr.addr); 1053 #else 1054 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1055 (void *)tail_addr.addr); 1056 #endif 1057 1058 /* RCR qlen */ 1059 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1060 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1061 1062 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1063 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1064 } 1065 1066 void 1067 nxge_rxdma_stop(p_nxge_t nxgep) 1068 { 1069 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1070 1071 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1072 (void) nxge_rx_mac_disable(nxgep); 1073 (void) 
nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1074 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 1075 } 1076 1077 void 1078 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 1079 { 1080 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 1081 1082 (void) nxge_rxdma_stop(nxgep); 1083 (void) nxge_uninit_rxdma_channels(nxgep); 1084 (void) nxge_init_rxdma_channels(nxgep); 1085 1086 #ifndef AXIS_DEBUG_LB 1087 (void) nxge_xcvr_init(nxgep); 1088 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1089 #endif 1090 (void) nxge_rx_mac_enable(nxgep); 1091 1092 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 1093 } 1094 1095 nxge_status_t 1096 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1097 { 1098 int i, ndmas; 1099 uint16_t channel; 1100 p_rx_rbr_rings_t rx_rbr_rings; 1101 p_rx_rbr_ring_t *rbr_rings; 1102 npi_handle_t handle; 1103 npi_status_t rs = NPI_SUCCESS; 1104 nxge_status_t status = NXGE_OK; 1105 1106 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1107 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1108 1109 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1110 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1111 "<== nxge_rxdma_mode: not initialized")); 1112 return (NXGE_ERROR); 1113 } 1114 1115 rx_rbr_rings = nxgep->rx_rbr_rings; 1116 if (rx_rbr_rings == NULL) { 1117 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1118 "<== nxge_rxdma_mode: NULL ring pointer")); 1119 return (NXGE_ERROR); 1120 } 1121 if (rx_rbr_rings->rbr_rings == NULL) { 1122 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1123 "<== nxge_rxdma_mode: NULL rbr rings pointer")); 1124 return (NXGE_ERROR); 1125 } 1126 1127 ndmas = rx_rbr_rings->ndmas; 1128 if (!ndmas) { 1129 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1130 "<== nxge_rxdma_mode: no channel")); 1131 return (NXGE_ERROR); 1132 } 1133 1134 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1135 "==> nxge_rxdma_mode (ndmas %d)", ndmas)); 1136 1137 rbr_rings = rx_rbr_rings->rbr_rings; 1138 1139 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1140 for (i = 0; i < ndmas; i++) { 1141 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 1142 continue; 1143 } 1144 channel = rbr_rings[i]->rdc; 1145 if (enable) { 1146 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1147 "==> nxge_rxdma_hw_mode: channel %d (enable)", 1148 channel)); 1149 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 1150 } else { 1151 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1152 "==> nxge_rxdma_hw_mode: channel %d (disable)", 1153 channel)); 1154 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 1155 } 1156 } 1157 1158 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1159 1160 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1161 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1162 1163 return (status); 1164 } 1165 1166 void 1167 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1168 { 1169 npi_handle_t handle; 1170 1171 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1172 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1173 1174 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1175 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1176 1177 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1178 } 1179 1180 void 1181 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1182 { 1183 npi_handle_t handle; 1184 1185 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1186 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1187 1188 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1189 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1190 1191 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1192 } 1193 1194 void 1195 nxge_hw_start_rx(p_nxge_t nxgep) 1196 { 1197 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1198 1199 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1200 (void) nxge_rx_mac_enable(nxgep); 1201 1202 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1203 } 1204 1205 /*ARGSUSED*/ 1206 void 1207 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1208 { 1209 int i, ndmas; 1210 uint16_t rdc; 1211 p_rx_rbr_rings_t rx_rbr_rings; 1212 p_rx_rbr_ring_t *rbr_rings; 1213 p_rx_rcr_rings_t rx_rcr_rings; 1214 1215 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1216 1217 rx_rbr_rings = nxgep->rx_rbr_rings; 1218 if (rx_rbr_rings == NULL) { 1219 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1220 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 1221 return; 1222 } 1223 ndmas = rx_rbr_rings->ndmas; 1224 if (!ndmas) { 1225 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1226 "<== nxge_fixup_rxdma_rings: no channel")); 1227 return; 1228 } 1229 1230 rx_rcr_rings = nxgep->rx_rcr_rings; 1231 if (rx_rcr_rings == NULL) { 1232 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1233 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 1234 return; 1235 } 1236 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1237 "==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas)); 1238 1239 nxge_rxdma_hw_stop(nxgep); 1240 1241 rbr_rings = rx_rbr_rings->rbr_rings; 1242 for (i = 0; i < ndmas; i++) { 1243 rdc = rbr_rings[i]->rdc; 1244 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1245 "==> nxge_fixup_rxdma_rings: channel %d " 1246 "ring $%px", rdc, rbr_rings[i])); 1247 (void) nxge_rxdma_fixup_channel(nxgep, rdc, i); 1248 } 1249 1250 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1251 } 1252 1253 void 1254 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1255 { 1256 int i; 1257 1258 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1259 i = nxge_rxdma_get_ring_index(nxgep, channel); 1260 if (i < 0) { 1261 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1262 "<== nxge_rxdma_fix_channel: no entry found")); 1263 return; 1264 } 1265 1266 nxge_rxdma_fixup_channel(nxgep, channel, i); 1267 1268 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_txdma_fix_channel")); 1269 } 1270 1271 void 1272 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1273 { 1274 int ndmas; 1275 p_rx_rbr_rings_t rx_rbr_rings; 1276 p_rx_rbr_ring_t *rbr_rings; 1277 p_rx_rcr_rings_t rx_rcr_rings; 1278 p_rx_rcr_ring_t *rcr_rings; 1279 p_rx_mbox_areas_t rx_mbox_areas_p; 1280 p_rx_mbox_t *rx_mbox_p; 1281 p_nxge_dma_pool_t dma_buf_poolp; 1282 p_nxge_dma_pool_t dma_cntl_poolp; 1283 p_rx_rbr_ring_t rbrp; 1284 p_rx_rcr_ring_t rcrp; 1285 
p_rx_mbox_t mboxp; 1286 p_nxge_dma_common_t dmap; 1287 nxge_status_t status = NXGE_OK; 1288 1289 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 1290 1291 (void) nxge_rxdma_stop_channel(nxgep, channel); 1292 1293 dma_buf_poolp = nxgep->rx_buf_pool_p; 1294 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1295 1296 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1297 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1298 "<== nxge_rxdma_fixup_channel: buf not allocated")); 1299 return; 1300 } 1301 1302 ndmas = dma_buf_poolp->ndmas; 1303 if (!ndmas) { 1304 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1305 "<== nxge_rxdma_fixup_channel: no dma allocated")); 1306 return; 1307 } 1308 1309 rx_rbr_rings = nxgep->rx_rbr_rings; 1310 rx_rcr_rings = nxgep->rx_rcr_rings; 1311 rbr_rings = rx_rbr_rings->rbr_rings; 1312 rcr_rings = rx_rcr_rings->rcr_rings; 1313 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1314 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1315 1316 /* Reinitialize the receive block and completion rings */ 1317 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 1318 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 1319 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 1320 1321 1322 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1323 rbrp->rbr_rd_index = 0; 1324 rcrp->comp_rd_index = 0; 1325 rcrp->comp_wt_index = 0; 1326 1327 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1328 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1329 1330 status = nxge_rxdma_start_channel(nxgep, channel, 1331 rbrp, rcrp, mboxp); 1332 if (status != NXGE_OK) { 1333 goto nxge_rxdma_fixup_channel_fail; 1334 } 1335 if (status != NXGE_OK) { 1336 goto nxge_rxdma_fixup_channel_fail; 1337 } 1338 1339 nxge_rxdma_fixup_channel_fail: 1340 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1341 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 1342 1343 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 1344 } 1345 1346 int 1347 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1348 { 1349 int i, ndmas; 1350 uint16_t rdc; 1351 p_rx_rbr_rings_t rx_rbr_rings; 1352 p_rx_rbr_ring_t *rbr_rings; 1353 1354 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1355 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1356 1357 rx_rbr_rings = nxgep->rx_rbr_rings; 1358 if (rx_rbr_rings == NULL) { 1359 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1360 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1361 return (-1); 1362 } 1363 ndmas = rx_rbr_rings->ndmas; 1364 if (!ndmas) { 1365 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1366 "<== nxge_rxdma_get_ring_index: no channel")); 1367 return (-1); 1368 } 1369 1370 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1371 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1372 1373 rbr_rings = rx_rbr_rings->rbr_rings; 1374 for (i = 0; i < ndmas; i++) { 1375 rdc = rbr_rings[i]->rdc; 1376 if (channel == rdc) { 1377 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1378 "==> nxge_rxdma_get_rbr_ring: " 1379 "channel %d (index %d) " 1380 "ring %d", channel, i, 1381 rbr_rings[i])); 1382 return (i); 1383 } 1384 } 1385 1386 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1387 "<== nxge_rxdma_get_rbr_ring_index: not found")); 1388 1389 return (-1); 1390 } 1391 1392 p_rx_rbr_ring_t 1393 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1394 { 1395 int i, ndmas; 1396 uint16_t rdc; 1397 p_rx_rbr_rings_t rx_rbr_rings; 1398 p_rx_rbr_ring_t *rbr_rings; 1399 1400 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1401 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1402 1403 rx_rbr_rings = nxgep->rx_rbr_rings; 1404 if (rx_rbr_rings == NULL) { 1405 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1406 "<== nxge_rxdma_get_rbr_ring: NULL ring pointer")); 
1407 return (NULL); 1408 } 1409 ndmas = rx_rbr_rings->ndmas; 1410 if (!ndmas) { 1411 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1412 "<== nxge_rxdma_get_rbr_ring: no channel")); 1413 return (NULL); 1414 } 1415 1416 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1417 "==> nxge_rxdma_get_ring (ndmas %d)", ndmas)); 1418 1419 rbr_rings = rx_rbr_rings->rbr_rings; 1420 for (i = 0; i < ndmas; i++) { 1421 rdc = rbr_rings[i]->rdc; 1422 if (channel == rdc) { 1423 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1424 "==> nxge_rxdma_get_rbr_ring: channel %d " 1425 "ring $%p", channel, rbr_rings[i])); 1426 return (rbr_rings[i]); 1427 } 1428 } 1429 1430 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1431 "<== nxge_rxdma_get_rbr_ring: not found")); 1432 1433 return (NULL); 1434 } 1435 1436 p_rx_rcr_ring_t 1437 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1438 { 1439 int i, ndmas; 1440 uint16_t rdc; 1441 p_rx_rcr_rings_t rx_rcr_rings; 1442 p_rx_rcr_ring_t *rcr_rings; 1443 1444 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1445 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1446 1447 rx_rcr_rings = nxgep->rx_rcr_rings; 1448 if (rx_rcr_rings == NULL) { 1449 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1450 "<== nxge_rxdma_get_rcr_ring: NULL ring pointer")); 1451 return (NULL); 1452 } 1453 ndmas = rx_rcr_rings->ndmas; 1454 if (!ndmas) { 1455 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1456 "<== nxge_rxdma_get_rcr_ring: no channel")); 1457 return (NULL); 1458 } 1459 1460 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1461 "==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas)); 1462 1463 rcr_rings = rx_rcr_rings->rcr_rings; 1464 for (i = 0; i < ndmas; i++) { 1465 rdc = rcr_rings[i]->rdc; 1466 if (channel == rdc) { 1467 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1468 "==> nxge_rxdma_get_rcr_ring: channel %d " 1469 "ring $%p", channel, rcr_rings[i])); 1470 return (rcr_rings[i]); 1471 } 1472 } 1473 1474 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1475 "<== nxge_rxdma_get_rcr_ring: not found")); 1476 1477 return (NULL); 1478 } 1479 1480 /* 1481 * Static functions start here. 
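 *
 * nxge_allocb() below builds an rx_msg_t either by carving "size" bytes
 * out of a preallocated DMA buffer chunk (dmabuf_p != NULL), advancing
 * the chunk's kaddrp/ioaddr/cookie bookkeeping, or by falling back to a
 * plain KMEM_ALLOC'ed buffer; in both cases the buffer is wrapped with
 * desballoc() so that nxge_freeb() runs when the mblk is freed.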
1482 */ 1483 static p_rx_msg_t 1484 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1485 { 1486 p_rx_msg_t nxge_mp = NULL; 1487 p_nxge_dma_common_t dmamsg_p; 1488 uchar_t *buffer; 1489 1490 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1491 if (nxge_mp == NULL) { 1492 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1493 "Allocation of a rx msg failed.")); 1494 goto nxge_allocb_exit; 1495 } 1496 1497 nxge_mp->use_buf_pool = B_FALSE; 1498 if (dmabuf_p) { 1499 nxge_mp->use_buf_pool = B_TRUE; 1500 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1501 *dmamsg_p = *dmabuf_p; 1502 dmamsg_p->nblocks = 1; 1503 dmamsg_p->block_size = size; 1504 dmamsg_p->alength = size; 1505 buffer = (uchar_t *)dmabuf_p->kaddrp; 1506 1507 dmabuf_p->kaddrp = (void *) 1508 ((char *)dmabuf_p->kaddrp + size); 1509 dmabuf_p->ioaddr_pp = (void *) 1510 ((char *)dmabuf_p->ioaddr_pp + size); 1511 dmabuf_p->alength -= size; 1512 dmabuf_p->offset += size; 1513 dmabuf_p->dma_cookie.dmac_laddress += size; 1514 dmabuf_p->dma_cookie.dmac_size -= size; 1515 1516 } else { 1517 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1518 if (buffer == NULL) { 1519 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1520 "Allocation of a receive page failed.")); 1521 goto nxge_allocb_fail1; 1522 } 1523 } 1524 1525 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1526 if (nxge_mp->rx_mblk_p == NULL) { 1527 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1528 goto nxge_allocb_fail2; 1529 } 1530 1531 nxge_mp->buffer = buffer; 1532 nxge_mp->block_size = size; 1533 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1534 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1535 nxge_mp->ref_cnt = 1; 1536 nxge_mp->free = B_TRUE; 1537 nxge_mp->rx_use_bcopy = B_FALSE; 1538 1539 atomic_inc_32(&nxge_mblks_pending); 1540 1541 goto nxge_allocb_exit; 1542 1543 nxge_allocb_fail2: 1544 if (!nxge_mp->use_buf_pool) { 1545 KMEM_FREE(buffer, size); 1546 } 1547 1548 nxge_allocb_fail1: 1549 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1550 nxge_mp = NULL; 1551 1552 nxge_allocb_exit: 1553 return (nxge_mp); 1554 } 1555 1556 p_mblk_t 1557 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1558 { 1559 p_mblk_t mp; 1560 1561 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1562 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1563 "offset = 0x%08X " 1564 "size = 0x%08X", 1565 nxge_mp, offset, size)); 1566 1567 mp = desballoc(&nxge_mp->buffer[offset], size, 1568 0, &nxge_mp->freeb); 1569 if (mp == NULL) { 1570 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1571 goto nxge_dupb_exit; 1572 } 1573 atomic_inc_32(&nxge_mp->ref_cnt); 1574 1575 1576 nxge_dupb_exit: 1577 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1578 nxge_mp)); 1579 return (mp); 1580 } 1581 1582 p_mblk_t 1583 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1584 { 1585 p_mblk_t mp; 1586 uchar_t *dp; 1587 1588 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1589 if (mp == NULL) { 1590 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1591 goto nxge_dupb_bcopy_exit; 1592 } 1593 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1594 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1595 mp->b_wptr = dp + size; 1596 1597 nxge_dupb_bcopy_exit: 1598 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1599 nxge_mp)); 1600 return (mp); 1601 } 1602 1603 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1604 p_rx_msg_t rx_msg_p); 1605 1606 void 1607 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 1608 { 1609 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, "==> nxge_post_page")); 1610 1611 /* Reuse this buffer */ 1612 rx_msg_p->free = B_FALSE; 1613 rx_msg_p->cur_usage_cnt = 0; 1614 rx_msg_p->max_usage_cnt = 0; 1615 rx_msg_p->pkt_buf_size = 0; 1616 1617 if (rx_rbr_p->rbr_use_bcopy) { 1618 rx_msg_p->rx_use_bcopy = B_FALSE; 1619 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1620 } 1621 1622 /* 1623 * Get the rbr header pointer and its offset index. 1624 */ 1625 MUTEX_ENTER(&rx_rbr_p->post_lock); 1626 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1627 rx_rbr_p->rbr_wrap_mask); 1628 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1629 MUTEX_EXIT(&rx_rbr_p->post_lock); 1630 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 1631 rx_rbr_p->rdc, 1); 1632 1633 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1634 "<== nxge_post_page (channel %d post_next_index %d)", 1635 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1636 1637 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1638 } 1639 1640 void 1641 nxge_freeb(p_rx_msg_t rx_msg_p) 1642 { 1643 size_t size; 1644 uchar_t *buffer = NULL; 1645 int ref_cnt; 1646 boolean_t free_state = B_FALSE; 1647 1648 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1649 1650 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1651 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1652 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1653 rx_msg_p, nxge_mblks_pending)); 1654 1655 /* 1656 * First we need to get the free state, then 1657 * atomic decrement the reference count to prevent 1658 * the race condition with the interrupt thread that 1659 * is processing a loaned up buffer block. 1660 */ 1661 free_state = rx_msg_p->free; 1662 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1663 if (!ref_cnt) { 1664 atomic_dec_32(&nxge_mblks_pending); 1665 buffer = rx_msg_p->buffer; 1666 size = rx_msg_p->block_size; 1667 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1668 "will free: rx_msg_p = $%p (block pending %d)", 1669 rx_msg_p, nxge_mblks_pending)); 1670 1671 if (!rx_msg_p->use_buf_pool) { 1672 KMEM_FREE(buffer, size); 1673 } 1674 1675 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1676 1677 if (ring) { 1678 /* 1679 * Decrement the receive buffer ring's reference 1680 * count, too. 1681 */ 1682 atomic_dec_32(&ring->rbr_ref_cnt); 1683 1684 /* 1685 * Free the receive buffer ring, iff 1686 * 1. all the receive buffers have been freed 1687 * 2. and we are in the proper state (that is, 1688 * we are not UNMAPPING). 1689 */ 1690 if (ring->rbr_ref_cnt == 0 && 1691 ring->rbr_state == RBR_UNMAPPED) { 1692 KMEM_FREE(ring, sizeof (*ring)); 1693 } 1694 } 1695 return; 1696 } 1697 1698 /* 1699 * Repost buffer. 
1700 */ 1701 if (free_state && (ref_cnt == 1) && ring) { 1702 NXGE_DEBUG_MSG((NULL, RX_CTL, 1703 "nxge_freeb: post page $%p:", rx_msg_p)); 1704 if (ring->rbr_state == RBR_POSTING) 1705 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1706 } 1707 1708 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1709 } 1710 1711 uint_t 1712 nxge_rx_intr(void *arg1, void *arg2) 1713 { 1714 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1715 p_nxge_t nxgep = (p_nxge_t)arg2; 1716 p_nxge_ldg_t ldgp; 1717 uint8_t channel; 1718 npi_handle_t handle; 1719 rx_dma_ctl_stat_t cs; 1720 1721 #ifdef NXGE_DEBUG 1722 rxdma_cfig1_t cfg; 1723 #endif 1724 uint_t serviced = DDI_INTR_UNCLAIMED; 1725 1726 if (ldvp == NULL) { 1727 NXGE_DEBUG_MSG((NULL, INT_CTL, 1728 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1729 nxgep, ldvp)); 1730 1731 return (DDI_INTR_CLAIMED); 1732 } 1733 1734 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1735 nxgep = ldvp->nxgep; 1736 } 1737 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1738 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1739 nxgep, ldvp)); 1740 1741 /* 1742 * This interrupt handler is for a specific 1743 * receive dma channel. 1744 */ 1745 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1746 /* 1747 * Get the control and status for this channel. 1748 */ 1749 channel = ldvp->channel; 1750 ldgp = ldvp->ldgp; 1751 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1752 1753 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1754 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1755 channel, 1756 cs.value, 1757 cs.bits.hdw.rcrto, 1758 cs.bits.hdw.rcrthres)); 1759 1760 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs); 1761 serviced = DDI_INTR_CLAIMED; 1762 1763 /* error events. */ 1764 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1765 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1766 } 1767 1768 nxge_intr_exit: 1769 1770 1771 /* 1772 * Enable the mailbox update interrupt if we want 1773 * to use mailbox. We probably don't need to use 1774 * mailbox as it only saves us one pio read. 1775 * Also write 1 to rcrthres and rcrto to clear 1776 * these two edge triggered bits. 1777 */ 1778 1779 cs.value &= RX_DMA_CTL_STAT_WR1C; 1780 cs.bits.hdw.mex = 1; 1781 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1782 cs.value); 1783 1784 /* 1785 * Rearm this logical group if this is a single device 1786 * group. 1787 */ 1788 if (ldgp->nldvs == 1) { 1789 ldgimgm_t mgm; 1790 mgm.value = 0; 1791 mgm.bits.ldw.arm = 1; 1792 mgm.bits.ldw.timer = ldgp->ldg_timer; 1793 NXGE_REG_WR64(handle, 1794 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1795 mgm.value); 1796 } 1797 1798 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1799 serviced)); 1800 return (serviced); 1801 } 1802 1803 /* 1804 * Process the packets received in the specified logical device 1805 * and pass up a chain of message blocks to the upper layer. 
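 *
 * nxge_rx_pkts_vring() is a thin wrapper: it calls nxge_rx_pkts() to
 * drain the completion ring for this logical device and, if a chain
 * was built, hands it to the MAC layer via mac_rx() using the ring's
 * registered mac handle.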
1806 */ 1807 static void 1808 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp, 1809 rx_dma_ctl_stat_t cs) 1810 { 1811 p_mblk_t mp; 1812 p_rx_rcr_ring_t rcrp; 1813 1814 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1815 if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) { 1816 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1817 "<== nxge_rx_pkts_vring: no mp")); 1818 return; 1819 } 1820 1821 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1822 mp)); 1823 1824 #ifdef NXGE_DEBUG 1825 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1826 "==> nxge_rx_pkts_vring:calling mac_rx " 1827 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1828 "mac_handle $%p", 1829 mp->b_wptr - mp->b_rptr, 1830 mp, mp->b_cont, mp->b_next, 1831 rcrp, rcrp->rcr_mac_handle)); 1832 1833 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1834 "==> nxge_rx_pkts_vring: dump packets " 1835 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1836 mp, 1837 mp->b_rptr, 1838 mp->b_wptr, 1839 nxge_dump_packet((char *)mp->b_rptr, 1840 mp->b_wptr - mp->b_rptr))); 1841 if (mp->b_cont) { 1842 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1843 "==> nxge_rx_pkts_vring: dump b_cont packets " 1844 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1845 mp->b_cont, 1846 mp->b_cont->b_rptr, 1847 mp->b_cont->b_wptr, 1848 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1849 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1850 } 1851 if (mp->b_next) { 1852 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1853 "==> nxge_rx_pkts_vring: dump next packets " 1854 "(b_rptr $%p): %s", 1855 mp->b_next->b_rptr, 1856 nxge_dump_packet((char *)mp->b_next->b_rptr, 1857 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1858 } 1859 #endif 1860 1861 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1862 } 1863 1864 1865 /* 1866 * This routine is the main packet receive processing function. 1867 * It gets the packet type, error code, and buffer related 1868 * information from the receive completion entry. 1869 * How many completion entries to process is based on the number of packets 1870 * queued by the hardware, a hardware maintained tail pointer 1871 * and a configurable receive packet count. 1872 * 1873 * A chain of message blocks will be created as result of processing 1874 * the completion entries. This chain of message blocks will be returned and 1875 * a hardware control status register will be updated with the number of 1876 * packets were removed from the hardware queue. 
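 *
 * The caller receives the head of the mblk chain; *rcrp is set to the
 * completion ring that was serviced, and the pktread/ptrread fields of
 * the control/status register are written back to tell the hardware how
 * many packets and completion entries software has consumed.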
1877 * 1878 */ 1879 mblk_t * 1880 nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp, 1881 p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs) 1882 { 1883 npi_handle_t handle; 1884 uint8_t channel; 1885 p_rx_rcr_rings_t rx_rcr_rings; 1886 p_rx_rcr_ring_t rcr_p; 1887 uint32_t comp_rd_index; 1888 p_rcr_entry_t rcr_desc_rd_head_p; 1889 p_rcr_entry_t rcr_desc_rd_head_pp; 1890 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1891 uint16_t qlen, nrcr_read, npkt_read; 1892 uint32_t qlen_hw; 1893 boolean_t multi; 1894 rcrcfig_b_t rcr_cfg_b; 1895 #if defined(_BIG_ENDIAN) 1896 npi_status_t rs = NPI_SUCCESS; 1897 #endif 1898 1899 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d " 1900 "channel %d", vindex, ldvp->channel)); 1901 1902 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1903 return (NULL); 1904 } 1905 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1906 rx_rcr_rings = nxgep->rx_rcr_rings; 1907 rcr_p = rx_rcr_rings->rcr_rings[vindex]; 1908 channel = rcr_p->rdc; 1909 if (channel != ldvp->channel) { 1910 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d " 1911 "channel %d, and rcr channel %d not matched.", 1912 vindex, ldvp->channel, channel)); 1913 return (NULL); 1914 } 1915 1916 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1917 "==> nxge_rx_pkts: START: rcr channel %d " 1918 "head_p $%p head_pp $%p index %d ", 1919 channel, rcr_p->rcr_desc_rd_head_p, 1920 rcr_p->rcr_desc_rd_head_pp, 1921 rcr_p->comp_rd_index)); 1922 1923 1924 #if !defined(_BIG_ENDIAN) 1925 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 1926 #else 1927 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1928 if (rs != NPI_SUCCESS) { 1929 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d " 1930 "channel %d, get qlen failed 0x%08x", 1931 vindex, ldvp->channel, rs)); 1932 return (NULL); 1933 } 1934 #endif 1935 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 1936 "qlen %d", channel, qlen)); 1937 1938 1939 1940 if (!qlen) { 1941 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1942 "==> nxge_rx_pkts:rcr channel %d " 1943 "qlen %d (no pkts)", channel, qlen)); 1944 1945 return (NULL); 1946 } 1947 1948 comp_rd_index = rcr_p->comp_rd_index; 1949 1950 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 1951 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 1952 nrcr_read = npkt_read = 0; 1953 1954 /* 1955 * Number of packets queued 1956 * (The jumbo or multi packet will be counted as only one 1957 * packets and it may take up more than one completion entry). 1958 */ 1959 qlen_hw = (qlen < nxge_max_rx_pkts) ? 1960 qlen : nxge_max_rx_pkts; 1961 head_mp = NULL; 1962 tail_mp = &head_mp; 1963 nmp = mp_cont = NULL; 1964 multi = B_FALSE; 1965 1966 while (qlen_hw) { 1967 1968 #ifdef NXGE_DEBUG 1969 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 1970 #endif 1971 /* 1972 * Process one completion ring entry. 
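		 * nxge_receive_packet() decodes the entry, sets "multi"
		 * when the frame spans more than one buffer, and returns
		 * the first mblk in nmp and any continuation in mp_cont;
		 * the chaining below links frames via b_next and the
		 * pieces of a multi-buffer frame via b_cont.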
1973 */ 1974 nxge_receive_packet(nxgep, 1975 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 1976 1977 /* 1978 * message chaining modes 1979 */ 1980 if (nmp) { 1981 nmp->b_next = NULL; 1982 if (!multi && !mp_cont) { /* frame fits a partition */ 1983 *tail_mp = nmp; 1984 tail_mp = &nmp->b_next; 1985 nmp = NULL; 1986 } else if (multi && !mp_cont) { /* first segment */ 1987 *tail_mp = nmp; 1988 tail_mp = &nmp->b_cont; 1989 } else if (multi && mp_cont) { /* mid of multi segs */ 1990 *tail_mp = mp_cont; 1991 tail_mp = &mp_cont->b_cont; 1992 } else if (!multi && mp_cont) { /* last segment */ 1993 *tail_mp = mp_cont; 1994 tail_mp = &nmp->b_next; 1995 nmp = NULL; 1996 } 1997 } 1998 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1999 "==> nxge_rx_pkts: loop: rcr channel %d " 2000 "before updating: multi %d " 2001 "nrcr_read %d " 2002 "npk read %d " 2003 "head_pp $%p index %d ", 2004 channel, 2005 multi, 2006 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2007 comp_rd_index)); 2008 2009 if (!multi) { 2010 qlen_hw--; 2011 npkt_read++; 2012 } 2013 2014 /* 2015 * Update the next read entry. 2016 */ 2017 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2018 rcr_p->comp_wrap_mask); 2019 2020 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2021 rcr_p->rcr_desc_first_p, 2022 rcr_p->rcr_desc_last_p); 2023 2024 nrcr_read++; 2025 2026 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2027 "<== nxge_rx_pkts: (SAM, process one packet) " 2028 "nrcr_read %d", 2029 nrcr_read)); 2030 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2031 "==> nxge_rx_pkts: loop: rcr channel %d " 2032 "multi %d " 2033 "nrcr_read %d " 2034 "npk read %d " 2035 "head_pp $%p index %d ", 2036 channel, 2037 multi, 2038 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2039 comp_rd_index)); 2040 2041 } 2042 2043 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2044 rcr_p->comp_rd_index = comp_rd_index; 2045 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2046 2047 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2048 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2049 rcr_p->intr_timeout = nxgep->intr_timeout; 2050 rcr_p->intr_threshold = nxgep->intr_threshold; 2051 rcr_cfg_b.value = 0x0ULL; 2052 if (rcr_p->intr_timeout) 2053 rcr_cfg_b.bits.ldw.entout = 1; 2054 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2055 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2056 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2057 channel, rcr_cfg_b.value); 2058 } 2059 2060 cs.bits.ldw.pktread = npkt_read; 2061 cs.bits.ldw.ptrread = nrcr_read; 2062 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2063 channel, cs.value); 2064 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2065 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2066 "head_pp $%p index %016llx ", 2067 channel, 2068 rcr_p->rcr_desc_rd_head_pp, 2069 rcr_p->comp_rd_index)); 2070 /* 2071 * Update RCR buffer pointer read and number of packets 2072 * read. 
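	 * (The write-back happened just above: pktread/ptrread in the
	 * control/status register record how far software has advanced
	 * through the completion ring.)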
2073 */ 2074 2075 *rcrp = rcr_p; 2076 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2077 return (head_mp); 2078 } 2079 2080 void 2081 nxge_receive_packet(p_nxge_t nxgep, 2082 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2083 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2084 { 2085 p_mblk_t nmp = NULL; 2086 uint64_t multi; 2087 uint64_t dcf_err; 2088 uint8_t channel; 2089 2090 boolean_t first_entry = B_TRUE; 2091 boolean_t is_tcp_udp = B_FALSE; 2092 boolean_t buffer_free = B_FALSE; 2093 boolean_t error_send_up = B_FALSE; 2094 uint8_t error_type; 2095 uint16_t l2_len; 2096 uint16_t skip_len; 2097 uint8_t pktbufsz_type; 2098 uint64_t rcr_entry; 2099 uint64_t *pkt_buf_addr_pp; 2100 uint64_t *pkt_buf_addr_p; 2101 uint32_t buf_offset; 2102 uint32_t bsize; 2103 uint32_t error_disp_cnt; 2104 uint32_t msg_index; 2105 p_rx_rbr_ring_t rx_rbr_p; 2106 p_rx_msg_t *rx_msg_ring_p; 2107 p_rx_msg_t rx_msg_p; 2108 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2109 nxge_status_t status = NXGE_OK; 2110 boolean_t is_valid = B_FALSE; 2111 p_nxge_rx_ring_stats_t rdc_stats; 2112 uint32_t bytes_read; 2113 uint64_t pkt_type; 2114 uint64_t frag; 2115 boolean_t pkt_too_long_err = B_FALSE; 2116 #ifdef NXGE_DEBUG 2117 int dump_len; 2118 #endif 2119 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2120 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2121 2122 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2123 2124 multi = (rcr_entry & RCR_MULTI_MASK); 2125 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2126 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2127 2128 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2129 frag = (rcr_entry & RCR_FRAG_MASK); 2130 2131 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2132 2133 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2134 RCR_PKTBUFSZ_SHIFT); 2135 #if defined(__i386) 2136 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2137 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2138 #else 2139 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2140 RCR_PKT_BUF_ADDR_SHIFT); 2141 #endif 2142 2143 channel = rcr_p->rdc; 2144 2145 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2146 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2147 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2148 "error_type 0x%x pkt_type 0x%x " 2149 "pktbufsz_type %d ", 2150 rcr_desc_rd_head_p, 2151 rcr_entry, pkt_buf_addr_pp, l2_len, 2152 multi, 2153 error_type, 2154 pkt_type, 2155 pktbufsz_type)); 2156 2157 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2158 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2159 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2160 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2161 rcr_entry, pkt_buf_addr_pp, l2_len, 2162 multi, 2163 error_type, 2164 pkt_type)); 2165 2166 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2167 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2168 "full pkt_buf_addr_pp $%p l2_len %d", 2169 rcr_entry, pkt_buf_addr_pp, l2_len)); 2170 2171 /* get the stats ptr */ 2172 rdc_stats = rcr_p->rdc_stats; 2173 2174 if (!l2_len) { 2175 2176 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2177 "<== nxge_receive_packet: failed: l2 length is 0.")); 2178 return; 2179 } 2180 2181 /* 2182 * Sofware workaround for BMAC hardware limitation that allows 2183 * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 2184 * instead of 0x2400 for jumbo. 2185 */ 2186 if (l2_len > nxgep->mac.maxframesize) { 2187 pkt_too_long_err = B_TRUE; 2188 } 2189 2190 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
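 * For illustration: a minimum-size 64-byte Ethernet frame is reported
 * with an l2_len of 64 that still includes the 4-byte FCS, and the
 * subtraction of ETHERFCSL below leaves 60 bytes of header plus payload
 * to be handed up the stack.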
*/ 2191 l2_len -= ETHERFCSL; 2192 2193 /* shift 6 bits to get the full io address */ 2194 #if defined(__i386) 2195 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2196 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2197 #else 2198 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2199 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2200 #endif 2201 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2202 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2203 "full pkt_buf_addr_pp $%p l2_len %d", 2204 rcr_entry, pkt_buf_addr_pp, l2_len)); 2205 2206 rx_rbr_p = rcr_p->rx_rbr_p; 2207 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2208 2209 if (first_entry) { 2210 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2211 RXDMA_HDR_SIZE_DEFAULT); 2212 2213 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2214 "==> nxge_receive_packet: first entry 0x%016llx " 2215 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2216 rcr_entry, pkt_buf_addr_pp, l2_len, 2217 hdr_size)); 2218 } 2219 2220 MUTEX_ENTER(&rcr_p->lock); 2221 MUTEX_ENTER(&rx_rbr_p->lock); 2222 2223 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2224 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2225 "full pkt_buf_addr_pp $%p l2_len %d", 2226 rcr_entry, pkt_buf_addr_pp, l2_len)); 2227 2228 /* 2229 * Packet buffer address in the completion entry points 2230 * to the starting buffer address (offset 0). 2231 * Use the starting buffer address to locate the corresponding 2232 * kernel address. 2233 */ 2234 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2235 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2236 &buf_offset, 2237 &msg_index); 2238 2239 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2240 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2241 "full pkt_buf_addr_pp $%p l2_len %d", 2242 rcr_entry, pkt_buf_addr_pp, l2_len)); 2243 2244 if (status != NXGE_OK) { 2245 MUTEX_EXIT(&rx_rbr_p->lock); 2246 MUTEX_EXIT(&rcr_p->lock); 2247 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2248 "<== nxge_receive_packet: found vaddr failed %d", 2249 status)); 2250 return; 2251 } 2252 2253 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2254 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2255 "full pkt_buf_addr_pp $%p l2_len %d", 2256 rcr_entry, pkt_buf_addr_pp, l2_len)); 2257 2258 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2259 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2260 "full pkt_buf_addr_pp $%p l2_len %d", 2261 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2262 2263 rx_msg_p = rx_msg_ring_p[msg_index]; 2264 2265 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2266 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2267 "full pkt_buf_addr_pp $%p l2_len %d", 2268 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2269 2270 switch (pktbufsz_type) { 2271 case RCR_PKTBUFSZ_0: 2272 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2273 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2274 "==> nxge_receive_packet: 0 buf %d", bsize)); 2275 break; 2276 case RCR_PKTBUFSZ_1: 2277 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2278 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2279 "==> nxge_receive_packet: 1 buf %d", bsize)); 2280 break; 2281 case RCR_PKTBUFSZ_2: 2282 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2283 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2284 "==> nxge_receive_packet: 2 buf %d", bsize)); 2285 break; 2286 case RCR_SINGLE_BLOCK: 2287 bsize = rx_msg_p->block_size; 2288 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2289 "==> nxge_receive_packet: single %d", bsize)); 2290 2291 break; 2292 default: 2293 MUTEX_EXIT(&rx_rbr_p->lock); 2294 MUTEX_EXIT(&rcr_p->lock); 2295 return; 2296 } 2297 2298 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2299 (buf_offset + sw_offset_bytes), 2300 (hdr_size + l2_len), 2301 DDI_DMA_SYNC_FORCPU); 2302 2303 
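/*
 * The sync above covers only the region the hardware wrote: (hdr_size +
 * l2_len) bytes starting at (buf_offset + sw_offset_bytes) within the
 * block.  For illustration, assuming the 2-byte default packet header
 * and a 1514-byte frame, roughly 1516 bytes are synced rather than the
 * whole receive block.
 */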
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2304 "==> nxge_receive_packet: after first dump:usage count")); 2305 2306 if (rx_msg_p->cur_usage_cnt == 0) { 2307 if (rx_rbr_p->rbr_use_bcopy) { 2308 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2309 if (rx_rbr_p->rbr_consumed < 2310 rx_rbr_p->rbr_threshold_hi) { 2311 if (rx_rbr_p->rbr_threshold_lo == 0 || 2312 ((rx_rbr_p->rbr_consumed >= 2313 rx_rbr_p->rbr_threshold_lo) && 2314 (rx_rbr_p->rbr_bufsize_type >= 2315 pktbufsz_type))) { 2316 rx_msg_p->rx_use_bcopy = B_TRUE; 2317 } 2318 } else { 2319 rx_msg_p->rx_use_bcopy = B_TRUE; 2320 } 2321 } 2322 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2323 "==> nxge_receive_packet: buf %d (new block) ", 2324 bsize)); 2325 2326 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2327 rx_msg_p->pkt_buf_size = bsize; 2328 rx_msg_p->cur_usage_cnt = 1; 2329 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2330 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2331 "==> nxge_receive_packet: buf %d " 2332 "(single block) ", 2333 bsize)); 2334 /* 2335 * Buffer can be reused once the free function 2336 * is called. 2337 */ 2338 rx_msg_p->max_usage_cnt = 1; 2339 buffer_free = B_TRUE; 2340 } else { 2341 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2342 if (rx_msg_p->max_usage_cnt == 1) { 2343 buffer_free = B_TRUE; 2344 } 2345 } 2346 } else { 2347 rx_msg_p->cur_usage_cnt++; 2348 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2349 buffer_free = B_TRUE; 2350 } 2351 } 2352 2353 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2354 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2355 msg_index, l2_len, 2356 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2357 2358 if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 2359 rdc_stats->ierrors++; 2360 if (dcf_err) { 2361 rdc_stats->dcf_err++; 2362 #ifdef NXGE_DEBUG 2363 if (!rdc_stats->dcf_err) { 2364 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2365 "nxge_receive_packet: channel %d dcf_err rcr" 2366 " 0x%llx", channel, rcr_entry)); 2367 } 2368 #endif 2369 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2370 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2371 } else if (pkt_too_long_err) { 2372 rdc_stats->pkt_too_long_err++; 2373 NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 2374 " channel %d packet length [%d] > " 2375 "maxframesize [%d]", channel, l2_len + ETHERFCSL, 2376 nxgep->mac.maxframesize)); 2377 } else { 2378 /* Update error stats */ 2379 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2380 rdc_stats->errlog.compl_err_type = error_type; 2381 2382 switch (error_type) { 2383 /* 2384 * Do not send FMA ereport for RCR_L2_ERROR and 2385 * RCR_L4_CSUM_ERROR because most likely they indicate 2386 * back pressure rather than HW failures. 2387 */ 2388 case RCR_L2_ERROR: 2389 rdc_stats->l2_err++; 2390 if (rdc_stats->l2_err < 2391 error_disp_cnt) { 2392 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2393 " nxge_receive_packet:" 2394 " channel %d RCR L2_ERROR", 2395 channel)); 2396 } 2397 break; 2398 case RCR_L4_CSUM_ERROR: 2399 error_send_up = B_TRUE; 2400 rdc_stats->l4_cksum_err++; 2401 if (rdc_stats->l4_cksum_err < 2402 error_disp_cnt) { 2403 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2404 " nxge_receive_packet:" 2405 " channel %d" 2406 " RCR L4_CSUM_ERROR", channel)); 2407 } 2408 break; 2409 /* 2410 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 2411 * RCR_ZCP_SOFT_ERROR because they reflect the same 2412 * FFLP and ZCP errors that have been reported by 2413 * nxge_fflp.c and nxge_zcp.c. 
2414 */
2415 case RCR_FFLP_SOFT_ERROR:
2416 error_send_up = B_TRUE;
2417 rdc_stats->fflp_soft_err++;
2418 if (rdc_stats->fflp_soft_err <
2419 error_disp_cnt) {
2420 NXGE_ERROR_MSG((nxgep,
2421 NXGE_ERR_CTL,
2422 " nxge_receive_packet:"
2423 " channel %d"
2424 " RCR FFLP_SOFT_ERROR", channel));
2425 }
2426 break;
2427 case RCR_ZCP_SOFT_ERROR:
2428 error_send_up = B_TRUE;
2429 rdc_stats->zcp_soft_err++;
2430 if (rdc_stats->zcp_soft_err <
2431 error_disp_cnt)
2432 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2433 " nxge_receive_packet: Channel %d"
2434 " RCR ZCP_SOFT_ERROR", channel));
2435 break;
2436 default:
2437 rdc_stats->rcr_unknown_err++;
2438 if (rdc_stats->rcr_unknown_err
2439 < error_disp_cnt) {
2440 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2441 " nxge_receive_packet: Channel %d"
2442 " RCR entry 0x%llx error 0x%x",
2443 channel, rcr_entry, error_type));
2444 }
2445 break;
2446 }
2447 }
2448 
2449 /*
2450 * Update and repost buffer block if max usage
2451 * count is reached.
2452 */
2453 if (error_send_up == B_FALSE) {
2454 atomic_inc_32(&rx_msg_p->ref_cnt);
2455 if (buffer_free == B_TRUE) {
2456 rx_msg_p->free = B_TRUE;
2457 }
2458 
2459 MUTEX_EXIT(&rx_rbr_p->lock);
2460 MUTEX_EXIT(&rcr_p->lock);
2461 nxge_freeb(rx_msg_p);
2462 return;
2463 }
2464 }
2465 
2466 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
2467 "==> nxge_receive_packet: DMA sync second "));
2468 
2469 bytes_read = rcr_p->rcvd_pkt_bytes;
2470 skip_len = sw_offset_bytes + hdr_size;
2471 if (!rx_msg_p->rx_use_bcopy) {
2472 /*
2473 * For loaned up buffers, the driver reference count
2474 * will be incremented first and then the free state.
2475 */
2476 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
2477 if (first_entry) {
2478 nmp->b_rptr = &nmp->b_rptr[skip_len];
2479 if (l2_len < bsize - skip_len) {
2480 nmp->b_wptr = &nmp->b_rptr[l2_len];
2481 } else {
2482 nmp->b_wptr = &nmp->b_rptr[bsize
2483 - skip_len];
2484 }
2485 } else {
2486 if (l2_len - bytes_read < bsize) {
2487 nmp->b_wptr =
2488 &nmp->b_rptr[l2_len - bytes_read];
2489 } else {
2490 nmp->b_wptr = &nmp->b_rptr[bsize];
2491 }
2492 }
2493 }
2494 } else {
2495 if (first_entry) {
2496 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
2497 l2_len < bsize - skip_len ?
2498 l2_len : bsize - skip_len);
2499 } else {
2500 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
2501 l2_len - bytes_read < bsize ?
2502 l2_len - bytes_read : bsize);
2503 }
2504 }
2505 if (nmp != NULL) {
2506 if (first_entry)
2507 bytes_read = nmp->b_wptr - nmp->b_rptr;
2508 else
2509 bytes_read += nmp->b_wptr - nmp->b_rptr;
2510 
2511 NXGE_DEBUG_MSG((nxgep, RX_CTL,
2512 "==> nxge_receive_packet after dupb: "
2513 "rbr consumed %d "
2514 "pktbufsz_type %d "
2515 "nmp $%p rptr $%p wptr $%p "
2516 "buf_offset %d bsize %d l2_len %d skip_len %d",
2517 rx_rbr_p->rbr_consumed,
2518 pktbufsz_type,
2519 nmp, nmp->b_rptr, nmp->b_wptr,
2520 buf_offset, bsize, l2_len, skip_len));
2521 } else {
2522 cmn_err(CE_WARN, "!nxge_receive_packet: "
2523 "update stats (error)");
2524 atomic_inc_32(&rx_msg_p->ref_cnt);
2525 if (buffer_free == B_TRUE) {
2526 rx_msg_p->free = B_TRUE;
2527 }
2528 MUTEX_EXIT(&rx_rbr_p->lock);
2529 MUTEX_EXIT(&rcr_p->lock);
2530 nxge_freeb(rx_msg_p);
2531 return;
2532 }
2533 
2534 if (buffer_free == B_TRUE) {
2535 rx_msg_p->free = B_TRUE;
2536 }
2537 /*
2538 * ERROR, FRAG and PKT_TYPE are only reported
2539 * in the first entry.
2540 * If a packet is not fragmented and no error bit is set, then
2541 * L4 checksum is OK.
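 * For example, an unfragmented TCP or UDP packet that arrives with
 * error_type 0 is flagged below with HCK_FULLCKSUM | HCK_FULLCKSUM_OK,
 * so the stack can skip its own checksum verification; a fragmented or
 * errored packet is passed up without that flag.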
2542 */ 2543 is_valid = (nmp != NULL); 2544 if (first_entry) { 2545 rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 2546 rdc_stats->ibytes += skip_len + l2_len < bsize ? 2547 l2_len : bsize; 2548 } else { 2549 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 2550 l2_len - bytes_read : bsize; 2551 } 2552 2553 rcr_p->rcvd_pkt_bytes = bytes_read; 2554 2555 MUTEX_EXIT(&rx_rbr_p->lock); 2556 MUTEX_EXIT(&rcr_p->lock); 2557 2558 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2559 atomic_inc_32(&rx_msg_p->ref_cnt); 2560 nxge_freeb(rx_msg_p); 2561 } 2562 2563 if (is_valid) { 2564 nmp->b_cont = NULL; 2565 if (first_entry) { 2566 *mp = nmp; 2567 *mp_cont = NULL; 2568 } else { 2569 *mp_cont = nmp; 2570 } 2571 } 2572 2573 /* 2574 * Update stats and hardware checksuming. 2575 */ 2576 if (is_valid && !multi) { 2577 2578 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2579 pkt_type == RCR_PKT_IS_UDP) ? 2580 B_TRUE: B_FALSE); 2581 2582 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2583 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2584 is_valid, multi, is_tcp_udp, frag, error_type)); 2585 2586 if (is_tcp_udp && !frag && !error_type) { 2587 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2588 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2589 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2590 "==> nxge_receive_packet: Full tcp/udp cksum " 2591 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2592 "error %d", 2593 is_valid, multi, is_tcp_udp, frag, error_type)); 2594 } 2595 } 2596 2597 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2598 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2599 2600 *multi_p = (multi == RCR_MULTI_MASK); 2601 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2602 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2603 *multi_p, nmp, *mp, *mp_cont)); 2604 } 2605 2606 /*ARGSUSED*/ 2607 static nxge_status_t 2608 nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, 2609 rx_dma_ctl_stat_t cs) 2610 { 2611 p_nxge_rx_ring_stats_t rdc_stats; 2612 npi_handle_t handle; 2613 npi_status_t rs; 2614 boolean_t rxchan_fatal = B_FALSE; 2615 boolean_t rxport_fatal = B_FALSE; 2616 uint8_t channel; 2617 uint8_t portn; 2618 nxge_status_t status = NXGE_OK; 2619 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2620 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2621 2622 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2623 channel = ldvp->channel; 2624 portn = nxgep->mac.portnum; 2625 rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index]; 2626 2627 if (cs.bits.hdw.rbr_tmout) { 2628 rdc_stats->rx_rbr_tmout++; 2629 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2630 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2631 rxchan_fatal = B_TRUE; 2632 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2633 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2634 } 2635 if (cs.bits.hdw.rsp_cnt_err) { 2636 rdc_stats->rsp_cnt_err++; 2637 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2638 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2639 rxchan_fatal = B_TRUE; 2640 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2641 "==> nxge_rx_err_evnts(channel %d): " 2642 "rsp_cnt_err", channel)); 2643 } 2644 if (cs.bits.hdw.byte_en_bus) { 2645 rdc_stats->byte_en_bus++; 2646 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2647 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2648 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2649 "==> nxge_rx_err_evnts(channel %d): " 2650 "fatal error: byte_en_bus", channel)); 2651 rxchan_fatal = B_TRUE; 2652 } 2653 if (cs.bits.hdw.rsp_dat_err) { 2654 rdc_stats->rsp_dat_err++; 2655 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2656 
NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2657 rxchan_fatal = B_TRUE; 2658 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2659 "==> nxge_rx_err_evnts(channel %d): " 2660 "fatal error: rsp_dat_err", channel)); 2661 } 2662 if (cs.bits.hdw.rcr_ack_err) { 2663 rdc_stats->rcr_ack_err++; 2664 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2665 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2666 rxchan_fatal = B_TRUE; 2667 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2668 "==> nxge_rx_err_evnts(channel %d): " 2669 "fatal error: rcr_ack_err", channel)); 2670 } 2671 if (cs.bits.hdw.dc_fifo_err) { 2672 rdc_stats->dc_fifo_err++; 2673 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2674 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2675 /* This is not a fatal error! */ 2676 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2677 "==> nxge_rx_err_evnts(channel %d): " 2678 "dc_fifo_err", channel)); 2679 rxport_fatal = B_TRUE; 2680 } 2681 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2682 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2683 &rdc_stats->errlog.pre_par, 2684 &rdc_stats->errlog.sha_par)) 2685 != NPI_SUCCESS) { 2686 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2687 "==> nxge_rx_err_evnts(channel %d): " 2688 "rcr_sha_par: get perr", channel)); 2689 return (NXGE_ERROR | rs); 2690 } 2691 if (cs.bits.hdw.rcr_sha_par) { 2692 rdc_stats->rcr_sha_par++; 2693 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2694 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2695 rxchan_fatal = B_TRUE; 2696 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2697 "==> nxge_rx_err_evnts(channel %d): " 2698 "fatal error: rcr_sha_par", channel)); 2699 } 2700 if (cs.bits.hdw.rbr_pre_par) { 2701 rdc_stats->rbr_pre_par++; 2702 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2703 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2704 rxchan_fatal = B_TRUE; 2705 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2706 "==> nxge_rx_err_evnts(channel %d): " 2707 "fatal error: rbr_pre_par", channel)); 2708 } 2709 } 2710 if (cs.bits.hdw.port_drop_pkt) { 2711 rdc_stats->port_drop_pkt++; 2712 if (rdc_stats->port_drop_pkt < error_disp_cnt) 2713 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2714 "==> nxge_rx_err_evnts (channel %d): " 2715 "port_drop_pkt", channel)); 2716 } 2717 if (cs.bits.hdw.wred_drop) { 2718 rdc_stats->wred_drop++; 2719 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2720 "==> nxge_rx_err_evnts(channel %d): " 2721 "wred_drop", channel)); 2722 } 2723 if (cs.bits.hdw.rbr_pre_empty) { 2724 rdc_stats->rbr_pre_empty++; 2725 if (rdc_stats->rbr_pre_empty < error_disp_cnt) 2726 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2727 "==> nxge_rx_err_evnts(channel %d): " 2728 "rbr_pre_empty", channel)); 2729 } 2730 if (cs.bits.hdw.rcr_shadow_full) { 2731 rdc_stats->rcr_shadow_full++; 2732 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2733 "==> nxge_rx_err_evnts(channel %d): " 2734 "rcr_shadow_full", channel)); 2735 } 2736 if (cs.bits.hdw.config_err) { 2737 rdc_stats->config_err++; 2738 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2739 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2740 rxchan_fatal = B_TRUE; 2741 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2742 "==> nxge_rx_err_evnts(channel %d): " 2743 "config error", channel)); 2744 } 2745 if (cs.bits.hdw.rcrincon) { 2746 rdc_stats->rcrincon++; 2747 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2748 NXGE_FM_EREPORT_RDMC_RCRINCON); 2749 rxchan_fatal = B_TRUE; 2750 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2751 "==> nxge_rx_err_evnts(channel %d): " 2752 "fatal error: rcrincon error", channel)); 2753 } 2754 if (cs.bits.hdw.rcrfull) { 2755 rdc_stats->rcrfull++; 2756 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2757 NXGE_FM_EREPORT_RDMC_RCRFULL); 2758 rxchan_fatal = 
B_TRUE; 2759 if (rdc_stats->rcrfull < error_disp_cnt) 2760 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2761 "==> nxge_rx_err_evnts(channel %d): " 2762 "fatal error: rcrfull error", channel)); 2763 } 2764 if (cs.bits.hdw.rbr_empty) { 2765 rdc_stats->rbr_empty++; 2766 if (rdc_stats->rbr_empty < error_disp_cnt) 2767 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2768 "==> nxge_rx_err_evnts(channel %d): " 2769 "rbr empty error", channel)); 2770 } 2771 if (cs.bits.hdw.rbrfull) { 2772 rdc_stats->rbrfull++; 2773 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2774 NXGE_FM_EREPORT_RDMC_RBRFULL); 2775 rxchan_fatal = B_TRUE; 2776 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2777 "==> nxge_rx_err_evnts(channel %d): " 2778 "fatal error: rbr_full error", channel)); 2779 } 2780 if (cs.bits.hdw.rbrlogpage) { 2781 rdc_stats->rbrlogpage++; 2782 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2783 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2784 rxchan_fatal = B_TRUE; 2785 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2786 "==> nxge_rx_err_evnts(channel %d): " 2787 "fatal error: rbr logical page error", channel)); 2788 } 2789 if (cs.bits.hdw.cfiglogpage) { 2790 rdc_stats->cfiglogpage++; 2791 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2792 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2793 rxchan_fatal = B_TRUE; 2794 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2795 "==> nxge_rx_err_evnts(channel %d): " 2796 "fatal error: cfig logical page error", channel)); 2797 } 2798 2799 if (rxport_fatal) { 2800 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2801 " nxge_rx_err_evnts: " 2802 " fatal error on Port #%d\n", 2803 portn)); 2804 status = nxge_ipp_fatal_err_recover(nxgep); 2805 if (status == NXGE_OK) { 2806 FM_SERVICE_RESTORED(nxgep); 2807 } 2808 } 2809 2810 if (rxchan_fatal) { 2811 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2812 " nxge_rx_err_evnts: " 2813 " fatal error on Channel #%d\n", 2814 channel)); 2815 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2816 if (status == NXGE_OK) { 2817 FM_SERVICE_RESTORED(nxgep); 2818 } 2819 } 2820 2821 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2822 2823 return (status); 2824 } 2825 2826 static nxge_status_t 2827 nxge_map_rxdma(p_nxge_t nxgep) 2828 { 2829 int i, ndmas; 2830 uint16_t channel; 2831 p_rx_rbr_rings_t rx_rbr_rings; 2832 p_rx_rbr_ring_t *rbr_rings; 2833 p_rx_rcr_rings_t rx_rcr_rings; 2834 p_rx_rcr_ring_t *rcr_rings; 2835 p_rx_mbox_areas_t rx_mbox_areas_p; 2836 p_rx_mbox_t *rx_mbox_p; 2837 p_nxge_dma_pool_t dma_buf_poolp; 2838 p_nxge_dma_pool_t dma_cntl_poolp; 2839 p_nxge_dma_common_t *dma_buf_p; 2840 p_nxge_dma_common_t *dma_cntl_p; 2841 uint32_t *num_chunks; 2842 nxge_status_t status = NXGE_OK; 2843 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2844 p_nxge_dma_common_t t_dma_buf_p; 2845 p_nxge_dma_common_t t_dma_cntl_p; 2846 #endif 2847 2848 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 2849 2850 dma_buf_poolp = nxgep->rx_buf_pool_p; 2851 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2852 2853 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2854 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2855 "<== nxge_map_rxdma: buf not allocated")); 2856 return (NXGE_ERROR); 2857 } 2858 2859 ndmas = dma_buf_poolp->ndmas; 2860 if (!ndmas) { 2861 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2862 "<== nxge_map_rxdma: no dma allocated")); 2863 return (NXGE_ERROR); 2864 } 2865 2866 num_chunks = dma_buf_poolp->num_chunks; 2867 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2868 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2869 2870 rx_rbr_rings = (p_rx_rbr_rings_t) 2871 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2872 rbr_rings = 
(p_rx_rbr_ring_t *) 2873 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2874 rx_rcr_rings = (p_rx_rcr_rings_t) 2875 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2876 rcr_rings = (p_rx_rcr_ring_t *) 2877 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2878 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2879 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2880 rx_mbox_p = (p_rx_mbox_t *) 2881 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2882 2883 /* 2884 * Timeout should be set based on the system clock divider. 2885 * The following timeout value of 1 assumes that the 2886 * granularity (1000) is 3 microseconds running at 300MHz. 2887 */ 2888 2889 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2890 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2891 2892 /* 2893 * Map descriptors from the buffer polls for each dam channel. 2894 */ 2895 for (i = 0; i < ndmas; i++) { 2896 /* 2897 * Set up and prepare buffer blocks, descriptors 2898 * and mailbox. 2899 */ 2900 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2901 status = nxge_map_rxdma_channel(nxgep, channel, 2902 (p_nxge_dma_common_t *)&dma_buf_p[i], 2903 (p_rx_rbr_ring_t *)&rbr_rings[i], 2904 num_chunks[i], 2905 (p_nxge_dma_common_t *)&dma_cntl_p[i], 2906 (p_rx_rcr_ring_t *)&rcr_rings[i], 2907 (p_rx_mbox_t *)&rx_mbox_p[i]); 2908 if (status != NXGE_OK) { 2909 goto nxge_map_rxdma_fail1; 2910 } 2911 rbr_rings[i]->index = (uint16_t)i; 2912 rcr_rings[i]->index = (uint16_t)i; 2913 rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i]; 2914 2915 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2916 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 2917 rbr_rings[i]->hv_set = B_FALSE; 2918 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 2919 t_dma_cntl_p = 2920 (p_nxge_dma_common_t)dma_cntl_p[i]; 2921 2922 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp = 2923 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 2924 rbr_rings[i]->hv_rx_buf_ioaddr_size = 2925 (uint64_t)t_dma_buf_p->orig_alength; 2926 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2927 "==> nxge_map_rxdma_channel: " 2928 "channel %d " 2929 "data buf base io $%p ($%p) " 2930 "size 0x%llx (%d 0x%x)", 2931 channel, 2932 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp, 2933 t_dma_cntl_p->ioaddr_pp, 2934 rbr_rings[i]->hv_rx_buf_ioaddr_size, 2935 t_dma_buf_p->orig_alength, 2936 t_dma_buf_p->orig_alength)); 2937 2938 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp = 2939 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 2940 rbr_rings[i]->hv_rx_cntl_ioaddr_size = 2941 (uint64_t)t_dma_cntl_p->orig_alength; 2942 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2943 "==> nxge_map_rxdma_channel: " 2944 "channel %d " 2945 "cntl base io $%p ($%p) " 2946 "size 0x%llx (%d 0x%x)", 2947 channel, 2948 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp, 2949 t_dma_cntl_p->ioaddr_pp, 2950 rbr_rings[i]->hv_rx_cntl_ioaddr_size, 2951 t_dma_cntl_p->orig_alength, 2952 t_dma_cntl_p->orig_alength)); 2953 } 2954 2955 #endif /* sun4v and NIU_LP_WORKAROUND */ 2956 } 2957 2958 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2959 rx_rbr_rings->rbr_rings = rbr_rings; 2960 nxgep->rx_rbr_rings = rx_rbr_rings; 2961 rx_rcr_rings->rcr_rings = rcr_rings; 2962 nxgep->rx_rcr_rings = rx_rcr_rings; 2963 2964 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2965 nxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2966 2967 goto nxge_map_rxdma_exit; 2968 2969 nxge_map_rxdma_fail1: 2970 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2971 "==> nxge_map_rxdma: unmap rbr,rcr " 2972 "(status 0x%x channel %d i %d)", 2973 status, channel, i)); 2974 i--; 2975 for (; i >= 0; i--) { 2976 channel = 
((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2977 nxge_unmap_rxdma_channel(nxgep, channel, 2978 rbr_rings[i], 2979 rcr_rings[i], 2980 rx_mbox_p[i]); 2981 } 2982 2983 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2984 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2985 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2986 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2987 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2988 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2989 2990 nxge_map_rxdma_exit: 2991 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2992 "<== nxge_map_rxdma: " 2993 "(status 0x%x channel %d)", 2994 status, channel)); 2995 2996 return (status); 2997 } 2998 2999 static void 3000 nxge_unmap_rxdma(p_nxge_t nxgep) 3001 { 3002 int i, ndmas; 3003 uint16_t channel; 3004 p_rx_rbr_rings_t rx_rbr_rings; 3005 p_rx_rbr_ring_t *rbr_rings; 3006 p_rx_rcr_rings_t rx_rcr_rings; 3007 p_rx_rcr_ring_t *rcr_rings; 3008 p_rx_mbox_areas_t rx_mbox_areas_p; 3009 p_rx_mbox_t *rx_mbox_p; 3010 p_nxge_dma_pool_t dma_buf_poolp; 3011 p_nxge_dma_pool_t dma_cntl_poolp; 3012 p_nxge_dma_common_t *dma_buf_p; 3013 3014 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma")); 3015 3016 dma_buf_poolp = nxgep->rx_buf_pool_p; 3017 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 3018 3019 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 3020 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3021 "<== nxge_unmap_rxdma: NULL buf pointers")); 3022 return; 3023 } 3024 3025 rx_rbr_rings = nxgep->rx_rbr_rings; 3026 rx_rcr_rings = nxgep->rx_rcr_rings; 3027 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3028 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3029 "<== nxge_unmap_rxdma: NULL ring pointers")); 3030 return; 3031 } 3032 ndmas = rx_rbr_rings->ndmas; 3033 if (!ndmas) { 3034 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3035 "<== nxge_unmap_rxdma: no channel")); 3036 return; 3037 } 3038 3039 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3040 "==> nxge_unmap_rxdma (ndmas %d)", ndmas)); 3041 rbr_rings = rx_rbr_rings->rbr_rings; 3042 rcr_rings = rx_rcr_rings->rcr_rings; 3043 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3044 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3045 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 3046 3047 for (i = 0; i < ndmas; i++) { 3048 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 3049 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3050 "==> nxge_unmap_rxdma (ndmas %d) channel %d", 3051 ndmas, channel)); 3052 (void) nxge_unmap_rxdma_channel(nxgep, channel, 3053 (p_rx_rbr_ring_t)rbr_rings[i], 3054 (p_rx_rcr_ring_t)rcr_rings[i], 3055 (p_rx_mbox_t)rx_mbox_p[i]); 3056 } 3057 3058 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 3059 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 3060 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 3061 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 3062 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 3063 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 3064 3065 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3066 "<== nxge_unmap_rxdma")); 3067 } 3068 3069 nxge_status_t 3070 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3071 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3072 uint32_t num_chunks, 3073 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3074 p_rx_mbox_t *rx_mbox_p) 3075 { 3076 int status = NXGE_OK; 3077 3078 /* 3079 * Set up and prepare buffer blocks, descriptors 3080 * and mailbox. 
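 * The work is split into two steps below: the buffer ring (receive
 * buffer blocks) is mapped first, then the control structures (the RBR
 * and RCR descriptor rings plus the mailbox); if the second step fails,
 * the already-mapped buffer ring is unwound again.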
3081 */ 3082 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3083 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3084 /* 3085 * Receive buffer blocks 3086 */ 3087 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3088 dma_buf_p, rbr_p, num_chunks); 3089 if (status != NXGE_OK) { 3090 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3091 "==> nxge_map_rxdma_channel (channel %d): " 3092 "map buffer failed 0x%x", channel, status)); 3093 goto nxge_map_rxdma_channel_exit; 3094 } 3095 3096 /* 3097 * Receive block ring, completion ring and mailbox. 3098 */ 3099 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3100 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3101 if (status != NXGE_OK) { 3102 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3103 "==> nxge_map_rxdma_channel (channel %d): " 3104 "map config failed 0x%x", channel, status)); 3105 goto nxge_map_rxdma_channel_fail2; 3106 } 3107 3108 goto nxge_map_rxdma_channel_exit; 3109 3110 nxge_map_rxdma_channel_fail3: 3111 /* Free rbr, rcr */ 3112 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3113 "==> nxge_map_rxdma_channel: free rbr/rcr " 3114 "(status 0x%x channel %d)", 3115 status, channel)); 3116 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3117 *rcr_p, *rx_mbox_p); 3118 3119 nxge_map_rxdma_channel_fail2: 3120 /* Free buffer blocks */ 3121 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3122 "==> nxge_map_rxdma_channel: free rx buffers" 3123 "(nxgep 0x%x status 0x%x channel %d)", 3124 nxgep, status, channel)); 3125 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3126 3127 status = NXGE_ERROR; 3128 3129 nxge_map_rxdma_channel_exit: 3130 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3131 "<== nxge_map_rxdma_channel: " 3132 "(nxgep 0x%x status 0x%x channel %d)", 3133 nxgep, status, channel)); 3134 3135 return (status); 3136 } 3137 3138 /*ARGSUSED*/ 3139 static void 3140 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3141 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3142 { 3143 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3144 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3145 3146 /* 3147 * unmap receive block ring, completion ring and mailbox. 3148 */ 3149 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3150 rcr_p, rx_mbox_p); 3151 3152 /* unmap buffer blocks */ 3153 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3154 3155 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3156 } 3157 3158 /*ARGSUSED*/ 3159 static nxge_status_t 3160 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3161 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3162 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3163 { 3164 p_rx_rbr_ring_t rbrp; 3165 p_rx_rcr_ring_t rcrp; 3166 p_rx_mbox_t mboxp; 3167 p_nxge_dma_common_t cntl_dmap; 3168 p_nxge_dma_common_t dmap; 3169 p_rx_msg_t *rx_msg_ring; 3170 p_rx_msg_t rx_msg_p; 3171 p_rbr_cfig_a_t rcfga_p; 3172 p_rbr_cfig_b_t rcfgb_p; 3173 p_rcrcfig_a_t cfga_p; 3174 p_rcrcfig_b_t cfgb_p; 3175 p_rxdma_cfig1_t cfig1_p; 3176 p_rxdma_cfig2_t cfig2_p; 3177 p_rbr_kick_t kick_p; 3178 uint32_t dmaaddrp; 3179 uint32_t *rbr_vaddrp; 3180 uint32_t bkaddr; 3181 nxge_status_t status = NXGE_OK; 3182 int i; 3183 uint32_t nxge_port_rcr_size; 3184 3185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3186 "==> nxge_map_rxdma_channel_cfg_ring")); 3187 3188 cntl_dmap = *dma_cntl_p; 3189 3190 /* Map in the receive block ring */ 3191 rbrp = *rbr_p; 3192 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3193 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3194 /* 3195 * Zero out buffer block ring descriptors. 
3196 */ 3197 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3198 3199 rcfga_p = &(rbrp->rbr_cfga); 3200 rcfgb_p = &(rbrp->rbr_cfgb); 3201 kick_p = &(rbrp->rbr_kick); 3202 rcfga_p->value = 0; 3203 rcfgb_p->value = 0; 3204 kick_p->value = 0; 3205 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3206 rcfga_p->value = (rbrp->rbr_addr & 3207 (RBR_CFIG_A_STDADDR_MASK | 3208 RBR_CFIG_A_STDADDR_BASE_MASK)); 3209 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3210 3211 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3212 rcfgb_p->bits.ldw.vld0 = 1; 3213 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3214 rcfgb_p->bits.ldw.vld1 = 1; 3215 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3216 rcfgb_p->bits.ldw.vld2 = 1; 3217 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3218 3219 /* 3220 * For each buffer block, enter receive block address to the ring. 3221 */ 3222 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3223 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3224 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3225 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3226 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3227 3228 rx_msg_ring = rbrp->rx_msg_ring; 3229 for (i = 0; i < rbrp->tnblocks; i++) { 3230 rx_msg_p = rx_msg_ring[i]; 3231 rx_msg_p->nxgep = nxgep; 3232 rx_msg_p->rx_rbr_p = rbrp; 3233 bkaddr = (uint32_t) 3234 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3235 >> RBR_BKADDR_SHIFT)); 3236 rx_msg_p->free = B_FALSE; 3237 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3238 3239 *rbr_vaddrp++ = bkaddr; 3240 } 3241 3242 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3243 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3244 3245 rbrp->rbr_rd_index = 0; 3246 3247 rbrp->rbr_consumed = 0; 3248 rbrp->rbr_use_bcopy = B_TRUE; 3249 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3250 /* 3251 * Do bcopy on packets greater than bcopy size once 3252 * the lo threshold is reached. 3253 * This lo threshold should be less than the hi threshold. 3254 * 3255 * Do bcopy on every packet once the hi threshold is reached. 
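 *
 * As a worked example (values assumed for illustration only): with an
 * rbb_max of 4096 blocks, a lo threshold of NXGE_RX_COPY_3 and a hi
 * threshold of NXGE_RX_COPY_6, the code below computes
 *	rbr_threshold_lo = 4096 * 3 / NXGE_RX_BCOPY_SCALE
 *	rbr_threshold_hi = 4096 * 6 / NXGE_RX_BCOPY_SCALE
 * so with an assumed scale of 8, bcopy of the smaller buffer sizes starts
 * once 1536 blocks are consumed and bcopy of every packet starts once
 * 3072 blocks are consumed.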
3256 */ 3257 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3258 /* default it to use hi */ 3259 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3260 } 3261 3262 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3263 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3264 } 3265 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3266 3267 switch (nxge_rx_threshold_hi) { 3268 default: 3269 case NXGE_RX_COPY_NONE: 3270 /* Do not do bcopy at all */ 3271 rbrp->rbr_use_bcopy = B_FALSE; 3272 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3273 break; 3274 3275 case NXGE_RX_COPY_1: 3276 case NXGE_RX_COPY_2: 3277 case NXGE_RX_COPY_3: 3278 case NXGE_RX_COPY_4: 3279 case NXGE_RX_COPY_5: 3280 case NXGE_RX_COPY_6: 3281 case NXGE_RX_COPY_7: 3282 rbrp->rbr_threshold_hi = 3283 rbrp->rbb_max * 3284 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3285 break; 3286 3287 case NXGE_RX_COPY_ALL: 3288 rbrp->rbr_threshold_hi = 0; 3289 break; 3290 } 3291 3292 switch (nxge_rx_threshold_lo) { 3293 default: 3294 case NXGE_RX_COPY_NONE: 3295 /* Do not do bcopy at all */ 3296 if (rbrp->rbr_use_bcopy) { 3297 rbrp->rbr_use_bcopy = B_FALSE; 3298 } 3299 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3300 break; 3301 3302 case NXGE_RX_COPY_1: 3303 case NXGE_RX_COPY_2: 3304 case NXGE_RX_COPY_3: 3305 case NXGE_RX_COPY_4: 3306 case NXGE_RX_COPY_5: 3307 case NXGE_RX_COPY_6: 3308 case NXGE_RX_COPY_7: 3309 rbrp->rbr_threshold_lo = 3310 rbrp->rbb_max * 3311 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3312 break; 3313 3314 case NXGE_RX_COPY_ALL: 3315 rbrp->rbr_threshold_lo = 0; 3316 break; 3317 } 3318 3319 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3320 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3321 "rbb_max %d " 3322 "rbrp->rbr_bufsize_type %d " 3323 "rbb_threshold_hi %d " 3324 "rbb_threshold_lo %d", 3325 dma_channel, 3326 rbrp->rbb_max, 3327 rbrp->rbr_bufsize_type, 3328 rbrp->rbr_threshold_hi, 3329 rbrp->rbr_threshold_lo)); 3330 3331 rbrp->page_valid.value = 0; 3332 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3333 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3334 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3335 rbrp->page_hdl.value = 0; 3336 3337 rbrp->page_valid.bits.ldw.page0 = 1; 3338 rbrp->page_valid.bits.ldw.page1 = 1; 3339 3340 /* Map in the receive completion ring */ 3341 rcrp = (p_rx_rcr_ring_t) 3342 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3343 rcrp->rdc = dma_channel; 3344 3345 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3346 rcrp->comp_size = nxge_port_rcr_size; 3347 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3348 3349 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3350 3351 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3352 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3353 sizeof (rcr_entry_t)); 3354 rcrp->comp_rd_index = 0; 3355 rcrp->comp_wt_index = 0; 3356 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3357 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3358 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3359 #if defined(__i386) 3360 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3361 #else 3362 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3363 #endif 3364 3365 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3366 (nxge_port_rcr_size - 1); 3367 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3368 (nxge_port_rcr_size - 1); 3369 3370 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3371 "==> nxge_map_rxdma_channel_cfg_ring: " 3372 "channel %d " 3373 "rbr_vaddrp $%p " 3374 "rcr_desc_rd_head_p $%p " 3375 "rcr_desc_rd_head_pp $%p " 3376 "rcr_desc_rd_last_p $%p " 3377 
"rcr_desc_rd_last_pp $%p ", 3378 dma_channel, 3379 rbr_vaddrp, 3380 rcrp->rcr_desc_rd_head_p, 3381 rcrp->rcr_desc_rd_head_pp, 3382 rcrp->rcr_desc_last_p, 3383 rcrp->rcr_desc_last_pp)); 3384 3385 /* 3386 * Zero out buffer block ring descriptors. 3387 */ 3388 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3389 rcrp->intr_timeout = nxgep->intr_timeout; 3390 rcrp->intr_threshold = nxgep->intr_threshold; 3391 rcrp->full_hdr_flag = B_FALSE; 3392 rcrp->sw_priv_hdr_len = 0; 3393 3394 cfga_p = &(rcrp->rcr_cfga); 3395 cfgb_p = &(rcrp->rcr_cfgb); 3396 cfga_p->value = 0; 3397 cfgb_p->value = 0; 3398 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3399 cfga_p->value = (rcrp->rcr_addr & 3400 (RCRCFIG_A_STADDR_MASK | 3401 RCRCFIG_A_STADDR_BASE_MASK)); 3402 3403 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3404 RCRCFIG_A_LEN_SHIF); 3405 3406 /* 3407 * Timeout should be set based on the system clock divider. 3408 * The following timeout value of 1 assumes that the 3409 * granularity (1000) is 3 microseconds running at 300MHz. 3410 */ 3411 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3412 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3413 cfgb_p->bits.ldw.entout = 1; 3414 3415 /* Map in the mailbox */ 3416 mboxp = (p_rx_mbox_t) 3417 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3418 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3419 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3420 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3421 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3422 cfig1_p->value = cfig2_p->value = 0; 3423 3424 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3425 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3426 "==> nxge_map_rxdma_channel_cfg_ring: " 3427 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3428 dma_channel, cfig1_p->value, cfig2_p->value, 3429 mboxp->mbox_addr)); 3430 3431 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3432 & 0xfff); 3433 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3434 3435 3436 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3437 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3438 RXDMA_CFIG2_MBADDR_L_MASK); 3439 3440 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3441 3442 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3443 "==> nxge_map_rxdma_channel_cfg_ring: " 3444 "channel %d damaddrp $%p " 3445 "cfg1 0x%016llx cfig2 0x%016llx", 3446 dma_channel, dmaaddrp, 3447 cfig1_p->value, cfig2_p->value)); 3448 3449 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3450 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3451 3452 rbrp->rx_rcr_p = rcrp; 3453 rcrp->rx_rbr_p = rbrp; 3454 *rcr_p = rcrp; 3455 *rx_mbox_p = mboxp; 3456 3457 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3458 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3459 3460 return (status); 3461 } 3462 3463 /*ARGSUSED*/ 3464 static void 3465 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3466 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3467 { 3468 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3469 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3470 rcr_p->rdc)); 3471 3472 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3473 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3474 3475 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3476 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3477 } 3478 3479 static nxge_status_t 3480 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3481 p_nxge_dma_common_t *dma_buf_p, 3482 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3483 { 3484 p_rx_rbr_ring_t rbrp; 3485 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3486 
p_rx_msg_t *rx_msg_ring; 3487 p_rx_msg_t rx_msg_p; 3488 p_mblk_t mblk_p; 3489 3490 rxring_info_t *ring_info; 3491 nxge_status_t status = NXGE_OK; 3492 int i, j, index; 3493 uint32_t size, bsize, nblocks, nmsgs; 3494 3495 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3496 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3497 channel)); 3498 3499 dma_bufp = tmp_bufp = *dma_buf_p; 3500 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3501 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3502 "chunks bufp 0x%016llx", 3503 channel, num_chunks, dma_bufp)); 3504 3505 nmsgs = 0; 3506 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3507 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3508 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3509 "bufp 0x%016llx nblocks %d nmsgs %d", 3510 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3511 nmsgs += tmp_bufp->nblocks; 3512 } 3513 if (!nmsgs) { 3514 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3515 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3516 "no msg blocks", 3517 channel)); 3518 status = NXGE_ERROR; 3519 goto nxge_map_rxdma_channel_buf_ring_exit; 3520 } 3521 3522 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3523 3524 size = nmsgs * sizeof (p_rx_msg_t); 3525 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3526 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3527 KM_SLEEP); 3528 3529 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3530 (void *)nxgep->interrupt_cookie); 3531 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3532 (void *)nxgep->interrupt_cookie); 3533 rbrp->rdc = channel; 3534 rbrp->num_blocks = num_chunks; 3535 rbrp->tnblocks = nmsgs; 3536 rbrp->rbb_max = nmsgs; 3537 rbrp->rbr_max_size = nmsgs; 3538 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3539 3540 /* 3541 * Buffer sizes suggested by NIU architect. 3542 * 256, 512 and 2K. 3543 */ 3544 3545 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3546 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3547 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3548 3549 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3550 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3551 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3552 3553 rbrp->block_size = nxgep->rx_default_block_size; 3554 3555 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3556 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3557 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3558 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3559 } else { 3560 if (rbrp->block_size >= 0x2000) { 3561 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3562 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3563 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3564 } else { 3565 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3566 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3567 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3568 } 3569 } 3570 3571 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3572 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3573 "actual rbr max %d rbb_max %d nmsgs %d " 3574 "rbrp->block_size %d default_block_size %d " 3575 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3576 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3577 rbrp->block_size, nxgep->rx_default_block_size, 3578 nxge_rbr_size, nxge_rbr_spare_size)); 3579 3580 /* Map in buffers from the buffer pool. 
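 * Each DMA chunk contributes dma_bufp->nblocks message blocks, and
 * ring_info->buffer[i].start_index records where that chunk's blocks
 * begin in the flat rx_msg_ring.  For illustration (sizes assumed): two
 * chunks of 2048 blocks each would give start_index 0 and 2048
 * respectively, for a total of tnblocks = 4096 entries.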
*/ 3581 index = 0; 3582 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3583 bsize = dma_bufp->block_size; 3584 nblocks = dma_bufp->nblocks; 3585 #if defined(__i386) 3586 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3587 #else 3588 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3589 #endif 3590 ring_info->buffer[i].buf_index = i; 3591 ring_info->buffer[i].buf_size = dma_bufp->alength; 3592 ring_info->buffer[i].start_index = index; 3593 #if defined(__i386) 3594 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3595 #else 3596 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3597 #endif 3598 3599 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3600 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3601 "chunk %d" 3602 " nblocks %d chunk_size %x block_size 0x%x " 3603 "dma_bufp $%p", channel, i, 3604 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3605 dma_bufp)); 3606 3607 for (j = 0; j < nblocks; j++) { 3608 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3609 dma_bufp)) == NULL) { 3610 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3611 "allocb failed (index %d i %d j %d)", 3612 index, i, j)); 3613 goto nxge_map_rxdma_channel_buf_ring_fail1; 3614 } 3615 rx_msg_ring[index] = rx_msg_p; 3616 rx_msg_p->block_index = index; 3617 rx_msg_p->shifted_addr = (uint32_t) 3618 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3619 RBR_BKADDR_SHIFT)); 3620 3621 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3622 "index %d j %d rx_msg_p $%p mblk %p", 3623 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3624 3625 mblk_p = rx_msg_p->rx_mblk_p; 3626 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3627 3628 rbrp->rbr_ref_cnt++; 3629 index++; 3630 rx_msg_p->buf_dma.dma_channel = channel; 3631 } 3632 } 3633 if (i < rbrp->num_blocks) { 3634 goto nxge_map_rxdma_channel_buf_ring_fail1; 3635 } 3636 3637 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3638 "nxge_map_rxdma_channel_buf_ring: done buf init " 3639 "channel %d msg block entries %d", 3640 channel, index)); 3641 ring_info->block_size_mask = bsize - 1; 3642 rbrp->rx_msg_ring = rx_msg_ring; 3643 rbrp->dma_bufp = dma_buf_p; 3644 rbrp->ring_info = ring_info; 3645 3646 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3647 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3648 " nxge_map_rxdma_channel_buf_ring: " 3649 "channel %d done buf info init", channel)); 3650 3651 /* 3652 * Finally, permit nxge_freeb() to call nxge_post_page(). 
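 * Until rbr_state is set to RBR_POSTING below, nxge_freeb() will not
 * repost a freed block to the hardware; once posting is enabled, a block
 * whose usage count is exhausted is handed back to the RBR via
 * nxge_post_page().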
3653 */ 3654 rbrp->rbr_state = RBR_POSTING; 3655 3656 *rbr_p = rbrp; 3657 goto nxge_map_rxdma_channel_buf_ring_exit; 3658 3659 nxge_map_rxdma_channel_buf_ring_fail1: 3660 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3661 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3662 channel, status)); 3663 3664 index--; 3665 for (; index >= 0; index--) { 3666 rx_msg_p = rx_msg_ring[index]; 3667 if (rx_msg_p != NULL) { 3668 freeb(rx_msg_p->rx_mblk_p); 3669 rx_msg_ring[index] = NULL; 3670 } 3671 } 3672 nxge_map_rxdma_channel_buf_ring_fail: 3673 MUTEX_DESTROY(&rbrp->post_lock); 3674 MUTEX_DESTROY(&rbrp->lock); 3675 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3676 KMEM_FREE(rx_msg_ring, size); 3677 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3678 3679 status = NXGE_ERROR; 3680 3681 nxge_map_rxdma_channel_buf_ring_exit: 3682 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3683 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3684 3685 return (status); 3686 } 3687 3688 /*ARGSUSED*/ 3689 static void 3690 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3691 p_rx_rbr_ring_t rbr_p) 3692 { 3693 p_rx_msg_t *rx_msg_ring; 3694 p_rx_msg_t rx_msg_p; 3695 rxring_info_t *ring_info; 3696 int i; 3697 uint32_t size; 3698 #ifdef NXGE_DEBUG 3699 int num_chunks; 3700 #endif 3701 3702 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3703 "==> nxge_unmap_rxdma_channel_buf_ring")); 3704 if (rbr_p == NULL) { 3705 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3706 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3707 return; 3708 } 3709 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3710 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3711 rbr_p->rdc)); 3712 3713 rx_msg_ring = rbr_p->rx_msg_ring; 3714 ring_info = rbr_p->ring_info; 3715 3716 if (rx_msg_ring == NULL || ring_info == NULL) { 3717 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3718 "<== nxge_unmap_rxdma_channel_buf_ring: " 3719 "rx_msg_ring $%p ring_info $%p", 3720 rx_msg_p, ring_info)); 3721 return; 3722 } 3723 3724 #ifdef NXGE_DEBUG 3725 num_chunks = rbr_p->num_blocks; 3726 #endif 3727 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3728 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3729 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3730 "tnblocks %d (max %d) size ptrs %d ", 3731 rbr_p->rdc, num_chunks, 3732 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3733 3734 for (i = 0; i < rbr_p->tnblocks; i++) { 3735 rx_msg_p = rx_msg_ring[i]; 3736 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3737 " nxge_unmap_rxdma_channel_buf_ring: " 3738 "rx_msg_p $%p", 3739 rx_msg_p)); 3740 if (rx_msg_p != NULL) { 3741 freeb(rx_msg_p->rx_mblk_p); 3742 rx_msg_ring[i] = NULL; 3743 } 3744 } 3745 3746 /* 3747 * We no longer may use the mutex <post_lock>. By setting 3748 * <rbr_state> to anything but POSTING, we prevent 3749 * nxge_post_page() from accessing a dead mutex. 3750 */ 3751 rbr_p->rbr_state = RBR_UNMAPPING; 3752 MUTEX_DESTROY(&rbr_p->post_lock); 3753 3754 MUTEX_DESTROY(&rbr_p->lock); 3755 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3756 KMEM_FREE(rx_msg_ring, size); 3757 3758 if (rbr_p->rbr_ref_cnt == 0) { 3759 /* This is the normal state of affairs. */ 3760 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3761 } else { 3762 /* 3763 * Some of our buffers are still being used. 3764 * Therefore, tell nxge_freeb() this ring is 3765 * unmapped, so it may free <rbr_p> for us. 3766 */ 3767 rbr_p->rbr_state = RBR_UNMAPPED; 3768 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3769 "unmap_rxdma_buf_ring: %d %s outstanding.", 3770 rbr_p->rbr_ref_cnt, 3771 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 3772 } 3773 3774 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3775 "<== nxge_unmap_rxdma_channel_buf_ring")); 3776 } 3777 3778 static nxge_status_t 3779 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3780 { 3781 nxge_status_t status = NXGE_OK; 3782 3783 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3784 3785 /* 3786 * Load the sharable parameters by writing to the 3787 * function zero control registers. These FZC registers 3788 * should be initialized only once for the entire chip. 3789 */ 3790 (void) nxge_init_fzc_rx_common(nxgep); 3791 3792 /* 3793 * Initialize the RXDMA port specific FZC control configurations. 3794 * These FZC registers are pertaining to each port. 3795 */ 3796 (void) nxge_init_fzc_rxdma_port(nxgep); 3797 3798 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3799 3800 return (status); 3801 } 3802 3803 /*ARGSUSED*/ 3804 static void 3805 nxge_rxdma_hw_stop_common(p_nxge_t nxgep) 3806 { 3807 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3808 3809 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3810 } 3811 3812 static nxge_status_t 3813 nxge_rxdma_hw_start(p_nxge_t nxgep) 3814 { 3815 int i, ndmas; 3816 uint16_t channel; 3817 p_rx_rbr_rings_t rx_rbr_rings; 3818 p_rx_rbr_ring_t *rbr_rings; 3819 p_rx_rcr_rings_t rx_rcr_rings; 3820 p_rx_rcr_ring_t *rcr_rings; 3821 p_rx_mbox_areas_t rx_mbox_areas_p; 3822 p_rx_mbox_t *rx_mbox_p; 3823 nxge_status_t status = NXGE_OK; 3824 3825 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3826 3827 rx_rbr_rings = nxgep->rx_rbr_rings; 3828 rx_rcr_rings = nxgep->rx_rcr_rings; 3829 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3830 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3831 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3832 return (NXGE_ERROR); 3833 } 3834 ndmas = rx_rbr_rings->ndmas; 3835 if (ndmas == 0) { 3836 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3837 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3838 return (NXGE_ERROR); 3839 } 3840 3841 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3842 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3843 3844 rbr_rings = rx_rbr_rings->rbr_rings; 3845 rcr_rings = rx_rcr_rings->rcr_rings; 3846 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3847 if (rx_mbox_areas_p) { 3848 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3849 } 3850 3851 for (i = 0; i < ndmas; i++) { 3852 channel = rbr_rings[i]->rdc; 3853 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3854 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3855 ndmas, channel)); 3856 status = nxge_rxdma_start_channel(nxgep, channel, 3857 (p_rx_rbr_ring_t)rbr_rings[i], 3858 (p_rx_rcr_ring_t)rcr_rings[i], 3859 (p_rx_mbox_t)rx_mbox_p[i]); 3860 if (status != NXGE_OK) { 3861 goto nxge_rxdma_hw_start_fail1; 3862 } 3863 } 3864 3865 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3866 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3867 rx_rbr_rings, rx_rcr_rings)); 3868 3869 goto nxge_rxdma_hw_start_exit; 3870 3871 nxge_rxdma_hw_start_fail1: 3872 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3873 "==> nxge_rxdma_hw_start: disable " 3874 "(status 0x%x channel %d i %d)", status, channel, i)); 3875 for (; i >= 0; i--) { 3876 channel = rbr_rings[i]->rdc; 3877 (void) nxge_rxdma_stop_channel(nxgep, channel); 3878 } 3879 3880 nxge_rxdma_hw_start_exit: 3881 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3882 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3883 3884 return (status); 3885 } 3886 3887 static void 3888 nxge_rxdma_hw_stop(p_nxge_t nxgep) 3889 { 3890 int i, ndmas; 3891 uint16_t channel; 3892 p_rx_rbr_rings_t 
rx_rbr_rings; 3893 p_rx_rbr_ring_t *rbr_rings; 3894 p_rx_rcr_rings_t rx_rcr_rings; 3895 3896 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 3897 3898 rx_rbr_rings = nxgep->rx_rbr_rings; 3899 rx_rcr_rings = nxgep->rx_rcr_rings; 3900 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3901 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3902 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 3903 return; 3904 } 3905 ndmas = rx_rbr_rings->ndmas; 3906 if (!ndmas) { 3907 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3908 "<== nxge_rxdma_hw_stop: no dma channel allocated")); 3909 return; 3910 } 3911 3912 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3913 "==> nxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3914 3915 rbr_rings = rx_rbr_rings->rbr_rings; 3916 3917 for (i = 0; i < ndmas; i++) { 3918 channel = rbr_rings[i]->rdc; 3919 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3920 "==> nxge_rxdma_hw_stop (ndmas %d) channel %d", 3921 ndmas, channel)); 3922 (void) nxge_rxdma_stop_channel(nxgep, channel); 3923 } 3924 3925 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3926 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3927 rx_rbr_rings, rx_rcr_rings)); 3928 3929 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 3930 } 3931 3932 3933 static nxge_status_t 3934 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 3935 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3936 3937 { 3938 npi_handle_t handle; 3939 npi_status_t rs = NPI_SUCCESS; 3940 rx_dma_ctl_stat_t cs; 3941 rx_dma_ent_msk_t ent_mask; 3942 nxge_status_t status = NXGE_OK; 3943 3944 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 3945 3946 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3947 3948 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 3949 "npi handle addr $%p acc $%p", 3950 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 3951 3952 /* Reset RXDMA channel */ 3953 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 3954 if (rs != NPI_SUCCESS) { 3955 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3956 "==> nxge_rxdma_start_channel: " 3957 "reset rxdma failed (0x%08x channel %d)", 3958 status, channel)); 3959 return (NXGE_ERROR | rs); 3960 } 3961 3962 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3963 "==> nxge_rxdma_start_channel: reset done: channel %d", 3964 channel)); 3965 3966 /* 3967 * Initialize the RXDMA channel specific FZC control 3968 * configurations. These FZC registers are pertaining 3969 * to each RX channel (logical pages). 3970 */ 3971 status = nxge_init_fzc_rxdma_channel(nxgep, 3972 channel, rbr_p, rcr_p, mbox_p); 3973 if (status != NXGE_OK) { 3974 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3975 "==> nxge_rxdma_start_channel: " 3976 "init fzc rxdma failed (0x%08x channel %d)", 3977 status, channel)); 3978 return (status); 3979 } 3980 3981 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3982 "==> nxge_rxdma_start_channel: fzc done")); 3983 3984 /* 3985 * Zero out the shadow and prefetch ram. 3986 */ 3987 3988 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3989 "ram done")); 3990 3991 /* Set up the interrupt event masks. 
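 * A set bit in the event mask disables that event for this channel.
 * While the channel is being brought up only RBREMPTY is masked off;
 * after the channel has been enabled further below, the mask is
 * rewritten so that only WRED_DROP and PTDROP_PKT are disabled, leaving
 * the remaining RXDMA events armed.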
*/
3992 ent_mask.value = 0;
3993 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
3994 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
3995 &ent_mask);
3996 if (rs != NPI_SUCCESS) {
3997 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3998 "==> nxge_rxdma_start_channel: "
3999 "init rxdma event masks failed (0x%08x channel %d)",
4000 rs, channel));
4001 return (NXGE_ERROR | rs);
4002 }
4003
4004 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4005 "event done: channel %d (mask 0x%016llx)",
4006 channel, ent_mask.value));
4007
4008 /* Initialize the receive DMA control and status register */
4009 cs.value = 0;
4010 cs.bits.hdw.mex = 1;
4011 cs.bits.hdw.rcrthres = 1;
4012 cs.bits.hdw.rcrto = 1;
4013 cs.bits.hdw.rbr_empty = 1;
4014 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
4015 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4016 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value));
4017 if (status != NXGE_OK) {
4018 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4019 "==> nxge_rxdma_start_channel: "
4020 "init rxdma control register failed (0x%08x channel %d)",
4021 status, channel));
4022 return (status);
4023 }
4024
4025 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4026 "control done - channel %d cs 0x%016llx", channel, cs.value));
4027
4028 /*
4029 * Load RXDMA descriptors, buffers, mailbox,
4030 * initialise the receive DMA channels and
4031 * enable each DMA channel.
4032 */
4033 status = nxge_enable_rxdma_channel(nxgep,
4034 channel, rbr_p, rcr_p, mbox_p);
4035
4036 if (status != NXGE_OK) {
4037 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4038 " nxge_rxdma_start_channel: "
4039 " init enable rxdma failed (0x%08x channel %d)",
4040 status, channel));
4041 return (status);
4042 }
4043
4044 ent_mask.value = 0;
4045 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
4046 RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
4047 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4048 &ent_mask);
4049 if (rs != NPI_SUCCESS) {
4050 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4051 "==> nxge_rxdma_start_channel: "
4052 "init rxdma event masks failed (0x%08x channel %d)",
4053 rs, channel));
4054 return (NXGE_ERROR | rs);
4055 }
4056
4057 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
4058 "control done - channel %d cs 0x%016llx", channel, cs.value));
4059
4060 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
4061 "==> nxge_rxdma_start_channel: enable done"));
4062
4063 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
4064
4065 return (NXGE_OK);
4066 }
4067
4068 static nxge_status_t
4069 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
4070 {
4071 npi_handle_t handle;
4072 npi_status_t rs = NPI_SUCCESS;
4073 rx_dma_ctl_stat_t cs;
4074 rx_dma_ent_msk_t ent_mask;
4075 nxge_status_t status = NXGE_OK;
4076
4077 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));
4078
4079 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4080
4081 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
4082 "npi handle addr $%p acc $%p",
4083 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
4084
4085 /* Reset RXDMA channel */
4086 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4087 if (rs != NPI_SUCCESS) {
4088 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4089 " nxge_rxdma_stop_channel: "
4090 " reset rxdma failed (0x%08x channel %d)",
4091 rs, channel));
4092 return (NXGE_ERROR | rs);
4093 }
4094
4095 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4096 "==> nxge_rxdma_stop_channel: reset done"));
4097
4098 /* Set up the interrupt event masks.
*/
4099 ent_mask.value = RX_DMA_ENT_MSK_ALL;
4100 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
4101 &ent_mask);
4102 if (rs != NPI_SUCCESS) {
4103 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4104 "==> nxge_rxdma_stop_channel: "
4105 "set rxdma event masks failed (0x%08x channel %d)",
4106 rs, channel));
4107 return (NXGE_ERROR | rs);
4108 }
4109
4110 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4111 "==> nxge_rxdma_stop_channel: event done"));
4112
4113 /* Initialize the receive DMA control and status register */
4114 cs.value = 0;
4115 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel,
4116 &cs);
4117 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
4118 " to default (all 0s) 0x%08x", cs.value));
4119 if (status != NXGE_OK) {
4120 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4121 " nxge_rxdma_stop_channel: init rxdma"
4122 " control register failed (0x%08x channel %d)",
4123 status, channel));
4124 return (status);
4125 }
4126
4127 NXGE_DEBUG_MSG((nxgep, RX_CTL,
4128 "==> nxge_rxdma_stop_channel: control done"));
4129
4130 /* disable dma channel */
4131 status = nxge_disable_rxdma_channel(nxgep, channel);
4132
4133 if (status != NXGE_OK) {
4134 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4135 " nxge_rxdma_stop_channel: "
4136 " disable rxdma failed (0x%08x channel %d)",
4137 status, channel));
4138 return (status);
4139 }
4140
4141 NXGE_DEBUG_MSG((nxgep,
4142 RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));
4143
4144 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));
4145
4146 return (NXGE_OK);
4147 }
4148
4149 nxge_status_t
4150 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
4151 {
4152 npi_handle_t handle;
4153 p_nxge_rdc_sys_stats_t statsp;
4154 rx_ctl_dat_fifo_stat_t stat;
4155 uint32_t zcp_err_status;
4156 uint32_t ipp_err_status;
4157 nxge_status_t status = NXGE_OK;
4158 npi_status_t rs = NPI_SUCCESS;
4159 boolean_t my_err = B_FALSE;
4160
4161 handle = nxgep->npi_handle;
4162 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4163
4164 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
4165
4166 if (rs != NPI_SUCCESS)
4167 return (NXGE_ERROR | rs);
4168
4169 if (stat.bits.ldw.id_mismatch) {
4170 statsp->id_mismatch++;
4171 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
4172 NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
4173 /* Global fatal error encountered */
4174 }
4175
4176 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
4177 switch (nxgep->mac.portnum) {
4178 case 0:
4179 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
4180 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
4181 my_err = B_TRUE;
4182 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4183 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4184 }
4185 break;
4186 case 1:
4187 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
4188 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
4189 my_err = B_TRUE;
4190 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4191 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4192 }
4193 break;
4194 case 2:
4195 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
4196 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
4197 my_err = B_TRUE;
4198 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4199 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4200 }
4201 break;
4202 case 3:
4203 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
4204 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
4205 my_err = B_TRUE;
4206 zcp_err_status = stat.bits.ldw.zcp_eop_err;
4207 ipp_err_status = stat.bits.ldw.ipp_eop_err;
4208 }
4209 break;
4210 default:
4211 return (NXGE_ERROR);
4212
}
4213 }
4214
4215 if (my_err) {
4216 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
4217 zcp_err_status);
4218 if (status != NXGE_OK)
4219 return (status);
4220 }
4221
4222 return (NXGE_OK);
4223 }
4224
4225 static nxge_status_t
4226 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4227 uint32_t zcp_status)
4228 {
4229 boolean_t rxport_fatal = B_FALSE;
4230 p_nxge_rdc_sys_stats_t statsp;
4231 nxge_status_t status = NXGE_OK;
4232 uint8_t portn;
4233
4234 portn = nxgep->mac.portnum;
4235 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4236
4237 if (ipp_status & (0x1 << portn)) {
4238 statsp->ipp_eop_err++;
4239 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4240 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
4241 rxport_fatal = B_TRUE;
4242 }
4243
4244 if (zcp_status & (0x1 << portn)) {
4245 statsp->zcp_eop_err++;
4246 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4247 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
4248 rxport_fatal = B_TRUE;
4249 }
4250
4251 if (rxport_fatal) {
4252 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4253 " nxge_rxdma_handle_port_errors: "
4254 " fatal error on Port #%d\n",
4255 portn));
4256 status = nxge_rx_port_fatal_err_recover(nxgep);
4257 if (status == NXGE_OK) {
4258 FM_SERVICE_RESTORED(nxgep);
4259 }
4260 }
4261
4262 return (status);
4263 }
4264
4265 static nxge_status_t
4266 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
4267 {
4268 npi_handle_t handle;
4269 npi_status_t rs = NPI_SUCCESS;
4270 nxge_status_t status = NXGE_OK;
4271 p_rx_rbr_ring_t rbrp;
4272 p_rx_rcr_ring_t rcrp;
4273 p_rx_mbox_t mboxp;
4274 rx_dma_ent_msk_t ent_mask;
4275 p_nxge_dma_common_t dmap;
4276 int ring_idx;
4277 uint32_t ref_cnt;
4278 p_rx_msg_t rx_msg_p;
4279 int i;
4280 uint32_t nxge_port_rcr_size;
4281
4282 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
4283 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4284 "Recovering from RxDMAChannel#%d error...", channel));
4285
4286 /*
4287 * Stop the DMA channel and wait for the stop-done indication.
4288 * If the stop-done bit is not set, then report
4289 * an error.
4290 */
4291
4292 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4293 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));
4294
4295 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
4296 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
4297 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];
4298
4299 MUTEX_ENTER(&rcrp->lock);
4300 MUTEX_ENTER(&rbrp->lock);
4301 MUTEX_ENTER(&rbrp->post_lock);
4302
4303 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));
4304
4305 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
4306 if (rs != NPI_SUCCESS) {
4307 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4308 "nxge_disable_rxdma_channel:failed"));
4309 goto fail;
4310 }
4311
4312 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));
4313
4314 /* Disable interrupt */
4315 ent_mask.value = RX_DMA_ENT_MSK_ALL;
4316 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
4317 if (rs != NPI_SUCCESS) {
4318 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4319 "nxge_rxdma_fatal_err_recover: "
4320 "set rxdma event masks failed (channel %d)",
4321 channel));
4322 }
4323
4324 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));
4325
4326 /* Reset RXDMA channel */
4327 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
4328 if (rs != NPI_SUCCESS) {
4329 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4330 "nxge_rxdma_fatal_err_recover: "
4331 " reset rxdma failed (channel %d)", channel));
4332 goto fail;
4333 }
4334
4335 nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
4336
4337 mboxp =
4338 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];
4339
4340 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
4341 rbrp->rbr_rd_index = 0;
4342
4343 rcrp->comp_rd_index = 0;
4344 rcrp->comp_wt_index = 0;
4345 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
4346 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
4347 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
4348 #if defined(__i386)
4349 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4350 #else
4351 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
4352 #endif
4353
4354 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
4355 (nxge_port_rcr_size - 1);
4356 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
4357 (nxge_port_rcr_size - 1);
4358
4359 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
4360 bzero((caddr_t)dmap->kaddrp, dmap->alength);
4361
4362 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);
4363
4364 for (i = 0; i < rbrp->rbr_max_size; i++) {
4365 rx_msg_p = rbrp->rx_msg_ring[i];
4366 ref_cnt = rx_msg_p->ref_cnt;
4367 if (ref_cnt != 1) {
4368 if (rx_msg_p->cur_usage_cnt !=
4369 rx_msg_p->max_usage_cnt) {
4370 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4371 "buf[%d]: cur_usage_cnt = %d "
4372 "max_usage_cnt = %d\n", i,
4373 rx_msg_p->cur_usage_cnt,
4374 rx_msg_p->max_usage_cnt));
4375 } else {
4376 /* Buffer can be re-posted */
4377 rx_msg_p->free = B_TRUE;
4378 rx_msg_p->cur_usage_cnt = 0;
4379 rx_msg_p->max_usage_cnt = 0xbaddcafe;
4380 rx_msg_p->pkt_buf_size = 0;
4381 }
4382 }
4383 }
4384
4385 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));
4386
4387 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
4388 if (status != NXGE_OK) {
4389 goto fail;
4390 }
4391
4392 MUTEX_EXIT(&rbrp->post_lock);
4393 MUTEX_EXIT(&rbrp->lock);
4394 MUTEX_EXIT(&rcrp->lock);
4395
4396 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4397 "Recovery Successful, RxDMAChannel#%d Restored",
4398 channel));
4399 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
4400
4401 return (NXGE_OK);
4402 fail:
4403
MUTEX_EXIT(&rbrp->post_lock);
4404 MUTEX_EXIT(&rbrp->lock);
4405 MUTEX_EXIT(&rcrp->lock);
4406 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4407
4408 return (NXGE_ERROR | rs);
4409 }
4410
4411 nxge_status_t
4412 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
4413 {
4414 nxge_status_t status = NXGE_OK;
4415 p_nxge_dma_common_t *dma_buf_p;
4416 uint16_t channel;
4417 int ndmas;
4418 int i;
4419
4420 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
4421 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4422 "Recovering from RxPort error..."));
4423 /* Disable RxMAC */
4424
4425 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n"));
4426 if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
4427 goto fail;
4428
4429 NXGE_DELAY(1000);
4430
4431 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels..."));
4432
4433 ndmas = nxgep->rx_buf_pool_p->ndmas;
4434 dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p;
4435
4436 for (i = 0; i < ndmas; i++) {
4437 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
4438 if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) {
4439 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4440 "Could not recover channel %d",
4441 channel));
4442 }
4443 }
4444
4445 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP..."));
4446
4447 /* Reset IPP */
4448 if (nxge_ipp_reset(nxgep) != NXGE_OK) {
4449 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4450 "nxge_rx_port_fatal_err_recover: "
4451 "Failed to reset IPP"));
4452 goto fail;
4453 }
4454
4455 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));
4456
4457 /* Reset RxMAC */
4458 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
4459 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4460 "nxge_rx_port_fatal_err_recover: "
4461 "Failed to reset RxMAC"));
4462 goto fail;
4463 }
4464
4465 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));
4466
4467 /* Re-Initialize IPP */
4468 if (nxge_ipp_init(nxgep) != NXGE_OK) {
4469 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4470 "nxge_rx_port_fatal_err_recover: "
4471 "Failed to init IPP"));
4472 goto fail;
4473 }
4474
4475 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));
4476
4477 /* Re-Initialize RxMAC */
4478 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
4479 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4480 "nxge_rx_port_fatal_err_recover: "
4481 "Failed to init RxMAC"));
4482 goto fail;
4483 }
4484
4485 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));
4486
4487 /* Re-enable RxMAC */
4488 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
4489 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4490 "nxge_rx_port_fatal_err_recover: "
4491 "Failed to enable RxMAC"));
4492 goto fail;
4493 }
4494
4495 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4496 "Recovery Successful, RxPort Restored"));
4497
4498 return (NXGE_OK);
4499 fail:
4500 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
4501 return (status);
4502 }
4503
4504 void
4505 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
4506 {
4507 rx_dma_ctl_stat_t cs;
4508 rx_ctl_dat_fifo_stat_t cdfs;
4509
4510 switch (err_id) {
4511 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
4512 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
4513 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
4514 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
4515 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
4516 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
4517 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
4518 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
4519 case NXGE_FM_EREPORT_RDMC_RCRINCON:
4520 case NXGE_FM_EREPORT_RDMC_RCRFULL:
4521 case NXGE_FM_EREPORT_RDMC_RBRFULL:
4522 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
4523 case
NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
4524 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
4525 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4526 chan, &cs.value);
4527 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
4528 cs.bits.hdw.rcr_ack_err = 1;
4529 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
4530 cs.bits.hdw.dc_fifo_err = 1;
4531 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
4532 cs.bits.hdw.rcr_sha_par = 1;
4533 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
4534 cs.bits.hdw.rbr_pre_par = 1;
4535 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
4536 cs.bits.hdw.rbr_tmout = 1;
4537 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
4538 cs.bits.hdw.rsp_cnt_err = 1;
4539 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
4540 cs.bits.hdw.byte_en_bus = 1;
4541 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
4542 cs.bits.hdw.rsp_dat_err = 1;
4543 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
4544 cs.bits.hdw.config_err = 1;
4545 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
4546 cs.bits.hdw.rcrincon = 1;
4547 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
4548 cs.bits.hdw.rcrfull = 1;
4549 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
4550 cs.bits.hdw.rbrfull = 1;
4551 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
4552 cs.bits.hdw.rbrlogpage = 1;
4553 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
4554 cs.bits.hdw.cfiglogpage = 1;
4555 #if defined(__i386)
4556 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
4557 cs.value);
4558 #else
4559 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
4560 cs.value);
4561 #endif
4562 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
4563 chan, cs.value);
4564 break;
4565 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
4566 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
4567 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
4568 cdfs.value = 0;
4569 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
4570 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
4571 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
4572 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
4573 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
4574 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
4575 #if defined(__i386)
4576 cmn_err(CE_NOTE,
4577 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4578 cdfs.value);
4579 #else
4580 cmn_err(CE_NOTE,
4581 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
4582 cdfs.value);
4583 #endif
4584 RXDMA_REG_WRITE64(nxgep->npi_handle,
4585 RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value);
4586 break;
4587 case NXGE_FM_EREPORT_RDMC_DCF_ERR:
4588 break;
4589 case NXGE_FM_EREPORT_RDMC_RCR_ERR:
4590 break;
4591 }
4592 }
4593
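/*
 * Supplementary sketch, not part of the original file: the per-port
 * EOP-error selection in nxge_rxdma_handle_sys_errors() above tests one
 * bit per port in the ZCP/IPP end-of-packet error fields, and
 * nxge_rxdma_handle_port_errors() tests the same bit as (0x1 << portn).
 * Assuming FIFO_EOP_PORT0..FIFO_EOP_PORT3 are the single-bit masks
 * (1 << 0) through (1 << 3), the four-way switch is equivalent to the
 * hypothetical helper below (the helper name does not exist in the
 * driver; it only illustrates the bit test).
 */
static boolean_t
nxge_rxdma_eop_err_is_mine(uint32_t zcp_eop_err, uint32_t ipp_eop_err,
	uint8_t portn)
{
	uint32_t port_bit = (0x1 << portn);

	/* The EOP error belongs to this port if either FIFO flags its bit. */
	if ((zcp_eop_err & port_bit) || (ipp_eop_err & port_bit))
		return (B_TRUE);

	return (B_FALSE);
}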