1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/nxge/nxge_impl.h> 29 #include <sys/nxge/nxge_rxdma.h> 30 31 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 32 (rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid) 33 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 34 (rdc + nxgep->pt_config.hw_config.start_rdc) 35 36 /* 37 * Globals: tunable parameters (/etc/system or adb) 38 * 39 */ 40 extern uint32_t nxge_rbr_size; 41 extern uint32_t nxge_rcr_size; 42 extern uint32_t nxge_rbr_spare_size; 43 44 extern uint32_t nxge_mblks_pending; 45 46 /* 47 * Tunable to reduce the amount of time spent in the 48 * ISR doing Rx Processing. 49 */ 50 extern uint32_t nxge_max_rx_pkts; 51 boolean_t nxge_jumbo_enable; 52 53 /* 54 * Tunables to manage the receive buffer blocks. 55 * 56 * nxge_rx_threshold_hi: copy all buffers. 57 * nxge_rx_bcopy_size_type: receive buffer block size type. 58 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
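 *
 * These thresholds are consulted in the receive path (the rx_use_bcopy
 * handling) to decide whether received data is bcopy'd into a freshly
 * allocated mblk or the mapped DMA buffer itself is loaned up the stack.
 * Like the other globals above they can be tuned from /etc/system, e.g.
 * "set nxge:nxge_rx_threshold_lo = <value>"; the value is a placeholder
 * here, the legal settings being those of nxge_rxbuf_threshold_t.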
59 */ 60 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 61 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 62 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 63 64 static nxge_status_t nxge_map_rxdma(p_nxge_t); 65 static void nxge_unmap_rxdma(p_nxge_t); 66 67 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 68 static void nxge_rxdma_hw_stop_common(p_nxge_t); 69 70 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t); 71 static void nxge_rxdma_hw_stop(p_nxge_t); 72 73 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 74 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 75 uint32_t, 76 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 77 p_rx_mbox_t *); 78 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 79 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 80 81 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 82 uint16_t, 83 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 84 p_rx_rcr_ring_t *, p_rx_mbox_t *); 85 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 86 p_rx_rcr_ring_t, p_rx_mbox_t); 87 88 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 89 uint16_t, 90 p_nxge_dma_common_t *, 91 p_rx_rbr_ring_t *, uint32_t); 92 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 93 p_rx_rbr_ring_t); 94 95 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 96 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 97 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 98 99 mblk_t * 100 nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t, 101 p_rx_rcr_ring_t *, rx_dma_ctl_stat_t); 102 103 static void nxge_receive_packet(p_nxge_t, 104 p_rx_rcr_ring_t, 105 p_rcr_entry_t, 106 boolean_t *, 107 mblk_t **, mblk_t **); 108 109 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 110 111 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 112 static void nxge_freeb(p_rx_msg_t); 113 static void nxge_rx_pkts_vring(p_nxge_t, uint_t, 114 p_nxge_ldv_t, rx_dma_ctl_stat_t); 115 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t, 116 p_nxge_ldv_t, rx_dma_ctl_stat_t); 117 118 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 119 uint32_t, uint32_t); 120 121 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 122 p_rx_rbr_ring_t); 123 124 125 static nxge_status_t 126 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 127 128 nxge_status_t 129 nxge_rx_port_fatal_err_recover(p_nxge_t); 130 131 static uint16_t 132 nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb); 133 134 nxge_status_t 135 nxge_init_rxdma_channels(p_nxge_t nxgep) 136 { 137 nxge_status_t status = NXGE_OK; 138 139 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 140 141 status = nxge_map_rxdma(nxgep); 142 if (status != NXGE_OK) { 143 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 144 "<== nxge_init_rxdma: status 0x%x", status)); 145 return (status); 146 } 147 148 status = nxge_rxdma_hw_start_common(nxgep); 149 if (status != NXGE_OK) { 150 nxge_unmap_rxdma(nxgep); 151 } 152 153 status = nxge_rxdma_hw_start(nxgep); 154 if (status != NXGE_OK) { 155 nxge_unmap_rxdma(nxgep); 156 } 157 158 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 159 "<== nxge_init_rxdma_channels: status 0x%x", status)); 160 161 return (status); 162 } 163 164 void 165 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 166 { 167 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 168 169 nxge_rxdma_hw_stop(nxgep); 170 nxge_rxdma_hw_stop_common(nxgep); 171 nxge_unmap_rxdma(nxgep); 172 173 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 174 "<== 
nxge_uinit_rxdma_channels")); 175 } 176 177 nxge_status_t 178 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 179 { 180 npi_handle_t handle; 181 npi_status_t rs = NPI_SUCCESS; 182 nxge_status_t status = NXGE_OK; 183 184 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 185 186 handle = NXGE_DEV_NPI_HANDLE(nxgep); 187 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 188 189 if (rs != NPI_SUCCESS) { 190 status = NXGE_ERROR | rs; 191 } 192 193 return (status); 194 } 195 196 void 197 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 198 { 199 int i, ndmas; 200 uint16_t channel; 201 p_rx_rbr_rings_t rx_rbr_rings; 202 p_rx_rbr_ring_t *rbr_rings; 203 npi_handle_t handle; 204 205 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 206 207 handle = NXGE_DEV_NPI_HANDLE(nxgep); 208 (void) npi_rxdma_dump_fzc_regs(handle); 209 210 rx_rbr_rings = nxgep->rx_rbr_rings; 211 if (rx_rbr_rings == NULL) { 212 NXGE_DEBUG_MSG((nxgep, RX_CTL, 213 "<== nxge_rxdma_regs_dump_channels: " 214 "NULL ring pointer")); 215 return; 216 } 217 if (rx_rbr_rings->rbr_rings == NULL) { 218 NXGE_DEBUG_MSG((nxgep, RX_CTL, 219 "<== nxge_rxdma_regs_dump_channels: " 220 " NULL rbr rings pointer")); 221 return; 222 } 223 224 ndmas = rx_rbr_rings->ndmas; 225 if (!ndmas) { 226 NXGE_DEBUG_MSG((nxgep, RX_CTL, 227 "<== nxge_rxdma_regs_dump_channels: no channel")); 228 return; 229 } 230 231 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 232 "==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas)); 233 234 rbr_rings = rx_rbr_rings->rbr_rings; 235 for (i = 0; i < ndmas; i++) { 236 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 237 continue; 238 } 239 channel = rbr_rings[i]->rdc; 240 (void) nxge_dump_rxdma_channel(nxgep, channel); 241 } 242 243 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 244 245 } 246 247 nxge_status_t 248 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 249 { 250 npi_handle_t handle; 251 npi_status_t rs = NPI_SUCCESS; 252 nxge_status_t status = NXGE_OK; 253 254 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 255 256 handle = NXGE_DEV_NPI_HANDLE(nxgep); 257 rs = npi_rxdma_dump_rdc_regs(handle, channel); 258 259 if (rs != NPI_SUCCESS) { 260 status = NXGE_ERROR | rs; 261 } 262 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 263 return (status); 264 } 265 266 nxge_status_t 267 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 268 p_rx_dma_ent_msk_t mask_p) 269 { 270 npi_handle_t handle; 271 npi_status_t rs = NPI_SUCCESS; 272 nxge_status_t status = NXGE_OK; 273 274 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 275 "<== nxge_init_rxdma_channel_event_mask")); 276 277 handle = NXGE_DEV_NPI_HANDLE(nxgep); 278 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 279 if (rs != NPI_SUCCESS) { 280 status = NXGE_ERROR | rs; 281 } 282 283 return (status); 284 } 285 286 nxge_status_t 287 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 288 p_rx_dma_ctl_stat_t cs_p) 289 { 290 npi_handle_t handle; 291 npi_status_t rs = NPI_SUCCESS; 292 nxge_status_t status = NXGE_OK; 293 294 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 295 "<== nxge_init_rxdma_channel_cntl_stat")); 296 297 handle = NXGE_DEV_NPI_HANDLE(nxgep); 298 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 299 300 if (rs != NPI_SUCCESS) { 301 status = NXGE_ERROR | rs; 302 } 303 304 return (status); 305 } 306 307 nxge_status_t 308 nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp, 309 uint8_t rdc) 310 { 311 npi_handle_t handle; 312 npi_status_t rs = 
NPI_SUCCESS; 313 p_nxge_dma_pt_cfg_t p_dma_cfgp; 314 p_nxge_rdc_grp_t rdc_grp_p; 315 uint8_t actual_rdcgrp, actual_rdc; 316 317 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 318 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 319 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 320 321 handle = NXGE_DEV_NPI_HANDLE(nxgep); 322 323 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 324 rdc_grp_p->rdc[0] = rdc; 325 326 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 327 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 328 329 rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp, 330 actual_rdc); 331 332 if (rs != NPI_SUCCESS) { 333 return (NXGE_ERROR | rs); 334 } 335 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 336 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 337 return (NXGE_OK); 338 } 339 340 nxge_status_t 341 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 342 { 343 npi_handle_t handle; 344 345 uint8_t actual_rdc; 346 npi_status_t rs = NPI_SUCCESS; 347 348 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 349 " ==> nxge_rxdma_cfg_port_default_rdc")); 350 351 handle = NXGE_DEV_NPI_HANDLE(nxgep); 352 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 353 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 354 355 356 if (rs != NPI_SUCCESS) { 357 return (NXGE_ERROR | rs); 358 } 359 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 360 " <== nxge_rxdma_cfg_port_default_rdc")); 361 362 return (NXGE_OK); 363 } 364 365 nxge_status_t 366 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 367 uint16_t pkts) 368 { 369 npi_status_t rs = NPI_SUCCESS; 370 npi_handle_t handle; 371 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 372 " ==> nxge_rxdma_cfg_rcr_threshold")); 373 handle = NXGE_DEV_NPI_HANDLE(nxgep); 374 375 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 376 377 if (rs != NPI_SUCCESS) { 378 return (NXGE_ERROR | rs); 379 } 380 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 381 return (NXGE_OK); 382 } 383 384 nxge_status_t 385 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 386 uint16_t tout, uint8_t enable) 387 { 388 npi_status_t rs = NPI_SUCCESS; 389 npi_handle_t handle; 390 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 391 handle = NXGE_DEV_NPI_HANDLE(nxgep); 392 if (enable == 0) { 393 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 394 } else { 395 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 396 tout); 397 } 398 399 if (rs != NPI_SUCCESS) { 400 return (NXGE_ERROR | rs); 401 } 402 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 403 return (NXGE_OK); 404 } 405 406 nxge_status_t 407 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 408 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 409 { 410 npi_handle_t handle; 411 rdc_desc_cfg_t rdc_desc; 412 p_rcrcfig_b_t cfgb_p; 413 npi_status_t rs = NPI_SUCCESS; 414 415 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 416 handle = NXGE_DEV_NPI_HANDLE(nxgep); 417 /* 418 * Use configuration data composed at init time. 419 * Write to hardware the receive ring configurations. 
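     * The rdc_desc filled in below carries the mailbox address, the RBR
     * base address and length, the three packet buffer sizes, the RCR
     * base address and length, and the interrupt threshold/timeout
     * values taken from the software ring state.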
420 */ 421 rdc_desc.mbox_enable = 1; 422 rdc_desc.mbox_addr = mbox_p->mbox_addr; 423 NXGE_DEBUG_MSG((nxgep, RX_CTL, 424 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 425 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 426 427 rdc_desc.rbr_len = rbr_p->rbb_max; 428 rdc_desc.rbr_addr = rbr_p->rbr_addr; 429 430 switch (nxgep->rx_bksize_code) { 431 case RBR_BKSIZE_4K: 432 rdc_desc.page_size = SIZE_4KB; 433 break; 434 case RBR_BKSIZE_8K: 435 rdc_desc.page_size = SIZE_8KB; 436 break; 437 case RBR_BKSIZE_16K: 438 rdc_desc.page_size = SIZE_16KB; 439 break; 440 case RBR_BKSIZE_32K: 441 rdc_desc.page_size = SIZE_32KB; 442 break; 443 } 444 445 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 446 rdc_desc.valid0 = 1; 447 448 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 449 rdc_desc.valid1 = 1; 450 451 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 452 rdc_desc.valid2 = 1; 453 454 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 455 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 456 457 rdc_desc.rcr_len = rcr_p->comp_size; 458 rdc_desc.rcr_addr = rcr_p->rcr_addr; 459 460 cfgb_p = &(rcr_p->rcr_cfgb); 461 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 462 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 463 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 464 465 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 466 "rbr_len qlen %d pagesize code %d rcr_len %d", 467 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 468 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 469 "size 0 %d size 1 %d size 2 %d", 470 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 471 rbr_p->npi_pkt_buf_size2)); 472 473 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 474 if (rs != NPI_SUCCESS) { 475 return (NXGE_ERROR | rs); 476 } 477 478 /* 479 * Enable the timeout and threshold. 480 */ 481 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 482 rdc_desc.rcr_threshold); 483 if (rs != NPI_SUCCESS) { 484 return (NXGE_ERROR | rs); 485 } 486 487 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 488 rdc_desc.rcr_timeout); 489 if (rs != NPI_SUCCESS) { 490 return (NXGE_ERROR | rs); 491 } 492 493 /* Enable the DMA */ 494 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 495 if (rs != NPI_SUCCESS) { 496 return (NXGE_ERROR | rs); 497 } 498 499 /* Kick the DMA engine. 
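     * The kick posts all rbb_max RBR entries at once, so the channel
     * starts out with a fully stocked buffer ring.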
*/ 500 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 501 /* Clear the rbr empty bit */ 502 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 503 504 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 505 506 return (NXGE_OK); 507 } 508 509 nxge_status_t 510 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 511 { 512 npi_handle_t handle; 513 npi_status_t rs = NPI_SUCCESS; 514 515 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 516 handle = NXGE_DEV_NPI_HANDLE(nxgep); 517 518 /* disable the DMA */ 519 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 520 if (rs != NPI_SUCCESS) { 521 NXGE_DEBUG_MSG((nxgep, RX_CTL, 522 "<== nxge_disable_rxdma_channel:failed (0x%x)", 523 rs)); 524 return (NXGE_ERROR | rs); 525 } 526 527 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 528 return (NXGE_OK); 529 } 530 531 nxge_status_t 532 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 533 { 534 npi_handle_t handle; 535 nxge_status_t status = NXGE_OK; 536 537 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 538 "<== nxge_init_rxdma_channel_rcrflush")); 539 540 handle = NXGE_DEV_NPI_HANDLE(nxgep); 541 npi_rxdma_rdc_rcr_flush(handle, channel); 542 543 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 544 "<== nxge_init_rxdma_channel_rcrflsh")); 545 return (status); 546 547 } 548 549 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 550 551 #define TO_LEFT -1 552 #define TO_RIGHT 1 553 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 554 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 555 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 556 #define NO_HINT 0xffffffff 557 558 /*ARGSUSED*/ 559 nxge_status_t 560 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 561 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 562 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 563 { 564 int bufsize; 565 uint64_t pktbuf_pp; 566 uint64_t dvma_addr; 567 rxring_info_t *ring_info; 568 int base_side, end_side; 569 int r_index, l_index, anchor_index; 570 int found, search_done; 571 uint32_t offset, chunk_size, block_size, page_size_mask; 572 uint32_t chunk_index, block_index, total_index; 573 int max_iterations, iteration; 574 rxbuf_index_info_t *bufinfo; 575 576 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 577 578 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 579 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 580 pkt_buf_addr_pp, 581 pktbufsz_type)); 582 #if defined(__i386) 583 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 584 #else 585 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 586 #endif 587 588 switch (pktbufsz_type) { 589 case 0: 590 bufsize = rbr_p->pkt_buf_size0; 591 break; 592 case 1: 593 bufsize = rbr_p->pkt_buf_size1; 594 break; 595 case 2: 596 bufsize = rbr_p->pkt_buf_size2; 597 break; 598 case RCR_SINGLE_BLOCK: 599 bufsize = 0; 600 anchor_index = 0; 601 break; 602 default: 603 return (NXGE_ERROR); 604 } 605 606 if (rbr_p->num_blocks == 1) { 607 anchor_index = 0; 608 ring_info = rbr_p->ring_info; 609 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 610 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 611 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 612 "buf_pp $%p btype %d anchor_index %d " 613 "bufinfo $%p", 614 pkt_buf_addr_pp, 615 pktbufsz_type, 616 anchor_index, 617 bufinfo)); 618 619 goto found_index; 620 } 621 622 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 623 "==> nxge_rxbuf_pp_to_vp: " 624 "buf_pp $%p btype %d anchor_index %d", 625 pkt_buf_addr_pp, 626 pktbufsz_type, 627 anchor_index)); 628 629 ring_info = rbr_p->ring_info; 630 found = B_FALSE; 631 bufinfo = (rxbuf_index_info_t 
*)ring_info->buffer;
    iteration = 0;
    max_iterations = ring_info->max_iterations;

    /*
     * First check if this block has been seen recently.  This is
     * indicated by a hint which is initialized when the first buffer
     * of the block is seen.  The hint is reset when the last buffer
     * of the block has been processed.  As three block sizes are
     * supported, three hints are kept.  The idea behind the hints is
     * that once the hardware uses a block for a buffer of that size,
     * it will use it exclusively for that size until it is exhausted.
     * It is assumed that there would be a single block in use for the
     * same buffer size at any given time.
     */
    if (ring_info->hint[pktbufsz_type] != NO_HINT) {
        anchor_index = ring_info->hint[pktbufsz_type];
        dvma_addr = bufinfo[anchor_index].dvma_addr;
        chunk_size = bufinfo[anchor_index].buf_size;
        if ((pktbuf_pp >= dvma_addr) &&
            (pktbuf_pp < (dvma_addr + chunk_size))) {
            found = B_TRUE;
            /*
             * Check if this is the last buffer in the block.
             * If so, reset the hint for this size.
             */
            if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
                ring_info->hint[pktbufsz_type] = NO_HINT;
        }
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (!found)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));

        /*
         * This is the first buffer of the block of this size.
         * Need to search the whole information array.
         * The search is a binary search; it assumes that the
         * entries are already sorted in increasing order:
         * info[0] < info[1] < info[2] ....
< info[n-1] 681 * where n is the size of the information array 682 */ 683 r_index = rbr_p->num_blocks - 1; 684 l_index = 0; 685 search_done = B_FALSE; 686 anchor_index = MID_INDEX(r_index, l_index); 687 while (search_done == B_FALSE) { 688 if ((r_index == l_index) || 689 (iteration >= max_iterations)) 690 search_done = B_TRUE; 691 end_side = TO_RIGHT; /* to the right */ 692 base_side = TO_LEFT; /* to the left */ 693 /* read the DVMA address information and sort it */ 694 dvma_addr = bufinfo[anchor_index].dvma_addr; 695 chunk_size = bufinfo[anchor_index].buf_size; 696 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 697 "==> nxge_rxbuf_pp_to_vp: (searching)" 698 "buf_pp $%p btype %d " 699 "anchor_index %d chunk_size %d dvmaaddr $%p", 700 pkt_buf_addr_pp, 701 pktbufsz_type, 702 anchor_index, 703 chunk_size, 704 dvma_addr)); 705 706 if (pktbuf_pp >= dvma_addr) 707 base_side = TO_RIGHT; /* to the right */ 708 if (pktbuf_pp < (dvma_addr + chunk_size)) 709 end_side = TO_LEFT; /* to the left */ 710 711 switch (base_side + end_side) { 712 case IN_MIDDLE: 713 /* found */ 714 found = B_TRUE; 715 search_done = B_TRUE; 716 if ((pktbuf_pp + bufsize) < 717 (dvma_addr + chunk_size)) 718 ring_info->hint[pktbufsz_type] = 719 bufinfo[anchor_index].buf_index; 720 break; 721 case BOTH_RIGHT: 722 /* not found: go to the right */ 723 l_index = anchor_index + 1; 724 anchor_index = 725 MID_INDEX(r_index, l_index); 726 break; 727 728 case BOTH_LEFT: 729 /* not found: go to the left */ 730 r_index = anchor_index - 1; 731 anchor_index = MID_INDEX(r_index, 732 l_index); 733 break; 734 default: /* should not come here */ 735 return (NXGE_ERROR); 736 } 737 iteration++; 738 } 739 740 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 741 "==> nxge_rxbuf_pp_to_vp: (search done)" 742 "buf_pp $%p btype %d anchor_index %d", 743 pkt_buf_addr_pp, 744 pktbufsz_type, 745 anchor_index)); 746 } 747 748 if (found == B_FALSE) { 749 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 750 "==> nxge_rxbuf_pp_to_vp: (search failed)" 751 "buf_pp $%p btype %d anchor_index %d", 752 pkt_buf_addr_pp, 753 pktbufsz_type, 754 anchor_index)); 755 return (NXGE_ERROR); 756 } 757 758 found_index: 759 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 760 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 761 "buf_pp $%p btype %d bufsize %d anchor_index %d", 762 pkt_buf_addr_pp, 763 pktbufsz_type, 764 bufsize, 765 anchor_index)); 766 767 /* index of the first block in this chunk */ 768 chunk_index = bufinfo[anchor_index].start_index; 769 dvma_addr = bufinfo[anchor_index].dvma_addr; 770 page_size_mask = ring_info->block_size_mask; 771 772 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 773 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 774 "buf_pp $%p btype %d bufsize %d " 775 "anchor_index %d chunk_index %d dvma $%p", 776 pkt_buf_addr_pp, 777 pktbufsz_type, 778 bufsize, 779 anchor_index, 780 chunk_index, 781 dvma_addr)); 782 783 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 784 block_size = rbr_p->block_size; /* System block(page) size */ 785 786 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 787 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 788 "buf_pp $%p btype %d bufsize %d " 789 "anchor_index %d chunk_index %d dvma $%p " 790 "offset %d block_size %d", 791 pkt_buf_addr_pp, 792 pktbufsz_type, 793 bufsize, 794 anchor_index, 795 chunk_index, 796 dvma_addr, 797 offset, 798 block_size)); 799 800 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 801 802 block_index = (offset / block_size); /* index within chunk */ 803 total_index = chunk_index + block_index; 804 805 806 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 807 "==> nxge_rxbuf_pp_to_vp: " 808 
"total_index %d dvma_addr $%p " 809 "offset %d block_size %d " 810 "block_index %d ", 811 total_index, dvma_addr, 812 offset, block_size, 813 block_index)); 814 #if defined(__i386) 815 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 816 (uint32_t)offset); 817 #else 818 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 819 (uint64_t)offset); 820 #endif 821 822 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 823 "==> nxge_rxbuf_pp_to_vp: " 824 "total_index %d dvma_addr $%p " 825 "offset %d block_size %d " 826 "block_index %d " 827 "*pkt_buf_addr_p $%p", 828 total_index, dvma_addr, 829 offset, block_size, 830 block_index, 831 *pkt_buf_addr_p)); 832 833 834 *msg_index = total_index; 835 *bufoffset = (offset & page_size_mask); 836 837 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 838 "==> nxge_rxbuf_pp_to_vp: get msg index: " 839 "msg_index %d bufoffset_index %d", 840 *msg_index, 841 *bufoffset)); 842 843 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 844 845 return (NXGE_OK); 846 } 847 848 /* 849 * used by quick sort (qsort) function 850 * to perform comparison 851 */ 852 static int 853 nxge_sort_compare(const void *p1, const void *p2) 854 { 855 856 rxbuf_index_info_t *a, *b; 857 858 a = (rxbuf_index_info_t *)p1; 859 b = (rxbuf_index_info_t *)p2; 860 861 if (a->dvma_addr > b->dvma_addr) 862 return (1); 863 if (a->dvma_addr < b->dvma_addr) 864 return (-1); 865 return (0); 866 } 867 868 869 870 /* 871 * grabbed this sort implementation from common/syscall/avl.c 872 * 873 */ 874 /* 875 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 876 * v = Ptr to array/vector of objs 877 * n = # objs in the array 878 * s = size of each obj (must be multiples of a word size) 879 * f = ptr to function to compare two objs 880 * returns (-1 = less than, 0 = equal, 1 = greater than 881 */ 882 void 883 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 884 { 885 int g, i, j, ii; 886 unsigned int *p1, *p2; 887 unsigned int tmp; 888 889 /* No work to do */ 890 if (v == NULL || n <= 1) 891 return; 892 /* Sanity check on arguments */ 893 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 894 ASSERT(s > 0); 895 896 for (g = n / 2; g > 0; g /= 2) { 897 for (i = g; i < n; i++) { 898 for (j = i - g; j >= 0 && 899 (*f)(v + j * s, v + (j + g) * s) == 1; 900 j -= g) { 901 p1 = (unsigned *)(v + j * s); 902 p2 = (unsigned *)(v + (j + g) * s); 903 for (ii = 0; ii < s / 4; ii++) { 904 tmp = *p1; 905 *p1++ = *p2; 906 *p2++ = tmp; 907 } 908 } 909 } 910 } 911 } 912 913 /* 914 * Initialize data structures required for rxdma 915 * buffer dvma->vmem address lookup 916 */ 917 /*ARGSUSED*/ 918 static nxge_status_t 919 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 920 { 921 922 int index; 923 rxring_info_t *ring_info; 924 int max_iteration = 0, max_index = 0; 925 926 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 927 928 ring_info = rbrp->ring_info; 929 ring_info->hint[0] = NO_HINT; 930 ring_info->hint[1] = NO_HINT; 931 ring_info->hint[2] = NO_HINT; 932 max_index = rbrp->num_blocks; 933 934 /* read the DVMA address information and sort it */ 935 /* do init of the information array */ 936 937 938 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 939 " nxge_rxbuf_index_info_init Sort ptrs")); 940 941 /* sort the array */ 942 nxge_ksort((void *)ring_info->buffer, max_index, 943 sizeof (rxbuf_index_info_t), nxge_sort_compare); 944 945 946 947 for (index = 0; index < max_index; index++) { 948 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 949 " nxge_rxbuf_index_info_init: sorted chunk %d " 
950 " ioaddr $%p kaddr $%p size %x", 951 index, ring_info->buffer[index].dvma_addr, 952 ring_info->buffer[index].kaddr, 953 ring_info->buffer[index].buf_size)); 954 } 955 956 max_iteration = 0; 957 while (max_index >= (1ULL << max_iteration)) 958 max_iteration++; 959 ring_info->max_iterations = max_iteration + 1; 960 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 961 " nxge_rxbuf_index_info_init Find max iter %d", 962 ring_info->max_iterations)); 963 964 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 965 return (NXGE_OK); 966 } 967 968 /* ARGSUSED */ 969 void 970 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 971 { 972 #ifdef NXGE_DEBUG 973 974 uint32_t bptr; 975 uint64_t pp; 976 977 bptr = entry_p->bits.hdw.pkt_buf_addr; 978 979 NXGE_DEBUG_MSG((nxgep, RX_CTL, 980 "\trcr entry $%p " 981 "\trcr entry 0x%0llx " 982 "\trcr entry 0x%08x " 983 "\trcr entry 0x%08x " 984 "\tvalue 0x%0llx\n" 985 "\tmulti = %d\n" 986 "\tpkt_type = 0x%x\n" 987 "\tzero_copy = %d\n" 988 "\tnoport = %d\n" 989 "\tpromis = %d\n" 990 "\terror = 0x%04x\n" 991 "\tdcf_err = 0x%01x\n" 992 "\tl2_len = %d\n" 993 "\tpktbufsize = %d\n" 994 "\tpkt_buf_addr = $%p\n" 995 "\tpkt_buf_addr (<< 6) = $%p\n", 996 entry_p, 997 *(int64_t *)entry_p, 998 *(int32_t *)entry_p, 999 *(int32_t *)((char *)entry_p + 32), 1000 entry_p->value, 1001 entry_p->bits.hdw.multi, 1002 entry_p->bits.hdw.pkt_type, 1003 entry_p->bits.hdw.zero_copy, 1004 entry_p->bits.hdw.noport, 1005 entry_p->bits.hdw.promis, 1006 entry_p->bits.hdw.error, 1007 entry_p->bits.hdw.dcf_err, 1008 entry_p->bits.hdw.l2_len, 1009 entry_p->bits.hdw.pktbufsz, 1010 bptr, 1011 entry_p->bits.ldw.pkt_buf_addr)); 1012 1013 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1014 RCR_PKT_BUF_ADDR_SHIFT; 1015 1016 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1017 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1018 #endif 1019 } 1020 1021 void 1022 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1023 { 1024 npi_handle_t handle; 1025 rbr_stat_t rbr_stat; 1026 addr44_t hd_addr; 1027 addr44_t tail_addr; 1028 uint16_t qlen; 1029 1030 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1031 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1032 1033 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1034 1035 /* RBR head */ 1036 hd_addr.addr = 0; 1037 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1038 #if defined(__i386) 1039 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1040 (void *)(uint32_t)hd_addr.addr); 1041 #else 1042 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1043 (void *)hd_addr.addr); 1044 #endif 1045 1046 /* RBR stats */ 1047 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1048 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1049 1050 /* RCR tail */ 1051 tail_addr.addr = 0; 1052 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1053 #if defined(__i386) 1054 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1055 (void *)(uint32_t)tail_addr.addr); 1056 #else 1057 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1058 (void *)tail_addr.addr); 1059 #endif 1060 1061 /* RCR qlen */ 1062 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1063 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1064 1065 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1066 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1067 } 1068 1069 void 1070 nxge_rxdma_stop(p_nxge_t nxgep) 1071 { 1072 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1073 1074 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1075 (void) nxge_rx_mac_disable(nxgep); 1076 (void) 
nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1077 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 1078 } 1079 1080 void 1081 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 1082 { 1083 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 1084 1085 (void) nxge_rxdma_stop(nxgep); 1086 (void) nxge_uninit_rxdma_channels(nxgep); 1087 (void) nxge_init_rxdma_channels(nxgep); 1088 1089 #ifndef AXIS_DEBUG_LB 1090 (void) nxge_xcvr_init(nxgep); 1091 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1092 #endif 1093 (void) nxge_rx_mac_enable(nxgep); 1094 1095 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 1096 } 1097 1098 nxge_status_t 1099 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1100 { 1101 int i, ndmas; 1102 uint16_t channel; 1103 p_rx_rbr_rings_t rx_rbr_rings; 1104 p_rx_rbr_ring_t *rbr_rings; 1105 npi_handle_t handle; 1106 npi_status_t rs = NPI_SUCCESS; 1107 nxge_status_t status = NXGE_OK; 1108 1109 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1110 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1111 1112 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1113 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1114 "<== nxge_rxdma_mode: not initialized")); 1115 return (NXGE_ERROR); 1116 } 1117 1118 rx_rbr_rings = nxgep->rx_rbr_rings; 1119 if (rx_rbr_rings == NULL) { 1120 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1121 "<== nxge_rxdma_mode: NULL ring pointer")); 1122 return (NXGE_ERROR); 1123 } 1124 if (rx_rbr_rings->rbr_rings == NULL) { 1125 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1126 "<== nxge_rxdma_mode: NULL rbr rings pointer")); 1127 return (NXGE_ERROR); 1128 } 1129 1130 ndmas = rx_rbr_rings->ndmas; 1131 if (!ndmas) { 1132 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1133 "<== nxge_rxdma_mode: no channel")); 1134 return (NXGE_ERROR); 1135 } 1136 1137 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1138 "==> nxge_rxdma_mode (ndmas %d)", ndmas)); 1139 1140 rbr_rings = rx_rbr_rings->rbr_rings; 1141 1142 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1143 for (i = 0; i < ndmas; i++) { 1144 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 1145 continue; 1146 } 1147 channel = rbr_rings[i]->rdc; 1148 if (enable) { 1149 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1150 "==> nxge_rxdma_hw_mode: channel %d (enable)", 1151 channel)); 1152 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 1153 } else { 1154 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1155 "==> nxge_rxdma_hw_mode: channel %d (disable)", 1156 channel)); 1157 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 1158 } 1159 } 1160 1161 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1162 1163 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1164 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1165 1166 return (status); 1167 } 1168 1169 void 1170 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1171 { 1172 npi_handle_t handle; 1173 1174 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1175 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1176 1177 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1178 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1179 1180 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1181 } 1182 1183 void 1184 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1185 { 1186 npi_handle_t handle; 1187 1188 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1189 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1190 1191 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1192 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1193 1194 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1195 } 1196 1197 void 1198 nxge_hw_start_rx(p_nxge_t nxgep) 1199 { 1200 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1201 1202 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1203 (void) nxge_rx_mac_enable(nxgep); 1204 1205 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1206 } 1207 1208 /*ARGSUSED*/ 1209 void 1210 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1211 { 1212 int i, ndmas; 1213 uint16_t rdc; 1214 p_rx_rbr_rings_t rx_rbr_rings; 1215 p_rx_rbr_ring_t *rbr_rings; 1216 p_rx_rcr_rings_t rx_rcr_rings; 1217 1218 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1219 1220 rx_rbr_rings = nxgep->rx_rbr_rings; 1221 if (rx_rbr_rings == NULL) { 1222 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1223 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 1224 return; 1225 } 1226 ndmas = rx_rbr_rings->ndmas; 1227 if (!ndmas) { 1228 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1229 "<== nxge_fixup_rxdma_rings: no channel")); 1230 return; 1231 } 1232 1233 rx_rcr_rings = nxgep->rx_rcr_rings; 1234 if (rx_rcr_rings == NULL) { 1235 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1236 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 1237 return; 1238 } 1239 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1240 "==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas)); 1241 1242 nxge_rxdma_hw_stop(nxgep); 1243 1244 rbr_rings = rx_rbr_rings->rbr_rings; 1245 for (i = 0; i < ndmas; i++) { 1246 rdc = rbr_rings[i]->rdc; 1247 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1248 "==> nxge_fixup_rxdma_rings: channel %d " 1249 "ring $%px", rdc, rbr_rings[i])); 1250 (void) nxge_rxdma_fixup_channel(nxgep, rdc, i); 1251 } 1252 1253 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1254 } 1255 1256 void 1257 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1258 { 1259 int i; 1260 1261 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1262 i = nxge_rxdma_get_ring_index(nxgep, channel); 1263 if (i < 0) { 1264 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1265 "<== nxge_rxdma_fix_channel: no entry found")); 1266 return; 1267 } 1268 1269 nxge_rxdma_fixup_channel(nxgep, channel, i); 1270 1271 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_txdma_fix_channel")); 1272 } 1273 1274 void 1275 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1276 { 1277 int ndmas; 1278 p_rx_rbr_rings_t rx_rbr_rings; 1279 p_rx_rbr_ring_t *rbr_rings; 1280 p_rx_rcr_rings_t rx_rcr_rings; 1281 p_rx_rcr_ring_t *rcr_rings; 1282 p_rx_mbox_areas_t rx_mbox_areas_p; 1283 p_rx_mbox_t *rx_mbox_p; 1284 p_nxge_dma_pool_t dma_buf_poolp; 1285 p_nxge_dma_pool_t dma_cntl_poolp; 1286 p_rx_rbr_ring_t rbrp; 1287 p_rx_rcr_ring_t rcrp; 1288 
p_rx_mbox_t mboxp;
    p_nxge_dma_common_t dmap;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

    (void) nxge_rxdma_stop_channel(nxgep, channel);

    dma_buf_poolp = nxgep->rx_buf_pool_p;
    dma_cntl_poolp = nxgep->rx_cntl_pool_p;

    if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_rxdma_fixup_channel: buf not allocated"));
        return;
    }

    ndmas = dma_buf_poolp->ndmas;
    if (!ndmas) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_rxdma_fixup_channel: no dma allocated"));
        return;
    }

    rx_rbr_rings = nxgep->rx_rbr_rings;
    rx_rcr_rings = nxgep->rx_rcr_rings;
    rbr_rings = rx_rbr_rings->rbr_rings;
    rcr_rings = rx_rcr_rings->rcr_rings;
    rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
    rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

    /* Reinitialize the receive block and completion rings */
    rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
    rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
    mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

    rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
    rbrp->rbr_rd_index = 0;
    rcrp->comp_rd_index = 0;
    rcrp->comp_wt_index = 0;

    dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
    bzero((caddr_t)dmap->kaddrp, dmap->alength);

    status = nxge_rxdma_start_channel(nxgep, channel,
        rbrp, rcrp, mboxp);
    if (status != NXGE_OK) {
        goto nxge_rxdma_fixup_channel_fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
    return;

nxge_rxdma_fixup_channel_fail:
    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

int
nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
{
    int i, ndmas;
    uint16_t rdc;
    p_rx_rbr_rings_t rx_rbr_rings;
    p_rx_rbr_ring_t *rbr_rings;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_get_ring_index: channel %d", channel));

    rx_rbr_rings = nxgep->rx_rbr_rings;
    if (rx_rbr_rings == NULL) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_get_ring_index: NULL ring pointer"));
        return (-1);
    }
    ndmas = rx_rbr_rings->ndmas;
    if (!ndmas) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_get_ring_index: no channel"));
        return (-1);
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));

    rbr_rings = rx_rbr_rings->rbr_rings;
    for (i = 0; i < ndmas; i++) {
        rdc = rbr_rings[i]->rdc;
        if (channel == rdc) {
            NXGE_DEBUG_MSG((nxgep, RX_CTL,
                "==> nxge_rxdma_get_ring_index: "
                "channel %d (index %d) "
                "ring $%p", channel, i,
                rbr_rings[i]));
            return (i);
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_get_ring_index: not found"));

    return (-1);
}

p_rx_rbr_ring_t
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
{
    int i, ndmas;
    uint16_t rdc;
    p_rx_rbr_rings_t rx_rbr_rings;
    p_rx_rbr_ring_t *rbr_rings;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_get_rbr_ring: channel %d", channel));

    rx_rbr_rings = nxgep->rx_rbr_rings;
    if (rx_rbr_rings == NULL) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_get_rbr_ring: NULL ring pointer"));
1410 return (NULL); 1411 } 1412 ndmas = rx_rbr_rings->ndmas; 1413 if (!ndmas) { 1414 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1415 "<== nxge_rxdma_get_rbr_ring: no channel")); 1416 return (NULL); 1417 } 1418 1419 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1420 "==> nxge_rxdma_get_ring (ndmas %d)", ndmas)); 1421 1422 rbr_rings = rx_rbr_rings->rbr_rings; 1423 for (i = 0; i < ndmas; i++) { 1424 rdc = rbr_rings[i]->rdc; 1425 if (channel == rdc) { 1426 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1427 "==> nxge_rxdma_get_rbr_ring: channel %d " 1428 "ring $%p", channel, rbr_rings[i])); 1429 return (rbr_rings[i]); 1430 } 1431 } 1432 1433 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1434 "<== nxge_rxdma_get_rbr_ring: not found")); 1435 1436 return (NULL); 1437 } 1438 1439 p_rx_rcr_ring_t 1440 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1441 { 1442 int i, ndmas; 1443 uint16_t rdc; 1444 p_rx_rcr_rings_t rx_rcr_rings; 1445 p_rx_rcr_ring_t *rcr_rings; 1446 1447 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1448 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1449 1450 rx_rcr_rings = nxgep->rx_rcr_rings; 1451 if (rx_rcr_rings == NULL) { 1452 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1453 "<== nxge_rxdma_get_rcr_ring: NULL ring pointer")); 1454 return (NULL); 1455 } 1456 ndmas = rx_rcr_rings->ndmas; 1457 if (!ndmas) { 1458 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1459 "<== nxge_rxdma_get_rcr_ring: no channel")); 1460 return (NULL); 1461 } 1462 1463 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1464 "==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas)); 1465 1466 rcr_rings = rx_rcr_rings->rcr_rings; 1467 for (i = 0; i < ndmas; i++) { 1468 rdc = rcr_rings[i]->rdc; 1469 if (channel == rdc) { 1470 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1471 "==> nxge_rxdma_get_rcr_ring: channel %d " 1472 "ring $%p", channel, rcr_rings[i])); 1473 return (rcr_rings[i]); 1474 } 1475 } 1476 1477 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1478 "<== nxge_rxdma_get_rcr_ring: not found")); 1479 1480 return (NULL); 1481 } 1482 1483 /* 1484 * Static functions start here. 
1485 */ 1486 static p_rx_msg_t 1487 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1488 { 1489 p_rx_msg_t nxge_mp = NULL; 1490 p_nxge_dma_common_t dmamsg_p; 1491 uchar_t *buffer; 1492 1493 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1494 if (nxge_mp == NULL) { 1495 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1496 "Allocation of a rx msg failed.")); 1497 goto nxge_allocb_exit; 1498 } 1499 1500 nxge_mp->use_buf_pool = B_FALSE; 1501 if (dmabuf_p) { 1502 nxge_mp->use_buf_pool = B_TRUE; 1503 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1504 *dmamsg_p = *dmabuf_p; 1505 dmamsg_p->nblocks = 1; 1506 dmamsg_p->block_size = size; 1507 dmamsg_p->alength = size; 1508 buffer = (uchar_t *)dmabuf_p->kaddrp; 1509 1510 dmabuf_p->kaddrp = (void *) 1511 ((char *)dmabuf_p->kaddrp + size); 1512 dmabuf_p->ioaddr_pp = (void *) 1513 ((char *)dmabuf_p->ioaddr_pp + size); 1514 dmabuf_p->alength -= size; 1515 dmabuf_p->offset += size; 1516 dmabuf_p->dma_cookie.dmac_laddress += size; 1517 dmabuf_p->dma_cookie.dmac_size -= size; 1518 1519 } else { 1520 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1521 if (buffer == NULL) { 1522 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1523 "Allocation of a receive page failed.")); 1524 goto nxge_allocb_fail1; 1525 } 1526 } 1527 1528 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1529 if (nxge_mp->rx_mblk_p == NULL) { 1530 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1531 goto nxge_allocb_fail2; 1532 } 1533 1534 nxge_mp->buffer = buffer; 1535 nxge_mp->block_size = size; 1536 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1537 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1538 nxge_mp->ref_cnt = 1; 1539 nxge_mp->free = B_TRUE; 1540 nxge_mp->rx_use_bcopy = B_FALSE; 1541 1542 atomic_inc_32(&nxge_mblks_pending); 1543 1544 goto nxge_allocb_exit; 1545 1546 nxge_allocb_fail2: 1547 if (!nxge_mp->use_buf_pool) { 1548 KMEM_FREE(buffer, size); 1549 } 1550 1551 nxge_allocb_fail1: 1552 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1553 nxge_mp = NULL; 1554 1555 nxge_allocb_exit: 1556 return (nxge_mp); 1557 } 1558 1559 p_mblk_t 1560 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1561 { 1562 p_mblk_t mp; 1563 1564 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1565 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1566 "offset = 0x%08X " 1567 "size = 0x%08X", 1568 nxge_mp, offset, size)); 1569 1570 mp = desballoc(&nxge_mp->buffer[offset], size, 1571 0, &nxge_mp->freeb); 1572 if (mp == NULL) { 1573 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1574 goto nxge_dupb_exit; 1575 } 1576 atomic_inc_32(&nxge_mp->ref_cnt); 1577 atomic_inc_32(&nxge_mblks_pending); 1578 1579 1580 nxge_dupb_exit: 1581 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1582 nxge_mp)); 1583 return (mp); 1584 } 1585 1586 p_mblk_t 1587 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1588 { 1589 p_mblk_t mp; 1590 uchar_t *dp; 1591 1592 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1593 if (mp == NULL) { 1594 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1595 goto nxge_dupb_bcopy_exit; 1596 } 1597 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1598 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1599 mp->b_wptr = dp + size; 1600 1601 nxge_dupb_bcopy_exit: 1602 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1603 nxge_mp)); 1604 return (mp); 1605 } 1606 1607 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1608 p_rx_msg_t rx_msg_p); 1609 1610 void 1611 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t 
rx_msg_p) 1612 { 1613 1614 npi_handle_t handle; 1615 1616 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1617 1618 /* Reuse this buffer */ 1619 rx_msg_p->free = B_FALSE; 1620 rx_msg_p->cur_usage_cnt = 0; 1621 rx_msg_p->max_usage_cnt = 0; 1622 rx_msg_p->pkt_buf_size = 0; 1623 1624 if (rx_rbr_p->rbr_use_bcopy) { 1625 rx_msg_p->rx_use_bcopy = B_FALSE; 1626 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1627 } 1628 1629 /* 1630 * Get the rbr header pointer and its offset index. 1631 */ 1632 MUTEX_ENTER(&rx_rbr_p->post_lock); 1633 1634 1635 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1636 rx_rbr_p->rbr_wrap_mask); 1637 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1638 MUTEX_EXIT(&rx_rbr_p->post_lock); 1639 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1640 npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1); 1641 1642 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1643 "<== nxge_post_page (channel %d post_next_index %d)", 1644 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1645 1646 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1647 } 1648 1649 void 1650 nxge_freeb(p_rx_msg_t rx_msg_p) 1651 { 1652 size_t size; 1653 uchar_t *buffer = NULL; 1654 int ref_cnt; 1655 boolean_t free_state = B_FALSE; 1656 1657 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1658 1659 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1660 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1661 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1662 rx_msg_p, nxge_mblks_pending)); 1663 1664 atomic_dec_32(&nxge_mblks_pending); 1665 /* 1666 * First we need to get the free state, then 1667 * atomic decrement the reference count to prevent 1668 * the race condition with the interrupt thread that 1669 * is processing a loaned up buffer block. 1670 */ 1671 free_state = rx_msg_p->free; 1672 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1673 if (!ref_cnt) { 1674 buffer = rx_msg_p->buffer; 1675 size = rx_msg_p->block_size; 1676 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1677 "will free: rx_msg_p = $%p (block pending %d)", 1678 rx_msg_p, nxge_mblks_pending)); 1679 1680 if (!rx_msg_p->use_buf_pool) { 1681 KMEM_FREE(buffer, size); 1682 } 1683 1684 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1685 1686 /* Decrement the receive buffer ring's reference count, too. */ 1687 atomic_dec_32(&ring->rbr_ref_cnt); 1688 1689 /* 1690 * Free the receive buffer ring, iff 1691 * 1. all the receive buffers have been freed 1692 * 2. and we are in the proper state (that is, 1693 * we are not UNMAPPING). 1694 */ 1695 if (ring->rbr_ref_cnt == 0 && 1696 ring->rbr_state == RBR_UNMAPPED) { 1697 KMEM_FREE(ring, sizeof (*ring)); 1698 } 1699 return; 1700 } 1701 1702 /* 1703 * Repost buffer. 
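     * A remaining reference count of 1 means the buffer is no longer
     * loaned out.  If it was marked free while it was up the stack, it
     * can be reposted to the hardware, but only while the ring is still
     * in the RBR_POSTING state.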
1704 */ 1705 if (free_state && (ref_cnt == 1)) { 1706 NXGE_DEBUG_MSG((NULL, RX_CTL, 1707 "nxge_freeb: post page $%p:", rx_msg_p)); 1708 if (ring->rbr_state == RBR_POSTING) 1709 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 1710 } 1711 1712 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1713 } 1714 1715 uint_t 1716 nxge_rx_intr(void *arg1, void *arg2) 1717 { 1718 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1719 p_nxge_t nxgep = (p_nxge_t)arg2; 1720 p_nxge_ldg_t ldgp; 1721 uint8_t channel; 1722 npi_handle_t handle; 1723 rx_dma_ctl_stat_t cs; 1724 1725 #ifdef NXGE_DEBUG 1726 rxdma_cfig1_t cfg; 1727 #endif 1728 uint_t serviced = DDI_INTR_UNCLAIMED; 1729 1730 if (ldvp == NULL) { 1731 NXGE_DEBUG_MSG((NULL, INT_CTL, 1732 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1733 nxgep, ldvp)); 1734 1735 return (DDI_INTR_CLAIMED); 1736 } 1737 1738 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1739 nxgep = ldvp->nxgep; 1740 } 1741 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1742 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1743 nxgep, ldvp)); 1744 1745 /* 1746 * This interrupt handler is for a specific 1747 * receive dma channel. 1748 */ 1749 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1750 /* 1751 * Get the control and status for this channel. 1752 */ 1753 channel = ldvp->channel; 1754 ldgp = ldvp->ldgp; 1755 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1756 1757 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1758 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1759 channel, 1760 cs.value, 1761 cs.bits.hdw.rcrto, 1762 cs.bits.hdw.rcrthres)); 1763 1764 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs); 1765 serviced = DDI_INTR_CLAIMED; 1766 1767 /* error events. */ 1768 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1769 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1770 } 1771 1772 nxge_intr_exit: 1773 1774 1775 /* 1776 * Enable the mailbox update interrupt if we want 1777 * to use mailbox. We probably don't need to use 1778 * mailbox as it only saves us one pio read. 1779 * Also write 1 to rcrthres and rcrto to clear 1780 * these two edge triggered bits. 1781 */ 1782 1783 cs.value &= RX_DMA_CTL_STAT_WR1C; 1784 cs.bits.hdw.mex = 1; 1785 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1786 cs.value); 1787 1788 /* 1789 * Rearm this logical group if this is a single device 1790 * group. 1791 */ 1792 if (ldgp->nldvs == 1) { 1793 ldgimgm_t mgm; 1794 mgm.value = 0; 1795 mgm.bits.ldw.arm = 1; 1796 mgm.bits.ldw.timer = ldgp->ldg_timer; 1797 NXGE_REG_WR64(handle, 1798 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1799 mgm.value); 1800 } 1801 1802 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1803 serviced)); 1804 return (serviced); 1805 } 1806 1807 /* 1808 * Process the packets received in the specified logical device 1809 * and pass up a chain of message blocks to the upper layer. 
*/
static void
nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
    rx_dma_ctl_stat_t cs)
{
    p_mblk_t mp;
    p_rx_rcr_ring_t rcrp;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
    if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rx_pkts_vring: no mp"));
        return;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
        mp));

#ifdef NXGE_DEBUG
    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rx_pkts_vring:calling mac_rx "
        "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
        "mac_handle $%p",
        mp->b_wptr - mp->b_rptr,
        mp, mp->b_cont, mp->b_next,
        rcrp, rcrp->rcr_mac_handle));

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rx_pkts_vring: dump packets "
        "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
        mp,
        mp->b_rptr,
        mp->b_wptr,
        nxge_dump_packet((char *)mp->b_rptr,
        mp->b_wptr - mp->b_rptr)));
    if (mp->b_cont) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "==> nxge_rx_pkts_vring: dump b_cont packets "
            "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
            mp->b_cont,
            mp->b_cont->b_rptr,
            mp->b_cont->b_wptr,
            nxge_dump_packet((char *)mp->b_cont->b_rptr,
            mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
    }
    if (mp->b_next) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "==> nxge_rx_pkts_vring: dump next packets "
            "(b_rptr $%p): %s",
            mp->b_next->b_rptr,
            nxge_dump_packet((char *)mp->b_next->b_rptr,
            mp->b_next->b_wptr - mp->b_next->b_rptr)));
    }
#endif

    mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
}


/*
 * This routine is the main packet receive processing function.
 * It gets the packet type, error code, and buffer related
 * information from the receive completion entry.
 * How many completion entries to process is based on the number of
 * packets queued by the hardware, a hardware-maintained tail pointer,
 * and a configurable receive packet count.
 *
 * A chain of message blocks will be created as a result of processing
 * the completion entries.  This chain of message blocks will be returned
 * and a hardware control status register will be updated with the number
 * of packets that were removed from the hardware queue.
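 *
 * The number of entries consumed per call is capped by the
 * nxge_max_rx_pkts tunable so that a single interrupt does not spend
 * an unbounded amount of time in receive processing.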
1881 * 1882 */ 1883 mblk_t * 1884 nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp, 1885 p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs) 1886 { 1887 npi_handle_t handle; 1888 uint8_t channel; 1889 p_rx_rcr_rings_t rx_rcr_rings; 1890 p_rx_rcr_ring_t rcr_p; 1891 uint32_t comp_rd_index; 1892 p_rcr_entry_t rcr_desc_rd_head_p; 1893 p_rcr_entry_t rcr_desc_rd_head_pp; 1894 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1895 uint16_t qlen, nrcr_read, npkt_read; 1896 uint32_t qlen_hw; 1897 boolean_t multi; 1898 rcrcfig_b_t rcr_cfg_b; 1899 #if defined(_BIG_ENDIAN) 1900 npi_status_t rs = NPI_SUCCESS; 1901 #endif 1902 1903 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d " 1904 "channel %d", vindex, ldvp->channel)); 1905 1906 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1907 return (NULL); 1908 } 1909 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1910 rx_rcr_rings = nxgep->rx_rcr_rings; 1911 rcr_p = rx_rcr_rings->rcr_rings[vindex]; 1912 channel = rcr_p->rdc; 1913 if (channel != ldvp->channel) { 1914 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d " 1915 "channel %d, and rcr channel %d not matched.", 1916 vindex, ldvp->channel, channel)); 1917 return (NULL); 1918 } 1919 1920 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1921 "==> nxge_rx_pkts: START: rcr channel %d " 1922 "head_p $%p head_pp $%p index %d ", 1923 channel, rcr_p->rcr_desc_rd_head_p, 1924 rcr_p->rcr_desc_rd_head_pp, 1925 rcr_p->comp_rd_index)); 1926 1927 1928 #if !defined(_BIG_ENDIAN) 1929 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 1930 #else 1931 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1932 if (rs != NPI_SUCCESS) { 1933 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d " 1934 "channel %d, get qlen failed 0x%08x", 1935 vindex, ldvp->channel, rs)); 1936 return (NULL); 1937 } 1938 #endif 1939 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 1940 "qlen %d", channel, qlen)); 1941 1942 1943 1944 if (!qlen) { 1945 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1946 "==> nxge_rx_pkts:rcr channel %d " 1947 "qlen %d (no pkts)", channel, qlen)); 1948 1949 return (NULL); 1950 } 1951 1952 comp_rd_index = rcr_p->comp_rd_index; 1953 1954 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 1955 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 1956 nrcr_read = npkt_read = 0; 1957 1958 /* 1959 * Number of packets queued 1960 * (The jumbo or multi packet will be counted as only one 1961 * packets and it may take up more than one completion entry). 1962 */ 1963 qlen_hw = (qlen < nxge_max_rx_pkts) ? 1964 qlen : nxge_max_rx_pkts; 1965 head_mp = NULL; 1966 tail_mp = &head_mp; 1967 nmp = mp_cont = NULL; 1968 multi = B_FALSE; 1969 1970 while (qlen_hw) { 1971 1972 #ifdef NXGE_DEBUG 1973 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 1974 #endif 1975 /* 1976 * Process one completion ring entry. 
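         * nxge_receive_packet() sets multi/mp_cont to describe whether
         * the frame spans more than one completion entry; those flags
         * drive the chaining below, where b_next links separate frames
         * and b_cont links the pieces of a multi-entry frame.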
1977 */ 1978 nxge_receive_packet(nxgep, 1979 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 1980 1981 /* 1982 * message chaining modes 1983 */ 1984 if (nmp) { 1985 nmp->b_next = NULL; 1986 if (!multi && !mp_cont) { /* frame fits a partition */ 1987 *tail_mp = nmp; 1988 tail_mp = &nmp->b_next; 1989 nmp = NULL; 1990 } else if (multi && !mp_cont) { /* first segment */ 1991 *tail_mp = nmp; 1992 tail_mp = &nmp->b_cont; 1993 } else if (multi && mp_cont) { /* mid of multi segs */ 1994 *tail_mp = mp_cont; 1995 tail_mp = &mp_cont->b_cont; 1996 } else if (!multi && mp_cont) { /* last segment */ 1997 *tail_mp = mp_cont; 1998 tail_mp = &nmp->b_next; 1999 nmp = NULL; 2000 } 2001 } 2002 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2003 "==> nxge_rx_pkts: loop: rcr channel %d " 2004 "before updating: multi %d " 2005 "nrcr_read %d " 2006 "npk read %d " 2007 "head_pp $%p index %d ", 2008 channel, 2009 multi, 2010 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2011 comp_rd_index)); 2012 2013 if (!multi) { 2014 qlen_hw--; 2015 npkt_read++; 2016 } 2017 2018 /* 2019 * Update the next read entry. 2020 */ 2021 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2022 rcr_p->comp_wrap_mask); 2023 2024 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2025 rcr_p->rcr_desc_first_p, 2026 rcr_p->rcr_desc_last_p); 2027 2028 nrcr_read++; 2029 2030 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2031 "<== nxge_rx_pkts: (SAM, process one packet) " 2032 "nrcr_read %d", 2033 nrcr_read)); 2034 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2035 "==> nxge_rx_pkts: loop: rcr channel %d " 2036 "multi %d " 2037 "nrcr_read %d " 2038 "npk read %d " 2039 "head_pp $%p index %d ", 2040 channel, 2041 multi, 2042 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2043 comp_rd_index)); 2044 2045 } 2046 2047 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2048 rcr_p->comp_rd_index = comp_rd_index; 2049 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2050 2051 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2052 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2053 rcr_p->intr_timeout = nxgep->intr_timeout; 2054 rcr_p->intr_threshold = nxgep->intr_threshold; 2055 rcr_cfg_b.value = 0x0ULL; 2056 if (rcr_p->intr_timeout) 2057 rcr_cfg_b.bits.ldw.entout = 1; 2058 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2059 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2060 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2061 channel, rcr_cfg_b.value); 2062 } 2063 2064 cs.bits.ldw.pktread = npkt_read; 2065 cs.bits.ldw.ptrread = nrcr_read; 2066 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2067 channel, cs.value); 2068 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2069 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2070 "head_pp $%p index %016llx ", 2071 channel, 2072 rcr_p->rcr_desc_rd_head_pp, 2073 rcr_p->comp_rd_index)); 2074 /* 2075 * Update RCR buffer pointer read and number of packets 2076 * read. 
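     * (This refers to the pktread/ptrread fields written into the
     * RX_DMA_CTL_STAT register above, which report back to the hardware
     * how many completion entries and packets were consumed.)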
2077 */ 2078 2079 *rcrp = rcr_p; 2080 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2081 return (head_mp); 2082 } 2083 2084 void 2085 nxge_receive_packet(p_nxge_t nxgep, 2086 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2087 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2088 { 2089 p_mblk_t nmp = NULL; 2090 uint64_t multi; 2091 uint64_t dcf_err; 2092 uint8_t channel; 2093 2094 boolean_t first_entry = B_TRUE; 2095 boolean_t is_tcp_udp = B_FALSE; 2096 boolean_t buffer_free = B_FALSE; 2097 boolean_t error_send_up = B_FALSE; 2098 uint8_t error_type; 2099 uint16_t l2_len; 2100 uint16_t skip_len; 2101 uint8_t pktbufsz_type; 2102 uint64_t rcr_entry; 2103 uint64_t *pkt_buf_addr_pp; 2104 uint64_t *pkt_buf_addr_p; 2105 uint32_t buf_offset; 2106 uint32_t bsize; 2107 uint32_t error_disp_cnt; 2108 uint32_t msg_index; 2109 p_rx_rbr_ring_t rx_rbr_p; 2110 p_rx_msg_t *rx_msg_ring_p; 2111 p_rx_msg_t rx_msg_p; 2112 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2113 nxge_status_t status = NXGE_OK; 2114 boolean_t is_valid = B_FALSE; 2115 p_nxge_rx_ring_stats_t rdc_stats; 2116 uint32_t bytes_read; 2117 uint64_t pkt_type; 2118 uint64_t frag; 2119 #ifdef NXGE_DEBUG 2120 int dump_len; 2121 #endif 2122 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2123 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2124 2125 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2126 2127 multi = (rcr_entry & RCR_MULTI_MASK); 2128 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2129 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2130 2131 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2132 frag = (rcr_entry & RCR_FRAG_MASK); 2133 2134 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2135 2136 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2137 RCR_PKTBUFSZ_SHIFT); 2138 #if defined(__i386) 2139 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2140 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2141 #else 2142 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2143 RCR_PKT_BUF_ADDR_SHIFT); 2144 #endif 2145 2146 channel = rcr_p->rdc; 2147 2148 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2149 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2150 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2151 "error_type 0x%x pkt_type 0x%x " 2152 "pktbufsz_type %d ", 2153 rcr_desc_rd_head_p, 2154 rcr_entry, pkt_buf_addr_pp, l2_len, 2155 multi, 2156 error_type, 2157 pkt_type, 2158 pktbufsz_type)); 2159 2160 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2161 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2162 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2163 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2164 rcr_entry, pkt_buf_addr_pp, l2_len, 2165 multi, 2166 error_type, 2167 pkt_type)); 2168 2169 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2170 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2171 "full pkt_buf_addr_pp $%p l2_len %d", 2172 rcr_entry, pkt_buf_addr_pp, l2_len)); 2173 2174 /* get the stats ptr */ 2175 rdc_stats = rcr_p->rdc_stats; 2176 2177 if (!l2_len) { 2178 2179 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2180 "<== nxge_receive_packet: failed: l2 length is 0.")); 2181 return; 2182 } 2183 2184 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
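 * Subtract it from l2_len below so the length passed up the stack
 * excludes the trailing CRC.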
*/ 2185 l2_len -= ETHERFCSL; 2186 2187 /* shift 6 bits to get the full io address */ 2188 #if defined(__i386) 2189 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2190 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2191 #else 2192 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2193 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2194 #endif 2195 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2196 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2197 "full pkt_buf_addr_pp $%p l2_len %d", 2198 rcr_entry, pkt_buf_addr_pp, l2_len)); 2199 2200 rx_rbr_p = rcr_p->rx_rbr_p; 2201 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2202 2203 if (first_entry) { 2204 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2205 RXDMA_HDR_SIZE_DEFAULT); 2206 2207 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2208 "==> nxge_receive_packet: first entry 0x%016llx " 2209 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2210 rcr_entry, pkt_buf_addr_pp, l2_len, 2211 hdr_size)); 2212 } 2213 2214 MUTEX_ENTER(&rcr_p->lock); 2215 MUTEX_ENTER(&rx_rbr_p->lock); 2216 2217 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2218 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2219 "full pkt_buf_addr_pp $%p l2_len %d", 2220 rcr_entry, pkt_buf_addr_pp, l2_len)); 2221 2222 /* 2223 * Packet buffer address in the completion entry points 2224 * to the starting buffer address (offset 0). 2225 * Use the starting buffer address to locate the corresponding 2226 * kernel address. 2227 */ 2228 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2229 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2230 &buf_offset, 2231 &msg_index); 2232 2233 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2234 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2235 "full pkt_buf_addr_pp $%p l2_len %d", 2236 rcr_entry, pkt_buf_addr_pp, l2_len)); 2237 2238 if (status != NXGE_OK) { 2239 MUTEX_EXIT(&rx_rbr_p->lock); 2240 MUTEX_EXIT(&rcr_p->lock); 2241 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2242 "<== nxge_receive_packet: found vaddr failed %d", 2243 status)); 2244 return; 2245 } 2246 2247 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2248 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2249 "full pkt_buf_addr_pp $%p l2_len %d", 2250 rcr_entry, pkt_buf_addr_pp, l2_len)); 2251 2252 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2253 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2254 "full pkt_buf_addr_pp $%p l2_len %d", 2255 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2256 2257 rx_msg_p = rx_msg_ring_p[msg_index]; 2258 2259 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2260 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2261 "full pkt_buf_addr_pp $%p l2_len %d", 2262 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2263 2264 switch (pktbufsz_type) { 2265 case RCR_PKTBUFSZ_0: 2266 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2267 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2268 "==> nxge_receive_packet: 0 buf %d", bsize)); 2269 break; 2270 case RCR_PKTBUFSZ_1: 2271 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2272 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2273 "==> nxge_receive_packet: 1 buf %d", bsize)); 2274 break; 2275 case RCR_PKTBUFSZ_2: 2276 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2277 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2278 "==> nxge_receive_packet: 2 buf %d", bsize)); 2279 break; 2280 case RCR_SINGLE_BLOCK: 2281 bsize = rx_msg_p->block_size; 2282 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2283 "==> nxge_receive_packet: single %d", bsize)); 2284 2285 break; 2286 default: 2287 MUTEX_EXIT(&rx_rbr_p->lock); 2288 MUTEX_EXIT(&rcr_p->lock); 2289 return; 2290 } 2291 2292 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2293 (buf_offset + sw_offset_bytes), 2294 (hdr_size + l2_len), 2295 DDI_DMA_SYNC_FORCPU); 2296 2297 
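	/*
	 * First use of this buffer block: decide whether it will be loaned
	 * up to the stack (dupb) or copied out (bcopy).  Once rbr_consumed
	 * crosses the thresholds computed in
	 * nxge_map_rxdma_channel_cfg_ring(), the block is marked
	 * rx_use_bcopy so it can be reposted to the RBR sooner.
	 */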
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_receive_packet: after first dump:usage count"));

	if (rx_msg_p->cur_usage_cnt == 0) {
		if (rx_rbr_p->rbr_use_bcopy) {
			atomic_inc_32(&rx_rbr_p->rbr_consumed);
			if (rx_rbr_p->rbr_consumed <
			    rx_rbr_p->rbr_threshold_hi) {
				if (rx_rbr_p->rbr_threshold_lo == 0 ||
				    ((rx_rbr_p->rbr_consumed >=
				    rx_rbr_p->rbr_threshold_lo) &&
				    (rx_rbr_p->rbr_bufsize_type >=
				    pktbufsz_type))) {
					rx_msg_p->rx_use_bcopy = B_TRUE;
				}
			} else {
				rx_msg_p->rx_use_bcopy = B_TRUE;
			}
		}
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_receive_packet: buf %d (new block) ",
		    bsize));

		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
		rx_msg_p->pkt_buf_size = bsize;
		rx_msg_p->cur_usage_cnt = 1;
		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    "==> nxge_receive_packet: buf %d "
			    "(single block) ",
			    bsize));
			/*
			 * Buffer can be reused once the free function
			 * is called.
			 */
			rx_msg_p->max_usage_cnt = 1;
			buffer_free = B_TRUE;
		} else {
			rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
			if (rx_msg_p->max_usage_cnt == 1) {
				buffer_free = B_TRUE;
			}
		}
	} else {
		rx_msg_p->cur_usage_cnt++;
		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
			buffer_free = B_TRUE;
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
	    msg_index, l2_len,
	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));

	if ((error_type) || (dcf_err)) {
		rdc_stats->ierrors++;
		if (dcf_err) {
			rdc_stats->dcf_err++;
#ifdef NXGE_DEBUG
			if (rdc_stats->dcf_err == 1) {
				NXGE_DEBUG_MSG((nxgep, RX_CTL,
				    "nxge_receive_packet: channel %d dcf_err rcr"
				    " 0x%llx", channel, rcr_entry));
			}
#endif
			NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
			    NXGE_FM_EREPORT_RDMC_DCF_ERR);
		} else {
			/* Update error stats */
			error_disp_cnt = NXGE_ERROR_SHOW_MAX;
			rdc_stats->errlog.compl_err_type = error_type;

			switch (error_type) {
			case RCR_L2_ERROR:
				rdc_stats->l2_err++;
				if (rdc_stats->l2_err <
				    error_disp_cnt) {
					NXGE_FM_REPORT_ERROR(nxgep,
					    nxgep->mac.portnum, NULL,
					    NXGE_FM_EREPORT_RDMC_RCR_ERR);
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet:"
					    " channel %d RCR L2_ERROR",
					    channel));
				}
				break;
			case RCR_L4_CSUM_ERROR:
				error_send_up = B_TRUE;
				rdc_stats->l4_cksum_err++;
				if (rdc_stats->l4_cksum_err <
				    error_disp_cnt) {
					NXGE_FM_REPORT_ERROR(nxgep,
					    nxgep->mac.portnum, NULL,
					    NXGE_FM_EREPORT_RDMC_RCR_ERR);
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet:"
					    " channel %d"
					    " RCR L4_CSUM_ERROR", channel));
				}
				break;
			case RCR_FFLP_SOFT_ERROR:
				error_send_up = B_TRUE;
				rdc_stats->fflp_soft_err++;
				if (rdc_stats->fflp_soft_err <
				    error_disp_cnt) {
					NXGE_FM_REPORT_ERROR(nxgep,
					    nxgep->mac.portnum, NULL,
					    NXGE_FM_EREPORT_RDMC_RCR_ERR);
					NXGE_ERROR_MSG((nxgep,
					    NXGE_ERR_CTL,
					    " nxge_receive_packet:"
					    " channel %d"
					    " RCR FFLP_SOFT_ERROR", channel));
				}
				break;
			case RCR_ZCP_SOFT_ERROR:
				error_send_up = B_TRUE;
				rdc_stats->zcp_soft_err++;
				if (rdc_stats->zcp_soft_err <
				    error_disp_cnt) {
					NXGE_FM_REPORT_ERROR(nxgep,
					    nxgep->mac.portnum, NULL,
					    NXGE_FM_EREPORT_RDMC_RCR_ERR);
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet: Channel %d"
					    " RCR ZCP_SOFT_ERROR", channel));
				}
				break;
			default:
				rdc_stats->rcr_unknown_err++;
				if (rdc_stats->rcr_unknown_err
				    < error_disp_cnt) {
					NXGE_FM_REPORT_ERROR(nxgep,
					    nxgep->mac.portnum, NULL,
					    NXGE_FM_EREPORT_RDMC_RCR_ERR);
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    " nxge_receive_packet: Channel %d"
					    " RCR entry 0x%llx error 0x%x",
					    channel, rcr_entry, error_type));
				}
				break;
			}
		}

		/*
		 * Update and repost the buffer block if the max usage
		 * count has been reached.
		 */
		if (error_send_up == B_FALSE) {
			atomic_inc_32(&rx_msg_p->ref_cnt);
			atomic_inc_32(&nxge_mblks_pending);
			if (buffer_free == B_TRUE) {
				rx_msg_p->free = B_TRUE;
			}

			MUTEX_EXIT(&rx_rbr_p->lock);
			MUTEX_EXIT(&rcr_p->lock);
			nxge_freeb(rx_msg_p);
			return;
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_receive_packet: DMA sync second "));

	bytes_read = rcr_p->rcvd_pkt_bytes;
	skip_len = sw_offset_bytes + hdr_size;
	if (!rx_msg_p->rx_use_bcopy) {
		/*
		 * For loaned-up buffers, the driver reference count
		 * is incremented first; the free state is set afterwards.
		 */
		if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
			if (first_entry) {
				nmp->b_rptr = &nmp->b_rptr[skip_len];
				if (l2_len < bsize - skip_len) {
					nmp->b_wptr = &nmp->b_rptr[l2_len];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize
					    - skip_len];
				}
			} else {
				if (l2_len - bytes_read < bsize) {
					nmp->b_wptr =
					    &nmp->b_rptr[l2_len - bytes_read];
				} else {
					nmp->b_wptr = &nmp->b_rptr[bsize];
				}
			}
		}
	} else {
		if (first_entry) {
			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
			    l2_len < bsize - skip_len ?
			    l2_len : bsize - skip_len);
		} else {
			nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
			    l2_len - bytes_read < bsize ?
			    l2_len - bytes_read : bsize);
		}
	}
	if (nmp != NULL) {
		if (first_entry)
			bytes_read = nmp->b_wptr - nmp->b_rptr;
		else
			bytes_read += nmp->b_wptr - nmp->b_rptr;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_receive_packet after dupb: "
		    "rbr consumed %d "
		    "pktbufsz_type %d "
		    "nmp $%p rptr $%p wptr $%p "
		    "buf_offset %d bsize %d l2_len %d skip_len %d",
		    rx_rbr_p->rbr_consumed,
		    pktbufsz_type,
		    nmp, nmp->b_rptr, nmp->b_wptr,
		    buf_offset, bsize, l2_len, skip_len));
	} else {
		cmn_err(CE_WARN, "!nxge_receive_packet: "
		    "update stats (error)");
		atomic_inc_32(&rx_msg_p->ref_cnt);
		atomic_inc_32(&nxge_mblks_pending);
		if (buffer_free == B_TRUE) {
			rx_msg_p->free = B_TRUE;
		}
		MUTEX_EXIT(&rx_rbr_p->lock);
		MUTEX_EXIT(&rcr_p->lock);
		nxge_freeb(rx_msg_p);
		return;
	}

	if (buffer_free == B_TRUE) {
		rx_msg_p->free = B_TRUE;
	}
	/*
	 * ERROR, FRAG and PKT_TYPE are only reported
	 * in the first entry.
	 * If a packet is not fragmented and no error bit is set, then
	 * the L4 checksum is OK.
	 */
	is_valid = (nmp != NULL);
	if (first_entry) {
		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
		rdc_stats->ibytes += skip_len + l2_len < bsize ?
		    l2_len : bsize;
	} else {
		rdc_stats->ibytes += l2_len - bytes_read < bsize ?
2544 l2_len - bytes_read : bsize; 2545 } 2546 2547 rcr_p->rcvd_pkt_bytes = bytes_read; 2548 2549 MUTEX_EXIT(&rx_rbr_p->lock); 2550 MUTEX_EXIT(&rcr_p->lock); 2551 2552 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2553 atomic_inc_32(&rx_msg_p->ref_cnt); 2554 atomic_inc_32(&nxge_mblks_pending); 2555 nxge_freeb(rx_msg_p); 2556 } 2557 2558 if (is_valid) { 2559 nmp->b_cont = NULL; 2560 if (first_entry) { 2561 *mp = nmp; 2562 *mp_cont = NULL; 2563 } else { 2564 *mp_cont = nmp; 2565 } 2566 } 2567 2568 /* 2569 * Update stats and hardware checksuming. 2570 */ 2571 if (is_valid && !multi) { 2572 2573 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2574 pkt_type == RCR_PKT_IS_UDP) ? 2575 B_TRUE: B_FALSE); 2576 2577 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2578 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2579 is_valid, multi, is_tcp_udp, frag, error_type)); 2580 2581 if (is_tcp_udp && !frag && !error_type) { 2582 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2583 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2584 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2585 "==> nxge_receive_packet: Full tcp/udp cksum " 2586 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2587 "error %d", 2588 is_valid, multi, is_tcp_udp, frag, error_type)); 2589 } 2590 } 2591 2592 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2593 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2594 2595 *multi_p = (multi == RCR_MULTI_MASK); 2596 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2597 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2598 *multi_p, nmp, *mp, *mp_cont)); 2599 } 2600 2601 /*ARGSUSED*/ 2602 static nxge_status_t 2603 nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, 2604 rx_dma_ctl_stat_t cs) 2605 { 2606 p_nxge_rx_ring_stats_t rdc_stats; 2607 npi_handle_t handle; 2608 npi_status_t rs; 2609 boolean_t rxchan_fatal = B_FALSE; 2610 boolean_t rxport_fatal = B_FALSE; 2611 uint8_t channel; 2612 uint8_t portn; 2613 nxge_status_t status = NXGE_OK; 2614 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2615 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2616 2617 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2618 channel = ldvp->channel; 2619 portn = nxgep->mac.portnum; 2620 rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index]; 2621 2622 if (cs.bits.hdw.rbr_tmout) { 2623 rdc_stats->rx_rbr_tmout++; 2624 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2625 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2626 rxchan_fatal = B_TRUE; 2627 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2628 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2629 } 2630 if (cs.bits.hdw.rsp_cnt_err) { 2631 rdc_stats->rsp_cnt_err++; 2632 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2633 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2634 rxchan_fatal = B_TRUE; 2635 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2636 "==> nxge_rx_err_evnts(channel %d): " 2637 "rsp_cnt_err", channel)); 2638 } 2639 if (cs.bits.hdw.byte_en_bus) { 2640 rdc_stats->byte_en_bus++; 2641 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2642 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2643 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2644 "==> nxge_rx_err_evnts(channel %d): " 2645 "fatal error: byte_en_bus", channel)); 2646 rxchan_fatal = B_TRUE; 2647 } 2648 if (cs.bits.hdw.rsp_dat_err) { 2649 rdc_stats->rsp_dat_err++; 2650 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2651 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2652 rxchan_fatal = B_TRUE; 2653 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2654 "==> nxge_rx_err_evnts(channel %d): " 2655 "fatal error: rsp_dat_err", channel)); 2656 } 2657 if (cs.bits.hdw.rcr_ack_err) { 2658 
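		/*
		 * A completion ring ACK error is fatal to this channel;
		 * rxchan_fatal triggers the channel recovery path at the
		 * bottom of this routine.
		 */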
rdc_stats->rcr_ack_err++; 2659 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2660 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2661 rxchan_fatal = B_TRUE; 2662 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2663 "==> nxge_rx_err_evnts(channel %d): " 2664 "fatal error: rcr_ack_err", channel)); 2665 } 2666 if (cs.bits.hdw.dc_fifo_err) { 2667 rdc_stats->dc_fifo_err++; 2668 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2669 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2670 /* This is not a fatal error! */ 2671 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2672 "==> nxge_rx_err_evnts(channel %d): " 2673 "dc_fifo_err", channel)); 2674 rxport_fatal = B_TRUE; 2675 } 2676 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2677 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2678 &rdc_stats->errlog.pre_par, 2679 &rdc_stats->errlog.sha_par)) 2680 != NPI_SUCCESS) { 2681 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2682 "==> nxge_rx_err_evnts(channel %d): " 2683 "rcr_sha_par: get perr", channel)); 2684 return (NXGE_ERROR | rs); 2685 } 2686 if (cs.bits.hdw.rcr_sha_par) { 2687 rdc_stats->rcr_sha_par++; 2688 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2689 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2690 rxchan_fatal = B_TRUE; 2691 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2692 "==> nxge_rx_err_evnts(channel %d): " 2693 "fatal error: rcr_sha_par", channel)); 2694 } 2695 if (cs.bits.hdw.rbr_pre_par) { 2696 rdc_stats->rbr_pre_par++; 2697 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2698 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2699 rxchan_fatal = B_TRUE; 2700 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2701 "==> nxge_rx_err_evnts(channel %d): " 2702 "fatal error: rbr_pre_par", channel)); 2703 } 2704 } 2705 if (cs.bits.hdw.port_drop_pkt) { 2706 rdc_stats->port_drop_pkt++; 2707 if (rdc_stats->port_drop_pkt < error_disp_cnt) 2708 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2709 "==> nxge_rx_err_evnts (channel %d): " 2710 "port_drop_pkt", channel)); 2711 } 2712 if (cs.bits.hdw.wred_drop) { 2713 rdc_stats->wred_drop++; 2714 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2715 "==> nxge_rx_err_evnts(channel %d): " 2716 "wred_drop", channel)); 2717 } 2718 if (cs.bits.hdw.rbr_pre_empty) { 2719 rdc_stats->rbr_pre_empty++; 2720 if (rdc_stats->rbr_pre_empty < error_disp_cnt) 2721 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2722 "==> nxge_rx_err_evnts(channel %d): " 2723 "rbr_pre_empty", channel)); 2724 } 2725 if (cs.bits.hdw.rcr_shadow_full) { 2726 rdc_stats->rcr_shadow_full++; 2727 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2728 "==> nxge_rx_err_evnts(channel %d): " 2729 "rcr_shadow_full", channel)); 2730 } 2731 if (cs.bits.hdw.config_err) { 2732 rdc_stats->config_err++; 2733 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2734 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2735 rxchan_fatal = B_TRUE; 2736 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2737 "==> nxge_rx_err_evnts(channel %d): " 2738 "config error", channel)); 2739 } 2740 if (cs.bits.hdw.rcrincon) { 2741 rdc_stats->rcrincon++; 2742 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2743 NXGE_FM_EREPORT_RDMC_RCRINCON); 2744 rxchan_fatal = B_TRUE; 2745 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2746 "==> nxge_rx_err_evnts(channel %d): " 2747 "fatal error: rcrincon error", channel)); 2748 } 2749 if (cs.bits.hdw.rcrfull) { 2750 rdc_stats->rcrfull++; 2751 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2752 NXGE_FM_EREPORT_RDMC_RCRFULL); 2753 rxchan_fatal = B_TRUE; 2754 if (rdc_stats->rcrfull < error_disp_cnt) 2755 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2756 "==> nxge_rx_err_evnts(channel %d): " 2757 "fatal error: rcrfull error", channel)); 2758 } 2759 if (cs.bits.hdw.rbr_empty) { 2760 
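		/*
		 * Not fatal: the RBR simply has no posted buffers at the
		 * moment.  It refills as buffers are freed back through
		 * nxge_freeb()/nxge_post_page().
		 */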
rdc_stats->rbr_empty++; 2761 if (rdc_stats->rbr_empty < error_disp_cnt) 2762 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2763 "==> nxge_rx_err_evnts(channel %d): " 2764 "rbr empty error", channel)); 2765 } 2766 if (cs.bits.hdw.rbrfull) { 2767 rdc_stats->rbrfull++; 2768 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2769 NXGE_FM_EREPORT_RDMC_RBRFULL); 2770 rxchan_fatal = B_TRUE; 2771 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2772 "==> nxge_rx_err_evnts(channel %d): " 2773 "fatal error: rbr_full error", channel)); 2774 } 2775 if (cs.bits.hdw.rbrlogpage) { 2776 rdc_stats->rbrlogpage++; 2777 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2778 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2779 rxchan_fatal = B_TRUE; 2780 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2781 "==> nxge_rx_err_evnts(channel %d): " 2782 "fatal error: rbr logical page error", channel)); 2783 } 2784 if (cs.bits.hdw.cfiglogpage) { 2785 rdc_stats->cfiglogpage++; 2786 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2787 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2788 rxchan_fatal = B_TRUE; 2789 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2790 "==> nxge_rx_err_evnts(channel %d): " 2791 "fatal error: cfig logical page error", channel)); 2792 } 2793 2794 if (rxport_fatal) { 2795 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2796 " nxge_rx_err_evnts: " 2797 " fatal error on Port #%d\n", 2798 portn)); 2799 status = nxge_ipp_fatal_err_recover(nxgep); 2800 if (status == NXGE_OK) { 2801 FM_SERVICE_RESTORED(nxgep); 2802 } 2803 } 2804 2805 if (rxchan_fatal) { 2806 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2807 " nxge_rx_err_evnts: " 2808 " fatal error on Channel #%d\n", 2809 channel)); 2810 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2811 if (status == NXGE_OK) { 2812 FM_SERVICE_RESTORED(nxgep); 2813 } 2814 } 2815 2816 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2817 2818 return (status); 2819 } 2820 2821 static nxge_status_t 2822 nxge_map_rxdma(p_nxge_t nxgep) 2823 { 2824 int i, ndmas; 2825 uint16_t channel; 2826 p_rx_rbr_rings_t rx_rbr_rings; 2827 p_rx_rbr_ring_t *rbr_rings; 2828 p_rx_rcr_rings_t rx_rcr_rings; 2829 p_rx_rcr_ring_t *rcr_rings; 2830 p_rx_mbox_areas_t rx_mbox_areas_p; 2831 p_rx_mbox_t *rx_mbox_p; 2832 p_nxge_dma_pool_t dma_buf_poolp; 2833 p_nxge_dma_pool_t dma_cntl_poolp; 2834 p_nxge_dma_common_t *dma_buf_p; 2835 p_nxge_dma_common_t *dma_cntl_p; 2836 uint32_t *num_chunks; 2837 nxge_status_t status = NXGE_OK; 2838 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2839 p_nxge_dma_common_t t_dma_buf_p; 2840 p_nxge_dma_common_t t_dma_cntl_p; 2841 #endif 2842 2843 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 2844 2845 dma_buf_poolp = nxgep->rx_buf_pool_p; 2846 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2847 2848 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2849 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2850 "<== nxge_map_rxdma: buf not allocated")); 2851 return (NXGE_ERROR); 2852 } 2853 2854 ndmas = dma_buf_poolp->ndmas; 2855 if (!ndmas) { 2856 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2857 "<== nxge_map_rxdma: no dma allocated")); 2858 return (NXGE_ERROR); 2859 } 2860 2861 num_chunks = dma_buf_poolp->num_chunks; 2862 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2863 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2864 2865 rx_rbr_rings = (p_rx_rbr_rings_t) 2866 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2867 rbr_rings = (p_rx_rbr_ring_t *) 2868 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2869 rx_rcr_rings = (p_rx_rcr_rings_t) 2870 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2871 rcr_rings = (p_rx_rcr_ring_t *) 2872 
KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2873 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2874 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2875 rx_mbox_p = (p_rx_mbox_t *) 2876 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2877 2878 /* 2879 * Timeout should be set based on the system clock divider. 2880 * The following timeout value of 1 assumes that the 2881 * granularity (1000) is 3 microseconds running at 300MHz. 2882 */ 2883 2884 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2885 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2886 2887 /* 2888 * Map descriptors from the buffer polls for each dam channel. 2889 */ 2890 for (i = 0; i < ndmas; i++) { 2891 /* 2892 * Set up and prepare buffer blocks, descriptors 2893 * and mailbox. 2894 */ 2895 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2896 status = nxge_map_rxdma_channel(nxgep, channel, 2897 (p_nxge_dma_common_t *)&dma_buf_p[i], 2898 (p_rx_rbr_ring_t *)&rbr_rings[i], 2899 num_chunks[i], 2900 (p_nxge_dma_common_t *)&dma_cntl_p[i], 2901 (p_rx_rcr_ring_t *)&rcr_rings[i], 2902 (p_rx_mbox_t *)&rx_mbox_p[i]); 2903 if (status != NXGE_OK) { 2904 goto nxge_map_rxdma_fail1; 2905 } 2906 rbr_rings[i]->index = (uint16_t)i; 2907 rcr_rings[i]->index = (uint16_t)i; 2908 rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i]; 2909 2910 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2911 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 2912 rbr_rings[i]->hv_set = B_FALSE; 2913 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 2914 t_dma_cntl_p = 2915 (p_nxge_dma_common_t)dma_cntl_p[i]; 2916 2917 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp = 2918 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 2919 rbr_rings[i]->hv_rx_buf_ioaddr_size = 2920 (uint64_t)t_dma_buf_p->orig_alength; 2921 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2922 "==> nxge_map_rxdma_channel: " 2923 "channel %d " 2924 "data buf base io $%p ($%p) " 2925 "size 0x%llx (%d 0x%x)", 2926 channel, 2927 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp, 2928 t_dma_cntl_p->ioaddr_pp, 2929 rbr_rings[i]->hv_rx_buf_ioaddr_size, 2930 t_dma_buf_p->orig_alength, 2931 t_dma_buf_p->orig_alength)); 2932 2933 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp = 2934 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 2935 rbr_rings[i]->hv_rx_cntl_ioaddr_size = 2936 (uint64_t)t_dma_cntl_p->orig_alength; 2937 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2938 "==> nxge_map_rxdma_channel: " 2939 "channel %d " 2940 "cntl base io $%p ($%p) " 2941 "size 0x%llx (%d 0x%x)", 2942 channel, 2943 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp, 2944 t_dma_cntl_p->ioaddr_pp, 2945 rbr_rings[i]->hv_rx_cntl_ioaddr_size, 2946 t_dma_cntl_p->orig_alength, 2947 t_dma_cntl_p->orig_alength)); 2948 } 2949 2950 #endif /* sun4v and NIU_LP_WORKAROUND */ 2951 } 2952 2953 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2954 rx_rbr_rings->rbr_rings = rbr_rings; 2955 nxgep->rx_rbr_rings = rx_rbr_rings; 2956 rx_rcr_rings->rcr_rings = rcr_rings; 2957 nxgep->rx_rcr_rings = rx_rcr_rings; 2958 2959 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2960 nxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2961 2962 goto nxge_map_rxdma_exit; 2963 2964 nxge_map_rxdma_fail1: 2965 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2966 "==> nxge_map_rxdma: unmap rbr,rcr " 2967 "(status 0x%x channel %d i %d)", 2968 status, channel, i)); 2969 i--; 2970 for (; i >= 0; i--) { 2971 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2972 nxge_unmap_rxdma_channel(nxgep, channel, 2973 rbr_rings[i], 2974 rcr_rings[i], 2975 rx_mbox_p[i]); 2976 } 2977 2978 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 
2979 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2980 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2981 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2982 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2983 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2984 2985 nxge_map_rxdma_exit: 2986 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2987 "<== nxge_map_rxdma: " 2988 "(status 0x%x channel %d)", 2989 status, channel)); 2990 2991 return (status); 2992 } 2993 2994 static void 2995 nxge_unmap_rxdma(p_nxge_t nxgep) 2996 { 2997 int i, ndmas; 2998 uint16_t channel; 2999 p_rx_rbr_rings_t rx_rbr_rings; 3000 p_rx_rbr_ring_t *rbr_rings; 3001 p_rx_rcr_rings_t rx_rcr_rings; 3002 p_rx_rcr_ring_t *rcr_rings; 3003 p_rx_mbox_areas_t rx_mbox_areas_p; 3004 p_rx_mbox_t *rx_mbox_p; 3005 p_nxge_dma_pool_t dma_buf_poolp; 3006 p_nxge_dma_pool_t dma_cntl_poolp; 3007 p_nxge_dma_common_t *dma_buf_p; 3008 3009 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma")); 3010 3011 dma_buf_poolp = nxgep->rx_buf_pool_p; 3012 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 3013 3014 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 3015 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3016 "<== nxge_unmap_rxdma: NULL buf pointers")); 3017 return; 3018 } 3019 3020 rx_rbr_rings = nxgep->rx_rbr_rings; 3021 rx_rcr_rings = nxgep->rx_rcr_rings; 3022 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3023 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3024 "<== nxge_unmap_rxdma: NULL ring pointers")); 3025 return; 3026 } 3027 ndmas = rx_rbr_rings->ndmas; 3028 if (!ndmas) { 3029 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3030 "<== nxge_unmap_rxdma: no channel")); 3031 return; 3032 } 3033 3034 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3035 "==> nxge_unmap_rxdma (ndmas %d)", ndmas)); 3036 rbr_rings = rx_rbr_rings->rbr_rings; 3037 rcr_rings = rx_rcr_rings->rcr_rings; 3038 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3039 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3040 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 3041 3042 for (i = 0; i < ndmas; i++) { 3043 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 3044 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3045 "==> nxge_unmap_rxdma (ndmas %d) channel %d", 3046 ndmas, channel)); 3047 (void) nxge_unmap_rxdma_channel(nxgep, channel, 3048 (p_rx_rbr_ring_t)rbr_rings[i], 3049 (p_rx_rcr_ring_t)rcr_rings[i], 3050 (p_rx_mbox_t)rx_mbox_p[i]); 3051 } 3052 3053 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 3054 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 3055 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 3056 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 3057 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 3058 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 3059 3060 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3061 "<== nxge_unmap_rxdma")); 3062 } 3063 3064 nxge_status_t 3065 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3066 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3067 uint32_t num_chunks, 3068 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3069 p_rx_mbox_t *rx_mbox_p) 3070 { 3071 int status = NXGE_OK; 3072 3073 /* 3074 * Set up and prepare buffer blocks, descriptors 3075 * and mailbox. 
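 * The mapping is done in two steps: nxge_map_rxdma_channel_buf_ring()
 * builds the receive buffer ring from the pre-allocated DMA chunks, and
 * nxge_map_rxdma_channel_cfg_ring() then lays out the RBR/RCR descriptor
 * rings and the mailbox.  The fail labels below unwind in reverse order.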
3076 */ 3077 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3078 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3079 /* 3080 * Receive buffer blocks 3081 */ 3082 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3083 dma_buf_p, rbr_p, num_chunks); 3084 if (status != NXGE_OK) { 3085 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3086 "==> nxge_map_rxdma_channel (channel %d): " 3087 "map buffer failed 0x%x", channel, status)); 3088 goto nxge_map_rxdma_channel_exit; 3089 } 3090 3091 /* 3092 * Receive block ring, completion ring and mailbox. 3093 */ 3094 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3095 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3096 if (status != NXGE_OK) { 3097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3098 "==> nxge_map_rxdma_channel (channel %d): " 3099 "map config failed 0x%x", channel, status)); 3100 goto nxge_map_rxdma_channel_fail2; 3101 } 3102 3103 goto nxge_map_rxdma_channel_exit; 3104 3105 nxge_map_rxdma_channel_fail3: 3106 /* Free rbr, rcr */ 3107 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3108 "==> nxge_map_rxdma_channel: free rbr/rcr " 3109 "(status 0x%x channel %d)", 3110 status, channel)); 3111 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3112 *rcr_p, *rx_mbox_p); 3113 3114 nxge_map_rxdma_channel_fail2: 3115 /* Free buffer blocks */ 3116 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3117 "==> nxge_map_rxdma_channel: free rx buffers" 3118 "(nxgep 0x%x status 0x%x channel %d)", 3119 nxgep, status, channel)); 3120 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3121 3122 status = NXGE_ERROR; 3123 3124 nxge_map_rxdma_channel_exit: 3125 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3126 "<== nxge_map_rxdma_channel: " 3127 "(nxgep 0x%x status 0x%x channel %d)", 3128 nxgep, status, channel)); 3129 3130 return (status); 3131 } 3132 3133 /*ARGSUSED*/ 3134 static void 3135 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3136 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3137 { 3138 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3139 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3140 3141 /* 3142 * unmap receive block ring, completion ring and mailbox. 3143 */ 3144 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3145 rcr_p, rx_mbox_p); 3146 3147 /* unmap buffer blocks */ 3148 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3149 3150 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3151 } 3152 3153 /*ARGSUSED*/ 3154 static nxge_status_t 3155 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3156 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3157 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3158 { 3159 p_rx_rbr_ring_t rbrp; 3160 p_rx_rcr_ring_t rcrp; 3161 p_rx_mbox_t mboxp; 3162 p_nxge_dma_common_t cntl_dmap; 3163 p_nxge_dma_common_t dmap; 3164 p_rx_msg_t *rx_msg_ring; 3165 p_rx_msg_t rx_msg_p; 3166 p_rbr_cfig_a_t rcfga_p; 3167 p_rbr_cfig_b_t rcfgb_p; 3168 p_rcrcfig_a_t cfga_p; 3169 p_rcrcfig_b_t cfgb_p; 3170 p_rxdma_cfig1_t cfig1_p; 3171 p_rxdma_cfig2_t cfig2_p; 3172 p_rbr_kick_t kick_p; 3173 uint32_t dmaaddrp; 3174 uint32_t *rbr_vaddrp; 3175 uint32_t bkaddr; 3176 nxge_status_t status = NXGE_OK; 3177 int i; 3178 uint32_t nxge_port_rcr_size; 3179 3180 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3181 "==> nxge_map_rxdma_channel_cfg_ring")); 3182 3183 cntl_dmap = *dma_cntl_p; 3184 3185 /* Map in the receive block ring */ 3186 rbrp = *rbr_p; 3187 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3188 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3189 /* 3190 * Zero out buffer block ring descriptors. 
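 * Each slot is then filled in below with the shifted DMA block address
 * (bkaddr) of its receive buffer.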
3191 */ 3192 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3193 3194 rcfga_p = &(rbrp->rbr_cfga); 3195 rcfgb_p = &(rbrp->rbr_cfgb); 3196 kick_p = &(rbrp->rbr_kick); 3197 rcfga_p->value = 0; 3198 rcfgb_p->value = 0; 3199 kick_p->value = 0; 3200 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3201 rcfga_p->value = (rbrp->rbr_addr & 3202 (RBR_CFIG_A_STDADDR_MASK | 3203 RBR_CFIG_A_STDADDR_BASE_MASK)); 3204 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3205 3206 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3207 rcfgb_p->bits.ldw.vld0 = 1; 3208 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3209 rcfgb_p->bits.ldw.vld1 = 1; 3210 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3211 rcfgb_p->bits.ldw.vld2 = 1; 3212 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3213 3214 /* 3215 * For each buffer block, enter receive block address to the ring. 3216 */ 3217 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3218 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3219 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3220 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3221 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3222 3223 rx_msg_ring = rbrp->rx_msg_ring; 3224 for (i = 0; i < rbrp->tnblocks; i++) { 3225 rx_msg_p = rx_msg_ring[i]; 3226 rx_msg_p->nxgep = nxgep; 3227 rx_msg_p->rx_rbr_p = rbrp; 3228 bkaddr = (uint32_t) 3229 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3230 >> RBR_BKADDR_SHIFT)); 3231 rx_msg_p->free = B_FALSE; 3232 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3233 3234 *rbr_vaddrp++ = bkaddr; 3235 } 3236 3237 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3238 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3239 3240 rbrp->rbr_rd_index = 0; 3241 3242 rbrp->rbr_consumed = 0; 3243 rbrp->rbr_use_bcopy = B_TRUE; 3244 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3245 /* 3246 * Do bcopy on packets greater than bcopy size once 3247 * the lo threshold is reached. 3248 * This lo threshold should be less than the hi threshold. 3249 * 3250 * Do bcopy on every packet once the hi threshold is reached. 
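 * Both thresholds are expressed in consumed RBR blocks: they are scaled
 * from rbb_max by nxge_rx_threshold_lo/hi over NXGE_RX_BCOPY_SCALE in
 * the switch statements below.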
3251 */ 3252 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3253 /* default it to use hi */ 3254 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3255 } 3256 3257 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3258 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3259 } 3260 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3261 3262 switch (nxge_rx_threshold_hi) { 3263 default: 3264 case NXGE_RX_COPY_NONE: 3265 /* Do not do bcopy at all */ 3266 rbrp->rbr_use_bcopy = B_FALSE; 3267 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3268 break; 3269 3270 case NXGE_RX_COPY_1: 3271 case NXGE_RX_COPY_2: 3272 case NXGE_RX_COPY_3: 3273 case NXGE_RX_COPY_4: 3274 case NXGE_RX_COPY_5: 3275 case NXGE_RX_COPY_6: 3276 case NXGE_RX_COPY_7: 3277 rbrp->rbr_threshold_hi = 3278 rbrp->rbb_max * 3279 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3280 break; 3281 3282 case NXGE_RX_COPY_ALL: 3283 rbrp->rbr_threshold_hi = 0; 3284 break; 3285 } 3286 3287 switch (nxge_rx_threshold_lo) { 3288 default: 3289 case NXGE_RX_COPY_NONE: 3290 /* Do not do bcopy at all */ 3291 if (rbrp->rbr_use_bcopy) { 3292 rbrp->rbr_use_bcopy = B_FALSE; 3293 } 3294 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3295 break; 3296 3297 case NXGE_RX_COPY_1: 3298 case NXGE_RX_COPY_2: 3299 case NXGE_RX_COPY_3: 3300 case NXGE_RX_COPY_4: 3301 case NXGE_RX_COPY_5: 3302 case NXGE_RX_COPY_6: 3303 case NXGE_RX_COPY_7: 3304 rbrp->rbr_threshold_lo = 3305 rbrp->rbb_max * 3306 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3307 break; 3308 3309 case NXGE_RX_COPY_ALL: 3310 rbrp->rbr_threshold_lo = 0; 3311 break; 3312 } 3313 3314 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3315 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3316 "rbb_max %d " 3317 "rbrp->rbr_bufsize_type %d " 3318 "rbb_threshold_hi %d " 3319 "rbb_threshold_lo %d", 3320 dma_channel, 3321 rbrp->rbb_max, 3322 rbrp->rbr_bufsize_type, 3323 rbrp->rbr_threshold_hi, 3324 rbrp->rbr_threshold_lo)); 3325 3326 rbrp->page_valid.value = 0; 3327 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3328 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3329 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3330 rbrp->page_hdl.value = 0; 3331 3332 rbrp->page_valid.bits.ldw.page0 = 1; 3333 rbrp->page_valid.bits.ldw.page1 = 1; 3334 3335 /* Map in the receive completion ring */ 3336 rcrp = (p_rx_rcr_ring_t) 3337 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3338 rcrp->rdc = dma_channel; 3339 3340 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3341 rcrp->comp_size = nxge_port_rcr_size; 3342 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3343 3344 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3345 3346 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3347 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3348 sizeof (rcr_entry_t)); 3349 rcrp->comp_rd_index = 0; 3350 rcrp->comp_wt_index = 0; 3351 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3352 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3353 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3354 #if defined(__i386) 3355 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3356 #else 3357 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3358 #endif 3359 3360 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3361 (nxge_port_rcr_size - 1); 3362 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3363 (nxge_port_rcr_size - 1); 3364 3365 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3366 "==> nxge_map_rxdma_channel_cfg_ring: " 3367 "channel %d " 3368 "rbr_vaddrp $%p " 3369 "rcr_desc_rd_head_p $%p " 3370 "rcr_desc_rd_head_pp $%p " 3371 "rcr_desc_rd_last_p $%p " 3372 
"rcr_desc_rd_last_pp $%p ", 3373 dma_channel, 3374 rbr_vaddrp, 3375 rcrp->rcr_desc_rd_head_p, 3376 rcrp->rcr_desc_rd_head_pp, 3377 rcrp->rcr_desc_last_p, 3378 rcrp->rcr_desc_last_pp)); 3379 3380 /* 3381 * Zero out buffer block ring descriptors. 3382 */ 3383 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3384 rcrp->intr_timeout = nxgep->intr_timeout; 3385 rcrp->intr_threshold = nxgep->intr_threshold; 3386 rcrp->full_hdr_flag = B_FALSE; 3387 rcrp->sw_priv_hdr_len = 0; 3388 3389 cfga_p = &(rcrp->rcr_cfga); 3390 cfgb_p = &(rcrp->rcr_cfgb); 3391 cfga_p->value = 0; 3392 cfgb_p->value = 0; 3393 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3394 cfga_p->value = (rcrp->rcr_addr & 3395 (RCRCFIG_A_STADDR_MASK | 3396 RCRCFIG_A_STADDR_BASE_MASK)); 3397 3398 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3399 RCRCFIG_A_LEN_SHIF); 3400 3401 /* 3402 * Timeout should be set based on the system clock divider. 3403 * The following timeout value of 1 assumes that the 3404 * granularity (1000) is 3 microseconds running at 300MHz. 3405 */ 3406 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3407 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3408 cfgb_p->bits.ldw.entout = 1; 3409 3410 /* Map in the mailbox */ 3411 mboxp = (p_rx_mbox_t) 3412 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3413 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3414 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3415 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3416 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3417 cfig1_p->value = cfig2_p->value = 0; 3418 3419 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3420 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3421 "==> nxge_map_rxdma_channel_cfg_ring: " 3422 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3423 dma_channel, cfig1_p->value, cfig2_p->value, 3424 mboxp->mbox_addr)); 3425 3426 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3427 & 0xfff); 3428 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3429 3430 3431 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3432 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3433 RXDMA_CFIG2_MBADDR_L_MASK); 3434 3435 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3436 3437 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3438 "==> nxge_map_rxdma_channel_cfg_ring: " 3439 "channel %d damaddrp $%p " 3440 "cfg1 0x%016llx cfig2 0x%016llx", 3441 dma_channel, dmaaddrp, 3442 cfig1_p->value, cfig2_p->value)); 3443 3444 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3445 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3446 3447 rbrp->rx_rcr_p = rcrp; 3448 rcrp->rx_rbr_p = rbrp; 3449 *rcr_p = rcrp; 3450 *rx_mbox_p = mboxp; 3451 3452 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3453 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3454 3455 return (status); 3456 } 3457 3458 /*ARGSUSED*/ 3459 static void 3460 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3461 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3462 { 3463 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3464 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3465 rcr_p->rdc)); 3466 3467 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3468 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3469 3470 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3471 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3472 } 3473 3474 static nxge_status_t 3475 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3476 p_nxge_dma_common_t *dma_buf_p, 3477 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3478 { 3479 p_rx_rbr_ring_t rbrp; 3480 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3481 
p_rx_msg_t *rx_msg_ring; 3482 p_rx_msg_t rx_msg_p; 3483 p_mblk_t mblk_p; 3484 3485 rxring_info_t *ring_info; 3486 nxge_status_t status = NXGE_OK; 3487 int i, j, index; 3488 uint32_t size, bsize, nblocks, nmsgs; 3489 3490 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3491 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3492 channel)); 3493 3494 dma_bufp = tmp_bufp = *dma_buf_p; 3495 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3496 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3497 "chunks bufp 0x%016llx", 3498 channel, num_chunks, dma_bufp)); 3499 3500 nmsgs = 0; 3501 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3502 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3503 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3504 "bufp 0x%016llx nblocks %d nmsgs %d", 3505 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3506 nmsgs += tmp_bufp->nblocks; 3507 } 3508 if (!nmsgs) { 3509 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3510 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3511 "no msg blocks", 3512 channel)); 3513 status = NXGE_ERROR; 3514 goto nxge_map_rxdma_channel_buf_ring_exit; 3515 } 3516 3517 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 3518 3519 size = nmsgs * sizeof (p_rx_msg_t); 3520 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3521 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3522 KM_SLEEP); 3523 3524 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3525 (void *)nxgep->interrupt_cookie); 3526 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3527 (void *)nxgep->interrupt_cookie); 3528 rbrp->rdc = channel; 3529 rbrp->num_blocks = num_chunks; 3530 rbrp->tnblocks = nmsgs; 3531 rbrp->rbb_max = nmsgs; 3532 rbrp->rbr_max_size = nmsgs; 3533 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3534 3535 /* 3536 * Buffer sizes suggested by NIU architect. 3537 * 256, 512 and 2K. 3538 */ 3539 3540 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3541 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3542 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3543 3544 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3545 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3546 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3547 3548 rbrp->block_size = nxgep->rx_default_block_size; 3549 3550 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3551 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3552 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3553 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3554 } else { 3555 if (rbrp->block_size >= 0x2000) { 3556 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3557 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3558 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3559 } else { 3560 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3561 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3562 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3563 } 3564 } 3565 3566 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3567 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3568 "actual rbr max %d rbb_max %d nmsgs %d " 3569 "rbrp->block_size %d default_block_size %d " 3570 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3571 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3572 rbrp->block_size, nxgep->rx_default_block_size, 3573 nxge_rbr_size, nxge_rbr_spare_size)); 3574 3575 /* Map in buffers from the buffer pool. 
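 * Each DMA chunk contributes nblocks blocks of bsize bytes; an rx_msg_t
 * (with its attached mblk) is allocated via nxge_allocb() for every
 * block, and its shifted DMA address is recorded for posting to the RBR.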
*/ 3576 index = 0; 3577 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3578 bsize = dma_bufp->block_size; 3579 nblocks = dma_bufp->nblocks; 3580 #if defined(__i386) 3581 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3582 #else 3583 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3584 #endif 3585 ring_info->buffer[i].buf_index = i; 3586 ring_info->buffer[i].buf_size = dma_bufp->alength; 3587 ring_info->buffer[i].start_index = index; 3588 #if defined(__i386) 3589 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3590 #else 3591 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3592 #endif 3593 3594 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3595 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3596 "chunk %d" 3597 " nblocks %d chunk_size %x block_size 0x%x " 3598 "dma_bufp $%p", channel, i, 3599 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3600 dma_bufp)); 3601 3602 for (j = 0; j < nblocks; j++) { 3603 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3604 dma_bufp)) == NULL) { 3605 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3606 "allocb failed (index %d i %d j %d)", 3607 index, i, j)); 3608 goto nxge_map_rxdma_channel_buf_ring_fail1; 3609 } 3610 rx_msg_ring[index] = rx_msg_p; 3611 rx_msg_p->block_index = index; 3612 rx_msg_p->shifted_addr = (uint32_t) 3613 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3614 RBR_BKADDR_SHIFT)); 3615 3616 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3617 "index %d j %d rx_msg_p $%p mblk %p", 3618 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3619 3620 mblk_p = rx_msg_p->rx_mblk_p; 3621 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3622 3623 rbrp->rbr_ref_cnt++; 3624 index++; 3625 rx_msg_p->buf_dma.dma_channel = channel; 3626 } 3627 } 3628 if (i < rbrp->num_blocks) { 3629 goto nxge_map_rxdma_channel_buf_ring_fail1; 3630 } 3631 3632 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3633 "nxge_map_rxdma_channel_buf_ring: done buf init " 3634 "channel %d msg block entries %d", 3635 channel, index)); 3636 ring_info->block_size_mask = bsize - 1; 3637 rbrp->rx_msg_ring = rx_msg_ring; 3638 rbrp->dma_bufp = dma_buf_p; 3639 rbrp->ring_info = ring_info; 3640 3641 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3642 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3643 " nxge_map_rxdma_channel_buf_ring: " 3644 "channel %d done buf info init", channel)); 3645 3646 /* 3647 * Finally, permit nxge_freeb() to call nxge_post_page(). 
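 * nxge_post_page() only reposts a buffer while rbr_state is RBR_POSTING;
 * see nxge_unmap_rxdma_channel_buf_ring() for the teardown side of this
 * handshake.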
3648 */ 3649 rbrp->rbr_state = RBR_POSTING; 3650 3651 *rbr_p = rbrp; 3652 goto nxge_map_rxdma_channel_buf_ring_exit; 3653 3654 nxge_map_rxdma_channel_buf_ring_fail1: 3655 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3656 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3657 channel, status)); 3658 3659 index--; 3660 for (; index >= 0; index--) { 3661 rx_msg_p = rx_msg_ring[index]; 3662 if (rx_msg_p != NULL) { 3663 freeb(rx_msg_p->rx_mblk_p); 3664 rx_msg_ring[index] = NULL; 3665 } 3666 } 3667 nxge_map_rxdma_channel_buf_ring_fail: 3668 MUTEX_DESTROY(&rbrp->post_lock); 3669 MUTEX_DESTROY(&rbrp->lock); 3670 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3671 KMEM_FREE(rx_msg_ring, size); 3672 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3673 3674 status = NXGE_ERROR; 3675 3676 nxge_map_rxdma_channel_buf_ring_exit: 3677 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3678 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3679 3680 return (status); 3681 } 3682 3683 /*ARGSUSED*/ 3684 static void 3685 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3686 p_rx_rbr_ring_t rbr_p) 3687 { 3688 p_rx_msg_t *rx_msg_ring; 3689 p_rx_msg_t rx_msg_p; 3690 rxring_info_t *ring_info; 3691 int i; 3692 uint32_t size; 3693 #ifdef NXGE_DEBUG 3694 int num_chunks; 3695 #endif 3696 3697 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3698 "==> nxge_unmap_rxdma_channel_buf_ring")); 3699 if (rbr_p == NULL) { 3700 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3701 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3702 return; 3703 } 3704 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3705 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3706 rbr_p->rdc)); 3707 3708 rx_msg_ring = rbr_p->rx_msg_ring; 3709 ring_info = rbr_p->ring_info; 3710 3711 if (rx_msg_ring == NULL || ring_info == NULL) { 3712 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3713 "<== nxge_unmap_rxdma_channel_buf_ring: " 3714 "rx_msg_ring $%p ring_info $%p", 3715 rx_msg_p, ring_info)); 3716 return; 3717 } 3718 3719 #ifdef NXGE_DEBUG 3720 num_chunks = rbr_p->num_blocks; 3721 #endif 3722 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3723 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3724 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3725 "tnblocks %d (max %d) size ptrs %d ", 3726 rbr_p->rdc, num_chunks, 3727 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3728 3729 for (i = 0; i < rbr_p->tnblocks; i++) { 3730 rx_msg_p = rx_msg_ring[i]; 3731 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3732 " nxge_unmap_rxdma_channel_buf_ring: " 3733 "rx_msg_p $%p", 3734 rx_msg_p)); 3735 if (rx_msg_p != NULL) { 3736 freeb(rx_msg_p->rx_mblk_p); 3737 rx_msg_ring[i] = NULL; 3738 } 3739 } 3740 3741 /* 3742 * We no longer may use the mutex <post_lock>. By setting 3743 * <rbr_state> to anything but POSTING, we prevent 3744 * nxge_post_page() from accessing a dead mutex. 3745 */ 3746 rbr_p->rbr_state = RBR_UNMAPPING; 3747 MUTEX_DESTROY(&rbr_p->post_lock); 3748 3749 MUTEX_DESTROY(&rbr_p->lock); 3750 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3751 KMEM_FREE(rx_msg_ring, size); 3752 3753 if (rbr_p->rbr_ref_cnt == 0) { 3754 /* This is the normal state of affairs. */ 3755 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 3756 } else { 3757 /* 3758 * Some of our buffers are still being used. 3759 * Therefore, tell nxge_freeb() this ring is 3760 * unmapped, so it may free <rbr_p> for us. 3761 */ 3762 rbr_p->rbr_state = RBR_UNMAPPED; 3763 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3764 "unmap_rxdma_buf_ring: %d %s outstanding.", 3765 rbr_p->rbr_ref_cnt, 3766 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 3767 } 3768 3769 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3770 "<== nxge_unmap_rxdma_channel_buf_ring")); 3771 } 3772 3773 static nxge_status_t 3774 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3775 { 3776 nxge_status_t status = NXGE_OK; 3777 3778 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3779 3780 /* 3781 * Load the sharable parameters by writing to the 3782 * function zero control registers. These FZC registers 3783 * should be initialized only once for the entire chip. 3784 */ 3785 (void) nxge_init_fzc_rx_common(nxgep); 3786 3787 /* 3788 * Initialize the RXDMA port specific FZC control configurations. 3789 * These FZC registers are pertaining to each port. 3790 */ 3791 (void) nxge_init_fzc_rxdma_port(nxgep); 3792 3793 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3794 3795 return (status); 3796 } 3797 3798 /*ARGSUSED*/ 3799 static void 3800 nxge_rxdma_hw_stop_common(p_nxge_t nxgep) 3801 { 3802 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3803 3804 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3805 } 3806 3807 static nxge_status_t 3808 nxge_rxdma_hw_start(p_nxge_t nxgep) 3809 { 3810 int i, ndmas; 3811 uint16_t channel; 3812 p_rx_rbr_rings_t rx_rbr_rings; 3813 p_rx_rbr_ring_t *rbr_rings; 3814 p_rx_rcr_rings_t rx_rcr_rings; 3815 p_rx_rcr_ring_t *rcr_rings; 3816 p_rx_mbox_areas_t rx_mbox_areas_p; 3817 p_rx_mbox_t *rx_mbox_p; 3818 nxge_status_t status = NXGE_OK; 3819 3820 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3821 3822 rx_rbr_rings = nxgep->rx_rbr_rings; 3823 rx_rcr_rings = nxgep->rx_rcr_rings; 3824 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3825 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3826 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3827 return (NXGE_ERROR); 3828 } 3829 ndmas = rx_rbr_rings->ndmas; 3830 if (ndmas == 0) { 3831 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3832 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3833 return (NXGE_ERROR); 3834 } 3835 3836 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3837 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3838 3839 rbr_rings = rx_rbr_rings->rbr_rings; 3840 rcr_rings = rx_rcr_rings->rcr_rings; 3841 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3842 if (rx_mbox_areas_p) { 3843 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3844 } 3845 3846 for (i = 0; i < ndmas; i++) { 3847 channel = rbr_rings[i]->rdc; 3848 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3849 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3850 ndmas, channel)); 3851 status = nxge_rxdma_start_channel(nxgep, channel, 3852 (p_rx_rbr_ring_t)rbr_rings[i], 3853 (p_rx_rcr_ring_t)rcr_rings[i], 3854 (p_rx_mbox_t)rx_mbox_p[i]); 3855 if (status != NXGE_OK) { 3856 goto nxge_rxdma_hw_start_fail1; 3857 } 3858 } 3859 3860 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3861 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3862 rx_rbr_rings, rx_rcr_rings)); 3863 3864 goto nxge_rxdma_hw_start_exit; 3865 3866 nxge_rxdma_hw_start_fail1: 3867 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3868 "==> nxge_rxdma_hw_start: disable " 3869 "(status 0x%x channel %d i %d)", status, channel, i)); 3870 for (; i >= 0; i--) { 3871 channel = rbr_rings[i]->rdc; 3872 (void) nxge_rxdma_stop_channel(nxgep, channel); 3873 } 3874 3875 nxge_rxdma_hw_start_exit: 3876 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3877 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3878 3879 return (status); 3880 } 3881 3882 static void 3883 nxge_rxdma_hw_stop(p_nxge_t nxgep) 3884 { 3885 int i, ndmas; 3886 uint16_t channel; 3887 p_rx_rbr_rings_t 
rx_rbr_rings; 3888 p_rx_rbr_ring_t *rbr_rings; 3889 p_rx_rcr_rings_t rx_rcr_rings; 3890 3891 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 3892 3893 rx_rbr_rings = nxgep->rx_rbr_rings; 3894 rx_rcr_rings = nxgep->rx_rcr_rings; 3895 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3896 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3897 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 3898 return; 3899 } 3900 ndmas = rx_rbr_rings->ndmas; 3901 if (!ndmas) { 3902 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3903 "<== nxge_rxdma_hw_stop: no dma channel allocated")); 3904 return; 3905 } 3906 3907 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3908 "==> nxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3909 3910 rbr_rings = rx_rbr_rings->rbr_rings; 3911 3912 for (i = 0; i < ndmas; i++) { 3913 channel = rbr_rings[i]->rdc; 3914 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3915 "==> nxge_rxdma_hw_stop (ndmas %d) channel %d", 3916 ndmas, channel)); 3917 (void) nxge_rxdma_stop_channel(nxgep, channel); 3918 } 3919 3920 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3921 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3922 rx_rbr_rings, rx_rcr_rings)); 3923 3924 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 3925 } 3926 3927 3928 static nxge_status_t 3929 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 3930 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3931 3932 { 3933 npi_handle_t handle; 3934 npi_status_t rs = NPI_SUCCESS; 3935 rx_dma_ctl_stat_t cs; 3936 rx_dma_ent_msk_t ent_mask; 3937 nxge_status_t status = NXGE_OK; 3938 3939 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 3940 3941 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3942 3943 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 3944 "npi handle addr $%p acc $%p", 3945 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 3946 3947 /* Reset RXDMA channel */ 3948 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 3949 if (rs != NPI_SUCCESS) { 3950 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3951 "==> nxge_rxdma_start_channel: " 3952 "reset rxdma failed (0x%08x channel %d)", 3953 status, channel)); 3954 return (NXGE_ERROR | rs); 3955 } 3956 3957 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3958 "==> nxge_rxdma_start_channel: reset done: channel %d", 3959 channel)); 3960 3961 /* 3962 * Initialize the RXDMA channel specific FZC control 3963 * configurations. These FZC registers are pertaining 3964 * to each RX channel (logical pages). 3965 */ 3966 status = nxge_init_fzc_rxdma_channel(nxgep, 3967 channel, rbr_p, rcr_p, mbox_p); 3968 if (status != NXGE_OK) { 3969 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3970 "==> nxge_rxdma_start_channel: " 3971 "init fzc rxdma failed (0x%08x channel %d)", 3972 status, channel)); 3973 return (status); 3974 } 3975 3976 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3977 "==> nxge_rxdma_start_channel: fzc done")); 3978 3979 /* 3980 * Zero out the shadow and prefetch ram. 3981 */ 3982 3983 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3984 "ram done")); 3985 3986 /* Set up the interrupt event masks. 
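 * RBREMPTY is masked while the channel is brought up; once the channel
 * has been enabled the mask is rewritten below so that only WRED-drop
 * and port-drop-packet events remain masked.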
*/ 3987 ent_mask.value = 0; 3988 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 3989 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 3990 &ent_mask); 3991 if (rs != NPI_SUCCESS) { 3992 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3993 "==> nxge_rxdma_start_channel: " 3994 "init rxdma event masks failed (0x%08x channel %d)", 3995 rs, channel)); 3996 return (NXGE_ERROR | rs); 3997 } 3998 3999 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4000 "event done: channel %d (mask 0x%016llx)", 4001 channel, ent_mask.value)); 4002 4003 /* Initialize the receive DMA control and status register */ 4004 cs.value = 0; 4005 cs.bits.hdw.mex = 1; 4006 cs.bits.hdw.rcrthres = 1; 4007 cs.bits.hdw.rcrto = 1; 4008 cs.bits.hdw.rbr_empty = 1; 4009 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 4010 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4011 "channel %d rx_dma_cntl_stat 0x%016llx", channel, cs.value)); 4012 if (status != NXGE_OK) { 4013 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4014 "==> nxge_rxdma_start_channel: " 4015 "init rxdma control register failed (0x%08x channel %d)", 4016 status, channel)); 4017 return (status); 4018 } 4019 4020 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4021 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4022 4023 /* 4024 * Load RXDMA descriptors, buffers, mailbox, 4025 * initialise the receive DMA channels and 4026 * enable each DMA channel. 4027 */ 4028 status = nxge_enable_rxdma_channel(nxgep, 4029 channel, rbr_p, rcr_p, mbox_p); 4030 4031 if (status != NXGE_OK) { 4032 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4033 " nxge_rxdma_start_channel: " 4034 " init enable rxdma failed (0x%08x channel %d)", 4035 status, channel)); 4036 return (status); 4037 } 4038 4039 ent_mask.value = 0; 4040 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 4041 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 4042 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4043 &ent_mask); 4044 if (rs != NPI_SUCCESS) { 4045 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4046 "==> nxge_rxdma_start_channel: " 4047 "init rxdma event masks failed (0x%08x channel %d)", 4048 rs, channel)); 4049 return (NXGE_ERROR | rs); 4050 } 4051 4052 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 4053 "control done - channel %d cs 0x%016llx", channel, cs.value)); 4054 4055 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4056 "==> nxge_rxdma_start_channel: enable done")); 4057 4058 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 4059 4060 return (NXGE_OK); 4061 } 4062 4063 static nxge_status_t 4064 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4065 { 4066 npi_handle_t handle; 4067 npi_status_t rs = NPI_SUCCESS; 4068 rx_dma_ctl_stat_t cs; 4069 rx_dma_ent_msk_t ent_mask; 4070 nxge_status_t status = NXGE_OK; 4071 4072 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4073 4074 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4075 4076 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4077 "npi handle addr $%p acc $%p", 4078 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4079 4080 /* Reset RXDMA channel */ 4081 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4082 if (rs != NPI_SUCCESS) { 4083 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4084 " nxge_rxdma_stop_channel: " 4085 " reset rxdma failed (0x%08x channel %d)", 4086 rs, channel)); 4087 return (NXGE_ERROR | rs); 4088 } 4089 4090 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4091 "==> nxge_rxdma_stop_channel: reset done")); 4092 4093 /* Set up the interrupt event masks. 
*/ 4094 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4095 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4096 &ent_mask); 4097 if (rs != NPI_SUCCESS) { 4098 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4099 "==> nxge_rxdma_stop_channel: " 4100 "set rxdma event masks failed (0x%08x channel %d)", 4101 rs, channel)); 4102 return (NXGE_ERROR | rs); 4103 } 4104 4105 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4106 "==> nxge_rxdma_stop_channel: event done")); 4107 4108 /* Initialize the receive DMA control and status register */ 4109 cs.value = 0; 4110 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, 4111 &cs); 4112 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4113 " to default (all 0s) 0x%08x", cs.value)); 4114 if (status != NXGE_OK) { 4115 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4116 " nxge_rxdma_stop_channel: init rxdma" 4117 " control register failed (0x%08x channel %d", 4118 status, channel)); 4119 return (status); 4120 } 4121 4122 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4123 "==> nxge_rxdma_stop_channel: control done")); 4124 4125 /* disable dma channel */ 4126 status = nxge_disable_rxdma_channel(nxgep, channel); 4127 4128 if (status != NXGE_OK) { 4129 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4130 " nxge_rxdma_stop_channel: " 4131 " init enable rxdma failed (0x%08x channel %d)", 4132 status, channel)); 4133 return (status); 4134 } 4135 4136 NXGE_DEBUG_MSG((nxgep, 4137 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4138 4139 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4140 4141 return (NXGE_OK); 4142 } 4143 4144 nxge_status_t 4145 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4146 { 4147 npi_handle_t handle; 4148 p_nxge_rdc_sys_stats_t statsp; 4149 rx_ctl_dat_fifo_stat_t stat; 4150 uint32_t zcp_err_status; 4151 uint32_t ipp_err_status; 4152 nxge_status_t status = NXGE_OK; 4153 npi_status_t rs = NPI_SUCCESS; 4154 boolean_t my_err = B_FALSE; 4155 4156 handle = nxgep->npi_handle; 4157 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4158 4159 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4160 4161 if (rs != NPI_SUCCESS) 4162 return (NXGE_ERROR | rs); 4163 4164 if (stat.bits.ldw.id_mismatch) { 4165 statsp->id_mismatch++; 4166 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4167 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4168 /* Global fatal error encountered */ 4169 } 4170 4171 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4172 switch (nxgep->mac.portnum) { 4173 case 0: 4174 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4175 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4176 my_err = B_TRUE; 4177 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4178 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4179 } 4180 break; 4181 case 1: 4182 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4183 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4184 my_err = B_TRUE; 4185 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4186 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4187 } 4188 break; 4189 case 2: 4190 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4191 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4192 my_err = B_TRUE; 4193 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4194 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4195 } 4196 break; 4197 case 3: 4198 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4199 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4200 my_err = B_TRUE; 4201 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4202 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4203 } 4204 break; 4205 default: 4206 return (NXGE_ERROR); 4207 
} 4208 } 4209 4210 if (my_err) { 4211 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 4212 zcp_err_status); 4213 if (status != NXGE_OK) 4214 return (status); 4215 } 4216 4217 return (NXGE_OK); 4218 } 4219 4220 static nxge_status_t 4221 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 4222 uint32_t zcp_status) 4223 { 4224 boolean_t rxport_fatal = B_FALSE; 4225 p_nxge_rdc_sys_stats_t statsp; 4226 nxge_status_t status = NXGE_OK; 4227 uint8_t portn; 4228 4229 portn = nxgep->mac.portnum; 4230 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4231 4232 if (ipp_status & (0x1 << portn)) { 4233 statsp->ipp_eop_err++; 4234 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4235 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 4236 rxport_fatal = B_TRUE; 4237 } 4238 4239 if (zcp_status & (0x1 << portn)) { 4240 statsp->zcp_eop_err++; 4241 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 4242 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 4243 rxport_fatal = B_TRUE; 4244 } 4245 4246 if (rxport_fatal) { 4247 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4248 " nxge_rxdma_handle_port_error: " 4249 " fatal error on Port #%d\n", 4250 portn)); 4251 status = nxge_rx_port_fatal_err_recover(nxgep); 4252 if (status == NXGE_OK) { 4253 FM_SERVICE_RESTORED(nxgep); 4254 } 4255 } 4256 4257 return (status); 4258 } 4259 4260 static nxge_status_t 4261 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 4262 { 4263 npi_handle_t handle; 4264 npi_status_t rs = NPI_SUCCESS; 4265 nxge_status_t status = NXGE_OK; 4266 p_rx_rbr_ring_t rbrp; 4267 p_rx_rcr_ring_t rcrp; 4268 p_rx_mbox_t mboxp; 4269 rx_dma_ent_msk_t ent_mask; 4270 p_nxge_dma_common_t dmap; 4271 int ring_idx; 4272 uint32_t ref_cnt; 4273 p_rx_msg_t rx_msg_p; 4274 int i; 4275 uint32_t nxge_port_rcr_size; 4276 4277 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 4278 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4279 "Recovering from RxDMAChannel#%d error...", channel)); 4280 4281 /* 4282 * Stop the dma channel waits for the stop done. 4283 * If the stop done bit is not set, then create 4284 * an error. 
4285 */ 4286 4287 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4288 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4289 4290 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4291 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4292 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4293 4294 MUTEX_ENTER(&rcrp->lock); 4295 MUTEX_ENTER(&rbrp->lock); 4296 MUTEX_ENTER(&rbrp->post_lock); 4297 4298 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4299 4300 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4301 if (rs != NPI_SUCCESS) { 4302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4303 "nxge_disable_rxdma_channel:failed")); 4304 goto fail; 4305 } 4306 4307 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4308 4309 /* Disable interrupt */ 4310 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4311 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4312 if (rs != NPI_SUCCESS) { 4313 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4314 "nxge_rxdma_stop_channel: " 4315 "set rxdma event masks failed (channel %d)", 4316 channel)); 4317 } 4318 4319 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4320 4321 /* Reset RXDMA channel */ 4322 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4323 if (rs != NPI_SUCCESS) { 4324 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4325 "nxge_rxdma_fatal_err_recover: " 4326 " reset rxdma failed (channel %d)", channel)); 4327 goto fail; 4328 } 4329 4330 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4331 4332 mboxp = 4333 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4334 4335 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4336 rbrp->rbr_rd_index = 0; 4337 4338 rcrp->comp_rd_index = 0; 4339 rcrp->comp_wt_index = 0; 4340 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4341 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4342 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4343 #if defined(__i386) 4344 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4345 #else 4346 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4347 #endif 4348 4349 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4350 (nxge_port_rcr_size - 1); 4351 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4352 (nxge_port_rcr_size - 1); 4353 4354 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4355 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4356 4357 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4358 4359 for (i = 0; i < rbrp->rbr_max_size; i++) { 4360 rx_msg_p = rbrp->rx_msg_ring[i]; 4361 ref_cnt = rx_msg_p->ref_cnt; 4362 if (ref_cnt != 1) { 4363 if (rx_msg_p->cur_usage_cnt != 4364 rx_msg_p->max_usage_cnt) { 4365 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4366 "buf[%d]: cur_usage_cnt = %d " 4367 "max_usage_cnt = %d\n", i, 4368 rx_msg_p->cur_usage_cnt, 4369 rx_msg_p->max_usage_cnt)); 4370 } else { 4371 /* Buffer can be re-posted */ 4372 rx_msg_p->free = B_TRUE; 4373 rx_msg_p->cur_usage_cnt = 0; 4374 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4375 rx_msg_p->pkt_buf_size = 0; 4376 } 4377 } 4378 } 4379 4380 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4381 4382 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4383 if (status != NXGE_OK) { 4384 goto fail; 4385 } 4386 4387 MUTEX_EXIT(&rbrp->post_lock); 4388 MUTEX_EXIT(&rbrp->lock); 4389 MUTEX_EXIT(&rcrp->lock); 4390 4391 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4392 "Recovery Successful, RxDMAChannel#%d Restored", 4393 channel)); 4394 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4395 4396 return (NXGE_OK); 4397 fail: 4398 
MUTEX_EXIT(&rbrp->post_lock); 4399 MUTEX_EXIT(&rbrp->lock); 4400 MUTEX_EXIT(&rcrp->lock); 4401 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4402 4403 return (NXGE_ERROR | rs); 4404 } 4405 4406 nxge_status_t 4407 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4408 { 4409 nxge_status_t status = NXGE_OK; 4410 p_nxge_dma_common_t *dma_buf_p; 4411 uint16_t channel; 4412 int ndmas; 4413 int i; 4414 4415 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover")); 4416 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4417 "Recovering from RxPort error...")); 4418 /* Disable RxMAC */ 4419 4420 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n")); 4421 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4422 goto fail; 4423 4424 NXGE_DELAY(1000); 4425 4426 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels...")); 4427 4428 ndmas = nxgep->rx_buf_pool_p->ndmas; 4429 dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p; 4430 4431 for (i = 0; i < ndmas; i++) { 4432 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 4433 if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) { 4434 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4435 "Could not recover channel %d", 4436 channel)); 4437 } 4438 } 4439 4440 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP...")); 4441 4442 /* Reset IPP */ 4443 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4444 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4445 "nxge_rx_port_fatal_err_recover: " 4446 "Failed to reset IPP")); 4447 goto fail; 4448 } 4449 4450 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4451 4452 /* Reset RxMAC */ 4453 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4454 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4455 "nxge_rx_port_fatal_err_recover: " 4456 "Failed to reset RxMAC")); 4457 goto fail; 4458 } 4459 4460 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4461 4462 /* Re-Initialize IPP */ 4463 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4464 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4465 "nxge_rx_port_fatal_err_recover: " 4466 "Failed to init IPP")); 4467 goto fail; 4468 } 4469 4470 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4471 4472 /* Re-Initialize RxMAC */ 4473 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4474 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4475 "nxge_rx_port_fatal_err_recover: " 4476 "Failed to init RxMAC")); 4477 goto fail; 4478 } 4479 4480 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4481 4482 /* Re-enable RxMAC */ 4483 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4484 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4485 "nxge_rx_port_fatal_err_recover: " 4486 "Failed to enable RxMAC")); 4487 goto fail; 4488 } 4489 4490 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4491 "Recovery Successful, RxPort Restored")); 4492 4493 return (NXGE_OK); 4494 fail: 4495 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4496 return (status); 4497 } 4498 4499 void 4500 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4501 { 4502 rx_dma_ctl_stat_t cs; 4503 rx_ctl_dat_fifo_stat_t cdfs; 4504 4505 switch (err_id) { 4506 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4507 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4508 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4509 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4510 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4511 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4512 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4513 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4514 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4515 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4516 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4517 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4518 case 
NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4519 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4520 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4521 chan, &cs.value); 4522 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4523 cs.bits.hdw.rcr_ack_err = 1; 4524 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4525 cs.bits.hdw.dc_fifo_err = 1; 4526 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4527 cs.bits.hdw.rcr_sha_par = 1; 4528 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4529 cs.bits.hdw.rbr_pre_par = 1; 4530 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4531 cs.bits.hdw.rbr_tmout = 1; 4532 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4533 cs.bits.hdw.rsp_cnt_err = 1; 4534 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4535 cs.bits.hdw.byte_en_bus = 1; 4536 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4537 cs.bits.hdw.rsp_dat_err = 1; 4538 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4539 cs.bits.hdw.config_err = 1; 4540 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4541 cs.bits.hdw.rcrincon = 1; 4542 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4543 cs.bits.hdw.rcrfull = 1; 4544 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4545 cs.bits.hdw.rbrfull = 1; 4546 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4547 cs.bits.hdw.rbrlogpage = 1; 4548 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4549 cs.bits.hdw.cfiglogpage = 1; 4550 #if defined(__i386) 4551 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4552 cs.value); 4553 #else 4554 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4555 cs.value); 4556 #endif 4557 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4558 chan, cs.value); 4559 break; 4560 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4561 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4562 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4563 cdfs.value = 0; 4564 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4565 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4566 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4567 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4568 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4569 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4570 #if defined(__i386) 4571 cmn_err(CE_NOTE, 4572 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4573 cdfs.value); 4574 #else 4575 cmn_err(CE_NOTE, 4576 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4577 cdfs.value); 4578 #endif 4579 RXDMA_REG_WRITE64(nxgep->npi_handle, 4580 RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value); 4581 break; 4582 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4583 break; 4584 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 4585 break; 4586 } 4587 } 4588 4589 4590 static uint16_t 4591 nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb) 4592 { 4593 uint16_t sz = RBR_BKSIZE_8K_BYTES; 4594 4595 switch (bufsz_type) { 4596 case RCR_PKTBUFSZ_0: 4597 switch (rbr_cfgb.bits.ldw.bufsz0) { 4598 case RBR_BUFSZ0_256B: 4599 sz = RBR_BUFSZ0_256_BYTES; 4600 break; 4601 case RBR_BUFSZ0_512B: 4602 sz = RBR_BUFSZ0_512B_BYTES; 4603 break; 4604 case RBR_BUFSZ0_1K: 4605 sz = RBR_BUFSZ0_1K_BYTES; 4606 break; 4607 case RBR_BUFSZ0_2K: 4608 sz = RBR_BUFSZ0_2K_BYTES; 4609 break; 4610 default: 4611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4612 "nxge_get_pktbuf_size: bad bufsz0")); 4613 break; 4614 } 4615 break; 4616 case RCR_PKTBUFSZ_1: 4617 switch (rbr_cfgb.bits.ldw.bufsz1) { 4618 case RBR_BUFSZ1_1K: 4619 sz = RBR_BUFSZ1_1K_BYTES; 4620 break; 4621 case RBR_BUFSZ1_2K: 4622 sz = 
RBR_BUFSZ1_2K_BYTES; 4623 break; 4624 case RBR_BUFSZ1_4K: 4625 sz = RBR_BUFSZ1_4K_BYTES; 4626 break; 4627 case RBR_BUFSZ1_8K: 4628 sz = RBR_BUFSZ1_8K_BYTES; 4629 break; 4630 default: 4631 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4632 "nxge_get_pktbuf_size: bad bufsz1")); 4633 break; 4634 } 4635 break; 4636 case RCR_PKTBUFSZ_2: 4637 switch (rbr_cfgb.bits.ldw.bufsz2) { 4638 case RBR_BUFSZ2_2K: 4639 sz = RBR_BUFSZ2_2K_BYTES; 4640 break; 4641 case RBR_BUFSZ2_4K: 4642 sz = RBR_BUFSZ2_4K_BYTES; 4643 break; 4644 case RBR_BUFSZ2_8K: 4645 sz = RBR_BUFSZ2_8K_BYTES; 4646 break; 4647 case RBR_BUFSZ2_16K: 4648 sz = RBR_BUFSZ2_16K_BYTES; 4649 break; 4650 default: 4651 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4652 "nxge_get_pktbuf_size: bad bufsz2")); 4653 break; 4654 } 4655 break; 4656 case RCR_SINGLE_BLOCK: 4657 switch (rbr_cfgb.bits.ldw.bksize) { 4658 case BKSIZE_4K: 4659 sz = RBR_BKSIZE_4K_BYTES; 4660 break; 4661 case BKSIZE_8K: 4662 sz = RBR_BKSIZE_8K_BYTES; 4663 break; 4664 case BKSIZE_16K: 4665 sz = RBR_BKSIZE_16K_BYTES; 4666 break; 4667 case BKSIZE_32K: 4668 sz = RBR_BKSIZE_32K_BYTES; 4669 break; 4670 default: 4671 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4672 "nxge_get_pktbuf_size: bad bksize")); 4673 break; 4674 } 4675 break; 4676 default: 4677 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4678 "nxge_get_pktbuf_size: bad bufsz_type")); 4679 break; 4680 } 4681 return (sz); 4682 } 4683
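/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * nxge_get_pktbuf_size() above simply translates the packet buffer size
 * type reported in an RCR completion entry (RCR_PKTBUFSZ_0/1/2 or
 * RCR_SINGLE_BLOCK) into a byte count, using the buffer sizes that were
 * programmed into the RBR configuration B register.  The standalone
 * routine below mirrors that mapping with hypothetical plain-integer
 * field encodings (0..3) so the idea can be read in isolation; the real
 * driver uses the rbr_cfig_b_t bit fields and the RBR_BUFSZ*_*_BYTES
 * constants referenced above.
 */
#if 0	/* illustrative only; never compiled into the driver */
#include <stdint.h>

static uint16_t
example_pktbuf_size(int bufsz_type, int bufsz0, int bufsz1, int bufsz2)
{
	/* Byte counts for the four encodings of each RBR buffer size field. */
	static const uint16_t sz0_bytes[] = { 256, 512, 1024, 2048 };
	static const uint16_t sz1_bytes[] = { 1024, 2048, 4096, 8192 };
	static const uint16_t sz2_bytes[] = { 2048, 4096, 8192, 16384 };

	switch (bufsz_type) {
	case 0:				/* RCR_PKTBUFSZ_0 */
		return (sz0_bytes[bufsz0 & 0x3]);
	case 1:				/* RCR_PKTBUFSZ_1 */
		return (sz1_bytes[bufsz1 & 0x3]);
	case 2:				/* RCR_PKTBUFSZ_2 */
		return (sz2_bytes[bufsz2 & 0x3]);
	default:			/* RCR_SINGLE_BLOCK: whole block */
		return (8192);		/* assumes an 8K RBR block size */
	}
}
#endif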