/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
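 *
 * Together these decide when a received frame is bcopy'd into a freshly
 * allocated mblk instead of loaning the receive block up the stack;
 * loaned blocks are recycled through nxge_freeb()/nxge_post_page() once
 * the upper layer frees them.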
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

static nxge_status_t nxge_map_rxdma(p_nxge_t);
static void nxge_unmap_rxdma(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
static void nxge_rxdma_hw_stop_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t);
static void nxge_rxdma_hw_stop(p_nxge_t);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

mblk_t *
nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t,
    p_rx_rcr_ring_t *, rx_dma_ctl_stat_t);

static void nxge_receive_packet(p_nxge_t,
	p_rx_rcr_ring_t,
	p_rcr_entry_t,
	boolean_t *,
	mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static void nxge_rx_pkts_vring(p_nxge_t, uint_t,
    p_nxge_ldv_t, rx_dma_ctl_stat_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t,
    p_nxge_ldv_t, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static uint16_t
nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb);

nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

	status = nxge_map_rxdma(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			"<== nxge_init_rxdma: status 0x%x", status));
		return (status);
	}

	status = nxge_rxdma_hw_start_common(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxgep);
		return (status);
	}

	status = nxge_rxdma_hw_start(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxgep);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"<== nxge_init_rxdma_channels: status 0x%x", status));

	return (status);
}

void
nxge_uninit_rxdma_channels(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));

	nxge_rxdma_hw_stop(nxgep);
	nxge_rxdma_hw_stop_common(nxgep);
	nxge_unmap_rxdma(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"<== nxge_uninit_rxdma_channels"));
}

nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

void
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
{
	int			i, ndmas;
	uint16_t		channel;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	npi_handle_t		handle;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_dump_fzc_regs(handle);

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_regs_dump_channels: "
			"NULL ring pointer"));
		return;
	}
	if (rx_rbr_rings->rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_regs_dump_channels: "
			"NULL rbr rings pointer"));
		return;
	}

	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_regs_dump_channels: no channel"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		"==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	for (i = 0; i < ndmas; i++) {
		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
			continue;
		}
		channel = rbr_rings[i]->rdc;
		(void) nxge_dump_rxdma_channel(nxgep, channel);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump_channels"));
}

nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_dump_rdc_regs(handle, channel);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
	return (status);
}

nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ent_msk_t mask_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_rxdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ctl_stat_t cs_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
		"==> nxge_init_rxdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp,
    uint8_t rdc)
{
	npi_handle_t		handle;
	npi_status_t		rs =
NPI_SUCCESS; 313 p_nxge_dma_pt_cfg_t p_dma_cfgp; 314 p_nxge_rdc_grp_t rdc_grp_p; 315 uint8_t actual_rdcgrp, actual_rdc; 316 317 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 318 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 319 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 320 321 handle = NXGE_DEV_NPI_HANDLE(nxgep); 322 323 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 324 rdc_grp_p->rdc[0] = rdc; 325 326 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 327 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 328 329 rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp, 330 actual_rdc); 331 332 if (rs != NPI_SUCCESS) { 333 return (NXGE_ERROR | rs); 334 } 335 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 336 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 337 return (NXGE_OK); 338 } 339 340 nxge_status_t 341 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 342 { 343 npi_handle_t handle; 344 345 uint8_t actual_rdc; 346 npi_status_t rs = NPI_SUCCESS; 347 348 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 349 " ==> nxge_rxdma_cfg_port_default_rdc")); 350 351 handle = NXGE_DEV_NPI_HANDLE(nxgep); 352 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 353 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 354 355 356 if (rs != NPI_SUCCESS) { 357 return (NXGE_ERROR | rs); 358 } 359 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 360 " <== nxge_rxdma_cfg_port_default_rdc")); 361 362 return (NXGE_OK); 363 } 364 365 nxge_status_t 366 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 367 uint16_t pkts) 368 { 369 npi_status_t rs = NPI_SUCCESS; 370 npi_handle_t handle; 371 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 372 " ==> nxge_rxdma_cfg_rcr_threshold")); 373 handle = NXGE_DEV_NPI_HANDLE(nxgep); 374 375 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 376 377 if (rs != NPI_SUCCESS) { 378 return (NXGE_ERROR | rs); 379 } 380 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 381 return (NXGE_OK); 382 } 383 384 nxge_status_t 385 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 386 uint16_t tout, uint8_t enable) 387 { 388 npi_status_t rs = NPI_SUCCESS; 389 npi_handle_t handle; 390 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 391 handle = NXGE_DEV_NPI_HANDLE(nxgep); 392 if (enable == 0) { 393 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 394 } else { 395 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 396 tout); 397 } 398 399 if (rs != NPI_SUCCESS) { 400 return (NXGE_ERROR | rs); 401 } 402 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 403 return (NXGE_OK); 404 } 405 406 nxge_status_t 407 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 408 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 409 { 410 npi_handle_t handle; 411 rdc_desc_cfg_t rdc_desc; 412 p_rcrcfig_b_t cfgb_p; 413 npi_status_t rs = NPI_SUCCESS; 414 415 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 416 handle = NXGE_DEV_NPI_HANDLE(nxgep); 417 /* 418 * Use configuration data composed at init time. 419 * Write to hardware the receive ring configurations. 
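	 *
	 * The programming sequence below is:
	 *   1. describe the mailbox, RBR and RCR rings in rdc_desc,
	 *   2. write the ring configuration (npi_rxdma_cfg_rdc_ring),
	 *   3. program the RCR threshold and timeout,
	 *   4. enable the channel, kick the RBR and clear the empty bit.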
420 */ 421 rdc_desc.mbox_enable = 1; 422 rdc_desc.mbox_addr = mbox_p->mbox_addr; 423 NXGE_DEBUG_MSG((nxgep, RX_CTL, 424 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 425 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 426 427 rdc_desc.rbr_len = rbr_p->rbb_max; 428 rdc_desc.rbr_addr = rbr_p->rbr_addr; 429 430 switch (nxgep->rx_bksize_code) { 431 case RBR_BKSIZE_4K: 432 rdc_desc.page_size = SIZE_4KB; 433 break; 434 case RBR_BKSIZE_8K: 435 rdc_desc.page_size = SIZE_8KB; 436 break; 437 case RBR_BKSIZE_16K: 438 rdc_desc.page_size = SIZE_16KB; 439 break; 440 case RBR_BKSIZE_32K: 441 rdc_desc.page_size = SIZE_32KB; 442 break; 443 } 444 445 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 446 rdc_desc.valid0 = 1; 447 448 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 449 rdc_desc.valid1 = 1; 450 451 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 452 rdc_desc.valid2 = 1; 453 454 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 455 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 456 457 rdc_desc.rcr_len = rcr_p->comp_size; 458 rdc_desc.rcr_addr = rcr_p->rcr_addr; 459 460 cfgb_p = &(rcr_p->rcr_cfgb); 461 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 462 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 463 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 464 465 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 466 "rbr_len qlen %d pagesize code %d rcr_len %d", 467 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 468 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 469 "size 0 %d size 1 %d size 2 %d", 470 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 471 rbr_p->npi_pkt_buf_size2)); 472 473 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 474 if (rs != NPI_SUCCESS) { 475 return (NXGE_ERROR | rs); 476 } 477 478 /* 479 * Enable the timeout and threshold. 480 */ 481 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 482 rdc_desc.rcr_threshold); 483 if (rs != NPI_SUCCESS) { 484 return (NXGE_ERROR | rs); 485 } 486 487 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 488 rdc_desc.rcr_timeout); 489 if (rs != NPI_SUCCESS) { 490 return (NXGE_ERROR | rs); 491 } 492 493 /* Enable the DMA */ 494 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 495 if (rs != NPI_SUCCESS) { 496 return (NXGE_ERROR | rs); 497 } 498 499 /* Kick the DMA engine. 
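	 * The kick count (rbb_max here) tells the hardware how many RBR
	 * descriptors have been posted so it can start filling buffers.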
*/ 500 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 501 /* Clear the rbr empty bit */ 502 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 503 504 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 505 506 return (NXGE_OK); 507 } 508 509 nxge_status_t 510 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 511 { 512 npi_handle_t handle; 513 npi_status_t rs = NPI_SUCCESS; 514 515 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 516 handle = NXGE_DEV_NPI_HANDLE(nxgep); 517 518 /* disable the DMA */ 519 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 520 if (rs != NPI_SUCCESS) { 521 NXGE_DEBUG_MSG((nxgep, RX_CTL, 522 "<== nxge_disable_rxdma_channel:failed (0x%x)", 523 rs)); 524 return (NXGE_ERROR | rs); 525 } 526 527 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 528 return (NXGE_OK); 529 } 530 531 nxge_status_t 532 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 533 { 534 npi_handle_t handle; 535 nxge_status_t status = NXGE_OK; 536 537 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 538 "<== nxge_init_rxdma_channel_rcrflush")); 539 540 handle = NXGE_DEV_NPI_HANDLE(nxgep); 541 npi_rxdma_rdc_rcr_flush(handle, channel); 542 543 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 544 "<== nxge_init_rxdma_channel_rcrflsh")); 545 return (status); 546 547 } 548 549 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 550 551 #define TO_LEFT -1 552 #define TO_RIGHT 1 553 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 554 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 555 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 556 #define NO_HINT 0xffffffff 557 558 /*ARGSUSED*/ 559 nxge_status_t 560 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 561 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 562 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 563 { 564 int bufsize; 565 uint64_t pktbuf_pp; 566 uint64_t dvma_addr; 567 rxring_info_t *ring_info; 568 int base_side, end_side; 569 int r_index, l_index, anchor_index; 570 int found, search_done; 571 uint32_t offset, chunk_size, block_size, page_size_mask; 572 uint32_t chunk_index, block_index, total_index; 573 int max_iterations, iteration; 574 rxbuf_index_info_t *bufinfo; 575 576 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 577 578 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 579 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 580 pkt_buf_addr_pp, 581 pktbufsz_type)); 582 #if defined(__i386) 583 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 584 #else 585 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 586 #endif 587 588 switch (pktbufsz_type) { 589 case 0: 590 bufsize = rbr_p->pkt_buf_size0; 591 break; 592 case 1: 593 bufsize = rbr_p->pkt_buf_size1; 594 break; 595 case 2: 596 bufsize = rbr_p->pkt_buf_size2; 597 break; 598 case RCR_SINGLE_BLOCK: 599 bufsize = 0; 600 anchor_index = 0; 601 break; 602 default: 603 return (NXGE_ERROR); 604 } 605 606 if (rbr_p->num_blocks == 1) { 607 anchor_index = 0; 608 ring_info = rbr_p->ring_info; 609 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 610 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 611 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 612 "buf_pp $%p btype %d anchor_index %d " 613 "bufinfo $%p", 614 pkt_buf_addr_pp, 615 pktbufsz_type, 616 anchor_index, 617 bufinfo)); 618 619 goto found_index; 620 } 621 622 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 623 "==> nxge_rxbuf_pp_to_vp: " 624 "buf_pp $%p btype %d anchor_index %d", 625 pkt_buf_addr_pp, 626 pktbufsz_type, 627 anchor_index)); 628 629 ring_info = rbr_p->ring_info; 630 found = B_FALSE; 631 bufinfo = (rxbuf_index_info_t 
*)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would be a single block being used for the same
	 * buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
			(pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, then reset the hint for this size.
			 */
			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			"==> nxge_rxbuf_pp_to_vp: (!found)"
			"buf_pp $%p btype %d anchor_index %d",
			pkt_buf_addr_pp,
			pktbufsz_type,
			anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * The search is a binary search over an array that is
		 * already sorted in increasing order:
		 * info[0] < info[1] < info[2]  .... 
< info[n-1] 681 * where n is the size of the information array 682 */ 683 r_index = rbr_p->num_blocks - 1; 684 l_index = 0; 685 search_done = B_FALSE; 686 anchor_index = MID_INDEX(r_index, l_index); 687 while (search_done == B_FALSE) { 688 if ((r_index == l_index) || 689 (iteration >= max_iterations)) 690 search_done = B_TRUE; 691 end_side = TO_RIGHT; /* to the right */ 692 base_side = TO_LEFT; /* to the left */ 693 /* read the DVMA address information and sort it */ 694 dvma_addr = bufinfo[anchor_index].dvma_addr; 695 chunk_size = bufinfo[anchor_index].buf_size; 696 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 697 "==> nxge_rxbuf_pp_to_vp: (searching)" 698 "buf_pp $%p btype %d " 699 "anchor_index %d chunk_size %d dvmaaddr $%p", 700 pkt_buf_addr_pp, 701 pktbufsz_type, 702 anchor_index, 703 chunk_size, 704 dvma_addr)); 705 706 if (pktbuf_pp >= dvma_addr) 707 base_side = TO_RIGHT; /* to the right */ 708 if (pktbuf_pp < (dvma_addr + chunk_size)) 709 end_side = TO_LEFT; /* to the left */ 710 711 switch (base_side + end_side) { 712 case IN_MIDDLE: 713 /* found */ 714 found = B_TRUE; 715 search_done = B_TRUE; 716 if ((pktbuf_pp + bufsize) < 717 (dvma_addr + chunk_size)) 718 ring_info->hint[pktbufsz_type] = 719 bufinfo[anchor_index].buf_index; 720 break; 721 case BOTH_RIGHT: 722 /* not found: go to the right */ 723 l_index = anchor_index + 1; 724 anchor_index = 725 MID_INDEX(r_index, l_index); 726 break; 727 728 case BOTH_LEFT: 729 /* not found: go to the left */ 730 r_index = anchor_index - 1; 731 anchor_index = MID_INDEX(r_index, 732 l_index); 733 break; 734 default: /* should not come here */ 735 return (NXGE_ERROR); 736 } 737 iteration++; 738 } 739 740 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 741 "==> nxge_rxbuf_pp_to_vp: (search done)" 742 "buf_pp $%p btype %d anchor_index %d", 743 pkt_buf_addr_pp, 744 pktbufsz_type, 745 anchor_index)); 746 } 747 748 if (found == B_FALSE) { 749 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 750 "==> nxge_rxbuf_pp_to_vp: (search failed)" 751 "buf_pp $%p btype %d anchor_index %d", 752 pkt_buf_addr_pp, 753 pktbufsz_type, 754 anchor_index)); 755 return (NXGE_ERROR); 756 } 757 758 found_index: 759 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 760 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 761 "buf_pp $%p btype %d bufsize %d anchor_index %d", 762 pkt_buf_addr_pp, 763 pktbufsz_type, 764 bufsize, 765 anchor_index)); 766 767 /* index of the first block in this chunk */ 768 chunk_index = bufinfo[anchor_index].start_index; 769 dvma_addr = bufinfo[anchor_index].dvma_addr; 770 page_size_mask = ring_info->block_size_mask; 771 772 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 773 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 774 "buf_pp $%p btype %d bufsize %d " 775 "anchor_index %d chunk_index %d dvma $%p", 776 pkt_buf_addr_pp, 777 pktbufsz_type, 778 bufsize, 779 anchor_index, 780 chunk_index, 781 dvma_addr)); 782 783 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 784 block_size = rbr_p->block_size; /* System block(page) size */ 785 786 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 787 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 788 "buf_pp $%p btype %d bufsize %d " 789 "anchor_index %d chunk_index %d dvma $%p " 790 "offset %d block_size %d", 791 pkt_buf_addr_pp, 792 pktbufsz_type, 793 bufsize, 794 anchor_index, 795 chunk_index, 796 dvma_addr, 797 offset, 798 block_size)); 799 800 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 801 802 block_index = (offset / block_size); /* index within chunk */ 803 total_index = chunk_index + block_index; 804 805 806 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 807 "==> nxge_rxbuf_pp_to_vp: " 808 
"total_index %d dvma_addr $%p " 809 "offset %d block_size %d " 810 "block_index %d ", 811 total_index, dvma_addr, 812 offset, block_size, 813 block_index)); 814 #if defined(__i386) 815 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 816 (uint32_t)offset); 817 #else 818 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 819 (uint64_t)offset); 820 #endif 821 822 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 823 "==> nxge_rxbuf_pp_to_vp: " 824 "total_index %d dvma_addr $%p " 825 "offset %d block_size %d " 826 "block_index %d " 827 "*pkt_buf_addr_p $%p", 828 total_index, dvma_addr, 829 offset, block_size, 830 block_index, 831 *pkt_buf_addr_p)); 832 833 834 *msg_index = total_index; 835 *bufoffset = (offset & page_size_mask); 836 837 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 838 "==> nxge_rxbuf_pp_to_vp: get msg index: " 839 "msg_index %d bufoffset_index %d", 840 *msg_index, 841 *bufoffset)); 842 843 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 844 845 return (NXGE_OK); 846 } 847 848 /* 849 * used by quick sort (qsort) function 850 * to perform comparison 851 */ 852 static int 853 nxge_sort_compare(const void *p1, const void *p2) 854 { 855 856 rxbuf_index_info_t *a, *b; 857 858 a = (rxbuf_index_info_t *)p1; 859 b = (rxbuf_index_info_t *)p2; 860 861 if (a->dvma_addr > b->dvma_addr) 862 return (1); 863 if (a->dvma_addr < b->dvma_addr) 864 return (-1); 865 return (0); 866 } 867 868 869 870 /* 871 * grabbed this sort implementation from common/syscall/avl.c 872 * 873 */ 874 /* 875 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 876 * v = Ptr to array/vector of objs 877 * n = # objs in the array 878 * s = size of each obj (must be multiples of a word size) 879 * f = ptr to function to compare two objs 880 * returns (-1 = less than, 0 = equal, 1 = greater than 881 */ 882 void 883 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 884 { 885 int g, i, j, ii; 886 unsigned int *p1, *p2; 887 unsigned int tmp; 888 889 /* No work to do */ 890 if (v == NULL || n <= 1) 891 return; 892 /* Sanity check on arguments */ 893 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 894 ASSERT(s > 0); 895 896 for (g = n / 2; g > 0; g /= 2) { 897 for (i = g; i < n; i++) { 898 for (j = i - g; j >= 0 && 899 (*f)(v + j * s, v + (j + g) * s) == 1; 900 j -= g) { 901 p1 = (unsigned *)(v + j * s); 902 p2 = (unsigned *)(v + (j + g) * s); 903 for (ii = 0; ii < s / 4; ii++) { 904 tmp = *p1; 905 *p1++ = *p2; 906 *p2++ = tmp; 907 } 908 } 909 } 910 } 911 } 912 913 /* 914 * Initialize data structures required for rxdma 915 * buffer dvma->vmem address lookup 916 */ 917 /*ARGSUSED*/ 918 static nxge_status_t 919 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 920 { 921 922 int index; 923 rxring_info_t *ring_info; 924 int max_iteration = 0, max_index = 0; 925 926 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 927 928 ring_info = rbrp->ring_info; 929 ring_info->hint[0] = NO_HINT; 930 ring_info->hint[1] = NO_HINT; 931 ring_info->hint[2] = NO_HINT; 932 max_index = rbrp->num_blocks; 933 934 /* read the DVMA address information and sort it */ 935 /* do init of the information array */ 936 937 938 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 939 " nxge_rxbuf_index_info_init Sort ptrs")); 940 941 /* sort the array */ 942 nxge_ksort((void *)ring_info->buffer, max_index, 943 sizeof (rxbuf_index_info_t), nxge_sort_compare); 944 945 946 947 for (index = 0; index < max_index; index++) { 948 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 949 " nxge_rxbuf_index_info_init: sorted chunk %d " 
950 " ioaddr $%p kaddr $%p size %x", 951 index, ring_info->buffer[index].dvma_addr, 952 ring_info->buffer[index].kaddr, 953 ring_info->buffer[index].buf_size)); 954 } 955 956 max_iteration = 0; 957 while (max_index >= (1ULL << max_iteration)) 958 max_iteration++; 959 ring_info->max_iterations = max_iteration + 1; 960 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 961 " nxge_rxbuf_index_info_init Find max iter %d", 962 ring_info->max_iterations)); 963 964 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 965 return (NXGE_OK); 966 } 967 968 /* ARGSUSED */ 969 void 970 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 971 { 972 #ifdef NXGE_DEBUG 973 974 uint32_t bptr; 975 uint64_t pp; 976 977 bptr = entry_p->bits.hdw.pkt_buf_addr; 978 979 NXGE_DEBUG_MSG((nxgep, RX_CTL, 980 "\trcr entry $%p " 981 "\trcr entry 0x%0llx " 982 "\trcr entry 0x%08x " 983 "\trcr entry 0x%08x " 984 "\tvalue 0x%0llx\n" 985 "\tmulti = %d\n" 986 "\tpkt_type = 0x%x\n" 987 "\tzero_copy = %d\n" 988 "\tnoport = %d\n" 989 "\tpromis = %d\n" 990 "\terror = 0x%04x\n" 991 "\tdcf_err = 0x%01x\n" 992 "\tl2_len = %d\n" 993 "\tpktbufsize = %d\n" 994 "\tpkt_buf_addr = $%p\n" 995 "\tpkt_buf_addr (<< 6) = $%p\n", 996 entry_p, 997 *(int64_t *)entry_p, 998 *(int32_t *)entry_p, 999 *(int32_t *)((char *)entry_p + 32), 1000 entry_p->value, 1001 entry_p->bits.hdw.multi, 1002 entry_p->bits.hdw.pkt_type, 1003 entry_p->bits.hdw.zero_copy, 1004 entry_p->bits.hdw.noport, 1005 entry_p->bits.hdw.promis, 1006 entry_p->bits.hdw.error, 1007 entry_p->bits.hdw.dcf_err, 1008 entry_p->bits.hdw.l2_len, 1009 entry_p->bits.hdw.pktbufsz, 1010 bptr, 1011 entry_p->bits.ldw.pkt_buf_addr)); 1012 1013 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1014 RCR_PKT_BUF_ADDR_SHIFT; 1015 1016 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1017 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1018 #endif 1019 } 1020 1021 void 1022 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1023 { 1024 npi_handle_t handle; 1025 rbr_stat_t rbr_stat; 1026 addr44_t hd_addr; 1027 addr44_t tail_addr; 1028 uint16_t qlen; 1029 1030 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1031 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1032 1033 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1034 1035 /* RBR head */ 1036 hd_addr.addr = 0; 1037 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1038 #if defined(__i386) 1039 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1040 (void *)(uint32_t)hd_addr.addr); 1041 #else 1042 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1043 (void *)hd_addr.addr); 1044 #endif 1045 1046 /* RBR stats */ 1047 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1048 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1049 1050 /* RCR tail */ 1051 tail_addr.addr = 0; 1052 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1053 #if defined(__i386) 1054 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1055 (void *)(uint32_t)tail_addr.addr); 1056 #else 1057 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1058 (void *)tail_addr.addr); 1059 #endif 1060 1061 /* RCR qlen */ 1062 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1063 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1064 1065 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1066 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1067 } 1068 1069 void 1070 nxge_rxdma_stop(p_nxge_t nxgep) 1071 { 1072 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1073 1074 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1075 (void) nxge_rx_mac_disable(nxgep); 1076 (void) 
nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1077 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 1078 } 1079 1080 void 1081 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 1082 { 1083 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 1084 1085 (void) nxge_rxdma_stop(nxgep); 1086 (void) nxge_uninit_rxdma_channels(nxgep); 1087 (void) nxge_init_rxdma_channels(nxgep); 1088 1089 #ifndef AXIS_DEBUG_LB 1090 (void) nxge_xcvr_init(nxgep); 1091 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1092 #endif 1093 (void) nxge_rx_mac_enable(nxgep); 1094 1095 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 1096 } 1097 1098 nxge_status_t 1099 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1100 { 1101 int i, ndmas; 1102 uint16_t channel; 1103 p_rx_rbr_rings_t rx_rbr_rings; 1104 p_rx_rbr_ring_t *rbr_rings; 1105 npi_handle_t handle; 1106 npi_status_t rs = NPI_SUCCESS; 1107 nxge_status_t status = NXGE_OK; 1108 1109 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1110 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1111 1112 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1113 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1114 "<== nxge_rxdma_mode: not initialized")); 1115 return (NXGE_ERROR); 1116 } 1117 1118 rx_rbr_rings = nxgep->rx_rbr_rings; 1119 if (rx_rbr_rings == NULL) { 1120 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1121 "<== nxge_rxdma_mode: NULL ring pointer")); 1122 return (NXGE_ERROR); 1123 } 1124 if (rx_rbr_rings->rbr_rings == NULL) { 1125 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1126 "<== nxge_rxdma_mode: NULL rbr rings pointer")); 1127 return (NXGE_ERROR); 1128 } 1129 1130 ndmas = rx_rbr_rings->ndmas; 1131 if (!ndmas) { 1132 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1133 "<== nxge_rxdma_mode: no channel")); 1134 return (NXGE_ERROR); 1135 } 1136 1137 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1138 "==> nxge_rxdma_mode (ndmas %d)", ndmas)); 1139 1140 rbr_rings = rx_rbr_rings->rbr_rings; 1141 1142 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1143 for (i = 0; i < ndmas; i++) { 1144 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 1145 continue; 1146 } 1147 channel = rbr_rings[i]->rdc; 1148 if (enable) { 1149 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1150 "==> nxge_rxdma_hw_mode: channel %d (enable)", 1151 channel)); 1152 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 1153 } else { 1154 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1155 "==> nxge_rxdma_hw_mode: channel %d (disable)", 1156 channel)); 1157 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 1158 } 1159 } 1160 1161 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 1162 1163 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1164 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1165 1166 return (status); 1167 } 1168 1169 void 1170 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1171 { 1172 npi_handle_t handle; 1173 1174 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1175 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1176 1177 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1178 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1179 1180 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1181 } 1182 1183 void 1184 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1185 { 1186 npi_handle_t handle; 1187 1188 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1189 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1190 1191 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1192 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1193 1194 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1195 } 1196 1197 void 1198 nxge_hw_start_rx(p_nxge_t nxgep) 1199 { 1200 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1201 1202 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1203 (void) nxge_rx_mac_enable(nxgep); 1204 1205 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1206 } 1207 1208 /*ARGSUSED*/ 1209 void 1210 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1211 { 1212 int i, ndmas; 1213 uint16_t rdc; 1214 p_rx_rbr_rings_t rx_rbr_rings; 1215 p_rx_rbr_ring_t *rbr_rings; 1216 p_rx_rcr_rings_t rx_rcr_rings; 1217 1218 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1219 1220 rx_rbr_rings = nxgep->rx_rbr_rings; 1221 if (rx_rbr_rings == NULL) { 1222 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1223 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 1224 return; 1225 } 1226 ndmas = rx_rbr_rings->ndmas; 1227 if (!ndmas) { 1228 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1229 "<== nxge_fixup_rxdma_rings: no channel")); 1230 return; 1231 } 1232 1233 rx_rcr_rings = nxgep->rx_rcr_rings; 1234 if (rx_rcr_rings == NULL) { 1235 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1236 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 1237 return; 1238 } 1239 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1240 "==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas)); 1241 1242 nxge_rxdma_hw_stop(nxgep); 1243 1244 rbr_rings = rx_rbr_rings->rbr_rings; 1245 for (i = 0; i < ndmas; i++) { 1246 rdc = rbr_rings[i]->rdc; 1247 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1248 "==> nxge_fixup_rxdma_rings: channel %d " 1249 "ring $%px", rdc, rbr_rings[i])); 1250 (void) nxge_rxdma_fixup_channel(nxgep, rdc, i); 1251 } 1252 1253 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1254 } 1255 1256 void 1257 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1258 { 1259 int i; 1260 1261 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1262 i = nxge_rxdma_get_ring_index(nxgep, channel); 1263 if (i < 0) { 1264 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1265 "<== nxge_rxdma_fix_channel: no entry found")); 1266 return; 1267 } 1268 1269 nxge_rxdma_fixup_channel(nxgep, channel, i); 1270 1271 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_txdma_fix_channel")); 1272 } 1273 1274 void 1275 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1276 { 1277 int ndmas; 1278 p_rx_rbr_rings_t rx_rbr_rings; 1279 p_rx_rbr_ring_t *rbr_rings; 1280 p_rx_rcr_rings_t rx_rcr_rings; 1281 p_rx_rcr_ring_t *rcr_rings; 1282 p_rx_mbox_areas_t rx_mbox_areas_p; 1283 p_rx_mbox_t *rx_mbox_p; 1284 p_nxge_dma_pool_t dma_buf_poolp; 1285 p_nxge_dma_pool_t dma_cntl_poolp; 1286 p_rx_rbr_ring_t rbrp; 1287 p_rx_rcr_ring_t rcrp; 1288 
	p_rx_mbox_t		mboxp;
	p_nxge_dma_common_t	dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

	(void) nxge_rxdma_stop_channel(nxgep, channel);

	dma_buf_poolp = nxgep->rx_buf_pool_p;
	dma_cntl_poolp = nxgep->rx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			"<== nxge_rxdma_fixup_channel: buf not allocated"));
		return;
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			"<== nxge_rxdma_fixup_channel: no dma allocated"));
		return;
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

	/* Reinitialize the receive block and completion rings */
	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	status = nxge_rxdma_start_channel(nxgep, channel,
	    rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto nxge_rxdma_fixup_channel_fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
	return;

nxge_rxdma_fixup_channel_fail:
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

int
nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
{
	int			i, ndmas;
	uint16_t		rdc;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"==> nxge_rxdma_get_ring_index: channel %d", channel));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_get_ring_index: NULL ring pointer"));
		return (-1);
	}
	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_get_ring_index: no channel"));
		return (-1);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	for (i = 0; i < ndmas; i++) {
		rdc = rbr_rings[i]->rdc;
		if (channel == rdc) {
			NXGE_DEBUG_MSG((nxgep, RX_CTL,
				"==> nxge_rxdma_get_rbr_ring: "
				"channel %d (index %d) "
				"ring $%p", channel, i,
				rbr_rings[i]));
			return (i);
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"<== nxge_rxdma_get_rbr_ring_index: not found"));

	return (-1);
}

p_rx_rbr_ring_t
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
{
	int			i, ndmas;
	uint16_t		rdc;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
		"==> nxge_rxdma_get_rbr_ring: channel %d", channel));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
			"<== nxge_rxdma_get_rbr_ring: NULL ring pointer"));
1410 return (NULL); 1411 } 1412 ndmas = rx_rbr_rings->ndmas; 1413 if (!ndmas) { 1414 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1415 "<== nxge_rxdma_get_rbr_ring: no channel")); 1416 return (NULL); 1417 } 1418 1419 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1420 "==> nxge_rxdma_get_ring (ndmas %d)", ndmas)); 1421 1422 rbr_rings = rx_rbr_rings->rbr_rings; 1423 for (i = 0; i < ndmas; i++) { 1424 rdc = rbr_rings[i]->rdc; 1425 if (channel == rdc) { 1426 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1427 "==> nxge_rxdma_get_rbr_ring: channel %d " 1428 "ring $%p", channel, rbr_rings[i])); 1429 return (rbr_rings[i]); 1430 } 1431 } 1432 1433 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1434 "<== nxge_rxdma_get_rbr_ring: not found")); 1435 1436 return (NULL); 1437 } 1438 1439 p_rx_rcr_ring_t 1440 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1441 { 1442 int i, ndmas; 1443 uint16_t rdc; 1444 p_rx_rcr_rings_t rx_rcr_rings; 1445 p_rx_rcr_ring_t *rcr_rings; 1446 1447 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1448 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1449 1450 rx_rcr_rings = nxgep->rx_rcr_rings; 1451 if (rx_rcr_rings == NULL) { 1452 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1453 "<== nxge_rxdma_get_rcr_ring: NULL ring pointer")); 1454 return (NULL); 1455 } 1456 ndmas = rx_rcr_rings->ndmas; 1457 if (!ndmas) { 1458 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1459 "<== nxge_rxdma_get_rcr_ring: no channel")); 1460 return (NULL); 1461 } 1462 1463 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1464 "==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas)); 1465 1466 rcr_rings = rx_rcr_rings->rcr_rings; 1467 for (i = 0; i < ndmas; i++) { 1468 rdc = rcr_rings[i]->rdc; 1469 if (channel == rdc) { 1470 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1471 "==> nxge_rxdma_get_rcr_ring: channel %d " 1472 "ring $%p", channel, rcr_rings[i])); 1473 return (rcr_rings[i]); 1474 } 1475 } 1476 1477 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1478 "<== nxge_rxdma_get_rcr_ring: not found")); 1479 1480 return (NULL); 1481 } 1482 1483 /* 1484 * Static functions start here. 
1485 */ 1486 static p_rx_msg_t 1487 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1488 { 1489 p_rx_msg_t nxge_mp = NULL; 1490 p_nxge_dma_common_t dmamsg_p; 1491 uchar_t *buffer; 1492 1493 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1494 if (nxge_mp == NULL) { 1495 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1496 "Allocation of a rx msg failed.")); 1497 goto nxge_allocb_exit; 1498 } 1499 1500 nxge_mp->use_buf_pool = B_FALSE; 1501 if (dmabuf_p) { 1502 nxge_mp->use_buf_pool = B_TRUE; 1503 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1504 *dmamsg_p = *dmabuf_p; 1505 dmamsg_p->nblocks = 1; 1506 dmamsg_p->block_size = size; 1507 dmamsg_p->alength = size; 1508 buffer = (uchar_t *)dmabuf_p->kaddrp; 1509 1510 dmabuf_p->kaddrp = (void *) 1511 ((char *)dmabuf_p->kaddrp + size); 1512 dmabuf_p->ioaddr_pp = (void *) 1513 ((char *)dmabuf_p->ioaddr_pp + size); 1514 dmabuf_p->alength -= size; 1515 dmabuf_p->offset += size; 1516 dmabuf_p->dma_cookie.dmac_laddress += size; 1517 dmabuf_p->dma_cookie.dmac_size -= size; 1518 1519 } else { 1520 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1521 if (buffer == NULL) { 1522 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1523 "Allocation of a receive page failed.")); 1524 goto nxge_allocb_fail1; 1525 } 1526 } 1527 1528 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1529 if (nxge_mp->rx_mblk_p == NULL) { 1530 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1531 goto nxge_allocb_fail2; 1532 } 1533 1534 nxge_mp->buffer = buffer; 1535 nxge_mp->block_size = size; 1536 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1537 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1538 nxge_mp->ref_cnt = 1; 1539 nxge_mp->free = B_TRUE; 1540 nxge_mp->rx_use_bcopy = B_FALSE; 1541 1542 atomic_inc_32(&nxge_mblks_pending); 1543 1544 goto nxge_allocb_exit; 1545 1546 nxge_allocb_fail2: 1547 if (!nxge_mp->use_buf_pool) { 1548 KMEM_FREE(buffer, size); 1549 } 1550 1551 nxge_allocb_fail1: 1552 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1553 nxge_mp = NULL; 1554 1555 nxge_allocb_exit: 1556 return (nxge_mp); 1557 } 1558 1559 p_mblk_t 1560 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1561 { 1562 p_mblk_t mp; 1563 1564 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1565 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1566 "offset = 0x%08X " 1567 "size = 0x%08X", 1568 nxge_mp, offset, size)); 1569 1570 mp = desballoc(&nxge_mp->buffer[offset], size, 1571 0, &nxge_mp->freeb); 1572 if (mp == NULL) { 1573 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1574 goto nxge_dupb_exit; 1575 } 1576 atomic_inc_32(&nxge_mp->ref_cnt); 1577 atomic_inc_32(&nxge_mblks_pending); 1578 1579 1580 nxge_dupb_exit: 1581 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1582 nxge_mp)); 1583 return (mp); 1584 } 1585 1586 p_mblk_t 1587 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1588 { 1589 p_mblk_t mp; 1590 uchar_t *dp; 1591 1592 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1593 if (mp == NULL) { 1594 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1595 goto nxge_dupb_bcopy_exit; 1596 } 1597 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1598 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1599 mp->b_wptr = dp + size; 1600 1601 nxge_dupb_bcopy_exit: 1602 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1603 nxge_mp)); 1604 return (mp); 1605 } 1606 1607 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1608 p_rx_msg_t rx_msg_p); 1609 1610 void 1611 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t 
rx_msg_p) 1612 { 1613 1614 npi_handle_t handle; 1615 1616 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1617 1618 /* Reuse this buffer */ 1619 rx_msg_p->free = B_FALSE; 1620 rx_msg_p->cur_usage_cnt = 0; 1621 rx_msg_p->max_usage_cnt = 0; 1622 rx_msg_p->pkt_buf_size = 0; 1623 1624 if (rx_rbr_p->rbr_use_bcopy) { 1625 rx_msg_p->rx_use_bcopy = B_FALSE; 1626 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1627 } 1628 1629 /* 1630 * Get the rbr header pointer and its offset index. 1631 */ 1632 MUTEX_ENTER(&rx_rbr_p->post_lock); 1633 1634 1635 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1636 rx_rbr_p->rbr_wrap_mask); 1637 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1638 MUTEX_EXIT(&rx_rbr_p->post_lock); 1639 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1640 npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1); 1641 1642 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1643 "<== nxge_post_page (channel %d post_next_index %d)", 1644 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1645 1646 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1647 } 1648 1649 void 1650 nxge_freeb(p_rx_msg_t rx_msg_p) 1651 { 1652 size_t size; 1653 uchar_t *buffer = NULL; 1654 int ref_cnt; 1655 boolean_t free_state = B_FALSE; 1656 1657 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1658 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1659 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1660 rx_msg_p, nxge_mblks_pending)); 1661 1662 atomic_dec_32(&nxge_mblks_pending); 1663 /* 1664 * First we need to get the free state, then 1665 * atomic decrement the reference count to prevent 1666 * the race condition with the interrupt thread that 1667 * is processing a loaned up buffer block. 1668 */ 1669 free_state = rx_msg_p->free; 1670 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1671 if (!ref_cnt) { 1672 buffer = rx_msg_p->buffer; 1673 size = rx_msg_p->block_size; 1674 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1675 "will free: rx_msg_p = $%p (block pending %d)", 1676 rx_msg_p, nxge_mblks_pending)); 1677 1678 if (!rx_msg_p->use_buf_pool) { 1679 KMEM_FREE(buffer, size); 1680 } 1681 1682 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1683 return; 1684 } 1685 1686 /* 1687 * Repost buffer. 1688 */ 1689 if (free_state && (ref_cnt == 1)) { 1690 NXGE_DEBUG_MSG((NULL, RX_CTL, 1691 "nxge_freeb: post page $%p:", rx_msg_p)); 1692 nxge_post_page(rx_msg_p->nxgep, rx_msg_p->rx_rbr_p, 1693 rx_msg_p); 1694 } 1695 1696 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1697 } 1698 1699 uint_t 1700 nxge_rx_intr(void *arg1, void *arg2) 1701 { 1702 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1703 p_nxge_t nxgep = (p_nxge_t)arg2; 1704 p_nxge_ldg_t ldgp; 1705 uint8_t channel; 1706 npi_handle_t handle; 1707 rx_dma_ctl_stat_t cs; 1708 1709 #ifdef NXGE_DEBUG 1710 rxdma_cfig1_t cfg; 1711 #endif 1712 uint_t serviced = DDI_INTR_UNCLAIMED; 1713 1714 if (ldvp == NULL) { 1715 NXGE_DEBUG_MSG((NULL, INT_CTL, 1716 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1717 nxgep, ldvp)); 1718 1719 return (DDI_INTR_CLAIMED); 1720 } 1721 1722 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1723 nxgep = ldvp->nxgep; 1724 } 1725 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1726 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1727 nxgep, ldvp)); 1728 1729 /* 1730 * This interrupt handler is for a specific 1731 * receive dma channel. 1732 */ 1733 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1734 /* 1735 * Get the control and status for this channel. 
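	 * The event bits captured in cs are write-1-to-clear; they are
	 * acknowledged below when cs is written back with the mailbox
	 * enable (mex) bit set.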
1736 */ 1737 channel = ldvp->channel; 1738 ldgp = ldvp->ldgp; 1739 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1740 1741 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1742 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1743 channel, 1744 cs.value, 1745 cs.bits.hdw.rcrto, 1746 cs.bits.hdw.rcrthres)); 1747 1748 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs); 1749 serviced = DDI_INTR_CLAIMED; 1750 1751 /* error events. */ 1752 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1753 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1754 } 1755 1756 nxge_intr_exit: 1757 1758 1759 /* 1760 * Enable the mailbox update interrupt if we want 1761 * to use mailbox. We probably don't need to use 1762 * mailbox as it only saves us one pio read. 1763 * Also write 1 to rcrthres and rcrto to clear 1764 * these two edge triggered bits. 1765 */ 1766 1767 cs.value &= RX_DMA_CTL_STAT_WR1C; 1768 cs.bits.hdw.mex = 1; 1769 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1770 cs.value); 1771 1772 /* 1773 * Rearm this logical group if this is a single device 1774 * group. 1775 */ 1776 if (ldgp->nldvs == 1) { 1777 ldgimgm_t mgm; 1778 mgm.value = 0; 1779 mgm.bits.ldw.arm = 1; 1780 mgm.bits.ldw.timer = ldgp->ldg_timer; 1781 NXGE_REG_WR64(handle, 1782 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1783 mgm.value); 1784 } 1785 1786 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1787 serviced)); 1788 return (serviced); 1789 } 1790 1791 /* 1792 * Process the packets received in the specified logical device 1793 * and pass up a chain of message blocks to the upper layer. 1794 */ 1795 static void 1796 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp, 1797 rx_dma_ctl_stat_t cs) 1798 { 1799 p_mblk_t mp; 1800 p_rx_rcr_ring_t rcrp; 1801 1802 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1803 if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) { 1804 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1805 "<== nxge_rx_pkts_vring: no mp")); 1806 return; 1807 } 1808 1809 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1810 mp)); 1811 1812 #ifdef NXGE_DEBUG 1813 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1814 "==> nxge_rx_pkts_vring:calling mac_rx " 1815 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1816 "mac_handle $%p", 1817 mp->b_wptr - mp->b_rptr, 1818 mp, mp->b_cont, mp->b_next, 1819 rcrp, rcrp->rcr_mac_handle)); 1820 1821 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1822 "==> nxge_rx_pkts_vring: dump packets " 1823 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1824 mp, 1825 mp->b_rptr, 1826 mp->b_wptr, 1827 nxge_dump_packet((char *)mp->b_rptr, 1828 mp->b_wptr - mp->b_rptr))); 1829 if (mp->b_cont) { 1830 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1831 "==> nxge_rx_pkts_vring: dump b_cont packets " 1832 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1833 mp->b_cont, 1834 mp->b_cont->b_rptr, 1835 mp->b_cont->b_wptr, 1836 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1837 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1838 } 1839 if (mp->b_next) { 1840 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1841 "==> nxge_rx_pkts_vring: dump next packets " 1842 "(b_rptr $%p): %s", 1843 mp->b_next->b_rptr, 1844 nxge_dump_packet((char *)mp->b_next->b_rptr, 1845 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1846 } 1847 #endif 1848 1849 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1850 } 1851 1852 1853 /* 1854 * This routine is the main packet receive processing function. 1855 * It gets the packet type, error code, and buffer related 1856 * information from the receive completion entry. 
1857 * How many completion entries to process is based on the number of packets 1858 * queued by the hardware, a hardware maintained tail pointer 1859 * and a configurable receive packet count. 1860 * 1861 * A chain of message blocks will be created as result of processing 1862 * the completion entries. This chain of message blocks will be returned and 1863 * a hardware control status register will be updated with the number of 1864 * packets were removed from the hardware queue. 1865 * 1866 */ 1867 mblk_t * 1868 nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp, 1869 p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs) 1870 { 1871 npi_handle_t handle; 1872 uint8_t channel; 1873 p_rx_rcr_rings_t rx_rcr_rings; 1874 p_rx_rcr_ring_t rcr_p; 1875 uint32_t comp_rd_index; 1876 p_rcr_entry_t rcr_desc_rd_head_p; 1877 p_rcr_entry_t rcr_desc_rd_head_pp; 1878 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1879 uint16_t qlen, nrcr_read, npkt_read; 1880 uint32_t qlen_hw; 1881 boolean_t multi; 1882 rcrcfig_b_t rcr_cfg_b; 1883 #if defined(_BIG_ENDIAN) 1884 npi_status_t rs = NPI_SUCCESS; 1885 #endif 1886 1887 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d " 1888 "channel %d", vindex, ldvp->channel)); 1889 1890 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1891 return (NULL); 1892 } 1893 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1894 rx_rcr_rings = nxgep->rx_rcr_rings; 1895 rcr_p = rx_rcr_rings->rcr_rings[vindex]; 1896 channel = rcr_p->rdc; 1897 if (channel != ldvp->channel) { 1898 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d " 1899 "channel %d, and rcr channel %d not matched.", 1900 vindex, ldvp->channel, channel)); 1901 return (NULL); 1902 } 1903 1904 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1905 "==> nxge_rx_pkts: START: rcr channel %d " 1906 "head_p $%p head_pp $%p index %d ", 1907 channel, rcr_p->rcr_desc_rd_head_p, 1908 rcr_p->rcr_desc_rd_head_pp, 1909 rcr_p->comp_rd_index)); 1910 1911 1912 #if !defined(_BIG_ENDIAN) 1913 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 1914 #else 1915 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1916 if (rs != NPI_SUCCESS) { 1917 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d " 1918 "channel %d, get qlen failed 0x%08x", 1919 vindex, ldvp->channel, rs)); 1920 return (NULL); 1921 } 1922 #endif 1923 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 1924 "qlen %d", channel, qlen)); 1925 1926 1927 1928 if (!qlen) { 1929 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1930 "==> nxge_rx_pkts:rcr channel %d " 1931 "qlen %d (no pkts)", channel, qlen)); 1932 1933 return (NULL); 1934 } 1935 1936 comp_rd_index = rcr_p->comp_rd_index; 1937 1938 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 1939 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 1940 nrcr_read = npkt_read = 0; 1941 1942 /* 1943 * Number of packets queued 1944 * (The jumbo or multi packet will be counted as only one 1945 * packets and it may take up more than one completion entry). 1946 */ 1947 qlen_hw = (qlen < nxge_max_rx_pkts) ? 1948 qlen : nxge_max_rx_pkts; 1949 head_mp = NULL; 1950 tail_mp = &head_mp; 1951 nmp = mp_cont = NULL; 1952 multi = B_FALSE; 1953 1954 while (qlen_hw) { 1955 1956 #ifdef NXGE_DEBUG 1957 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 1958 #endif 1959 /* 1960 * Process one completion ring entry. 
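		 * nxge_receive_packet() returns either a new message
		 * (nmp) or a continuation block (mp_cont) of a
		 * multi-fragment frame; the chaining cases are handled
		 * right below.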
1961 */ 1962 nxge_receive_packet(nxgep, 1963 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 1964 1965 /* 1966 * message chaining modes 1967 */ 1968 if (nmp) { 1969 nmp->b_next = NULL; 1970 if (!multi && !mp_cont) { /* frame fits a partition */ 1971 *tail_mp = nmp; 1972 tail_mp = &nmp->b_next; 1973 nmp = NULL; 1974 } else if (multi && !mp_cont) { /* first segment */ 1975 *tail_mp = nmp; 1976 tail_mp = &nmp->b_cont; 1977 } else if (multi && mp_cont) { /* mid of multi segs */ 1978 *tail_mp = mp_cont; 1979 tail_mp = &mp_cont->b_cont; 1980 } else if (!multi && mp_cont) { /* last segment */ 1981 *tail_mp = mp_cont; 1982 tail_mp = &nmp->b_next; 1983 nmp = NULL; 1984 } 1985 } 1986 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1987 "==> nxge_rx_pkts: loop: rcr channel %d " 1988 "before updating: multi %d " 1989 "nrcr_read %d " 1990 "npk read %d " 1991 "head_pp $%p index %d ", 1992 channel, 1993 multi, 1994 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 1995 comp_rd_index)); 1996 1997 if (!multi) { 1998 qlen_hw--; 1999 npkt_read++; 2000 } 2001 2002 /* 2003 * Update the next read entry. 2004 */ 2005 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2006 rcr_p->comp_wrap_mask); 2007 2008 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2009 rcr_p->rcr_desc_first_p, 2010 rcr_p->rcr_desc_last_p); 2011 2012 nrcr_read++; 2013 2014 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2015 "<== nxge_rx_pkts: (SAM, process one packet) " 2016 "nrcr_read %d", 2017 nrcr_read)); 2018 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2019 "==> nxge_rx_pkts: loop: rcr channel %d " 2020 "multi %d " 2021 "nrcr_read %d " 2022 "npk read %d " 2023 "head_pp $%p index %d ", 2024 channel, 2025 multi, 2026 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2027 comp_rd_index)); 2028 2029 } 2030 2031 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2032 rcr_p->comp_rd_index = comp_rd_index; 2033 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2034 2035 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2036 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2037 rcr_p->intr_timeout = nxgep->intr_timeout; 2038 rcr_p->intr_threshold = nxgep->intr_threshold; 2039 rcr_cfg_b.value = 0x0ULL; 2040 if (rcr_p->intr_timeout) 2041 rcr_cfg_b.bits.ldw.entout = 1; 2042 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2043 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2044 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2045 channel, rcr_cfg_b.value); 2046 } 2047 2048 cs.bits.ldw.pktread = npkt_read; 2049 cs.bits.ldw.ptrread = nrcr_read; 2050 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2051 channel, cs.value); 2052 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2053 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2054 "head_pp $%p index %016llx ", 2055 channel, 2056 rcr_p->rcr_desc_rd_head_pp, 2057 rcr_p->comp_rd_index)); 2058 /* 2059 * Update RCR buffer pointer read and number of packets 2060 * read. 
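	 * (The pktread/ptrread fields written back above tell the
	 * hardware how many completion entries this pass consumed.)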
2061 */ 2062 2063 *rcrp = rcr_p; 2064 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2065 return (head_mp); 2066 } 2067 2068 void 2069 nxge_receive_packet(p_nxge_t nxgep, 2070 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2071 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2072 { 2073 p_mblk_t nmp = NULL; 2074 uint64_t multi; 2075 uint64_t dcf_err; 2076 uint8_t channel; 2077 2078 boolean_t first_entry = B_TRUE; 2079 boolean_t is_tcp_udp = B_FALSE; 2080 boolean_t buffer_free = B_FALSE; 2081 boolean_t error_send_up = B_FALSE; 2082 uint8_t error_type; 2083 uint16_t l2_len; 2084 uint16_t skip_len; 2085 uint8_t pktbufsz_type; 2086 uint64_t rcr_entry; 2087 uint64_t *pkt_buf_addr_pp; 2088 uint64_t *pkt_buf_addr_p; 2089 uint32_t buf_offset; 2090 uint32_t bsize; 2091 uint32_t error_disp_cnt; 2092 uint32_t msg_index; 2093 p_rx_rbr_ring_t rx_rbr_p; 2094 p_rx_msg_t *rx_msg_ring_p; 2095 p_rx_msg_t rx_msg_p; 2096 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2097 nxge_status_t status = NXGE_OK; 2098 boolean_t is_valid = B_FALSE; 2099 p_nxge_rx_ring_stats_t rdc_stats; 2100 uint32_t bytes_read; 2101 uint64_t pkt_type; 2102 uint64_t frag; 2103 #ifdef NXGE_DEBUG 2104 int dump_len; 2105 #endif 2106 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2107 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2108 2109 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2110 2111 multi = (rcr_entry & RCR_MULTI_MASK); 2112 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2113 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2114 2115 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2116 frag = (rcr_entry & RCR_FRAG_MASK); 2117 2118 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2119 2120 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2121 RCR_PKTBUFSZ_SHIFT); 2122 #if defined(__i386) 2123 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2124 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2125 #else 2126 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2127 RCR_PKT_BUF_ADDR_SHIFT); 2128 #endif 2129 2130 channel = rcr_p->rdc; 2131 2132 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2133 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2134 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2135 "error_type 0x%x pkt_type 0x%x " 2136 "pktbufsz_type %d ", 2137 rcr_desc_rd_head_p, 2138 rcr_entry, pkt_buf_addr_pp, l2_len, 2139 multi, 2140 error_type, 2141 pkt_type, 2142 pktbufsz_type)); 2143 2144 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2145 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2146 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2147 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2148 rcr_entry, pkt_buf_addr_pp, l2_len, 2149 multi, 2150 error_type, 2151 pkt_type)); 2152 2153 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2154 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2155 "full pkt_buf_addr_pp $%p l2_len %d", 2156 rcr_entry, pkt_buf_addr_pp, l2_len)); 2157 2158 /* get the stats ptr */ 2159 rdc_stats = rcr_p->rdc_stats; 2160 2161 if (!l2_len) { 2162 2163 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2164 "<== nxge_receive_packet: failed: l2 length is 0.")); 2165 return; 2166 } 2167 2168 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
*/ 2169 l2_len -= ETHERFCSL; 2170 2171 /* shift 6 bits to get the full io address */ 2172 #if defined(__i386) 2173 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2174 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2175 #else 2176 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2177 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2178 #endif 2179 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2180 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2181 "full pkt_buf_addr_pp $%p l2_len %d", 2182 rcr_entry, pkt_buf_addr_pp, l2_len)); 2183 2184 rx_rbr_p = rcr_p->rx_rbr_p; 2185 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2186 2187 if (first_entry) { 2188 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2189 RXDMA_HDR_SIZE_DEFAULT); 2190 2191 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2192 "==> nxge_receive_packet: first entry 0x%016llx " 2193 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2194 rcr_entry, pkt_buf_addr_pp, l2_len, 2195 hdr_size)); 2196 } 2197 2198 MUTEX_ENTER(&rcr_p->lock); 2199 MUTEX_ENTER(&rx_rbr_p->lock); 2200 2201 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2202 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2203 "full pkt_buf_addr_pp $%p l2_len %d", 2204 rcr_entry, pkt_buf_addr_pp, l2_len)); 2205 2206 /* 2207 * Packet buffer address in the completion entry points 2208 * to the starting buffer address (offset 0). 2209 * Use the starting buffer address to locate the corresponding 2210 * kernel address. 2211 */ 2212 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2213 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2214 &buf_offset, 2215 &msg_index); 2216 2217 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2218 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2219 "full pkt_buf_addr_pp $%p l2_len %d", 2220 rcr_entry, pkt_buf_addr_pp, l2_len)); 2221 2222 if (status != NXGE_OK) { 2223 MUTEX_EXIT(&rx_rbr_p->lock); 2224 MUTEX_EXIT(&rcr_p->lock); 2225 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2226 "<== nxge_receive_packet: found vaddr failed %d", 2227 status)); 2228 return; 2229 } 2230 2231 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2232 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2233 "full pkt_buf_addr_pp $%p l2_len %d", 2234 rcr_entry, pkt_buf_addr_pp, l2_len)); 2235 2236 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2237 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2238 "full pkt_buf_addr_pp $%p l2_len %d", 2239 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2240 2241 rx_msg_p = rx_msg_ring_p[msg_index]; 2242 2243 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2244 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2245 "full pkt_buf_addr_pp $%p l2_len %d", 2246 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2247 2248 switch (pktbufsz_type) { 2249 case RCR_PKTBUFSZ_0: 2250 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2251 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2252 "==> nxge_receive_packet: 0 buf %d", bsize)); 2253 break; 2254 case RCR_PKTBUFSZ_1: 2255 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2256 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2257 "==> nxge_receive_packet: 1 buf %d", bsize)); 2258 break; 2259 case RCR_PKTBUFSZ_2: 2260 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2261 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2262 "==> nxge_receive_packet: 2 buf %d", bsize)); 2263 break; 2264 case RCR_SINGLE_BLOCK: 2265 bsize = rx_msg_p->block_size; 2266 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2267 "==> nxge_receive_packet: single %d", bsize)); 2268 2269 break; 2270 default: 2271 MUTEX_EXIT(&rx_rbr_p->lock); 2272 MUTEX_EXIT(&rcr_p->lock); 2273 return; 2274 } 2275 2276 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2277 (buf_offset + sw_offset_bytes), 2278 (hdr_size + l2_len), 2279 DDI_DMA_SYNC_FORCPU); 2280 2281 
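/*
 * Note that only the region of the buffer block that holds this packet
 * (starting at buf_offset + sw_offset_bytes, for hdr_size + l2_len
 * bytes) was synced for the CPU above; a block can be shared by several
 * packets when a small buffer size code is in use, so the whole block is
 * not resynced on every completion.
 */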
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2282 "==> nxge_receive_packet: after first dump:usage count")); 2283 2284 if (rx_msg_p->cur_usage_cnt == 0) { 2285 if (rx_rbr_p->rbr_use_bcopy) { 2286 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2287 if (rx_rbr_p->rbr_consumed < 2288 rx_rbr_p->rbr_threshold_hi) { 2289 if (rx_rbr_p->rbr_threshold_lo == 0 || 2290 ((rx_rbr_p->rbr_consumed >= 2291 rx_rbr_p->rbr_threshold_lo) && 2292 (rx_rbr_p->rbr_bufsize_type >= 2293 pktbufsz_type))) { 2294 rx_msg_p->rx_use_bcopy = B_TRUE; 2295 } 2296 } else { 2297 rx_msg_p->rx_use_bcopy = B_TRUE; 2298 } 2299 } 2300 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2301 "==> nxge_receive_packet: buf %d (new block) ", 2302 bsize)); 2303 2304 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2305 rx_msg_p->pkt_buf_size = bsize; 2306 rx_msg_p->cur_usage_cnt = 1; 2307 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2308 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2309 "==> nxge_receive_packet: buf %d " 2310 "(single block) ", 2311 bsize)); 2312 /* 2313 * Buffer can be reused once the free function 2314 * is called. 2315 */ 2316 rx_msg_p->max_usage_cnt = 1; 2317 buffer_free = B_TRUE; 2318 } else { 2319 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2320 if (rx_msg_p->max_usage_cnt == 1) { 2321 buffer_free = B_TRUE; 2322 } 2323 } 2324 } else { 2325 rx_msg_p->cur_usage_cnt++; 2326 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2327 buffer_free = B_TRUE; 2328 } 2329 } 2330 2331 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2332 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2333 msg_index, l2_len, 2334 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2335 2336 if ((error_type) || (dcf_err)) { 2337 rdc_stats->ierrors++; 2338 if (dcf_err) { 2339 rdc_stats->dcf_err++; 2340 #ifdef NXGE_DEBUG 2341 if (!rdc_stats->dcf_err) { 2342 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2343 "nxge_receive_packet: channel %d dcf_err rcr" 2344 " 0x%llx", channel, rcr_entry)); 2345 } 2346 #endif 2347 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2348 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2349 } else { 2350 /* Update error stats */ 2351 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2352 rdc_stats->errlog.compl_err_type = error_type; 2353 2354 switch (error_type) { 2355 case RCR_L2_ERROR: 2356 rdc_stats->l2_err++; 2357 if (rdc_stats->l2_err < 2358 error_disp_cnt) { 2359 NXGE_FM_REPORT_ERROR(nxgep, 2360 nxgep->mac.portnum, NULL, 2361 NXGE_FM_EREPORT_RDMC_RCR_ERR); 2362 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2363 " nxge_receive_packet:" 2364 " channel %d RCR L2_ERROR", 2365 channel)); 2366 } 2367 break; 2368 case RCR_L4_CSUM_ERROR: 2369 error_send_up = B_TRUE; 2370 rdc_stats->l4_cksum_err++; 2371 if (rdc_stats->l4_cksum_err < 2372 error_disp_cnt) { 2373 NXGE_FM_REPORT_ERROR(nxgep, 2374 nxgep->mac.portnum, NULL, 2375 NXGE_FM_EREPORT_RDMC_RCR_ERR); 2376 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2377 " nxge_receive_packet:" 2378 " channel %d" 2379 " RCR L4_CSUM_ERROR", channel)); 2380 } 2381 break; 2382 case RCR_FFLP_SOFT_ERROR: 2383 error_send_up = B_TRUE; 2384 rdc_stats->fflp_soft_err++; 2385 if (rdc_stats->fflp_soft_err < 2386 error_disp_cnt) { 2387 NXGE_FM_REPORT_ERROR(nxgep, 2388 nxgep->mac.portnum, NULL, 2389 NXGE_FM_EREPORT_RDMC_RCR_ERR); 2390 NXGE_ERROR_MSG((nxgep, 2391 NXGE_ERR_CTL, 2392 " nxge_receive_packet:" 2393 " channel %d" 2394 " RCR FFLP_SOFT_ERROR", channel)); 2395 } 2396 break; 2397 case RCR_ZCP_SOFT_ERROR: 2398 error_send_up = B_TRUE; 2399 rdc_stats->zcp_soft_err++; 2400 if (rdc_stats->zcp_soft_err < 2401 error_disp_cnt) 2402 NXGE_FM_REPORT_ERROR(nxgep, 2403 nxgep->mac.portnum, NULL, 2404
NXGE_FM_EREPORT_RDMC_RCR_ERR); 2405 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2406 " nxge_receive_packet: Channel %d" 2407 " RCR ZCP_SOFT_ERROR", channel)); 2408 break; 2409 default: 2410 rdc_stats->rcr_unknown_err++; 2411 if (rdc_stats->rcr_unknown_err 2412 < error_disp_cnt) { 2413 NXGE_FM_REPORT_ERROR(nxgep, 2414 nxgep->mac.portnum, NULL, 2415 NXGE_FM_EREPORT_RDMC_RCR_ERR); 2416 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2417 " nxge_receive_packet: Channel %d" 2418 " RCR entry 0x%llx error 0x%x", 2419 rcr_entry, channel, error_type)); 2420 } 2421 break; 2422 } 2423 } 2424 2425 /* 2426 * Update and repost buffer block if max usage 2427 * count is reached. 2428 */ 2429 if (error_send_up == B_FALSE) { 2430 atomic_inc_32(&rx_msg_p->ref_cnt); 2431 atomic_inc_32(&nxge_mblks_pending); 2432 if (buffer_free == B_TRUE) { 2433 rx_msg_p->free = B_TRUE; 2434 } 2435 2436 MUTEX_EXIT(&rx_rbr_p->lock); 2437 MUTEX_EXIT(&rcr_p->lock); 2438 nxge_freeb(rx_msg_p); 2439 return; 2440 } 2441 } 2442 2443 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2444 "==> nxge_receive_packet: DMA sync second ")); 2445 2446 bytes_read = rcr_p->rcvd_pkt_bytes; 2447 skip_len = sw_offset_bytes + hdr_size; 2448 if (!rx_msg_p->rx_use_bcopy) { 2449 /* 2450 * For loaned up buffers, the driver reference count 2451 * will be incremented first and then the free state. 2452 */ 2453 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2454 if (first_entry) { 2455 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2456 if (l2_len < bsize - skip_len) { 2457 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2458 } else { 2459 nmp->b_wptr = &nmp->b_rptr[bsize 2460 - skip_len]; 2461 } 2462 } else { 2463 if (l2_len - bytes_read < bsize) { 2464 nmp->b_wptr = 2465 &nmp->b_rptr[l2_len - bytes_read]; 2466 } else { 2467 nmp->b_wptr = &nmp->b_rptr[bsize]; 2468 } 2469 } 2470 } 2471 } else { 2472 if (first_entry) { 2473 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2474 l2_len < bsize - skip_len ? 2475 l2_len : bsize - skip_len); 2476 } else { 2477 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2478 l2_len - bytes_read < bsize ? 2479 l2_len - bytes_read : bsize); 2480 } 2481 } 2482 if (nmp != NULL) { 2483 if (first_entry) 2484 bytes_read = nmp->b_wptr - nmp->b_rptr; 2485 else 2486 bytes_read += nmp->b_wptr - nmp->b_rptr; 2487 2488 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2489 "==> nxge_receive_packet after dupb: " 2490 "rbr consumed %d " 2491 "pktbufsz_type %d " 2492 "nmp $%p rptr $%p wptr $%p " 2493 "buf_offset %d bzise %d l2_len %d skip_len %d", 2494 rx_rbr_p->rbr_consumed, 2495 pktbufsz_type, 2496 nmp, nmp->b_rptr, nmp->b_wptr, 2497 buf_offset, bsize, l2_len, skip_len)); 2498 } else { 2499 cmn_err(CE_WARN, "!nxge_receive_packet: " 2500 "update stats (error)"); 2501 atomic_inc_32(&rx_msg_p->ref_cnt); 2502 atomic_inc_32(&nxge_mblks_pending); 2503 if (buffer_free == B_TRUE) { 2504 rx_msg_p->free = B_TRUE; 2505 } 2506 MUTEX_EXIT(&rx_rbr_p->lock); 2507 MUTEX_EXIT(&rcr_p->lock); 2508 nxge_freeb(rx_msg_p); 2509 return; 2510 } 2511 2512 if (buffer_free == B_TRUE) { 2513 rx_msg_p->free = B_TRUE; 2514 } 2515 /* 2516 * ERROR, FRAG and PKT_TYPE are only reported 2517 * in the first entry. 2518 * If a packet is not fragmented and no error bit is set, then 2519 * L4 checksum is OK. 2520 */ 2521 is_valid = (nmp != NULL); 2522 if (first_entry) { 2523 rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 2524 rdc_stats->ibytes += skip_len + l2_len < bsize ? 2525 l2_len : bsize; 2526 } else { 2527 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 
2528 l2_len - bytes_read : bsize; 2529 } 2530 2531 rcr_p->rcvd_pkt_bytes = bytes_read; 2532 2533 MUTEX_EXIT(&rx_rbr_p->lock); 2534 MUTEX_EXIT(&rcr_p->lock); 2535 2536 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2537 atomic_inc_32(&rx_msg_p->ref_cnt); 2538 atomic_inc_32(&nxge_mblks_pending); 2539 nxge_freeb(rx_msg_p); 2540 } 2541 2542 if (is_valid) { 2543 nmp->b_cont = NULL; 2544 if (first_entry) { 2545 *mp = nmp; 2546 *mp_cont = NULL; 2547 } else { 2548 *mp_cont = nmp; 2549 } 2550 } 2551 2552 /* 2553 * Update stats and hardware checksuming. 2554 */ 2555 if (is_valid && !multi) { 2556 2557 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2558 pkt_type == RCR_PKT_IS_UDP) ? 2559 B_TRUE: B_FALSE); 2560 2561 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2562 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2563 is_valid, multi, is_tcp_udp, frag, error_type)); 2564 2565 if (is_tcp_udp && !frag && !error_type) { 2566 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2567 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2568 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2569 "==> nxge_receive_packet: Full tcp/udp cksum " 2570 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2571 "error %d", 2572 is_valid, multi, is_tcp_udp, frag, error_type)); 2573 } 2574 } 2575 2576 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2577 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2578 2579 *multi_p = (multi == RCR_MULTI_MASK); 2580 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2581 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2582 *multi_p, nmp, *mp, *mp_cont)); 2583 } 2584 2585 /*ARGSUSED*/ 2586 static nxge_status_t 2587 nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, 2588 rx_dma_ctl_stat_t cs) 2589 { 2590 p_nxge_rx_ring_stats_t rdc_stats; 2591 npi_handle_t handle; 2592 npi_status_t rs; 2593 boolean_t rxchan_fatal = B_FALSE; 2594 boolean_t rxport_fatal = B_FALSE; 2595 uint8_t channel; 2596 uint8_t portn; 2597 nxge_status_t status = NXGE_OK; 2598 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2599 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2600 2601 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2602 channel = ldvp->channel; 2603 portn = nxgep->mac.portnum; 2604 rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index]; 2605 2606 if (cs.bits.hdw.rbr_tmout) { 2607 rdc_stats->rx_rbr_tmout++; 2608 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2609 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2610 rxchan_fatal = B_TRUE; 2611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2612 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2613 } 2614 if (cs.bits.hdw.rsp_cnt_err) { 2615 rdc_stats->rsp_cnt_err++; 2616 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2617 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2618 rxchan_fatal = B_TRUE; 2619 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2620 "==> nxge_rx_err_evnts(channel %d): " 2621 "rsp_cnt_err", channel)); 2622 } 2623 if (cs.bits.hdw.byte_en_bus) { 2624 rdc_stats->byte_en_bus++; 2625 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2626 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2627 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2628 "==> nxge_rx_err_evnts(channel %d): " 2629 "fatal error: byte_en_bus", channel)); 2630 rxchan_fatal = B_TRUE; 2631 } 2632 if (cs.bits.hdw.rsp_dat_err) { 2633 rdc_stats->rsp_dat_err++; 2634 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2635 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2636 rxchan_fatal = B_TRUE; 2637 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2638 "==> nxge_rx_err_evnts(channel %d): " 2639 "fatal error: rsp_dat_err", channel)); 2640 } 2641 if (cs.bits.hdw.rcr_ack_err) { 2642 
rdc_stats->rcr_ack_err++; 2643 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2644 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2645 rxchan_fatal = B_TRUE; 2646 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2647 "==> nxge_rx_err_evnts(channel %d): " 2648 "fatal error: rcr_ack_err", channel)); 2649 } 2650 if (cs.bits.hdw.dc_fifo_err) { 2651 rdc_stats->dc_fifo_err++; 2652 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2653 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2654 /* This is not a fatal error! */ 2655 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2656 "==> nxge_rx_err_evnts(channel %d): " 2657 "dc_fifo_err", channel)); 2658 rxport_fatal = B_TRUE; 2659 } 2660 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2661 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2662 &rdc_stats->errlog.pre_par, 2663 &rdc_stats->errlog.sha_par)) 2664 != NPI_SUCCESS) { 2665 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2666 "==> nxge_rx_err_evnts(channel %d): " 2667 "rcr_sha_par: get perr", channel)); 2668 return (NXGE_ERROR | rs); 2669 } 2670 if (cs.bits.hdw.rcr_sha_par) { 2671 rdc_stats->rcr_sha_par++; 2672 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2673 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2674 rxchan_fatal = B_TRUE; 2675 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2676 "==> nxge_rx_err_evnts(channel %d): " 2677 "fatal error: rcr_sha_par", channel)); 2678 } 2679 if (cs.bits.hdw.rbr_pre_par) { 2680 rdc_stats->rbr_pre_par++; 2681 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2682 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2683 rxchan_fatal = B_TRUE; 2684 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2685 "==> nxge_rx_err_evnts(channel %d): " 2686 "fatal error: rbr_pre_par", channel)); 2687 } 2688 } 2689 if (cs.bits.hdw.port_drop_pkt) { 2690 rdc_stats->port_drop_pkt++; 2691 if (rdc_stats->port_drop_pkt < error_disp_cnt) 2692 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2693 "==> nxge_rx_err_evnts (channel %d): " 2694 "port_drop_pkt", channel)); 2695 } 2696 if (cs.bits.hdw.wred_drop) { 2697 rdc_stats->wred_drop++; 2698 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2699 "==> nxge_rx_err_evnts(channel %d): " 2700 "wred_drop", channel)); 2701 } 2702 if (cs.bits.hdw.rbr_pre_empty) { 2703 rdc_stats->rbr_pre_empty++; 2704 if (rdc_stats->rbr_pre_empty < error_disp_cnt) 2705 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2706 "==> nxge_rx_err_evnts(channel %d): " 2707 "rbr_pre_empty", channel)); 2708 } 2709 if (cs.bits.hdw.rcr_shadow_full) { 2710 rdc_stats->rcr_shadow_full++; 2711 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2712 "==> nxge_rx_err_evnts(channel %d): " 2713 "rcr_shadow_full", channel)); 2714 } 2715 if (cs.bits.hdw.config_err) { 2716 rdc_stats->config_err++; 2717 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2718 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2719 rxchan_fatal = B_TRUE; 2720 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2721 "==> nxge_rx_err_evnts(channel %d): " 2722 "config error", channel)); 2723 } 2724 if (cs.bits.hdw.rcrincon) { 2725 rdc_stats->rcrincon++; 2726 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2727 NXGE_FM_EREPORT_RDMC_RCRINCON); 2728 rxchan_fatal = B_TRUE; 2729 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2730 "==> nxge_rx_err_evnts(channel %d): " 2731 "fatal error: rcrincon error", channel)); 2732 } 2733 if (cs.bits.hdw.rcrfull) { 2734 rdc_stats->rcrfull++; 2735 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2736 NXGE_FM_EREPORT_RDMC_RCRFULL); 2737 rxchan_fatal = B_TRUE; 2738 if (rdc_stats->rcrfull < error_disp_cnt) 2739 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2740 "==> nxge_rx_err_evnts(channel %d): " 2741 "fatal error: rcrfull error", channel)); 2742 } 2743 if (cs.bits.hdw.rbr_empty) { 2744 
rdc_stats->rbr_empty++; 2745 if (rdc_stats->rbr_empty < error_disp_cnt) 2746 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2747 "==> nxge_rx_err_evnts(channel %d): " 2748 "rbr empty error", channel)); 2749 } 2750 if (cs.bits.hdw.rbrfull) { 2751 rdc_stats->rbrfull++; 2752 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2753 NXGE_FM_EREPORT_RDMC_RBRFULL); 2754 rxchan_fatal = B_TRUE; 2755 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2756 "==> nxge_rx_err_evnts(channel %d): " 2757 "fatal error: rbr_full error", channel)); 2758 } 2759 if (cs.bits.hdw.rbrlogpage) { 2760 rdc_stats->rbrlogpage++; 2761 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2762 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2763 rxchan_fatal = B_TRUE; 2764 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2765 "==> nxge_rx_err_evnts(channel %d): " 2766 "fatal error: rbr logical page error", channel)); 2767 } 2768 if (cs.bits.hdw.cfiglogpage) { 2769 rdc_stats->cfiglogpage++; 2770 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2771 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2772 rxchan_fatal = B_TRUE; 2773 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2774 "==> nxge_rx_err_evnts(channel %d): " 2775 "fatal error: cfig logical page error", channel)); 2776 } 2777 2778 if (rxport_fatal) { 2779 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2780 " nxge_rx_err_evnts: " 2781 " fatal error on Port #%d\n", 2782 portn)); 2783 status = nxge_ipp_fatal_err_recover(nxgep); 2784 if (status == NXGE_OK) { 2785 FM_SERVICE_RESTORED(nxgep); 2786 } 2787 } 2788 2789 if (rxchan_fatal) { 2790 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2791 " nxge_rx_err_evnts: " 2792 " fatal error on Channel #%d\n", 2793 channel)); 2794 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2795 if (status == NXGE_OK) { 2796 FM_SERVICE_RESTORED(nxgep); 2797 } 2798 } 2799 2800 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2801 2802 return (status); 2803 } 2804 2805 static nxge_status_t 2806 nxge_map_rxdma(p_nxge_t nxgep) 2807 { 2808 int i, ndmas; 2809 uint16_t channel; 2810 p_rx_rbr_rings_t rx_rbr_rings; 2811 p_rx_rbr_ring_t *rbr_rings; 2812 p_rx_rcr_rings_t rx_rcr_rings; 2813 p_rx_rcr_ring_t *rcr_rings; 2814 p_rx_mbox_areas_t rx_mbox_areas_p; 2815 p_rx_mbox_t *rx_mbox_p; 2816 p_nxge_dma_pool_t dma_buf_poolp; 2817 p_nxge_dma_pool_t dma_cntl_poolp; 2818 p_nxge_dma_common_t *dma_buf_p; 2819 p_nxge_dma_common_t *dma_cntl_p; 2820 uint32_t *num_chunks; 2821 nxge_status_t status = NXGE_OK; 2822 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2823 p_nxge_dma_common_t t_dma_buf_p; 2824 p_nxge_dma_common_t t_dma_cntl_p; 2825 #endif 2826 2827 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 2828 2829 dma_buf_poolp = nxgep->rx_buf_pool_p; 2830 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2831 2832 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2833 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2834 "<== nxge_map_rxdma: buf not allocated")); 2835 return (NXGE_ERROR); 2836 } 2837 2838 ndmas = dma_buf_poolp->ndmas; 2839 if (!ndmas) { 2840 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2841 "<== nxge_map_rxdma: no dma allocated")); 2842 return (NXGE_ERROR); 2843 } 2844 2845 num_chunks = dma_buf_poolp->num_chunks; 2846 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2847 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2848 2849 rx_rbr_rings = (p_rx_rbr_rings_t) 2850 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2851 rbr_rings = (p_rx_rbr_ring_t *) 2852 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2853 rx_rcr_rings = (p_rx_rcr_rings_t) 2854 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2855 rcr_rings = (p_rx_rcr_ring_t *) 2856 
KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2857 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2858 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2859 rx_mbox_p = (p_rx_mbox_t *) 2860 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2861 2862 /* 2863 * Timeout should be set based on the system clock divider. 2864 * The following timeout value of 1 assumes that the 2865 * granularity (1000) is 3 microseconds running at 300MHz. 2866 */ 2867 2868 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2869 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2870 2871 /* 2872 * Map descriptors from the buffer polls for each dam channel. 2873 */ 2874 for (i = 0; i < ndmas; i++) { 2875 /* 2876 * Set up and prepare buffer blocks, descriptors 2877 * and mailbox. 2878 */ 2879 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2880 status = nxge_map_rxdma_channel(nxgep, channel, 2881 (p_nxge_dma_common_t *)&dma_buf_p[i], 2882 (p_rx_rbr_ring_t *)&rbr_rings[i], 2883 num_chunks[i], 2884 (p_nxge_dma_common_t *)&dma_cntl_p[i], 2885 (p_rx_rcr_ring_t *)&rcr_rings[i], 2886 (p_rx_mbox_t *)&rx_mbox_p[i]); 2887 if (status != NXGE_OK) { 2888 goto nxge_map_rxdma_fail1; 2889 } 2890 rbr_rings[i]->index = (uint16_t)i; 2891 rcr_rings[i]->index = (uint16_t)i; 2892 rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i]; 2893 2894 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2895 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 2896 rbr_rings[i]->hv_set = B_FALSE; 2897 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 2898 t_dma_cntl_p = 2899 (p_nxge_dma_common_t)dma_cntl_p[i]; 2900 2901 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp = 2902 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 2903 rbr_rings[i]->hv_rx_buf_ioaddr_size = 2904 (uint64_t)t_dma_buf_p->orig_alength; 2905 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2906 "==> nxge_map_rxdma_channel: " 2907 "channel %d " 2908 "data buf base io $%p ($%p) " 2909 "size 0x%llx (%d 0x%x)", 2910 channel, 2911 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp, 2912 t_dma_cntl_p->ioaddr_pp, 2913 rbr_rings[i]->hv_rx_buf_ioaddr_size, 2914 t_dma_buf_p->orig_alength, 2915 t_dma_buf_p->orig_alength)); 2916 2917 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp = 2918 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 2919 rbr_rings[i]->hv_rx_cntl_ioaddr_size = 2920 (uint64_t)t_dma_cntl_p->orig_alength; 2921 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2922 "==> nxge_map_rxdma_channel: " 2923 "channel %d " 2924 "cntl base io $%p ($%p) " 2925 "size 0x%llx (%d 0x%x)", 2926 channel, 2927 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp, 2928 t_dma_cntl_p->ioaddr_pp, 2929 rbr_rings[i]->hv_rx_cntl_ioaddr_size, 2930 t_dma_cntl_p->orig_alength, 2931 t_dma_cntl_p->orig_alength)); 2932 } 2933 2934 #endif /* sun4v and NIU_LP_WORKAROUND */ 2935 } 2936 2937 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2938 rx_rbr_rings->rbr_rings = rbr_rings; 2939 nxgep->rx_rbr_rings = rx_rbr_rings; 2940 rx_rcr_rings->rcr_rings = rcr_rings; 2941 nxgep->rx_rcr_rings = rx_rcr_rings; 2942 2943 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2944 nxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2945 2946 goto nxge_map_rxdma_exit; 2947 2948 nxge_map_rxdma_fail1: 2949 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2950 "==> nxge_map_rxdma: unmap rbr,rcr " 2951 "(status 0x%x channel %d i %d)", 2952 status, channel, i)); 2953 i--; 2954 for (; i >= 0; i--) { 2955 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2956 nxge_unmap_rxdma_channel(nxgep, channel, 2957 rbr_rings[i], 2958 rcr_rings[i], 2959 rx_mbox_p[i]); 2960 } 2961 2962 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 
2963 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2964 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2965 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2966 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2967 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2968 2969 nxge_map_rxdma_exit: 2970 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2971 "<== nxge_map_rxdma: " 2972 "(status 0x%x channel %d)", 2973 status, channel)); 2974 2975 return (status); 2976 } 2977 2978 static void 2979 nxge_unmap_rxdma(p_nxge_t nxgep) 2980 { 2981 int i, ndmas; 2982 uint16_t channel; 2983 p_rx_rbr_rings_t rx_rbr_rings; 2984 p_rx_rbr_ring_t *rbr_rings; 2985 p_rx_rcr_rings_t rx_rcr_rings; 2986 p_rx_rcr_ring_t *rcr_rings; 2987 p_rx_mbox_areas_t rx_mbox_areas_p; 2988 p_rx_mbox_t *rx_mbox_p; 2989 p_nxge_dma_pool_t dma_buf_poolp; 2990 p_nxge_dma_pool_t dma_cntl_poolp; 2991 p_nxge_dma_common_t *dma_buf_p; 2992 2993 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma")); 2994 2995 dma_buf_poolp = nxgep->rx_buf_pool_p; 2996 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2997 2998 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2999 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3000 "<== nxge_unmap_rxdma: NULL buf pointers")); 3001 return; 3002 } 3003 3004 rx_rbr_rings = nxgep->rx_rbr_rings; 3005 rx_rcr_rings = nxgep->rx_rcr_rings; 3006 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3007 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3008 "<== nxge_unmap_rxdma: NULL ring pointers")); 3009 return; 3010 } 3011 ndmas = rx_rbr_rings->ndmas; 3012 if (!ndmas) { 3013 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3014 "<== nxge_unmap_rxdma: no channel")); 3015 return; 3016 } 3017 3018 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3019 "==> nxge_unmap_rxdma (ndmas %d)", ndmas)); 3020 rbr_rings = rx_rbr_rings->rbr_rings; 3021 rcr_rings = rx_rcr_rings->rcr_rings; 3022 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3023 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3024 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 3025 3026 for (i = 0; i < ndmas; i++) { 3027 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 3028 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3029 "==> nxge_unmap_rxdma (ndmas %d) channel %d", 3030 ndmas, channel)); 3031 (void) nxge_unmap_rxdma_channel(nxgep, channel, 3032 (p_rx_rbr_ring_t)rbr_rings[i], 3033 (p_rx_rcr_ring_t)rcr_rings[i], 3034 (p_rx_mbox_t)rx_mbox_p[i]); 3035 } 3036 3037 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 3038 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 3039 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 3040 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 3041 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 3042 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 3043 3044 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3045 "<== nxge_unmap_rxdma")); 3046 } 3047 3048 nxge_status_t 3049 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3050 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3051 uint32_t num_chunks, 3052 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3053 p_rx_mbox_t *rx_mbox_p) 3054 { 3055 int status = NXGE_OK; 3056 3057 /* 3058 * Set up and prepare buffer blocks, descriptors 3059 * and mailbox. 
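 *
 * This is done in two steps: nxge_map_rxdma_channel_buf_ring() builds
 * the receive buffer ring (RBR) state and the per-block rx_msg_t array
 * from the pre-allocated data buffer chunks, and then
 * nxge_map_rxdma_channel_cfg_ring() lays out the RBR descriptor ring,
 * the completion ring (RCR) and the mailbox in the control DMA area.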
3060 */ 3061 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3062 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3063 /* 3064 * Receive buffer blocks 3065 */ 3066 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3067 dma_buf_p, rbr_p, num_chunks); 3068 if (status != NXGE_OK) { 3069 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3070 "==> nxge_map_rxdma_channel (channel %d): " 3071 "map buffer failed 0x%x", channel, status)); 3072 goto nxge_map_rxdma_channel_exit; 3073 } 3074 3075 /* 3076 * Receive block ring, completion ring and mailbox. 3077 */ 3078 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3079 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3080 if (status != NXGE_OK) { 3081 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3082 "==> nxge_map_rxdma_channel (channel %d): " 3083 "map config failed 0x%x", channel, status)); 3084 goto nxge_map_rxdma_channel_fail2; 3085 } 3086 3087 goto nxge_map_rxdma_channel_exit; 3088 3089 nxge_map_rxdma_channel_fail3: 3090 /* Free rbr, rcr */ 3091 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3092 "==> nxge_map_rxdma_channel: free rbr/rcr " 3093 "(status 0x%x channel %d)", 3094 status, channel)); 3095 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3096 *rcr_p, *rx_mbox_p); 3097 3098 nxge_map_rxdma_channel_fail2: 3099 /* Free buffer blocks */ 3100 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3101 "==> nxge_map_rxdma_channel: free rx buffers" 3102 "(nxgep 0x%x status 0x%x channel %d)", 3103 nxgep, status, channel)); 3104 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3105 3106 status = NXGE_ERROR; 3107 3108 nxge_map_rxdma_channel_exit: 3109 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3110 "<== nxge_map_rxdma_channel: " 3111 "(nxgep 0x%x status 0x%x channel %d)", 3112 nxgep, status, channel)); 3113 3114 return (status); 3115 } 3116 3117 /*ARGSUSED*/ 3118 static void 3119 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3120 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3121 { 3122 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3123 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3124 3125 /* 3126 * unmap receive block ring, completion ring and mailbox. 3127 */ 3128 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3129 rcr_p, rx_mbox_p); 3130 3131 /* unmap buffer blocks */ 3132 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3133 3134 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3135 } 3136 3137 /*ARGSUSED*/ 3138 static nxge_status_t 3139 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3140 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3141 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3142 { 3143 p_rx_rbr_ring_t rbrp; 3144 p_rx_rcr_ring_t rcrp; 3145 p_rx_mbox_t mboxp; 3146 p_nxge_dma_common_t cntl_dmap; 3147 p_nxge_dma_common_t dmap; 3148 p_rx_msg_t *rx_msg_ring; 3149 p_rx_msg_t rx_msg_p; 3150 p_rbr_cfig_a_t rcfga_p; 3151 p_rbr_cfig_b_t rcfgb_p; 3152 p_rcrcfig_a_t cfga_p; 3153 p_rcrcfig_b_t cfgb_p; 3154 p_rxdma_cfig1_t cfig1_p; 3155 p_rxdma_cfig2_t cfig2_p; 3156 p_rbr_kick_t kick_p; 3157 uint32_t dmaaddrp; 3158 uint32_t *rbr_vaddrp; 3159 uint32_t bkaddr; 3160 nxge_status_t status = NXGE_OK; 3161 int i; 3162 uint32_t nxge_port_rcr_size; 3163 3164 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3165 "==> nxge_map_rxdma_channel_cfg_ring")); 3166 3167 cntl_dmap = *dma_cntl_p; 3168 3169 /* Map in the receive block ring */ 3170 rbrp = *rbr_p; 3171 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3172 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3173 /* 3174 * Zero out buffer block ring descriptors. 
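 *
 * Each RBR descriptor is a 4-byte entry (nxge_setup_dma_common() above
 * is called with an entry size of 4) holding a buffer block address
 * right-shifted by RBR_BKADDR_SHIFT; the loop below stores one such
 * entry for every rx_msg_t block.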
3175 */ 3176 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3177 3178 rcfga_p = &(rbrp->rbr_cfga); 3179 rcfgb_p = &(rbrp->rbr_cfgb); 3180 kick_p = &(rbrp->rbr_kick); 3181 rcfga_p->value = 0; 3182 rcfgb_p->value = 0; 3183 kick_p->value = 0; 3184 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3185 rcfga_p->value = (rbrp->rbr_addr & 3186 (RBR_CFIG_A_STDADDR_MASK | 3187 RBR_CFIG_A_STDADDR_BASE_MASK)); 3188 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3189 3190 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3191 rcfgb_p->bits.ldw.vld0 = 1; 3192 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3193 rcfgb_p->bits.ldw.vld1 = 1; 3194 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3195 rcfgb_p->bits.ldw.vld2 = 1; 3196 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3197 3198 /* 3199 * For each buffer block, enter receive block address to the ring. 3200 */ 3201 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3202 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3203 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3204 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3205 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3206 3207 rx_msg_ring = rbrp->rx_msg_ring; 3208 for (i = 0; i < rbrp->tnblocks; i++) { 3209 rx_msg_p = rx_msg_ring[i]; 3210 rx_msg_p->nxgep = nxgep; 3211 rx_msg_p->rx_rbr_p = rbrp; 3212 bkaddr = (uint32_t) 3213 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3214 >> RBR_BKADDR_SHIFT)); 3215 rx_msg_p->free = B_FALSE; 3216 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3217 3218 *rbr_vaddrp++ = bkaddr; 3219 } 3220 3221 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3222 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3223 3224 rbrp->rbr_rd_index = 0; 3225 3226 rbrp->rbr_consumed = 0; 3227 rbrp->rbr_use_bcopy = B_TRUE; 3228 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3229 /* 3230 * Do bcopy on packets greater than bcopy size once 3231 * the lo threshold is reached. 3232 * This lo threshold should be less than the hi threshold. 3233 * 3234 * Do bcopy on every packet once the hi threshold is reached. 
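 *
 * The thresholds are derived below as
 *	threshold = rbb_max * level / NXGE_RX_BCOPY_SCALE
 * so, as a purely illustrative example, if NXGE_RX_BCOPY_SCALE were 8
 * and the ring held 512 blocks, NXGE_RX_COPY_4 would place the
 * threshold at 256 outstanding blocks.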
3235 */ 3236 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3237 /* default it to use hi */ 3238 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3239 } 3240 3241 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3242 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3243 } 3244 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3245 3246 switch (nxge_rx_threshold_hi) { 3247 default: 3248 case NXGE_RX_COPY_NONE: 3249 /* Do not do bcopy at all */ 3250 rbrp->rbr_use_bcopy = B_FALSE; 3251 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3252 break; 3253 3254 case NXGE_RX_COPY_1: 3255 case NXGE_RX_COPY_2: 3256 case NXGE_RX_COPY_3: 3257 case NXGE_RX_COPY_4: 3258 case NXGE_RX_COPY_5: 3259 case NXGE_RX_COPY_6: 3260 case NXGE_RX_COPY_7: 3261 rbrp->rbr_threshold_hi = 3262 rbrp->rbb_max * 3263 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3264 break; 3265 3266 case NXGE_RX_COPY_ALL: 3267 rbrp->rbr_threshold_hi = 0; 3268 break; 3269 } 3270 3271 switch (nxge_rx_threshold_lo) { 3272 default: 3273 case NXGE_RX_COPY_NONE: 3274 /* Do not do bcopy at all */ 3275 if (rbrp->rbr_use_bcopy) { 3276 rbrp->rbr_use_bcopy = B_FALSE; 3277 } 3278 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3279 break; 3280 3281 case NXGE_RX_COPY_1: 3282 case NXGE_RX_COPY_2: 3283 case NXGE_RX_COPY_3: 3284 case NXGE_RX_COPY_4: 3285 case NXGE_RX_COPY_5: 3286 case NXGE_RX_COPY_6: 3287 case NXGE_RX_COPY_7: 3288 rbrp->rbr_threshold_lo = 3289 rbrp->rbb_max * 3290 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3291 break; 3292 3293 case NXGE_RX_COPY_ALL: 3294 rbrp->rbr_threshold_lo = 0; 3295 break; 3296 } 3297 3298 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3299 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3300 "rbb_max %d " 3301 "rbrp->rbr_bufsize_type %d " 3302 "rbb_threshold_hi %d " 3303 "rbb_threshold_lo %d", 3304 dma_channel, 3305 rbrp->rbb_max, 3306 rbrp->rbr_bufsize_type, 3307 rbrp->rbr_threshold_hi, 3308 rbrp->rbr_threshold_lo)); 3309 3310 rbrp->page_valid.value = 0; 3311 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3312 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3313 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3314 rbrp->page_hdl.value = 0; 3315 3316 rbrp->page_valid.bits.ldw.page0 = 1; 3317 rbrp->page_valid.bits.ldw.page1 = 1; 3318 3319 /* Map in the receive completion ring */ 3320 rcrp = (p_rx_rcr_ring_t) 3321 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3322 rcrp->rdc = dma_channel; 3323 3324 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3325 rcrp->comp_size = nxge_port_rcr_size; 3326 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3327 3328 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3329 3330 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3331 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3332 sizeof (rcr_entry_t)); 3333 rcrp->comp_rd_index = 0; 3334 rcrp->comp_wt_index = 0; 3335 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3336 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3337 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3338 #if defined(__i386) 3339 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3340 #else 3341 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3342 #endif 3343 3344 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3345 (nxge_port_rcr_size - 1); 3346 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3347 (nxge_port_rcr_size - 1); 3348 3349 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3350 "==> nxge_map_rxdma_channel_cfg_ring: " 3351 "channel %d " 3352 "rbr_vaddrp $%p " 3353 "rcr_desc_rd_head_p $%p " 3354 "rcr_desc_rd_head_pp $%p " 3355 "rcr_desc_rd_last_p $%p " 3356 
"rcr_desc_rd_last_pp $%p ", 3357 dma_channel, 3358 rbr_vaddrp, 3359 rcrp->rcr_desc_rd_head_p, 3360 rcrp->rcr_desc_rd_head_pp, 3361 rcrp->rcr_desc_last_p, 3362 rcrp->rcr_desc_last_pp)); 3363 3364 /* 3365 * Zero out buffer block ring descriptors. 3366 */ 3367 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3368 rcrp->intr_timeout = nxgep->intr_timeout; 3369 rcrp->intr_threshold = nxgep->intr_threshold; 3370 rcrp->full_hdr_flag = B_FALSE; 3371 rcrp->sw_priv_hdr_len = 0; 3372 3373 cfga_p = &(rcrp->rcr_cfga); 3374 cfgb_p = &(rcrp->rcr_cfgb); 3375 cfga_p->value = 0; 3376 cfgb_p->value = 0; 3377 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3378 cfga_p->value = (rcrp->rcr_addr & 3379 (RCRCFIG_A_STADDR_MASK | 3380 RCRCFIG_A_STADDR_BASE_MASK)); 3381 3382 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3383 RCRCFIG_A_LEN_SHIF); 3384 3385 /* 3386 * Timeout should be set based on the system clock divider. 3387 * The following timeout value of 1 assumes that the 3388 * granularity (1000) is 3 microseconds running at 300MHz. 3389 */ 3390 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3391 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3392 cfgb_p->bits.ldw.entout = 1; 3393 3394 /* Map in the mailbox */ 3395 mboxp = (p_rx_mbox_t) 3396 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3397 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3398 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3399 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3400 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3401 cfig1_p->value = cfig2_p->value = 0; 3402 3403 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3404 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3405 "==> nxge_map_rxdma_channel_cfg_ring: " 3406 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3407 dma_channel, cfig1_p->value, cfig2_p->value, 3408 mboxp->mbox_addr)); 3409 3410 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3411 & 0xfff); 3412 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3413 3414 3415 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3416 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3417 RXDMA_CFIG2_MBADDR_L_MASK); 3418 3419 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3420 3421 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3422 "==> nxge_map_rxdma_channel_cfg_ring: " 3423 "channel %d damaddrp $%p " 3424 "cfg1 0x%016llx cfig2 0x%016llx", 3425 dma_channel, dmaaddrp, 3426 cfig1_p->value, cfig2_p->value)); 3427 3428 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3429 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3430 3431 rbrp->rx_rcr_p = rcrp; 3432 rcrp->rx_rbr_p = rbrp; 3433 *rcr_p = rcrp; 3434 *rx_mbox_p = mboxp; 3435 3436 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3437 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3438 3439 return (status); 3440 } 3441 3442 /*ARGSUSED*/ 3443 static void 3444 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3445 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3446 { 3447 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3448 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3449 rcr_p->rdc)); 3450 3451 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3452 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3453 3454 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3455 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3456 } 3457 3458 static nxge_status_t 3459 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3460 p_nxge_dma_common_t *dma_buf_p, 3461 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3462 { 3463 p_rx_rbr_ring_t rbrp; 3464 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3465 
p_rx_msg_t *rx_msg_ring; 3466 p_rx_msg_t rx_msg_p; 3467 p_mblk_t mblk_p; 3468 3469 rxring_info_t *ring_info; 3470 nxge_status_t status = NXGE_OK; 3471 int i, j, index; 3472 uint32_t size, bsize, nblocks, nmsgs; 3473 3474 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3475 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3476 channel)); 3477 3478 dma_bufp = tmp_bufp = *dma_buf_p; 3479 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3480 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3481 "chunks bufp 0x%016llx", 3482 channel, num_chunks, dma_bufp)); 3483 3484 nmsgs = 0; 3485 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3486 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3487 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3488 "bufp 0x%016llx nblocks %d nmsgs %d", 3489 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3490 nmsgs += tmp_bufp->nblocks; 3491 } 3492 if (!nmsgs) { 3493 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3494 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3495 "no msg blocks", 3496 channel)); 3497 status = NXGE_ERROR; 3498 goto nxge_map_rxdma_channel_buf_ring_exit; 3499 } 3500 3501 rbrp = (p_rx_rbr_ring_t) 3502 KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 3503 3504 size = nmsgs * sizeof (p_rx_msg_t); 3505 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3506 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3507 KM_SLEEP); 3508 3509 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3510 (void *)nxgep->interrupt_cookie); 3511 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3512 (void *)nxgep->interrupt_cookie); 3513 rbrp->rdc = channel; 3514 rbrp->num_blocks = num_chunks; 3515 rbrp->tnblocks = nmsgs; 3516 rbrp->rbb_max = nmsgs; 3517 rbrp->rbr_max_size = nmsgs; 3518 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3519 3520 /* 3521 * Buffer sizes suggested by NIU architect. 3522 * 256, 512 and 2K. 3523 */ 3524 3525 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3526 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3527 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3528 3529 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3530 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3531 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3532 3533 rbrp->block_size = nxgep->rx_default_block_size; 3534 3535 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3536 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3537 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3538 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3539 } else { 3540 if (rbrp->block_size >= 0x2000) { 3541 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3542 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3543 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3544 } else { 3545 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3546 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3547 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3548 } 3549 } 3550 3551 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3552 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3553 "actual rbr max %d rbb_max %d nmsgs %d " 3554 "rbrp->block_size %d default_block_size %d " 3555 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3556 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3557 rbrp->block_size, nxgep->rx_default_block_size, 3558 nxge_rbr_size, nxge_rbr_spare_size)); 3559 3560 /* Map in buffers from the buffer pool. 
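 *
 * For each pre-allocated DMA chunk, the loop below records its DVMA
 * address, kernel address, length and starting block index in
 * ring_info->buffer[], and creates one rx_msg_t (with an mblk bound to
 * the block by nxge_allocb()) per buffer block; the ring_info table is
 * then prepared by nxge_rxbuf_index_info_init() further below for the
 * buffer-address lookups done in the receive path.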
*/ 3561 index = 0; 3562 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3563 bsize = dma_bufp->block_size; 3564 nblocks = dma_bufp->nblocks; 3565 #if defined(__i386) 3566 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3567 #else 3568 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3569 #endif 3570 ring_info->buffer[i].buf_index = i; 3571 ring_info->buffer[i].buf_size = dma_bufp->alength; 3572 ring_info->buffer[i].start_index = index; 3573 #if defined(__i386) 3574 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3575 #else 3576 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3577 #endif 3578 3579 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3580 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3581 "chunk %d" 3582 " nblocks %d chunk_size %x block_size 0x%x " 3583 "dma_bufp $%p", channel, i, 3584 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3585 dma_bufp)); 3586 3587 for (j = 0; j < nblocks; j++) { 3588 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3589 dma_bufp)) == NULL) { 3590 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3591 "allocb failed (index %d i %d j %d)", 3592 index, i, j)); 3593 goto nxge_map_rxdma_channel_buf_ring_fail1; 3594 } 3595 rx_msg_ring[index] = rx_msg_p; 3596 rx_msg_p->block_index = index; 3597 rx_msg_p->shifted_addr = (uint32_t) 3598 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3599 RBR_BKADDR_SHIFT)); 3600 3601 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3602 "index %d j %d rx_msg_p $%p mblk %p", 3603 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3604 3605 mblk_p = rx_msg_p->rx_mblk_p; 3606 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3607 index++; 3608 rx_msg_p->buf_dma.dma_channel = channel; 3609 } 3610 } 3611 if (i < rbrp->num_blocks) { 3612 goto nxge_map_rxdma_channel_buf_ring_fail1; 3613 } 3614 3615 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3616 "nxge_map_rxdma_channel_buf_ring: done buf init " 3617 "channel %d msg block entries %d", 3618 channel, index)); 3619 ring_info->block_size_mask = bsize - 1; 3620 rbrp->rx_msg_ring = rx_msg_ring; 3621 rbrp->dma_bufp = dma_buf_p; 3622 rbrp->ring_info = ring_info; 3623 3624 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3625 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3626 " nxge_map_rxdma_channel_buf_ring: " 3627 "channel %d done buf info init", channel)); 3628 3629 *rbr_p = rbrp; 3630 goto nxge_map_rxdma_channel_buf_ring_exit; 3631 3632 nxge_map_rxdma_channel_buf_ring_fail1: 3633 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3634 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3635 channel, status)); 3636 3637 index--; 3638 for (; index >= 0; index--) { 3639 rx_msg_p = rx_msg_ring[index]; 3640 if (rx_msg_p != NULL) { 3641 freeb(rx_msg_p->rx_mblk_p); 3642 rx_msg_ring[index] = NULL; 3643 } 3644 } 3645 nxge_map_rxdma_channel_buf_ring_fail: 3646 MUTEX_DESTROY(&rbrp->post_lock); 3647 MUTEX_DESTROY(&rbrp->lock); 3648 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3649 KMEM_FREE(rx_msg_ring, size); 3650 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3651 3652 status = NXGE_ERROR; 3653 3654 nxge_map_rxdma_channel_buf_ring_exit: 3655 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3656 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3657 3658 return (status); 3659 } 3660 3661 /*ARGSUSED*/ 3662 static void 3663 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3664 p_rx_rbr_ring_t rbr_p) 3665 { 3666 p_rx_msg_t *rx_msg_ring; 3667 p_rx_msg_t rx_msg_p; 3668 rxring_info_t *ring_info; 3669 int i; 3670 uint32_t size; 3671 #ifdef NXGE_DEBUG 3672 int num_chunks; 3673 #endif 3674 3675 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3676 "==> 
nxge_unmap_rxdma_channel_buf_ring")); 3677 if (rbr_p == NULL) { 3678 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3679 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3680 return; 3681 } 3682 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3683 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3684 rbr_p->rdc)); 3685 3686 rx_msg_ring = rbr_p->rx_msg_ring; 3687 ring_info = rbr_p->ring_info; 3688 3689 if (rx_msg_ring == NULL || ring_info == NULL) { 3690 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3691 "<== nxge_unmap_rxdma_channel_buf_ring: " 3692 "rx_msg_ring $%p ring_info $%p", 3693 rx_msg_ring, ring_info)); 3694 return; 3695 } 3696 3697 #ifdef NXGE_DEBUG 3698 num_chunks = rbr_p->num_blocks; 3699 #endif 3700 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3701 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3702 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3703 "tnblocks %d (max %d) size ptrs %d ", 3704 rbr_p->rdc, num_chunks, 3705 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3706 3707 for (i = 0; i < rbr_p->tnblocks; i++) { 3708 rx_msg_p = rx_msg_ring[i]; 3709 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3710 " nxge_unmap_rxdma_channel_buf_ring: " 3711 "rx_msg_p $%p", 3712 rx_msg_p)); 3713 if (rx_msg_p != NULL) { 3714 freeb(rx_msg_p->rx_mblk_p); 3715 rx_msg_ring[i] = NULL; 3716 } 3717 } 3718 3719 MUTEX_DESTROY(&rbr_p->post_lock); 3720 MUTEX_DESTROY(&rbr_p->lock); 3721 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3722 KMEM_FREE(rx_msg_ring, size); 3723 KMEM_FREE(rbr_p, sizeof (rx_rbr_ring_t)); 3724 3725 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3726 "<== nxge_unmap_rxdma_channel_buf_ring")); 3727 } 3728 3729 static nxge_status_t 3730 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3731 { 3732 nxge_status_t status = NXGE_OK; 3733 3734 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3735 3736 /* 3737 * Load the sharable parameters by writing to the 3738 * function zero control registers. These FZC registers 3739 * should be initialized only once for the entire chip. 3740 */ 3741 (void) nxge_init_fzc_rx_common(nxgep); 3742 3743 /* 3744 * Initialize the RXDMA port specific FZC control configurations. 3745 * These FZC registers pertain to each port.
3746 */ 3747 (void) nxge_init_fzc_rxdma_port(nxgep); 3748 3749 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3750 3751 return (status); 3752 } 3753 3754 /*ARGSUSED*/ 3755 static void 3756 nxge_rxdma_hw_stop_common(p_nxge_t nxgep) 3757 { 3758 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3759 3760 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3761 } 3762 3763 static nxge_status_t 3764 nxge_rxdma_hw_start(p_nxge_t nxgep) 3765 { 3766 int i, ndmas; 3767 uint16_t channel; 3768 p_rx_rbr_rings_t rx_rbr_rings; 3769 p_rx_rbr_ring_t *rbr_rings; 3770 p_rx_rcr_rings_t rx_rcr_rings; 3771 p_rx_rcr_ring_t *rcr_rings; 3772 p_rx_mbox_areas_t rx_mbox_areas_p; 3773 p_rx_mbox_t *rx_mbox_p; 3774 nxge_status_t status = NXGE_OK; 3775 3776 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3777 3778 rx_rbr_rings = nxgep->rx_rbr_rings; 3779 rx_rcr_rings = nxgep->rx_rcr_rings; 3780 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3781 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3782 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3783 return (NXGE_ERROR); 3784 } 3785 ndmas = rx_rbr_rings->ndmas; 3786 if (ndmas == 0) { 3787 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3788 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3789 return (NXGE_ERROR); 3790 } 3791 3792 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3793 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3794 3795 rbr_rings = rx_rbr_rings->rbr_rings; 3796 rcr_rings = rx_rcr_rings->rcr_rings; 3797 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3798 if (rx_mbox_areas_p) { 3799 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3800 } 3801 3802 for (i = 0; i < ndmas; i++) { 3803 channel = rbr_rings[i]->rdc; 3804 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3805 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3806 ndmas, channel)); 3807 status = nxge_rxdma_start_channel(nxgep, channel, 3808 (p_rx_rbr_ring_t)rbr_rings[i], 3809 (p_rx_rcr_ring_t)rcr_rings[i], 3810 (p_rx_mbox_t)rx_mbox_p[i]); 3811 if (status != NXGE_OK) { 3812 goto nxge_rxdma_hw_start_fail1; 3813 } 3814 } 3815 3816 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3817 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3818 rx_rbr_rings, rx_rcr_rings)); 3819 3820 goto nxge_rxdma_hw_start_exit; 3821 3822 nxge_rxdma_hw_start_fail1: 3823 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3824 "==> nxge_rxdma_hw_start: disable " 3825 "(status 0x%x channel %d i %d)", status, channel, i)); 3826 for (; i >= 0; i--) { 3827 channel = rbr_rings[i]->rdc; 3828 (void) nxge_rxdma_stop_channel(nxgep, channel); 3829 } 3830 3831 nxge_rxdma_hw_start_exit: 3832 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3833 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3834 3835 return (status); 3836 } 3837 3838 static void 3839 nxge_rxdma_hw_stop(p_nxge_t nxgep) 3840 { 3841 int i, ndmas; 3842 uint16_t channel; 3843 p_rx_rbr_rings_t rx_rbr_rings; 3844 p_rx_rbr_ring_t *rbr_rings; 3845 p_rx_rcr_rings_t rx_rcr_rings; 3846 3847 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 3848 3849 rx_rbr_rings = nxgep->rx_rbr_rings; 3850 rx_rcr_rings = nxgep->rx_rcr_rings; 3851 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3852 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3853 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 3854 return; 3855 } 3856 ndmas = rx_rbr_rings->ndmas; 3857 if (!ndmas) { 3858 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3859 "<== nxge_rxdma_hw_stop: no dma channel allocated")); 3860 return; 3861 } 3862 3863 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3864 "==> nxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3865 3866 rbr_rings = 
rx_rbr_rings->rbr_rings; 3867 3868 for (i = 0; i < ndmas; i++) { 3869 channel = rbr_rings[i]->rdc; 3870 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3871 "==> nxge_rxdma_hw_stop (ndmas %d) channel %d", 3872 ndmas, channel)); 3873 (void) nxge_rxdma_stop_channel(nxgep, channel); 3874 } 3875 3876 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3877 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3878 rx_rbr_rings, rx_rcr_rings)); 3879 3880 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 3881 } 3882 3883 3884 static nxge_status_t 3885 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 3886 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3887 3888 { 3889 npi_handle_t handle; 3890 npi_status_t rs = NPI_SUCCESS; 3891 rx_dma_ctl_stat_t cs; 3892 rx_dma_ent_msk_t ent_mask; 3893 nxge_status_t status = NXGE_OK; 3894 3895 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 3896 3897 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3898 3899 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 3900 "npi handle addr $%p acc $%p", 3901 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 3902 3903 /* Reset RXDMA channel */ 3904 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 3905 if (rs != NPI_SUCCESS) { 3906 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3907 "==> nxge_rxdma_start_channel: " 3908 "reset rxdma failed (0x%08x channel %d)", 3909 status, channel)); 3910 return (NXGE_ERROR | rs); 3911 } 3912 3913 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3914 "==> nxge_rxdma_start_channel: reset done: channel %d", 3915 channel)); 3916 3917 /* 3918 * Initialize the RXDMA channel specific FZC control 3919 * configurations. These FZC registers are pertaining 3920 * to each RX channel (logical pages). 3921 */ 3922 status = nxge_init_fzc_rxdma_channel(nxgep, 3923 channel, rbr_p, rcr_p, mbox_p); 3924 if (status != NXGE_OK) { 3925 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3926 "==> nxge_rxdma_start_channel: " 3927 "init fzc rxdma failed (0x%08x channel %d)", 3928 status, channel)); 3929 return (status); 3930 } 3931 3932 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3933 "==> nxge_rxdma_start_channel: fzc done")); 3934 3935 /* 3936 * Zero out the shadow and prefetch ram. 3937 */ 3938 3939 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3940 "ram done")); 3941 3942 /* Set up the interrupt event masks. 
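 * The mask is programmed in two passes: before the channel is enabled
 * only RBREMPTY is masked, leaving the remaining events visible while
 * the channel is brought up; once nxge_enable_rxdma_channel() succeeds
 * the mask is rewritten below so that only the WRED_DROP and
 * PTDROP_PKT events are suppressed during normal operation.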
    /* Set up the interrupt event masks. */
    ent_mask.value = 0;
    ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
    rs = npi_rxdma_event_mask(handle, OP_SET, channel,
        &ent_mask);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rxdma_start_channel: "
            "init rxdma event masks failed (0x%08x channel %d)",
            rs, channel));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
        "event done: channel %d (mask 0x%016llx)",
        channel, ent_mask.value));

    /* Initialize the receive DMA control and status register */
    cs.value = 0;
    cs.bits.hdw.mex = 1;
    cs.bits.hdw.rcrthres = 1;
    cs.bits.hdw.rcrto = 1;
    cs.bits.hdw.rbr_empty = 1;
    status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
        "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rxdma_start_channel: "
            "init rxdma control register failed (0x%08x channel %d)",
            status, channel));
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
        "control done - channel %d cs 0x%016llx", channel, cs.value));

    /*
     * Load RXDMA descriptors, buffers, mailbox,
     * initialize the receive DMA channels and
     * enable each DMA channel.
     */
    status = nxge_enable_rxdma_channel(nxgep,
        channel, rbr_p, rcr_p, mbox_p);

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_start_channel: "
            " init enable rxdma failed (0x%08x channel %d)",
            status, channel));
        return (status);
    }

    ent_mask.value = 0;
    ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
        RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
    rs = npi_rxdma_event_mask(handle, OP_SET, channel,
        &ent_mask);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rxdma_start_channel: "
            "init rxdma event masks failed (0x%08x channel %d)",
            rs, channel));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
        "control done - channel %d cs 0x%016llx", channel, cs.value));

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_start_channel: enable done"));

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));

    return (NXGE_OK);
}

static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    rx_dma_ctl_stat_t cs;
    rx_dma_ent_msk_t ent_mask;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
        "npi handle addr $%p acc $%p",
        nxgep->npi_handle.regp, nxgep->npi_handle.regh));

    /* Reset RXDMA channel */
    rs = npi_rxdma_cfg_rdc_reset(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_stop_channel: "
            " reset rxdma failed (0x%08x channel %d)",
            rs, channel));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_stop_channel: reset done"));
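    /*
     * On the stop path every RDC event is masked (RX_DMA_ENT_MSK_ALL)
     * before the control/status register is cleared and the channel is
     * disabled, so a quiescing channel should not post further
     * interrupts.
     */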
    /* Set up the interrupt event masks. */
    ent_mask.value = RX_DMA_ENT_MSK_ALL;
    rs = npi_rxdma_event_mask(handle, OP_SET, channel,
        &ent_mask);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_rxdma_stop_channel: "
            "set rxdma event masks failed (0x%08x channel %d)",
            rs, channel));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_stop_channel: event done"));

    /* Initialize the receive DMA control and status register */
    cs.value = 0;
    status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel,
        &cs);
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
        " to default (all 0s) 0x%08x", cs.value));
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_stop_channel: init rxdma"
            " control register failed (0x%08x channel %d)",
            status, channel));
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_stop_channel: control done"));

    /* disable dma channel */
    status = nxge_disable_rxdma_channel(nxgep, channel);

    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_stop_channel: "
            " disable rxdma failed (0x%08x channel %d)",
            status, channel));
        return (status);
    }

    NXGE_DEBUG_MSG((nxgep,
        RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
    npi_handle_t handle;
    p_nxge_rdc_sys_stats_t statsp;
    rx_ctl_dat_fifo_stat_t stat;
    uint32_t zcp_err_status;
    uint32_t ipp_err_status;
    nxge_status_t status = NXGE_OK;
    npi_status_t rs = NPI_SUCCESS;
    boolean_t my_err = B_FALSE;

    handle = nxgep->npi_handle;
    statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

    rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);

    if (rs != NPI_SUCCESS)
        return (NXGE_ERROR | rs);

    if (stat.bits.ldw.id_mismatch) {
        statsp->id_mismatch++;
        NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
            NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
        /* Global fatal error encountered */
    }

    if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
        switch (nxgep->mac.portnum) {
        case 0:
            if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
                (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
                my_err = B_TRUE;
                zcp_err_status = stat.bits.ldw.zcp_eop_err;
                ipp_err_status = stat.bits.ldw.ipp_eop_err;
            }
            break;
        case 1:
            if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
                (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
                my_err = B_TRUE;
                zcp_err_status = stat.bits.ldw.zcp_eop_err;
                ipp_err_status = stat.bits.ldw.ipp_eop_err;
            }
            break;
        case 2:
            if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
                (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
                my_err = B_TRUE;
                zcp_err_status = stat.bits.ldw.zcp_eop_err;
                ipp_err_status = stat.bits.ldw.ipp_eop_err;
            }
            break;
        case 3:
            if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
                (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
                my_err = B_TRUE;
                zcp_err_status = stat.bits.ldw.zcp_eop_err;
                ipp_err_status = stat.bits.ldw.ipp_eop_err;
            }
            break;
        default:
            return (NXGE_ERROR);
        }
    }

    if (my_err) {
        status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
            zcp_err_status);
        if (status != NXGE_OK)
            return (status);
    }

    return (NXGE_OK);
}
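/*
 * The switch above keys off FIFO_EOP_PORT0..FIFO_EOP_PORT3, while
 * nxge_rxdma_handle_port_errors() below re-tests the same status words
 * with (0x1 << portn); this assumes FIFO_EOP_PORTn is the single bit
 * for port n (i.e. FIFO_EOP_PORT0 == 0x1, FIFO_EOP_PORT1 == 0x2, and
 * so on).
 */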
static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
    uint32_t zcp_status)
{
    boolean_t rxport_fatal = B_FALSE;
    p_nxge_rdc_sys_stats_t statsp;
    nxge_status_t status = NXGE_OK;
    uint8_t portn;

    portn = nxgep->mac.portnum;
    statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

    if (ipp_status & (0x1 << portn)) {
        statsp->ipp_eop_err++;
        NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
            NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
        rxport_fatal = B_TRUE;
    }

    if (zcp_status & (0x1 << portn)) {
        statsp->zcp_eop_err++;
        NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
            NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
        rxport_fatal = B_TRUE;
    }

    if (rxport_fatal) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            " nxge_rxdma_handle_port_error: "
            " fatal error on Port #%d\n",
            portn));
        status = nxge_rx_port_fatal_err_recover(nxgep);
        if (status == NXGE_OK) {
            FM_SERVICE_RESTORED(nxgep);
        }
    }

    return (status);
}

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;
    p_rx_rbr_ring_t rbrp;
    p_rx_rcr_ring_t rcrp;
    p_rx_mbox_t mboxp;
    rx_dma_ent_msk_t ent_mask;
    p_nxge_dma_common_t dmap;
    int ring_idx;
    uint32_t ref_cnt;
    p_rx_msg_t rx_msg_p;
    int i;
    uint32_t nxge_port_rcr_size;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "Recovering from RxDMAChannel#%d error...", channel));

    /*
     * Stop the DMA channel and wait for the stop-done bit.
     * If the stop-done bit is not set, flag an error.
     */
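    /*
     * Recovery sequence below: look up the rings for this channel, take
     * the RCR/RBR/post locks, disable and mask the RDC, reset it, rewind
     * the RBR/RCR indexes to their post-init values, mark for re-posting
     * any receive buffers whose use counts show they are no longer loaned
     * out, and finally restart the channel with
     * nxge_rxdma_start_channel().
     */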
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

    ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
    rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
    rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];

    MUTEX_ENTER(&rcrp->lock);
    MUTEX_ENTER(&rbrp->lock);
    MUTEX_ENTER(&rbrp->post_lock);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

    rs = npi_rxdma_cfg_rdc_disable(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_disable_rxdma_channel:failed"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

    /* Disable interrupt */
    ent_mask.value = RX_DMA_ENT_MSK_ALL;
    rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rxdma_stop_channel: "
            "set rxdma event masks failed (channel %d)",
            channel));
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

    /* Reset RXDMA channel */
    rs = npi_rxdma_cfg_rdc_reset(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rxdma_fatal_err_recover: "
            " reset rxdma failed (channel %d)", channel));
        goto fail;
    }

    nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

    mboxp =
        (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];

    rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
    rbrp->rbr_rd_index = 0;

    rcrp->comp_rd_index = 0;
    rcrp->comp_wt_index = 0;
    rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
        (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
    rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
#if defined(__i386)
        (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
        (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

    rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
        (nxge_port_rcr_size - 1);
    rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
        (nxge_port_rcr_size - 1);

    dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
    bzero((caddr_t)dmap->kaddrp, dmap->alength);

    cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

    for (i = 0; i < rbrp->rbr_max_size; i++) {
        rx_msg_p = rbrp->rx_msg_ring[i];
        ref_cnt = rx_msg_p->ref_cnt;
        if (ref_cnt != 1) {
            if (rx_msg_p->cur_usage_cnt !=
                rx_msg_p->max_usage_cnt) {
                NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                    "buf[%d]: cur_usage_cnt = %d "
                    "max_usage_cnt = %d\n", i,
                    rx_msg_p->cur_usage_cnt,
                    rx_msg_p->max_usage_cnt));
            } else {
                /* Buffer can be re-posted */
                rx_msg_p->free = B_TRUE;
                rx_msg_p->cur_usage_cnt = 0;
                rx_msg_p->max_usage_cnt = 0xbaddcafe;
                rx_msg_p->pkt_buf_size = 0;
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

    status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
    if (status != NXGE_OK) {
        goto fail;
    }

    MUTEX_EXIT(&rbrp->post_lock);
    MUTEX_EXIT(&rbrp->lock);
    MUTEX_EXIT(&rcrp->lock);

    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "Recovery Successful, RxDMAChannel#%d Restored",
        channel));
    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));

    return (NXGE_OK);
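    /*
     * Error path: drop the locks in the reverse order they were taken
     * and report the failure; the caller sees NXGE_ERROR combined with
     * the last NPI status code.
     */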
fail:
    MUTEX_EXIT(&rbrp->post_lock);
    MUTEX_EXIT(&rbrp->lock);
    MUTEX_EXIT(&rcrp->lock);
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));

    return (NXGE_ERROR | rs);
}

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;
    p_nxge_dma_common_t *dma_buf_p;
    uint16_t channel;
    int ndmas;
    int i;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "Recovering from RxPort error..."));
    /* Disable RxMAC */

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n"));
    if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
        goto fail;

    NXGE_DELAY(1000);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels..."));

    ndmas = nxgep->rx_buf_pool_p->ndmas;
    dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p;

    for (i = 0; i < ndmas; i++) {
        channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
        if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "Could not recover channel %d",
                channel));
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP..."));

    /* Reset IPP */
    if (nxge_ipp_reset(nxgep) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to reset IPP"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

    /* Reset RxMAC */
    if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to reset RxMAC"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

    /* Re-Initialize IPP */
    if (nxge_ipp_init(nxgep) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to init IPP"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

    /* Re-Initialize RxMAC */
    if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to initialize RxMAC"));
        goto fail;
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));

    /* Re-enable RxMAC */
    if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_rx_port_fatal_err_recover: "
            "Failed to enable RxMAC"));
        goto fail;
    }

    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
        "Recovery Successful, RxPort Restored"));

    return (NXGE_OK);
fail:
    NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
    return (status);
}
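/*
 * nxge_rxdma_inject_err() below is a fault-injection hook: it sets the
 * requested error bit in the RX_DMA_CTL_STAT or RX_CTL_DAT_FIFO_STAT
 * debug register so the normal error-handling paths can be exercised.
 * A minimal usage sketch (hypothetical test-harness call, not invoked
 * anywhere in this file):
 *
 *	nxge_rxdma_inject_err(nxgep, NXGE_FM_EREPORT_RDMC_RBR_TMOUT, 0);
 *
 * which would simulate an RBR timeout on RDC channel 0.
 */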
void
nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
{
    rx_dma_ctl_stat_t cs;
    rx_ctl_dat_fifo_stat_t cdfs;

    switch (err_id) {
    case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
    case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
    case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
    case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
    case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
    case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
    case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
    case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
    case NXGE_FM_EREPORT_RDMC_RCRINCON:
    case NXGE_FM_EREPORT_RDMC_RCRFULL:
    case NXGE_FM_EREPORT_RDMC_RBRFULL:
    case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
    case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
    case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
        RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
            chan, &cs.value);
        if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
            cs.bits.hdw.rcr_ack_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
            cs.bits.hdw.dc_fifo_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
            cs.bits.hdw.rcr_sha_par = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
            cs.bits.hdw.rbr_pre_par = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
            cs.bits.hdw.rbr_tmout = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
            cs.bits.hdw.rsp_cnt_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
            cs.bits.hdw.byte_en_bus = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
            cs.bits.hdw.rsp_dat_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
            cs.bits.hdw.config_err = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
            cs.bits.hdw.rcrincon = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
            cs.bits.hdw.rcrfull = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
            cs.bits.hdw.rbrfull = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
            cs.bits.hdw.rbrlogpage = 1;
        else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
            cs.bits.hdw.cfiglogpage = 1;
#if defined(__i386)
        cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
            cs.value);
#else
        cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
            cs.value);
#endif
        RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
            chan, cs.value);
        break;
    case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
    case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
    case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
        cdfs.value = 0;
        if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
            cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
        else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
            cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
        else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
            cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
#if defined(__i386)
        cmn_err(CE_NOTE,
            "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
            cdfs.value);
#else
        cmn_err(CE_NOTE,
            "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
            cdfs.value);
#endif
        RXDMA_REG_WRITE64(nxgep->npi_handle,
            RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value);
        break;
    case NXGE_FM_EREPORT_RDMC_DCF_ERR:
        break;
    case NXGE_FM_EREPORT_RDMC_RCR_ERR:
        break;
    }
}
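/*
 * nxge_get_pktbuf_size() below translates the packet-buffer-size code
 * reported in a completion-ring entry (RCR_PKTBUFSZ_0/1/2 or
 * RCR_SINGLE_BLOCK), together with the RBR configuration-B register,
 * into a buffer size in bytes; RCR_SINGLE_BLOCK falls back to the
 * configured block size.
 */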
static uint16_t
nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb)
{
    uint16_t sz = RBR_BKSIZE_8K_BYTES;

    switch (bufsz_type) {
    case RCR_PKTBUFSZ_0:
        switch (rbr_cfgb.bits.ldw.bufsz0) {
        case RBR_BUFSZ0_256B:
            sz = RBR_BUFSZ0_256_BYTES;
            break;
        case RBR_BUFSZ0_512B:
            sz = RBR_BUFSZ0_512B_BYTES;
            break;
        case RBR_BUFSZ0_1K:
            sz = RBR_BUFSZ0_1K_BYTES;
            break;
        case RBR_BUFSZ0_2K:
            sz = RBR_BUFSZ0_2K_BYTES;
            break;
        default:
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_get_pktbuf_size: bad bufsz0"));
            break;
        }
        break;
    case RCR_PKTBUFSZ_1:
        switch (rbr_cfgb.bits.ldw.bufsz1) {
        case RBR_BUFSZ1_1K:
            sz = RBR_BUFSZ1_1K_BYTES;
            break;
        case RBR_BUFSZ1_2K:
            sz = RBR_BUFSZ1_2K_BYTES;
            break;
        case RBR_BUFSZ1_4K:
            sz = RBR_BUFSZ1_4K_BYTES;
            break;
        case RBR_BUFSZ1_8K:
            sz = RBR_BUFSZ1_8K_BYTES;
            break;
        default:
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_get_pktbuf_size: bad bufsz1"));
            break;
        }
        break;
    case RCR_PKTBUFSZ_2:
        switch (rbr_cfgb.bits.ldw.bufsz2) {
        case RBR_BUFSZ2_2K:
            sz = RBR_BUFSZ2_2K_BYTES;
            break;
        case RBR_BUFSZ2_4K:
            sz = RBR_BUFSZ2_4K_BYTES;
            break;
        case RBR_BUFSZ2_8K:
            sz = RBR_BUFSZ2_8K_BYTES;
            break;
        case RBR_BUFSZ2_16K:
            sz = RBR_BUFSZ2_16K_BYTES;
            break;
        default:
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_get_pktbuf_size: bad bufsz2"));
            break;
        }
        break;
    case RCR_SINGLE_BLOCK:
        switch (rbr_cfgb.bits.ldw.bksize) {
        case BKSIZE_4K:
            sz = RBR_BKSIZE_4K_BYTES;
            break;
        case BKSIZE_8K:
            sz = RBR_BKSIZE_8K_BYTES;
            break;
        case BKSIZE_16K:
            sz = RBR_BKSIZE_16K_BYTES;
            break;
        case BKSIZE_32K:
            sz = RBR_BKSIZE_32K_BYTES;
            break;
        default:
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "nxge_get_pktbuf_size: bad bksize"));
            break;
        }
        break;
    default:
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "nxge_get_pktbuf_size: bad bufsz_type"));
        break;
    }
    return (sz);
}
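/*
 * Typical use of nxge_get_pktbuf_size() (a sketch; the field and
 * variable names here are illustrative, not taken from this file):
 * when a completion entry is processed, the reported size code is
 * mapped back to bytes with something like
 *
 *	bytes = nxge_get_pktbuf_size(nxgep, pktbufsz_type, rbr_p->rbr_cfgb);
 *
 * where rbr_cfgb would be the cached RBR_CFIG_B value for the ring.
 */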