/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;
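
/*
 * Illustrative sketch (not driver code): these tunables are normally
 * overridden from /etc/system.  The names below are the globals declared
 * in this file; the values are made-up examples of the syntax only, e.g.
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_rcr_size = 1024
 *	set nxge:nxge_max_rx_pkts = 512
 */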

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

static nxge_status_t nxge_map_rxdma(p_nxge_t);
static void nxge_unmap_rxdma(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
static void nxge_rxdma_hw_stop_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t);
static void nxge_rxdma_hw_stop(p_nxge_t);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

mblk_t *
nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t,
    p_rx_rcr_ring_t *, rx_dma_ctl_stat_t);

static void nxge_receive_packet(p_nxge_t,
    p_rx_rcr_ring_t,
    p_rcr_entry_t,
    boolean_t *,
    mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static void nxge_rx_pkts_vring(p_nxge_t, uint_t,
    p_nxge_ldv_t, rx_dma_ctl_stat_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t,
    p_nxge_ldv_t, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static uint16_t
nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb);

nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

	status = nxge_map_rxdma(nxgep);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_init_rxdma: status 0x%x", status));
		return (status);
	}

	status = nxge_rxdma_hw_start_common(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxgep);
		return (status);
	}

	status = nxge_rxdma_hw_start(nxgep);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxgep);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_init_rxdma_channels: status 0x%x", status));

	return (status);
}

void
nxge_uninit_rxdma_channels(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));

	nxge_rxdma_hw_stop(nxgep);
	nxge_rxdma_hw_stop_common(nxgep);
	nxge_unmap_rxdma(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_uninit_rxdma_channels"));
}
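
/*
 * Usage sketch (assumed caller, not part of this file): the attach path
 * is expected to map and start all receive DMA channels, and the detach
 * path to tear them down again, roughly:
 *
 *	if (nxge_init_rxdma_channels(nxgep) != NXGE_OK)
 *		return (DDI_FAILURE);
 *	...
 *	nxge_uninit_rxdma_channels(nxgep);
 */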

nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

void
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
{
	int			i, ndmas;
	uint16_t		channel;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	npi_handle_t		handle;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_dump_fzc_regs(handle);

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_regs_dump_channels: "
		    "NULL ring pointer"));
		return;
	}
	if (rx_rbr_rings->rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_regs_dump_channels: "
		    " NULL rbr rings pointer"));
		return;
	}

	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_regs_dump_channels: no channel"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	for (i = 0; i < ndmas; i++) {
		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
			continue;
		}
		channel = rbr_rings[i]->rdc;
		(void) nxge_dump_rxdma_channel(nxgep, channel);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
}

nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_dump_rdc_regs(handle, channel);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
	return (status);
}

nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ent_msk_t mask_p)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ctl_stat_t cs_p)
{
	npi_handle_t	handle;
	npi_status_t	rs = NPI_SUCCESS;
	nxge_status_t	status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}
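
/*
 * Usage sketch (hypothetical values): callers build a register image and
 * push it to one channel through the helpers above, e.g.
 *
 *	rx_dma_ent_msk_t mask;
 *
 *	mask.value = 0;		(assumed: unmask all events)
 *	(void) nxge_init_rxdma_channel_event_mask(nxgep, channel, &mask);
 *
 * The field layout of rx_dma_ent_msk_t is defined by the hardware headers;
 * the zero value above is illustrative only.
 */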

nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp,
    uint8_t rdc)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	p_nxge_dma_pt_cfg_t	p_dma_cfgp;
	p_nxge_rdc_grp_t	rdc_grp_p;
	uint8_t			actual_rdcgrp, actual_rdc;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
	rdc_grp_p->rdc[0] = rdc;

	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);

	rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp,
	    actual_rdc);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
{
	npi_handle_t	handle;
	uint8_t		actual_rdc;
	npi_status_t	rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_port_default_rdc"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " <== nxge_rxdma_cfg_port_default_rdc"));

	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
    uint16_t pkts)
{
	npi_status_t	rs = NPI_SUCCESS;
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_rcr_threshold"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
    uint16_t tout, uint8_t enable)
{
	npi_status_t	rs = NPI_SUCCESS;
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if (enable == 0) {
		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
	} else {
		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
		    tout);
	}

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
	return (NXGE_OK);
}
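
/*
 * Interrupt moderation sketch: the RCR threshold is the number of packets
 * that must arrive before the channel raises an interrupt, and the RCR
 * timeout bounds how long a completed packet may wait before forcing one.
 * A hypothetical caller tuning a channel for a lower interrupt rate:
 *
 *	(void) nxge_rxdma_cfg_rcr_threshold(nxgep, channel, 8);
 *	(void) nxge_rxdma_cfg_rcr_timeout(nxgep, channel, 16, 1);
 *
 * The numeric values are examples only; appropriate settings depend on
 * the workload.
 */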

nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t	handle;
	rdc_desc_cfg_t	rdc_desc;
	p_rcrcfig_b_t	cfgb_p;
	npi_status_t	rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the receive ring configurations.
	 */
	rdc_desc.mbox_enable = 1;
	rdc_desc.mbox_addr = mbox_p->mbox_addr;
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
	    mbox_p->mbox_addr, rdc_desc.mbox_addr));

	rdc_desc.rbr_len = rbr_p->rbb_max;
	rdc_desc.rbr_addr = rbr_p->rbr_addr;

	switch (nxgep->rx_bksize_code) {
	case RBR_BKSIZE_4K:
		rdc_desc.page_size = SIZE_4KB;
		break;
	case RBR_BKSIZE_8K:
		rdc_desc.page_size = SIZE_8KB;
		break;
	case RBR_BKSIZE_16K:
		rdc_desc.page_size = SIZE_16KB;
		break;
	case RBR_BKSIZE_32K:
		rdc_desc.page_size = SIZE_32KB;
		break;
	}

	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
	rdc_desc.valid0 = 1;

	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
	rdc_desc.valid1 = 1;

	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
	rdc_desc.valid2 = 1;

	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
	rdc_desc.offset = rcr_p->sw_priv_hdr_len;

	rdc_desc.rcr_len = rcr_p->comp_size;
	rdc_desc.rcr_addr = rcr_p->rcr_addr;

	cfgb_p = &(rcr_p->rcr_cfgb);
	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
	rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
	rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
	    "rbr_len qlen %d pagesize code %d rcr_len %d",
	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
	    "size 0 %d size 1 %d size 2 %d",
	    rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
	    rbr_p->npi_pkt_buf_size2));

	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Enable the timeout and threshold.
	 */
	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
	    rdc_desc.rcr_threshold);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
	    rdc_desc.rcr_timeout);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Enable the DMA */
	rs = npi_rxdma_cfg_rdc_enable(handle, channel);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Kick the DMA engine.
*/ 500 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 501 /* Clear the rbr empty bit */ 502 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 503 504 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 505 506 return (NXGE_OK); 507 } 508 509 nxge_status_t 510 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 511 { 512 npi_handle_t handle; 513 npi_status_t rs = NPI_SUCCESS; 514 515 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 516 handle = NXGE_DEV_NPI_HANDLE(nxgep); 517 518 /* disable the DMA */ 519 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 520 if (rs != NPI_SUCCESS) { 521 NXGE_DEBUG_MSG((nxgep, RX_CTL, 522 "<== nxge_disable_rxdma_channel:failed (0x%x)", 523 rs)); 524 return (NXGE_ERROR | rs); 525 } 526 527 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 528 return (NXGE_OK); 529 } 530 531 nxge_status_t 532 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 533 { 534 npi_handle_t handle; 535 nxge_status_t status = NXGE_OK; 536 537 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 538 "<== nxge_init_rxdma_channel_rcrflush")); 539 540 handle = NXGE_DEV_NPI_HANDLE(nxgep); 541 npi_rxdma_rdc_rcr_flush(handle, channel); 542 543 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 544 "<== nxge_init_rxdma_channel_rcrflsh")); 545 return (status); 546 547 } 548 549 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 550 551 #define TO_LEFT -1 552 #define TO_RIGHT 1 553 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 554 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 555 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 556 #define NO_HINT 0xffffffff 557 558 /*ARGSUSED*/ 559 nxge_status_t 560 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 561 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 562 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 563 { 564 int bufsize; 565 uint64_t pktbuf_pp; 566 uint64_t dvma_addr; 567 rxring_info_t *ring_info; 568 int base_side, end_side; 569 int r_index, l_index, anchor_index; 570 int found, search_done; 571 uint32_t offset, chunk_size, block_size, page_size_mask; 572 uint32_t chunk_index, block_index, total_index; 573 int max_iterations, iteration; 574 rxbuf_index_info_t *bufinfo; 575 576 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 577 578 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 579 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 580 pkt_buf_addr_pp, 581 pktbufsz_type)); 582 #if defined(__i386) 583 pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 584 #else 585 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 586 #endif 587 588 switch (pktbufsz_type) { 589 case 0: 590 bufsize = rbr_p->pkt_buf_size0; 591 break; 592 case 1: 593 bufsize = rbr_p->pkt_buf_size1; 594 break; 595 case 2: 596 bufsize = rbr_p->pkt_buf_size2; 597 break; 598 case RCR_SINGLE_BLOCK: 599 bufsize = 0; 600 anchor_index = 0; 601 break; 602 default: 603 return (NXGE_ERROR); 604 } 605 606 if (rbr_p->num_blocks == 1) { 607 anchor_index = 0; 608 ring_info = rbr_p->ring_info; 609 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 610 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 611 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 612 "buf_pp $%p btype %d anchor_index %d " 613 "bufinfo $%p", 614 pkt_buf_addr_pp, 615 pktbufsz_type, 616 anchor_index, 617 bufinfo)); 618 619 goto found_index; 620 } 621 622 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 623 "==> nxge_rxbuf_pp_to_vp: " 624 "buf_pp $%p btype %d anchor_index %d", 625 pkt_buf_addr_pp, 626 pktbufsz_type, 627 anchor_index)); 628 629 ring_info = rbr_p->ring_info; 630 found = B_FALSE; 631 bufinfo = (rxbuf_index_info_t 
	    *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would be a single block in use for a given
	 * buffer size at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * Check if this is the last buffer in the block.
			 * If so, then reset the hint for this size.
			 */
			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * The search algorithm uses a binary tree search.
		 * It assumes that the information is already sorted
		 * in increasing order:
		 * info[0] < info[1] < info[2] ....
< info[n-1] 681 * where n is the size of the information array 682 */ 683 r_index = rbr_p->num_blocks - 1; 684 l_index = 0; 685 search_done = B_FALSE; 686 anchor_index = MID_INDEX(r_index, l_index); 687 while (search_done == B_FALSE) { 688 if ((r_index == l_index) || 689 (iteration >= max_iterations)) 690 search_done = B_TRUE; 691 end_side = TO_RIGHT; /* to the right */ 692 base_side = TO_LEFT; /* to the left */ 693 /* read the DVMA address information and sort it */ 694 dvma_addr = bufinfo[anchor_index].dvma_addr; 695 chunk_size = bufinfo[anchor_index].buf_size; 696 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 697 "==> nxge_rxbuf_pp_to_vp: (searching)" 698 "buf_pp $%p btype %d " 699 "anchor_index %d chunk_size %d dvmaaddr $%p", 700 pkt_buf_addr_pp, 701 pktbufsz_type, 702 anchor_index, 703 chunk_size, 704 dvma_addr)); 705 706 if (pktbuf_pp >= dvma_addr) 707 base_side = TO_RIGHT; /* to the right */ 708 if (pktbuf_pp < (dvma_addr + chunk_size)) 709 end_side = TO_LEFT; /* to the left */ 710 711 switch (base_side + end_side) { 712 case IN_MIDDLE: 713 /* found */ 714 found = B_TRUE; 715 search_done = B_TRUE; 716 if ((pktbuf_pp + bufsize) < 717 (dvma_addr + chunk_size)) 718 ring_info->hint[pktbufsz_type] = 719 bufinfo[anchor_index].buf_index; 720 break; 721 case BOTH_RIGHT: 722 /* not found: go to the right */ 723 l_index = anchor_index + 1; 724 anchor_index = 725 MID_INDEX(r_index, l_index); 726 break; 727 728 case BOTH_LEFT: 729 /* not found: go to the left */ 730 r_index = anchor_index - 1; 731 anchor_index = MID_INDEX(r_index, 732 l_index); 733 break; 734 default: /* should not come here */ 735 return (NXGE_ERROR); 736 } 737 iteration++; 738 } 739 740 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 741 "==> nxge_rxbuf_pp_to_vp: (search done)" 742 "buf_pp $%p btype %d anchor_index %d", 743 pkt_buf_addr_pp, 744 pktbufsz_type, 745 anchor_index)); 746 } 747 748 if (found == B_FALSE) { 749 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 750 "==> nxge_rxbuf_pp_to_vp: (search failed)" 751 "buf_pp $%p btype %d anchor_index %d", 752 pkt_buf_addr_pp, 753 pktbufsz_type, 754 anchor_index)); 755 return (NXGE_ERROR); 756 } 757 758 found_index: 759 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 760 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 761 "buf_pp $%p btype %d bufsize %d anchor_index %d", 762 pkt_buf_addr_pp, 763 pktbufsz_type, 764 bufsize, 765 anchor_index)); 766 767 /* index of the first block in this chunk */ 768 chunk_index = bufinfo[anchor_index].start_index; 769 dvma_addr = bufinfo[anchor_index].dvma_addr; 770 page_size_mask = ring_info->block_size_mask; 771 772 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 773 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 774 "buf_pp $%p btype %d bufsize %d " 775 "anchor_index %d chunk_index %d dvma $%p", 776 pkt_buf_addr_pp, 777 pktbufsz_type, 778 bufsize, 779 anchor_index, 780 chunk_index, 781 dvma_addr)); 782 783 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 784 block_size = rbr_p->block_size; /* System block(page) size */ 785 786 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 787 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 788 "buf_pp $%p btype %d bufsize %d " 789 "anchor_index %d chunk_index %d dvma $%p " 790 "offset %d block_size %d", 791 pkt_buf_addr_pp, 792 pktbufsz_type, 793 bufsize, 794 anchor_index, 795 chunk_index, 796 dvma_addr, 797 offset, 798 block_size)); 799 800 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 801 802 block_index = (offset / block_size); /* index within chunk */ 803 total_index = chunk_index + block_index; 804 805 806 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 807 "==> nxge_rxbuf_pp_to_vp: " 808 
"total_index %d dvma_addr $%p " 809 "offset %d block_size %d " 810 "block_index %d ", 811 total_index, dvma_addr, 812 offset, block_size, 813 block_index)); 814 #if defined(__i386) 815 *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 816 (uint32_t)offset); 817 #else 818 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 819 (uint64_t)offset); 820 #endif 821 822 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 823 "==> nxge_rxbuf_pp_to_vp: " 824 "total_index %d dvma_addr $%p " 825 "offset %d block_size %d " 826 "block_index %d " 827 "*pkt_buf_addr_p $%p", 828 total_index, dvma_addr, 829 offset, block_size, 830 block_index, 831 *pkt_buf_addr_p)); 832 833 834 *msg_index = total_index; 835 *bufoffset = (offset & page_size_mask); 836 837 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 838 "==> nxge_rxbuf_pp_to_vp: get msg index: " 839 "msg_index %d bufoffset_index %d", 840 *msg_index, 841 *bufoffset)); 842 843 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 844 845 return (NXGE_OK); 846 } 847 848 /* 849 * used by quick sort (qsort) function 850 * to perform comparison 851 */ 852 static int 853 nxge_sort_compare(const void *p1, const void *p2) 854 { 855 856 rxbuf_index_info_t *a, *b; 857 858 a = (rxbuf_index_info_t *)p1; 859 b = (rxbuf_index_info_t *)p2; 860 861 if (a->dvma_addr > b->dvma_addr) 862 return (1); 863 if (a->dvma_addr < b->dvma_addr) 864 return (-1); 865 return (0); 866 } 867 868 869 870 /* 871 * grabbed this sort implementation from common/syscall/avl.c 872 * 873 */ 874 /* 875 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 876 * v = Ptr to array/vector of objs 877 * n = # objs in the array 878 * s = size of each obj (must be multiples of a word size) 879 * f = ptr to function to compare two objs 880 * returns (-1 = less than, 0 = equal, 1 = greater than 881 */ 882 void 883 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 884 { 885 int g, i, j, ii; 886 unsigned int *p1, *p2; 887 unsigned int tmp; 888 889 /* No work to do */ 890 if (v == NULL || n <= 1) 891 return; 892 /* Sanity check on arguments */ 893 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 894 ASSERT(s > 0); 895 896 for (g = n / 2; g > 0; g /= 2) { 897 for (i = g; i < n; i++) { 898 for (j = i - g; j >= 0 && 899 (*f)(v + j * s, v + (j + g) * s) == 1; 900 j -= g) { 901 p1 = (unsigned *)(v + j * s); 902 p2 = (unsigned *)(v + (j + g) * s); 903 for (ii = 0; ii < s / 4; ii++) { 904 tmp = *p1; 905 *p1++ = *p2; 906 *p2++ = tmp; 907 } 908 } 909 } 910 } 911 } 912 913 /* 914 * Initialize data structures required for rxdma 915 * buffer dvma->vmem address lookup 916 */ 917 /*ARGSUSED*/ 918 static nxge_status_t 919 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 920 { 921 922 int index; 923 rxring_info_t *ring_info; 924 int max_iteration = 0, max_index = 0; 925 926 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 927 928 ring_info = rbrp->ring_info; 929 ring_info->hint[0] = NO_HINT; 930 ring_info->hint[1] = NO_HINT; 931 ring_info->hint[2] = NO_HINT; 932 max_index = rbrp->num_blocks; 933 934 /* read the DVMA address information and sort it */ 935 /* do init of the information array */ 936 937 938 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 939 " nxge_rxbuf_index_info_init Sort ptrs")); 940 941 /* sort the array */ 942 nxge_ksort((void *)ring_info->buffer, max_index, 943 sizeof (rxbuf_index_info_t), nxge_sort_compare); 944 945 946 947 for (index = 0; index < max_index; index++) { 948 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 949 " nxge_rxbuf_index_info_init: sorted chunk %d " 
950 " ioaddr $%p kaddr $%p size %x", 951 index, ring_info->buffer[index].dvma_addr, 952 ring_info->buffer[index].kaddr, 953 ring_info->buffer[index].buf_size)); 954 } 955 956 max_iteration = 0; 957 while (max_index >= (1ULL << max_iteration)) 958 max_iteration++; 959 ring_info->max_iterations = max_iteration + 1; 960 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 961 " nxge_rxbuf_index_info_init Find max iter %d", 962 ring_info->max_iterations)); 963 964 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 965 return (NXGE_OK); 966 } 967 968 /* ARGSUSED */ 969 void 970 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 971 { 972 #ifdef NXGE_DEBUG 973 974 uint32_t bptr; 975 uint64_t pp; 976 977 bptr = entry_p->bits.hdw.pkt_buf_addr; 978 979 NXGE_DEBUG_MSG((nxgep, RX_CTL, 980 "\trcr entry $%p " 981 "\trcr entry 0x%0llx " 982 "\trcr entry 0x%08x " 983 "\trcr entry 0x%08x " 984 "\tvalue 0x%0llx\n" 985 "\tmulti = %d\n" 986 "\tpkt_type = 0x%x\n" 987 "\tzero_copy = %d\n" 988 "\tnoport = %d\n" 989 "\tpromis = %d\n" 990 "\terror = 0x%04x\n" 991 "\tdcf_err = 0x%01x\n" 992 "\tl2_len = %d\n" 993 "\tpktbufsize = %d\n" 994 "\tpkt_buf_addr = $%p\n" 995 "\tpkt_buf_addr (<< 6) = $%p\n", 996 entry_p, 997 *(int64_t *)entry_p, 998 *(int32_t *)entry_p, 999 *(int32_t *)((char *)entry_p + 32), 1000 entry_p->value, 1001 entry_p->bits.hdw.multi, 1002 entry_p->bits.hdw.pkt_type, 1003 entry_p->bits.hdw.zero_copy, 1004 entry_p->bits.hdw.noport, 1005 entry_p->bits.hdw.promis, 1006 entry_p->bits.hdw.error, 1007 entry_p->bits.hdw.dcf_err, 1008 entry_p->bits.hdw.l2_len, 1009 entry_p->bits.hdw.pktbufsz, 1010 bptr, 1011 entry_p->bits.ldw.pkt_buf_addr)); 1012 1013 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1014 RCR_PKT_BUF_ADDR_SHIFT; 1015 1016 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1017 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1018 #endif 1019 } 1020 1021 void 1022 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1023 { 1024 npi_handle_t handle; 1025 rbr_stat_t rbr_stat; 1026 addr44_t hd_addr; 1027 addr44_t tail_addr; 1028 uint16_t qlen; 1029 1030 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1031 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1032 1033 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1034 1035 /* RBR head */ 1036 hd_addr.addr = 0; 1037 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1038 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1039 #if defined(__i386) 1040 (void *)(uint32_t)hd_addr.addr); 1041 #else 1042 (void *)hd_addr.addr); 1043 #endif 1044 1045 /* RBR stats */ 1046 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1047 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1048 1049 /* RCR tail */ 1050 tail_addr.addr = 0; 1051 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1052 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1053 #if defined(__i386) 1054 (void *)(uint32_t)tail_addr.addr); 1055 #else 1056 (void *)tail_addr.addr); 1057 #endif 1058 1059 /* RCR qlen */ 1060 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1061 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1062 1063 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1064 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1065 } 1066 1067 void 1068 nxge_rxdma_stop(p_nxge_t nxgep) 1069 { 1070 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1071 1072 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1073 (void) nxge_rx_mac_disable(nxgep); 1074 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1075 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 1076 } 1077 1078 void 
1079 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 1080 { 1081 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 1082 1083 (void) nxge_rxdma_stop(nxgep); 1084 (void) nxge_uninit_rxdma_channels(nxgep); 1085 (void) nxge_init_rxdma_channels(nxgep); 1086 1087 #ifndef AXIS_DEBUG_LB 1088 (void) nxge_xcvr_init(nxgep); 1089 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1090 #endif 1091 (void) nxge_rx_mac_enable(nxgep); 1092 1093 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 1094 } 1095 1096 nxge_status_t 1097 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1098 { 1099 int i, ndmas; 1100 uint16_t channel; 1101 p_rx_rbr_rings_t rx_rbr_rings; 1102 p_rx_rbr_ring_t *rbr_rings; 1103 npi_handle_t handle; 1104 npi_status_t rs = NPI_SUCCESS; 1105 nxge_status_t status = NXGE_OK; 1106 1107 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1108 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1109 1110 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1111 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1112 "<== nxge_rxdma_mode: not initialized")); 1113 return (NXGE_ERROR); 1114 } 1115 1116 rx_rbr_rings = nxgep->rx_rbr_rings; 1117 if (rx_rbr_rings == NULL) { 1118 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1119 "<== nxge_rxdma_mode: NULL ring pointer")); 1120 return (NXGE_ERROR); 1121 } 1122 if (rx_rbr_rings->rbr_rings == NULL) { 1123 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1124 "<== nxge_rxdma_mode: NULL rbr rings pointer")); 1125 return (NXGE_ERROR); 1126 } 1127 1128 ndmas = rx_rbr_rings->ndmas; 1129 if (!ndmas) { 1130 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1131 "<== nxge_rxdma_mode: no channel")); 1132 return (NXGE_ERROR); 1133 } 1134 1135 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1136 "==> nxge_rxdma_mode (ndmas %d)", ndmas)); 1137 1138 rbr_rings = rx_rbr_rings->rbr_rings; 1139 1140 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1141 for (i = 0; i < ndmas; i++) { 1142 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 1143 continue; 1144 } 1145 channel = rbr_rings[i]->rdc; 1146 if (enable) { 1147 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1148 "==> nxge_rxdma_hw_mode: channel %d (enable)", 1149 channel)); 1150 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 1151 } else { 1152 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1153 "==> nxge_rxdma_hw_mode: channel %d (disable)", 1154 channel)); 1155 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 1156 } 1157 } 1158 1159 status = ((rs == NPI_SUCCESS) ? 
	    NXGE_OK : NXGE_ERROR | rs);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_rxdma_hw_mode: status 0x%x", status));

	return (status);
}

void
nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_rxdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_cfg_rdc_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
}

void
nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_rxdma_disable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	(void) npi_rxdma_cfg_rdc_disable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
}

void
nxge_hw_start_rx(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));

	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
}

/*ARGSUSED*/
void
nxge_fixup_rxdma_rings(p_nxge_t nxgep)
{
	int			i, ndmas;
	uint16_t		rdc;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_fixup_rxdma_rings: NULL ring pointer"));
		return;
	}
	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_fixup_rxdma_rings: no channel"));
		return;
	}

	rx_rcr_rings = nxgep->rx_rcr_rings;
	if (rx_rcr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_fixup_rxdma_rings: NULL ring pointer"));
		return;
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas));

	nxge_rxdma_hw_stop(nxgep);

	rbr_rings = rx_rbr_rings->rbr_rings;
	for (i = 0; i < ndmas; i++) {
		rdc = rbr_rings[i]->rdc;
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_fixup_rxdma_rings: channel %d "
		    "ring $%p", rdc, rbr_rings[i]));
		(void) nxge_rxdma_fixup_channel(nxgep, rdc, i);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}

void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	int	i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
	i = nxge_rxdma_get_ring_index(nxgep, channel);
	if (i < 0) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_fix_channel: no entry found"));
		return;
	}

	nxge_rxdma_fixup_channel(nxgep, channel, i);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
}
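
/*
 * Sketch of the channel-to-entry mapping assumed below: the RDC number
 * programmed into the hardware is not necessarily the ring array index,
 * so callers translate first, as nxge_rxdma_fix_channel() does above:
 *
 *	int entry = nxge_rxdma_get_ring_index(nxgep, channel);
 *
 *	if (entry >= 0)
 *		nxge_rxdma_fixup_channel(nxgep, channel, entry);
 */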

void
nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
{
	int			ndmas;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		*rcr_rings;
	p_rx_mbox_areas_t	rx_mbox_areas_p;
	p_rx_mbox_t		*rx_mbox_p;
	p_nxge_dma_pool_t	dma_buf_poolp;
	p_nxge_dma_pool_t	dma_cntl_poolp;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	p_nxge_dma_common_t	dmap;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

	(void) nxge_rxdma_stop_channel(nxgep, channel);

	dma_buf_poolp = nxgep->rx_buf_pool_p;
	dma_cntl_poolp = nxgep->rx_cntl_pool_p;

	if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: buf not allocated"));
		return;
	}

	ndmas = dma_buf_poolp->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "<== nxge_rxdma_fixup_channel: no dma allocated"));
		return;
	}

	rx_rbr_rings = nxgep->rx_rbr_rings;
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rbr_rings = rx_rbr_rings->rbr_rings;
	rcr_rings = rx_rcr_rings->rcr_rings;
	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

	/* Reinitialize the receive block and completion rings */
	rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
	rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
	mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	status = nxge_rxdma_start_channel(nxgep, channel,
	    rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto nxge_rxdma_fixup_channel_fail;
	}

nxge_rxdma_fixup_channel_fail:
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel"));
}

int
nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel)
{
	int			i, ndmas;
	uint16_t		rdc;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_get_ring_index: channel %d", channel));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_get_ring_index: NULL ring pointer"));
		return (-1);
	}
	ndmas = rx_rbr_rings->ndmas;
	if (!ndmas) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_get_ring_index: no channel"));
		return (-1);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas));

	rbr_rings = rx_rbr_rings->rbr_rings;
	for (i = 0; i < ndmas; i++) {
		rdc = rbr_rings[i]->rdc;
		if (channel == rdc) {
			NXGE_DEBUG_MSG((nxgep, RX_CTL,
			    "==> nxge_rxdma_get_rbr_ring: "
			    "channel %d (index %d) "
			    "ring $%p", channel, i,
			    rbr_rings[i]));
			return (i);
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_rxdma_get_rbr_ring_index: not found"));

	return (-1);
}

p_rx_rbr_ring_t
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
{
	int			i, ndmas;
	uint16_t		rdc;
	p_rx_rbr_rings_t	rx_rbr_rings;
	p_rx_rbr_ring_t		*rbr_rings;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_get_rbr_ring: channel %d", channel));

	rx_rbr_rings = nxgep->rx_rbr_rings;
	if (rx_rbr_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rxdma_get_rbr_ring: NULL ring pointer"));
1408 return (NULL); 1409 } 1410 ndmas = rx_rbr_rings->ndmas; 1411 if (!ndmas) { 1412 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1413 "<== nxge_rxdma_get_rbr_ring: no channel")); 1414 return (NULL); 1415 } 1416 1417 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1418 "==> nxge_rxdma_get_ring (ndmas %d)", ndmas)); 1419 1420 rbr_rings = rx_rbr_rings->rbr_rings; 1421 for (i = 0; i < ndmas; i++) { 1422 rdc = rbr_rings[i]->rdc; 1423 if (channel == rdc) { 1424 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1425 "==> nxge_rxdma_get_rbr_ring: channel %d " 1426 "ring $%p", channel, rbr_rings[i])); 1427 return (rbr_rings[i]); 1428 } 1429 } 1430 1431 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1432 "<== nxge_rxdma_get_rbr_ring: not found")); 1433 1434 return (NULL); 1435 } 1436 1437 p_rx_rcr_ring_t 1438 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1439 { 1440 int i, ndmas; 1441 uint16_t rdc; 1442 p_rx_rcr_rings_t rx_rcr_rings; 1443 p_rx_rcr_ring_t *rcr_rings; 1444 1445 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1446 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1447 1448 rx_rcr_rings = nxgep->rx_rcr_rings; 1449 if (rx_rcr_rings == NULL) { 1450 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1451 "<== nxge_rxdma_get_rcr_ring: NULL ring pointer")); 1452 return (NULL); 1453 } 1454 ndmas = rx_rcr_rings->ndmas; 1455 if (!ndmas) { 1456 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1457 "<== nxge_rxdma_get_rcr_ring: no channel")); 1458 return (NULL); 1459 } 1460 1461 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1462 "==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas)); 1463 1464 rcr_rings = rx_rcr_rings->rcr_rings; 1465 for (i = 0; i < ndmas; i++) { 1466 rdc = rcr_rings[i]->rdc; 1467 if (channel == rdc) { 1468 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1469 "==> nxge_rxdma_get_rcr_ring: channel %d " 1470 "ring $%p", channel, rcr_rings[i])); 1471 return (rcr_rings[i]); 1472 } 1473 } 1474 1475 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1476 "<== nxge_rxdma_get_rcr_ring: not found")); 1477 1478 return (NULL); 1479 } 1480 1481 /* 1482 * Static functions start here. 
 */
static p_rx_msg_t
nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
{
	p_rx_msg_t		nxge_mp = NULL;
	p_nxge_dma_common_t	dmamsg_p;
	uchar_t			*buffer;

	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
	if (nxge_mp == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "Allocation of a rx msg failed."));
		goto nxge_allocb_exit;
	}

	nxge_mp->use_buf_pool = B_FALSE;
	if (dmabuf_p) {
		nxge_mp->use_buf_pool = B_TRUE;
		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
		*dmamsg_p = *dmabuf_p;
		dmamsg_p->nblocks = 1;
		dmamsg_p->block_size = size;
		dmamsg_p->alength = size;
		buffer = (uchar_t *)dmabuf_p->kaddrp;

		dmabuf_p->kaddrp = (void *)
		    ((char *)dmabuf_p->kaddrp + size);
		dmabuf_p->ioaddr_pp = (void *)
		    ((char *)dmabuf_p->ioaddr_pp + size);
		dmabuf_p->alength -= size;
		dmabuf_p->offset += size;
		dmabuf_p->dma_cookie.dmac_laddress += size;
		dmabuf_p->dma_cookie.dmac_size -= size;
	} else {
		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
		if (buffer == NULL) {
			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
			    "Allocation of a receive page failed."));
			goto nxge_allocb_fail1;
		}
	}

	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
	if (nxge_mp->rx_mblk_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
		goto nxge_allocb_fail2;
	}

	nxge_mp->buffer = buffer;
	nxge_mp->block_size = size;
	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
	nxge_mp->ref_cnt = 1;
	nxge_mp->free = B_TRUE;
	nxge_mp->rx_use_bcopy = B_FALSE;

	atomic_inc_32(&nxge_mblks_pending);

	goto nxge_allocb_exit;

nxge_allocb_fail2:
	if (!nxge_mp->use_buf_pool) {
		KMEM_FREE(buffer, size);
	}

nxge_allocb_fail1:
	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
	nxge_mp = NULL;

nxge_allocb_exit:
	return (nxge_mp);
}

p_mblk_t
nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;

	NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
	    "offset = 0x%08X "
	    "size = 0x%08X",
	    nxge_mp, offset, size));

	mp = desballoc(&nxge_mp->buffer[offset], size,
	    0, &nxge_mp->freeb);
	if (mp == NULL) {
		NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
		goto nxge_dupb_exit;
	}
	atomic_inc_32(&nxge_mp->ref_cnt);
	atomic_inc_32(&nxge_mblks_pending);

nxge_dupb_exit:
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
	    nxge_mp));
	return (mp);
}

p_mblk_t
nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
{
	p_mblk_t mp;
	uchar_t *dp;

	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
	if (mp == NULL) {
		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
		goto nxge_dupb_bcopy_exit;
	}
	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
	mp->b_wptr = dp + size;

nxge_dupb_bcopy_exit:
	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
	    nxge_mp));
	return (mp);
}

void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
    p_rx_msg_t rx_msg_p);
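
/*
 * Buffer recycling sketch: each receive buffer block is wrapped with
 * desballoc() so that freemsg() on a loaned-up mblk eventually invokes
 * nxge_freeb(), which either frees the block outright or reposts it to
 * the RBR via nxge_post_page().  Roughly:
 *
 *	mp = nxge_dupb(rx_msg_p, offset, len);	(loan buffer upstream)
 *	...
 *	freemsg(mp);	->  nxge_freeb()  ->  nxge_post_page()
 *
 * This is a simplified view; the reference counting details are in
 * nxge_freeb() below.
 */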

void
nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	npi_handle_t	handle;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));

	/* Reuse this buffer */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	if (rx_rbr_p->rbr_use_bcopy) {
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}

	/*
	 * Get the rbr header pointer and its offset index.
	 */
	MUTEX_ENTER(&rx_rbr_p->post_lock);

	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
	MUTEX_EXIT(&rx_rbr_p->post_lock);
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
}

void
nxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t size;
	uchar_t *buffer = NULL;
	int ref_cnt;
	boolean_t free_state = B_FALSE;

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, nxge_mblks_pending));

	atomic_dec_32(&nxge_mblks_pending);
	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, nxge_mblks_pending));

		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
		return;
	}

	/*
	 * Repost buffer.
	 */
	if (free_state && (ref_cnt == 1)) {
		NXGE_DEBUG_MSG((NULL, RX_CTL,
		    "nxge_freeb: post page $%p:", rx_msg_p));
		nxge_post_page(rx_msg_p->nxgep, rx_msg_p->rx_rbr_p,
		    rx_msg_p);
	}

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}

uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t		nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t		ldgp;
	uint8_t			channel;
	npi_handle_t		handle;
	rx_dma_ctl_stat_t	cs;

#ifdef	NXGE_DEBUG
	rxdma_cfig1_t		cfg;
#endif
	uint_t			serviced = DDI_INTR_UNCLAIMED;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
		    nxgep, ldvp));

		return (DDI_INTR_CLAIMED);
	}

	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
	    nxgep, ldvp));

	/*
	 * This interrupt handler is for a specific
	 * receive dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Get the control and status for this channel.
1734 */ 1735 channel = ldvp->channel; 1736 ldgp = ldvp->ldgp; 1737 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1738 1739 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1740 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1741 channel, 1742 cs.value, 1743 cs.bits.hdw.rcrto, 1744 cs.bits.hdw.rcrthres)); 1745 1746 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs); 1747 serviced = DDI_INTR_CLAIMED; 1748 1749 /* error events. */ 1750 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1751 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1752 } 1753 1754 nxge_intr_exit: 1755 1756 1757 /* 1758 * Enable the mailbox update interrupt if we want 1759 * to use mailbox. We probably don't need to use 1760 * mailbox as it only saves us one pio read. 1761 * Also write 1 to rcrthres and rcrto to clear 1762 * these two edge triggered bits. 1763 */ 1764 1765 cs.value &= RX_DMA_CTL_STAT_WR1C; 1766 cs.bits.hdw.mex = 1; 1767 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1768 cs.value); 1769 1770 /* 1771 * Rearm this logical group if this is a single device 1772 * group. 1773 */ 1774 if (ldgp->nldvs == 1) { 1775 ldgimgm_t mgm; 1776 mgm.value = 0; 1777 mgm.bits.ldw.arm = 1; 1778 mgm.bits.ldw.timer = ldgp->ldg_timer; 1779 NXGE_REG_WR64(handle, 1780 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1781 mgm.value); 1782 } 1783 1784 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1785 serviced)); 1786 return (serviced); 1787 } 1788 1789 /* 1790 * Process the packets received in the specified logical device 1791 * and pass up a chain of message blocks to the upper layer. 1792 */ 1793 static void 1794 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp, 1795 rx_dma_ctl_stat_t cs) 1796 { 1797 p_mblk_t mp; 1798 p_rx_rcr_ring_t rcrp; 1799 1800 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1801 if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) { 1802 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1803 "<== nxge_rx_pkts_vring: no mp")); 1804 return; 1805 } 1806 1807 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1808 mp)); 1809 1810 #ifdef NXGE_DEBUG 1811 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1812 "==> nxge_rx_pkts_vring:calling mac_rx " 1813 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1814 "mac_handle $%p", 1815 mp->b_wptr - mp->b_rptr, 1816 mp, mp->b_cont, mp->b_next, 1817 rcrp, rcrp->rcr_mac_handle)); 1818 1819 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1820 "==> nxge_rx_pkts_vring: dump packets " 1821 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1822 mp, 1823 mp->b_rptr, 1824 mp->b_wptr, 1825 nxge_dump_packet((char *)mp->b_rptr, 1826 mp->b_wptr - mp->b_rptr))); 1827 if (mp->b_cont) { 1828 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1829 "==> nxge_rx_pkts_vring: dump b_cont packets " 1830 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1831 mp->b_cont, 1832 mp->b_cont->b_rptr, 1833 mp->b_cont->b_wptr, 1834 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1835 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1836 } 1837 if (mp->b_next) { 1838 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1839 "==> nxge_rx_pkts_vring: dump next packets " 1840 "(b_rptr $%p): %s", 1841 mp->b_next->b_rptr, 1842 nxge_dump_packet((char *)mp->b_next->b_rptr, 1843 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1844 } 1845 #endif 1846 1847 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1848 } 1849 1850 1851 /* 1852 * This routine is the main packet receive processing function. 1853 * It gets the packet type, error code, and buffer related 1854 * information from the receive completion entry. 
 * How many completion entries to process is based on the number of packets
 * queued by the hardware, a hardware maintained tail pointer
 * and a configurable receive packet count.
 *
 * A chain of message blocks will be created as a result of processing
 * the completion entries. This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets that were removed from the hardware queue.
 *
 */
mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
    p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs)
{
	npi_handle_t		handle;
	uint8_t			channel;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		rcr_p;
	uint32_t		comp_rd_index;
	p_rcr_entry_t		rcr_desc_rd_head_p;
	p_rcr_entry_t		rcr_desc_rd_head_pp;
	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
	uint16_t		qlen, nrcr_read, npkt_read;
	uint32_t		qlen_hw;
	boolean_t		multi;
	rcrcfig_b_t		rcr_cfg_b;
#if defined(_BIG_ENDIAN)
	npi_status_t		rs = NPI_SUCCESS;
#endif

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d "
	    "channel %d", vindex, ldvp->channel));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rcr_p = rx_rcr_rings->rcr_rings[vindex];
	channel = rcr_p->rdc;
	if (channel != ldvp->channel) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
		    "channel %d, and rcr channel %d not matched.",
		    vindex, ldvp->channel, channel));
		return (NULL);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p  index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));

#if !defined(_BIG_ENDIAN)
	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
		    "channel %d, get qlen failed 0x%08x",
		    vindex, ldvp->channel, rs));
		return (NULL);
	}
#endif
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));

	if (!qlen) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts:rcr channel %d "
		    "qlen %d (no pkts)", channel, qlen));

		return (NULL);
	}

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued
	 * (a jumbo or multi packet is counted as only one packet,
	 * though it may take up more than one completion entry).
	 */
	qlen_hw = (qlen < nxge_max_rx_pkts) ?
	    qlen : nxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	while (qlen_hw) {

#ifdef NXGE_DEBUG
		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
1959 */ 1960 nxge_receive_packet(nxgep, 1961 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 1962 1963 /* 1964 * message chaining modes 1965 */ 1966 if (nmp) { 1967 nmp->b_next = NULL; 1968 if (!multi && !mp_cont) { /* frame fits a partition */ 1969 *tail_mp = nmp; 1970 tail_mp = &nmp->b_next; 1971 nmp = NULL; 1972 } else if (multi && !mp_cont) { /* first segment */ 1973 *tail_mp = nmp; 1974 tail_mp = &nmp->b_cont; 1975 } else if (multi && mp_cont) { /* mid of multi segs */ 1976 *tail_mp = mp_cont; 1977 tail_mp = &mp_cont->b_cont; 1978 } else if (!multi && mp_cont) { /* last segment */ 1979 *tail_mp = mp_cont; 1980 tail_mp = &nmp->b_next; 1981 nmp = NULL; 1982 } 1983 } 1984 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1985 "==> nxge_rx_pkts: loop: rcr channel %d " 1986 "before updating: multi %d " 1987 "nrcr_read %d " 1988 "npk read %d " 1989 "head_pp $%p index %d ", 1990 channel, 1991 multi, 1992 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 1993 comp_rd_index)); 1994 1995 if (!multi) { 1996 qlen_hw--; 1997 npkt_read++; 1998 } 1999 2000 /* 2001 * Update the next read entry. 2002 */ 2003 comp_rd_index = NEXT_ENTRY(comp_rd_index, 2004 rcr_p->comp_wrap_mask); 2005 2006 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 2007 rcr_p->rcr_desc_first_p, 2008 rcr_p->rcr_desc_last_p); 2009 2010 nrcr_read++; 2011 2012 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2013 "<== nxge_rx_pkts: (SAM, process one packet) " 2014 "nrcr_read %d", 2015 nrcr_read)); 2016 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2017 "==> nxge_rx_pkts: loop: rcr channel %d " 2018 "multi %d " 2019 "nrcr_read %d " 2020 "npk read %d " 2021 "head_pp $%p index %d ", 2022 channel, 2023 multi, 2024 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2025 comp_rd_index)); 2026 2027 } 2028 2029 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2030 rcr_p->comp_rd_index = comp_rd_index; 2031 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2032 2033 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2034 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2035 rcr_p->intr_timeout = nxgep->intr_timeout; 2036 rcr_p->intr_threshold = nxgep->intr_threshold; 2037 rcr_cfg_b.value = 0x0ULL; 2038 if (rcr_p->intr_timeout) 2039 rcr_cfg_b.bits.ldw.entout = 1; 2040 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2041 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2042 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2043 channel, rcr_cfg_b.value); 2044 } 2045 2046 cs.bits.ldw.pktread = npkt_read; 2047 cs.bits.ldw.ptrread = nrcr_read; 2048 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2049 channel, cs.value); 2050 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2051 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2052 "head_pp $%p index %016llx ", 2053 channel, 2054 rcr_p->rcr_desc_rd_head_pp, 2055 rcr_p->comp_rd_index)); 2056 /* 2057 * Update RCR buffer pointer read and number of packets 2058 * read. 
2059 */ 2060 2061 *rcrp = rcr_p; 2062 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2063 return (head_mp); 2064 } 2065 2066 void 2067 nxge_receive_packet(p_nxge_t nxgep, 2068 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2069 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2070 { 2071 p_mblk_t nmp = NULL; 2072 uint64_t multi; 2073 uint64_t dcf_err; 2074 uint8_t channel; 2075 2076 boolean_t first_entry = B_TRUE; 2077 boolean_t is_tcp_udp = B_FALSE; 2078 boolean_t buffer_free = B_FALSE; 2079 boolean_t error_send_up = B_FALSE; 2080 uint8_t error_type; 2081 uint16_t l2_len; 2082 uint16_t skip_len; 2083 uint8_t pktbufsz_type; 2084 uint16_t pktbufsz; 2085 uint64_t rcr_entry; 2086 uint64_t *pkt_buf_addr_pp; 2087 uint64_t *pkt_buf_addr_p; 2088 uint32_t buf_offset; 2089 uint32_t bsize; 2090 uint32_t error_disp_cnt; 2091 uint32_t msg_index; 2092 p_rx_rbr_ring_t rx_rbr_p; 2093 p_rx_msg_t *rx_msg_ring_p; 2094 p_rx_msg_t rx_msg_p; 2095 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2096 nxge_status_t status = NXGE_OK; 2097 boolean_t is_valid = B_FALSE; 2098 p_nxge_rx_ring_stats_t rdc_stats; 2099 uint32_t bytes_read; 2100 uint64_t pkt_type; 2101 uint64_t frag; 2102 #ifdef NXGE_DEBUG 2103 int dump_len; 2104 #endif 2105 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2106 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2107 2108 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2109 2110 multi = (rcr_entry & RCR_MULTI_MASK); 2111 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2112 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2113 2114 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2115 frag = (rcr_entry & RCR_FRAG_MASK); 2116 2117 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2118 2119 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2120 RCR_PKTBUFSZ_SHIFT); 2121 #if defined(__i386) 2122 pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2123 RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2124 #else 2125 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2126 RCR_PKT_BUF_ADDR_SHIFT); 2127 #endif 2128 2129 channel = rcr_p->rdc; 2130 2131 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2132 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2133 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2134 "error_type 0x%x pkt_type 0x%x " 2135 "pktbufsz_type %d ", 2136 rcr_desc_rd_head_p, 2137 rcr_entry, pkt_buf_addr_pp, l2_len, 2138 multi, 2139 error_type, 2140 pkt_type, 2141 pktbufsz_type)); 2142 2143 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2144 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2145 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2146 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2147 rcr_entry, pkt_buf_addr_pp, l2_len, 2148 multi, 2149 error_type, 2150 pkt_type)); 2151 2152 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2153 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2154 "full pkt_buf_addr_pp $%p l2_len %d", 2155 rcr_entry, pkt_buf_addr_pp, l2_len)); 2156 2157 /* get the stats ptr */ 2158 rdc_stats = rcr_p->rdc_stats; 2159 2160 if (!l2_len) { 2161 2162 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2163 "<== nxge_receive_packet: failed: l2 length is 0.")); 2164 return; 2165 } 2166 2167 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
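 * l2_len as reported in the completion entry therefore still includes
 * the 4-byte FCS; for example, a minimum-size 64-byte frame arrives
 * with l2_len == 64 and 60 bytes are handed up after the subtraction
 * below.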
*/ 2168 l2_len -= ETHERFCSL; 2169 2170 /* shift 6 bits to get the full io address */ 2171 #if defined(__i386) 2172 pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2173 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2174 #else 2175 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2176 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2177 #endif 2178 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2179 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2180 "full pkt_buf_addr_pp $%p l2_len %d", 2181 rcr_entry, pkt_buf_addr_pp, l2_len)); 2182 2183 rx_rbr_p = rcr_p->rx_rbr_p; 2184 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2185 2186 if (first_entry) { 2187 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2188 RXDMA_HDR_SIZE_DEFAULT); 2189 2190 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2191 "==> nxge_receive_packet: first entry 0x%016llx " 2192 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2193 rcr_entry, pkt_buf_addr_pp, l2_len, 2194 hdr_size)); 2195 } 2196 2197 MUTEX_ENTER(&rcr_p->lock); 2198 MUTEX_ENTER(&rx_rbr_p->lock); 2199 2200 bytes_read = rcr_p->rcvd_pkt_bytes; 2201 2202 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2203 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2204 "full pkt_buf_addr_pp $%p l2_len %d", 2205 rcr_entry, pkt_buf_addr_pp, l2_len)); 2206 2207 /* 2208 * Packet buffer address in the completion entry points 2209 * to the starting buffer address (offset 0). 2210 * Use the starting buffer address to locate the corresponding 2211 * kernel address. 2212 */ 2213 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2214 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2215 &buf_offset, 2216 &msg_index); 2217 2218 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2219 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2220 "full pkt_buf_addr_pp $%p l2_len %d", 2221 rcr_entry, pkt_buf_addr_pp, l2_len)); 2222 2223 if (status != NXGE_OK) { 2224 MUTEX_EXIT(&rx_rbr_p->lock); 2225 MUTEX_EXIT(&rcr_p->lock); 2226 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2227 "<== nxge_receive_packet: found vaddr failed %d", 2228 status)); 2229 return; 2230 } 2231 2232 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2233 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2234 "full pkt_buf_addr_pp $%p l2_len %d", 2235 rcr_entry, pkt_buf_addr_pp, l2_len)); 2236 2237 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2238 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2239 "full pkt_buf_addr_pp $%p l2_len %d", 2240 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2241 2242 rx_msg_p = rx_msg_ring_p[msg_index]; 2243 2244 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2245 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2246 "full pkt_buf_addr_pp $%p l2_len %d", 2247 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2248 2249 switch (pktbufsz_type) { 2250 case RCR_PKTBUFSZ_0: 2251 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2252 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2253 "==> nxge_receive_packet: 0 buf %d", bsize)); 2254 break; 2255 case RCR_PKTBUFSZ_1: 2256 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2257 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2258 "==> nxge_receive_packet: 1 buf %d", bsize)); 2259 break; 2260 case RCR_PKTBUFSZ_2: 2261 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2262 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2263 "==> nxge_receive_packet: 2 buf %d", bsize)); 2264 break; 2265 case RCR_SINGLE_BLOCK: 2266 bsize = rx_msg_p->block_size; 2267 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2268 "==> nxge_receive_packet: single %d", bsize)); 2269 2270 break; 2271 default: 2272 MUTEX_EXIT(&rx_rbr_p->lock); 2273 MUTEX_EXIT(&rcr_p->lock); 2274 return; 2275 } 2276 2277 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2278 (buf_offset + sw_offset_bytes), 2279 (hdr_size + 
l2_len), 2280 DDI_DMA_SYNC_FORCPU); 2281 2282 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2283 "==> nxge_receive_packet: after first dump:usage count")); 2284 2285 if (rx_msg_p->cur_usage_cnt == 0) { 2286 if (rx_rbr_p->rbr_use_bcopy) { 2287 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2288 if (rx_rbr_p->rbr_consumed < 2289 rx_rbr_p->rbr_threshold_hi) { 2290 if (rx_rbr_p->rbr_threshold_lo == 0 || 2291 ((rx_rbr_p->rbr_consumed >= 2292 rx_rbr_p->rbr_threshold_lo) && 2293 (rx_rbr_p->rbr_bufsize_type >= 2294 pktbufsz_type))) { 2295 rx_msg_p->rx_use_bcopy = B_TRUE; 2296 } 2297 } else { 2298 rx_msg_p->rx_use_bcopy = B_TRUE; 2299 } 2300 } 2301 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2302 "==> nxge_receive_packet: buf %d (new block) ", 2303 bsize)); 2304 2305 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2306 rx_msg_p->pkt_buf_size = bsize; 2307 rx_msg_p->cur_usage_cnt = 1; 2308 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2309 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2310 "==> nxge_receive_packet: buf %d " 2311 "(single block) ", 2312 bsize)); 2313 /* 2314 * Buffer can be reused once the free function 2315 * is called. 2316 */ 2317 rx_msg_p->max_usage_cnt = 1; 2318 buffer_free = B_TRUE; 2319 } else { 2320 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2321 if (rx_msg_p->max_usage_cnt == 1) { 2322 buffer_free = B_TRUE; 2323 } 2324 } 2325 } else { 2326 rx_msg_p->cur_usage_cnt++; 2327 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2328 buffer_free = B_TRUE; 2329 } 2330 } 2331 2332 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2333 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2334 msg_index, l2_len, 2335 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2336 2337 if ((error_type) || (dcf_err)) { 2338 rdc_stats->ierrors++; 2339 if (dcf_err) { 2340 rdc_stats->dcf_err++; 2341 #ifdef NXGE_DEBUG 2342 if (!rdc_stats->dcf_err) { 2343 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2344 "nxge_receive_packet: channel %d dcf_err rcr" 2345 " 0x%llx", channel, rcr_entry)); 2346 } 2347 #endif 2348 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2349 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2350 } else { 2351 /* Update error stats */ 2352 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2353 rdc_stats->errlog.compl_err_type = error_type; 2354 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2355 NXGE_FM_EREPORT_RDMC_COMPLETION_ERR); 2356 2357 switch (error_type) { 2358 case RCR_L2_ERROR: 2359 rdc_stats->l2_err++; 2360 if (rdc_stats->l2_err < 2361 error_disp_cnt) 2362 NXGE_ERROR_MSG((nxgep, 2363 NXGE_ERR_CTL, 2364 " nxge_receive_packet:" 2365 " channel %d RCR L2_ERROR", 2366 channel)); 2367 break; 2368 case RCR_L4_CSUM_ERROR: 2369 error_send_up = B_TRUE; 2370 rdc_stats->l4_cksum_err++; 2371 if (rdc_stats->l4_cksum_err < 2372 error_disp_cnt) 2373 NXGE_ERROR_MSG((nxgep, 2374 NXGE_ERR_CTL, 2375 " nxge_receive_packet:" 2376 " channel %d" 2377 " RCR L4_CSUM_ERROR", 2378 channel)); 2379 break; 2380 case RCR_FFLP_SOFT_ERROR: 2381 error_send_up = B_TRUE; 2382 rdc_stats->fflp_soft_err++; 2383 if (rdc_stats->fflp_soft_err < 2384 error_disp_cnt) 2385 NXGE_ERROR_MSG((nxgep, 2386 NXGE_ERR_CTL, 2387 " nxge_receive_packet:" 2388 " channel %d" 2389 " RCR FFLP_SOFT_ERROR", 2390 channel)); 2391 break; 2392 case RCR_ZCP_SOFT_ERROR: 2393 error_send_up = B_TRUE; 2394 rdc_stats->fflp_soft_err++; 2395 if (rdc_stats->zcp_soft_err < 2396 error_disp_cnt) 2397 NXGE_ERROR_MSG((nxgep, 2398 NXGE_ERR_CTL, 2399 " nxge_receive_packet:" 2400 " Channel %d" 2401 " RCR ZCP_SOFT_ERROR", 2402 channel)); 2403 break; 2404 default: 2405 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2406 " nxge_receive_packet:" 
2407 " Channel %d" 2408 " RCR entry 0x%llx" 2409 " error 0x%x", 2410 rcr_entry, channel, 2411 error_type)); 2412 break; 2413 } 2414 } 2415 2416 /* 2417 * Update and repost buffer block if max usage 2418 * count is reached. 2419 */ 2420 if (error_send_up == B_FALSE) { 2421 atomic_inc_32(&rx_msg_p->ref_cnt); 2422 atomic_inc_32(&nxge_mblks_pending); 2423 if (buffer_free == B_TRUE) { 2424 rx_msg_p->free = B_TRUE; 2425 } 2426 2427 MUTEX_EXIT(&rx_rbr_p->lock); 2428 MUTEX_EXIT(&rcr_p->lock); 2429 nxge_freeb(rx_msg_p); 2430 return; 2431 } 2432 } 2433 2434 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2435 "==> nxge_receive_packet: DMA sync second ")); 2436 2437 skip_len = sw_offset_bytes + hdr_size; 2438 if (!rx_msg_p->rx_use_bcopy) { 2439 /* 2440 * For loaned up buffers, the driver reference count 2441 * will be incremented first and then the free state. 2442 */ 2443 nmp = nxge_dupb(rx_msg_p, buf_offset, bsize); 2444 } else { 2445 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, l2_len); 2446 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2447 "==> nxge_receive_packet: use bcopy " 2448 "rbr consumed %d " 2449 "pktbufsz_type %d " 2450 "offset %d " 2451 "hdr_size %d l2_len %d " 2452 "nmp->b_rptr $%p", 2453 rx_rbr_p->rbr_consumed, 2454 pktbufsz_type, 2455 buf_offset, hdr_size, l2_len, 2456 nmp->b_rptr)); 2457 } 2458 if (nmp != NULL) { 2459 pktbufsz = nxge_get_pktbuf_size(nxgep, pktbufsz_type, 2460 rx_rbr_p->rbr_cfgb); 2461 if (!rx_msg_p->rx_use_bcopy) { 2462 if (first_entry) { 2463 bytes_read = 0; 2464 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2465 if (l2_len > pktbufsz - skip_len) 2466 nmp->b_wptr = &nmp->b_rptr[pktbufsz 2467 - skip_len]; 2468 else 2469 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2470 } else { 2471 if (l2_len - bytes_read > pktbufsz) 2472 nmp->b_wptr = &nmp->b_rptr[pktbufsz]; 2473 else 2474 nmp->b_wptr = 2475 &nmp->b_rptr[l2_len - bytes_read]; 2476 } 2477 bytes_read += nmp->b_wptr - nmp->b_rptr; 2478 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2479 "==> nxge_receive_packet after dupb: " 2480 "rbr consumed %d " 2481 "pktbufsz_type %d " 2482 "nmp $%p rptr $%p wptr $%p " 2483 "buf_offset %d bzise %d l2_len %d skip_len %d", 2484 rx_rbr_p->rbr_consumed, 2485 pktbufsz_type, 2486 nmp, nmp->b_rptr, nmp->b_wptr, 2487 buf_offset, bsize, l2_len, skip_len)); 2488 } 2489 } else { 2490 cmn_err(CE_WARN, "!nxge_receive_packet: " 2491 "update stats (error)"); 2492 atomic_inc_32(&rx_msg_p->ref_cnt); 2493 atomic_inc_32(&nxge_mblks_pending); 2494 if (buffer_free == B_TRUE) { 2495 rx_msg_p->free = B_TRUE; 2496 } 2497 MUTEX_EXIT(&rx_rbr_p->lock); 2498 MUTEX_EXIT(&rcr_p->lock); 2499 nxge_freeb(rx_msg_p); 2500 return; 2501 } 2502 2503 rcr_p->rcvd_pkt_bytes = bytes_read; 2504 2505 if (buffer_free == B_TRUE) { 2506 rx_msg_p->free = B_TRUE; 2507 } 2508 2509 /* 2510 * ERROR, FRAG and PKT_TYPE are only reported 2511 * in the first entry. 2512 * If a packet is not fragmented and no error bit is set, then 2513 * L4 checksum is OK. 2514 */ 2515 is_valid = (nmp != NULL); 2516 rdc_stats->ibytes += l2_len; 2517 rdc_stats->ipackets++; 2518 MUTEX_EXIT(&rx_rbr_p->lock); 2519 MUTEX_EXIT(&rcr_p->lock); 2520 2521 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2522 atomic_inc_32(&rx_msg_p->ref_cnt); 2523 atomic_inc_32(&nxge_mblks_pending); 2524 nxge_freeb(rx_msg_p); 2525 } 2526 2527 if (is_valid) { 2528 nmp->b_cont = NULL; 2529 if (first_entry) { 2530 *mp = nmp; 2531 *mp_cont = NULL; 2532 } else 2533 *mp_cont = nmp; 2534 } 2535 2536 /* 2537 * Update stats and hardware checksuming. 
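 * The full-checksum flags are claimed below only when the completion
 * entry marks the frame as TCP or UDP (pkt_type), it is not flagged as
 * a fragment (frag) and no error bits are set; in that case the
 * hardware is taken to have verified the L4 checksum, so the stack can
 * skip the software check.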
2538 */ 2539 if (is_valid && !multi) { 2540 2541 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2542 pkt_type == RCR_PKT_IS_UDP) ? 2543 B_TRUE: B_FALSE); 2544 2545 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2546 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2547 is_valid, multi, is_tcp_udp, frag, error_type)); 2548 2549 if (is_tcp_udp && !frag && !error_type) { 2550 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2551 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2552 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2553 "==> nxge_receive_packet: Full tcp/udp cksum " 2554 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2555 "error %d", 2556 is_valid, multi, is_tcp_udp, frag, error_type)); 2557 } 2558 } 2559 2560 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2561 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2562 2563 *multi_p = (multi == RCR_MULTI_MASK); 2564 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2565 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2566 *multi_p, nmp, *mp, *mp_cont)); 2567 } 2568 2569 /*ARGSUSED*/ 2570 static nxge_status_t 2571 nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, 2572 rx_dma_ctl_stat_t cs) 2573 { 2574 p_nxge_rx_ring_stats_t rdc_stats; 2575 npi_handle_t handle; 2576 npi_status_t rs; 2577 boolean_t rxchan_fatal = B_FALSE; 2578 boolean_t rxport_fatal = B_FALSE; 2579 uint8_t channel; 2580 uint8_t portn; 2581 nxge_status_t status = NXGE_OK; 2582 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2583 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2584 2585 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2586 channel = ldvp->channel; 2587 portn = nxgep->mac.portnum; 2588 rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index]; 2589 2590 if (cs.bits.hdw.rbr_tmout) { 2591 rdc_stats->rx_rbr_tmout++; 2592 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2593 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2594 rxchan_fatal = B_TRUE; 2595 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2596 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2597 } 2598 if (cs.bits.hdw.rsp_cnt_err) { 2599 rdc_stats->rsp_cnt_err++; 2600 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2601 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2602 rxchan_fatal = B_TRUE; 2603 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2604 "==> nxge_rx_err_evnts(channel %d): " 2605 "rsp_cnt_err", channel)); 2606 } 2607 if (cs.bits.hdw.byte_en_bus) { 2608 rdc_stats->byte_en_bus++; 2609 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2610 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2612 "==> nxge_rx_err_evnts(channel %d): " 2613 "fatal error: byte_en_bus", channel)); 2614 rxchan_fatal = B_TRUE; 2615 } 2616 if (cs.bits.hdw.rsp_dat_err) { 2617 rdc_stats->rsp_dat_err++; 2618 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2619 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2620 rxchan_fatal = B_TRUE; 2621 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2622 "==> nxge_rx_err_evnts(channel %d): " 2623 "fatal error: rsp_dat_err", channel)); 2624 } 2625 if (cs.bits.hdw.rcr_ack_err) { 2626 rdc_stats->rcr_ack_err++; 2627 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2628 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2629 rxchan_fatal = B_TRUE; 2630 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2631 "==> nxge_rx_err_evnts(channel %d): " 2632 "fatal error: rcr_ack_err", channel)); 2633 } 2634 if (cs.bits.hdw.dc_fifo_err) { 2635 rdc_stats->dc_fifo_err++; 2636 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2637 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2638 /* This is not a fatal error! 
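 * "Not fatal" here means not fatal to this individual DMA channel; the
 * port is still marked for recovery (rxport_fatal is set below), which
 * later drives nxge_ipp_fatal_err_recover() for the whole port.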
*/ 2639 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2640 "==> nxge_rx_err_evnts(channel %d): " 2641 "dc_fifo_err", channel)); 2642 rxport_fatal = B_TRUE; 2643 } 2644 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2645 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2646 &rdc_stats->errlog.pre_par, 2647 &rdc_stats->errlog.sha_par)) 2648 != NPI_SUCCESS) { 2649 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2650 "==> nxge_rx_err_evnts(channel %d): " 2651 "rcr_sha_par: get perr", channel)); 2652 return (NXGE_ERROR | rs); 2653 } 2654 if (cs.bits.hdw.rcr_sha_par) { 2655 rdc_stats->rcr_sha_par++; 2656 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2657 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2658 rxchan_fatal = B_TRUE; 2659 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2660 "==> nxge_rx_err_evnts(channel %d): " 2661 "fatal error: rcr_sha_par", channel)); 2662 } 2663 if (cs.bits.hdw.rbr_pre_par) { 2664 rdc_stats->rbr_pre_par++; 2665 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2666 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2667 rxchan_fatal = B_TRUE; 2668 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2669 "==> nxge_rx_err_evnts(channel %d): " 2670 "fatal error: rbr_pre_par", channel)); 2671 } 2672 } 2673 if (cs.bits.hdw.port_drop_pkt) { 2674 rdc_stats->port_drop_pkt++; 2675 if (rdc_stats->port_drop_pkt < error_disp_cnt) 2676 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2677 "==> nxge_rx_err_evnts (channel %d): " 2678 "port_drop_pkt", channel)); 2679 } 2680 if (cs.bits.hdw.wred_drop) { 2681 rdc_stats->wred_drop++; 2682 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2683 "==> nxge_rx_err_evnts(channel %d): " 2684 "wred_drop", channel)); 2685 } 2686 if (cs.bits.hdw.rbr_pre_empty) { 2687 rdc_stats->rbr_pre_empty++; 2688 if (rdc_stats->rbr_pre_empty < error_disp_cnt) 2689 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2690 "==> nxge_rx_err_evnts(channel %d): " 2691 "rbr_pre_empty", channel)); 2692 } 2693 if (cs.bits.hdw.rcr_shadow_full) { 2694 rdc_stats->rcr_shadow_full++; 2695 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2696 "==> nxge_rx_err_evnts(channel %d): " 2697 "rcr_shadow_full", channel)); 2698 } 2699 if (cs.bits.hdw.config_err) { 2700 rdc_stats->config_err++; 2701 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2702 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2703 rxchan_fatal = B_TRUE; 2704 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2705 "==> nxge_rx_err_evnts(channel %d): " 2706 "config error", channel)); 2707 } 2708 if (cs.bits.hdw.rcrincon) { 2709 rdc_stats->rcrincon++; 2710 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2711 NXGE_FM_EREPORT_RDMC_RCRINCON); 2712 rxchan_fatal = B_TRUE; 2713 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2714 "==> nxge_rx_err_evnts(channel %d): " 2715 "fatal error: rcrincon error", channel)); 2716 } 2717 if (cs.bits.hdw.rcrfull) { 2718 rdc_stats->rcrfull++; 2719 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2720 NXGE_FM_EREPORT_RDMC_RCRFULL); 2721 rxchan_fatal = B_TRUE; 2722 if (rdc_stats->rcrfull < error_disp_cnt) 2723 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2724 "==> nxge_rx_err_evnts(channel %d): " 2725 "fatal error: rcrfull error", channel)); 2726 } 2727 if (cs.bits.hdw.rbr_empty) { 2728 rdc_stats->rbr_empty++; 2729 if (rdc_stats->rbr_empty < error_disp_cnt) 2730 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2731 "==> nxge_rx_err_evnts(channel %d): " 2732 "rbr empty error", channel)); 2733 } 2734 if (cs.bits.hdw.rbrfull) { 2735 rdc_stats->rbrfull++; 2736 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2737 NXGE_FM_EREPORT_RDMC_RBRFULL); 2738 rxchan_fatal = B_TRUE; 2739 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2740 "==> nxge_rx_err_evnts(channel %d): " 2741 "fatal error: rbr_full 
error", channel)); 2742 } 2743 if (cs.bits.hdw.rbrlogpage) { 2744 rdc_stats->rbrlogpage++; 2745 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2746 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2747 rxchan_fatal = B_TRUE; 2748 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2749 "==> nxge_rx_err_evnts(channel %d): " 2750 "fatal error: rbr logical page error", channel)); 2751 } 2752 if (cs.bits.hdw.cfiglogpage) { 2753 rdc_stats->cfiglogpage++; 2754 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2755 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2756 rxchan_fatal = B_TRUE; 2757 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2758 "==> nxge_rx_err_evnts(channel %d): " 2759 "fatal error: cfig logical page error", channel)); 2760 } 2761 2762 if (rxport_fatal) { 2763 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2764 " nxge_rx_err_evnts: " 2765 " fatal error on Port #%d\n", 2766 portn)); 2767 status = nxge_ipp_fatal_err_recover(nxgep); 2768 if (status == NXGE_OK) { 2769 FM_SERVICE_RESTORED(nxgep); 2770 } 2771 } 2772 2773 if (rxchan_fatal) { 2774 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2775 " nxge_rx_err_evnts: " 2776 " fatal error on Channel #%d\n", 2777 channel)); 2778 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2779 if (status == NXGE_OK) { 2780 FM_SERVICE_RESTORED(nxgep); 2781 } 2782 } 2783 2784 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2785 2786 return (status); 2787 } 2788 2789 static nxge_status_t 2790 nxge_map_rxdma(p_nxge_t nxgep) 2791 { 2792 int i, ndmas; 2793 uint16_t channel; 2794 p_rx_rbr_rings_t rx_rbr_rings; 2795 p_rx_rbr_ring_t *rbr_rings; 2796 p_rx_rcr_rings_t rx_rcr_rings; 2797 p_rx_rcr_ring_t *rcr_rings; 2798 p_rx_mbox_areas_t rx_mbox_areas_p; 2799 p_rx_mbox_t *rx_mbox_p; 2800 p_nxge_dma_pool_t dma_buf_poolp; 2801 p_nxge_dma_pool_t dma_cntl_poolp; 2802 p_nxge_dma_common_t *dma_buf_p; 2803 p_nxge_dma_common_t *dma_cntl_p; 2804 uint32_t *num_chunks; 2805 nxge_status_t status = NXGE_OK; 2806 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2807 p_nxge_dma_common_t t_dma_buf_p; 2808 p_nxge_dma_common_t t_dma_cntl_p; 2809 #endif 2810 2811 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 2812 2813 dma_buf_poolp = nxgep->rx_buf_pool_p; 2814 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2815 2816 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2817 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2818 "<== nxge_map_rxdma: buf not allocated")); 2819 return (NXGE_ERROR); 2820 } 2821 2822 ndmas = dma_buf_poolp->ndmas; 2823 if (!ndmas) { 2824 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2825 "<== nxge_map_rxdma: no dma allocated")); 2826 return (NXGE_ERROR); 2827 } 2828 2829 num_chunks = dma_buf_poolp->num_chunks; 2830 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2831 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2832 2833 rx_rbr_rings = (p_rx_rbr_rings_t) 2834 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2835 rbr_rings = (p_rx_rbr_ring_t *) 2836 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2837 rx_rcr_rings = (p_rx_rcr_rings_t) 2838 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2839 rcr_rings = (p_rx_rcr_ring_t *) 2840 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2841 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2842 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2843 rx_mbox_p = (p_rx_mbox_t *) 2844 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2845 2846 /* 2847 * Timeout should be set based on the system clock divider. 2848 * The following timeout value of 1 assumes that the 2849 * granularity (1000) is 3 microseconds running at 300MHz. 
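 * (Only the per-port defaults are recorded here: intr_threshold is the
 * RCR packet-count threshold and intr_timeout the interrupt coalescing
 * timer.  Both are programmed into each channel's RCRCFIG_B register
 * (pthres and timeout fields) when the completion ring is configured,
 * and again by nxge_rx_pkts() whenever it notices they have changed.)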
2850 */ 2851 2852 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2853 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2854 2855 /* 2856 * Map descriptors from the buffer polls for each dam channel. 2857 */ 2858 for (i = 0; i < ndmas; i++) { 2859 /* 2860 * Set up and prepare buffer blocks, descriptors 2861 * and mailbox. 2862 */ 2863 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2864 status = nxge_map_rxdma_channel(nxgep, channel, 2865 (p_nxge_dma_common_t *)&dma_buf_p[i], 2866 (p_rx_rbr_ring_t *)&rbr_rings[i], 2867 num_chunks[i], 2868 (p_nxge_dma_common_t *)&dma_cntl_p[i], 2869 (p_rx_rcr_ring_t *)&rcr_rings[i], 2870 (p_rx_mbox_t *)&rx_mbox_p[i]); 2871 if (status != NXGE_OK) { 2872 goto nxge_map_rxdma_fail1; 2873 } 2874 rbr_rings[i]->index = (uint16_t)i; 2875 rcr_rings[i]->index = (uint16_t)i; 2876 rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i]; 2877 2878 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2879 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 2880 rbr_rings[i]->hv_set = B_FALSE; 2881 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 2882 t_dma_cntl_p = 2883 (p_nxge_dma_common_t)dma_cntl_p[i]; 2884 2885 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp = 2886 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 2887 rbr_rings[i]->hv_rx_buf_ioaddr_size = 2888 (uint64_t)t_dma_buf_p->orig_alength; 2889 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2890 "==> nxge_map_rxdma_channel: " 2891 "channel %d " 2892 "data buf base io $%p ($%p) " 2893 "size 0x%llx (%d 0x%x)", 2894 channel, 2895 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp, 2896 t_dma_cntl_p->ioaddr_pp, 2897 rbr_rings[i]->hv_rx_buf_ioaddr_size, 2898 t_dma_buf_p->orig_alength, 2899 t_dma_buf_p->orig_alength)); 2900 2901 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp = 2902 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 2903 rbr_rings[i]->hv_rx_cntl_ioaddr_size = 2904 (uint64_t)t_dma_cntl_p->orig_alength; 2905 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2906 "==> nxge_map_rxdma_channel: " 2907 "channel %d " 2908 "cntl base io $%p ($%p) " 2909 "size 0x%llx (%d 0x%x)", 2910 channel, 2911 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp, 2912 t_dma_cntl_p->ioaddr_pp, 2913 rbr_rings[i]->hv_rx_cntl_ioaddr_size, 2914 t_dma_cntl_p->orig_alength, 2915 t_dma_cntl_p->orig_alength)); 2916 } 2917 2918 #endif /* sun4v and NIU_LP_WORKAROUND */ 2919 } 2920 2921 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2922 rx_rbr_rings->rbr_rings = rbr_rings; 2923 nxgep->rx_rbr_rings = rx_rbr_rings; 2924 rx_rcr_rings->rcr_rings = rcr_rings; 2925 nxgep->rx_rcr_rings = rx_rcr_rings; 2926 2927 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2928 nxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2929 2930 goto nxge_map_rxdma_exit; 2931 2932 nxge_map_rxdma_fail1: 2933 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2934 "==> nxge_map_rxdma: unmap rbr,rcr " 2935 "(status 0x%x channel %d i %d)", 2936 status, channel, i)); 2937 i--; 2938 for (; i >= 0; i--) { 2939 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2940 nxge_unmap_rxdma_channel(nxgep, channel, 2941 rbr_rings[i], 2942 rcr_rings[i], 2943 rx_mbox_p[i]); 2944 } 2945 2946 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2947 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2948 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2949 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2950 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2951 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2952 2953 nxge_map_rxdma_exit: 2954 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2955 "<== nxge_map_rxdma: " 2956 "(status 0x%x channel %d)", 2957 status, channel)); 
2958 2959 return (status); 2960 } 2961 2962 static void 2963 nxge_unmap_rxdma(p_nxge_t nxgep) 2964 { 2965 int i, ndmas; 2966 uint16_t channel; 2967 p_rx_rbr_rings_t rx_rbr_rings; 2968 p_rx_rbr_ring_t *rbr_rings; 2969 p_rx_rcr_rings_t rx_rcr_rings; 2970 p_rx_rcr_ring_t *rcr_rings; 2971 p_rx_mbox_areas_t rx_mbox_areas_p; 2972 p_rx_mbox_t *rx_mbox_p; 2973 p_nxge_dma_pool_t dma_buf_poolp; 2974 p_nxge_dma_pool_t dma_cntl_poolp; 2975 p_nxge_dma_common_t *dma_buf_p; 2976 2977 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma")); 2978 2979 dma_buf_poolp = nxgep->rx_buf_pool_p; 2980 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2981 2982 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2983 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2984 "<== nxge_unmap_rxdma: NULL buf pointers")); 2985 return; 2986 } 2987 2988 rx_rbr_rings = nxgep->rx_rbr_rings; 2989 rx_rcr_rings = nxgep->rx_rcr_rings; 2990 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2991 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2992 "<== nxge_unmap_rxdma: NULL ring pointers")); 2993 return; 2994 } 2995 ndmas = rx_rbr_rings->ndmas; 2996 if (!ndmas) { 2997 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2998 "<== nxge_unmap_rxdma: no channel")); 2999 return; 3000 } 3001 3002 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3003 "==> nxge_unmap_rxdma (ndmas %d)", ndmas)); 3004 rbr_rings = rx_rbr_rings->rbr_rings; 3005 rcr_rings = rx_rcr_rings->rcr_rings; 3006 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3007 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3008 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 3009 3010 for (i = 0; i < ndmas; i++) { 3011 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 3012 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3013 "==> nxge_unmap_rxdma (ndmas %d) channel %d", 3014 ndmas, channel)); 3015 (void) nxge_unmap_rxdma_channel(nxgep, channel, 3016 (p_rx_rbr_ring_t)rbr_rings[i], 3017 (p_rx_rcr_ring_t)rcr_rings[i], 3018 (p_rx_mbox_t)rx_mbox_p[i]); 3019 } 3020 3021 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 3022 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 3023 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 3024 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 3025 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 3026 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 3027 3028 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3029 "<== nxge_unmap_rxdma")); 3030 } 3031 3032 nxge_status_t 3033 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3034 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3035 uint32_t num_chunks, 3036 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3037 p_rx_mbox_t *rx_mbox_p) 3038 { 3039 int status = NXGE_OK; 3040 3041 /* 3042 * Set up and prepare buffer blocks, descriptors 3043 * and mailbox. 3044 */ 3045 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3046 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3047 /* 3048 * Receive buffer blocks 3049 */ 3050 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3051 dma_buf_p, rbr_p, num_chunks); 3052 if (status != NXGE_OK) { 3053 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3054 "==> nxge_map_rxdma_channel (channel %d): " 3055 "map buffer failed 0x%x", channel, status)); 3056 goto nxge_map_rxdma_channel_exit; 3057 } 3058 3059 /* 3060 * Receive block ring, completion ring and mailbox. 
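 * The buffer ring was mapped just above; if mapping the control rings
 * fails, the fail2 path below unmaps those buffer blocks again so the
 * channel is left fully unmapped.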
3061 */ 3062 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3063 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3064 if (status != NXGE_OK) { 3065 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3066 "==> nxge_map_rxdma_channel (channel %d): " 3067 "map config failed 0x%x", channel, status)); 3068 goto nxge_map_rxdma_channel_fail2; 3069 } 3070 3071 goto nxge_map_rxdma_channel_exit; 3072 3073 nxge_map_rxdma_channel_fail3: 3074 /* Free rbr, rcr */ 3075 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3076 "==> nxge_map_rxdma_channel: free rbr/rcr " 3077 "(status 0x%x channel %d)", 3078 status, channel)); 3079 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3080 *rcr_p, *rx_mbox_p); 3081 3082 nxge_map_rxdma_channel_fail2: 3083 /* Free buffer blocks */ 3084 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3085 "==> nxge_map_rxdma_channel: free rx buffers" 3086 "(nxgep 0x%x status 0x%x channel %d)", 3087 nxgep, status, channel)); 3088 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3089 3090 status = NXGE_ERROR; 3091 3092 nxge_map_rxdma_channel_exit: 3093 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3094 "<== nxge_map_rxdma_channel: " 3095 "(nxgep 0x%x status 0x%x channel %d)", 3096 nxgep, status, channel)); 3097 3098 return (status); 3099 } 3100 3101 /*ARGSUSED*/ 3102 static void 3103 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3104 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3105 { 3106 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3107 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3108 3109 /* 3110 * unmap receive block ring, completion ring and mailbox. 3111 */ 3112 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3113 rcr_p, rx_mbox_p); 3114 3115 /* unmap buffer blocks */ 3116 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3117 3118 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3119 } 3120 3121 /*ARGSUSED*/ 3122 static nxge_status_t 3123 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3124 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3125 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3126 { 3127 p_rx_rbr_ring_t rbrp; 3128 p_rx_rcr_ring_t rcrp; 3129 p_rx_mbox_t mboxp; 3130 p_nxge_dma_common_t cntl_dmap; 3131 p_nxge_dma_common_t dmap; 3132 p_rx_msg_t *rx_msg_ring; 3133 p_rx_msg_t rx_msg_p; 3134 p_rbr_cfig_a_t rcfga_p; 3135 p_rbr_cfig_b_t rcfgb_p; 3136 p_rcrcfig_a_t cfga_p; 3137 p_rcrcfig_b_t cfgb_p; 3138 p_rxdma_cfig1_t cfig1_p; 3139 p_rxdma_cfig2_t cfig2_p; 3140 p_rbr_kick_t kick_p; 3141 uint32_t dmaaddrp; 3142 uint32_t *rbr_vaddrp; 3143 uint32_t bkaddr; 3144 nxge_status_t status = NXGE_OK; 3145 int i; 3146 uint32_t nxge_port_rcr_size; 3147 3148 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3149 "==> nxge_map_rxdma_channel_cfg_ring")); 3150 3151 cntl_dmap = *dma_cntl_p; 3152 3153 /* Map in the receive block ring */ 3154 rbrp = *rbr_p; 3155 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3156 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3157 /* 3158 * Zero out buffer block ring descriptors. 
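 * (They are repopulated below: RBR_CFIG_A is loaded with the ring base
 * address and length, RBR_CFIG_B with the three packet buffer sizes and
 * the block size code, and each 32-bit ring entry receives a buffer
 * block address shifted right by RBR_BKADDR_SHIFT, the low bits being
 * implied by the block alignment.)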
3159 */ 3160 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3161 3162 rcfga_p = &(rbrp->rbr_cfga); 3163 rcfgb_p = &(rbrp->rbr_cfgb); 3164 kick_p = &(rbrp->rbr_kick); 3165 rcfga_p->value = 0; 3166 rcfgb_p->value = 0; 3167 kick_p->value = 0; 3168 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3169 rcfga_p->value = (rbrp->rbr_addr & 3170 (RBR_CFIG_A_STDADDR_MASK | 3171 RBR_CFIG_A_STDADDR_BASE_MASK)); 3172 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3173 3174 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3175 rcfgb_p->bits.ldw.vld0 = 1; 3176 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3177 rcfgb_p->bits.ldw.vld1 = 1; 3178 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3179 rcfgb_p->bits.ldw.vld2 = 1; 3180 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3181 3182 /* 3183 * For each buffer block, enter receive block address to the ring. 3184 */ 3185 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3186 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3187 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3188 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3189 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3190 3191 rx_msg_ring = rbrp->rx_msg_ring; 3192 for (i = 0; i < rbrp->tnblocks; i++) { 3193 rx_msg_p = rx_msg_ring[i]; 3194 rx_msg_p->nxgep = nxgep; 3195 rx_msg_p->rx_rbr_p = rbrp; 3196 bkaddr = (uint32_t) 3197 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3198 >> RBR_BKADDR_SHIFT)); 3199 rx_msg_p->free = B_FALSE; 3200 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3201 3202 *rbr_vaddrp++ = bkaddr; 3203 } 3204 3205 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3206 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3207 3208 rbrp->rbr_rd_index = 0; 3209 3210 rbrp->rbr_consumed = 0; 3211 rbrp->rbr_use_bcopy = B_TRUE; 3212 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3213 /* 3214 * Do bcopy on packets greater than bcopy size once 3215 * the lo threshold is reached. 3216 * This lo threshold should be less than the hi threshold. 3217 * 3218 * Do bcopy on every packet once the hi threshold is reached. 
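 * The thresholds themselves are computed below as a fraction of the
 * ring size:  threshold = rbb_max * level / NXGE_RX_BCOPY_SCALE.
 * For example, assuming the scale factor is 8, a ring of 4096 blocks
 * with nxge_rx_threshold_hi set to NXGE_RX_COPY_6 yields a hi
 * threshold of 4096 * 6 / 8 = 3072 consumed buffers.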
3219 */ 3220 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3221 /* default it to use hi */ 3222 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3223 } 3224 3225 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3226 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3227 } 3228 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3229 3230 switch (nxge_rx_threshold_hi) { 3231 default: 3232 case NXGE_RX_COPY_NONE: 3233 /* Do not do bcopy at all */ 3234 rbrp->rbr_use_bcopy = B_FALSE; 3235 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3236 break; 3237 3238 case NXGE_RX_COPY_1: 3239 case NXGE_RX_COPY_2: 3240 case NXGE_RX_COPY_3: 3241 case NXGE_RX_COPY_4: 3242 case NXGE_RX_COPY_5: 3243 case NXGE_RX_COPY_6: 3244 case NXGE_RX_COPY_7: 3245 rbrp->rbr_threshold_hi = 3246 rbrp->rbb_max * 3247 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3248 break; 3249 3250 case NXGE_RX_COPY_ALL: 3251 rbrp->rbr_threshold_hi = 0; 3252 break; 3253 } 3254 3255 switch (nxge_rx_threshold_lo) { 3256 default: 3257 case NXGE_RX_COPY_NONE: 3258 /* Do not do bcopy at all */ 3259 if (rbrp->rbr_use_bcopy) { 3260 rbrp->rbr_use_bcopy = B_FALSE; 3261 } 3262 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3263 break; 3264 3265 case NXGE_RX_COPY_1: 3266 case NXGE_RX_COPY_2: 3267 case NXGE_RX_COPY_3: 3268 case NXGE_RX_COPY_4: 3269 case NXGE_RX_COPY_5: 3270 case NXGE_RX_COPY_6: 3271 case NXGE_RX_COPY_7: 3272 rbrp->rbr_threshold_lo = 3273 rbrp->rbb_max * 3274 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3275 break; 3276 3277 case NXGE_RX_COPY_ALL: 3278 rbrp->rbr_threshold_lo = 0; 3279 break; 3280 } 3281 3282 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3283 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3284 "rbb_max %d " 3285 "rbrp->rbr_bufsize_type %d " 3286 "rbb_threshold_hi %d " 3287 "rbb_threshold_lo %d", 3288 dma_channel, 3289 rbrp->rbb_max, 3290 rbrp->rbr_bufsize_type, 3291 rbrp->rbr_threshold_hi, 3292 rbrp->rbr_threshold_lo)); 3293 3294 rbrp->page_valid.value = 0; 3295 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3296 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3297 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3298 rbrp->page_hdl.value = 0; 3299 3300 rbrp->page_valid.bits.ldw.page0 = 1; 3301 rbrp->page_valid.bits.ldw.page1 = 1; 3302 3303 /* Map in the receive completion ring */ 3304 rcrp = (p_rx_rcr_ring_t) 3305 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3306 rcrp->rdc = dma_channel; 3307 3308 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3309 rcrp->comp_size = nxge_port_rcr_size; 3310 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3311 3312 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3313 3314 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3315 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3316 sizeof (rcr_entry_t)); 3317 rcrp->comp_rd_index = 0; 3318 rcrp->comp_wt_index = 0; 3319 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3320 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3321 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3322 #if defined(__i386) 3323 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3324 #else 3325 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3326 #endif 3327 3328 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3329 (nxge_port_rcr_size - 1); 3330 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3331 (nxge_port_rcr_size - 1); 3332 3333 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3334 "==> nxge_map_rxdma_channel_cfg_ring: " 3335 "channel %d " 3336 "rbr_vaddrp $%p " 3337 "rcr_desc_rd_head_p $%p " 3338 "rcr_desc_rd_head_pp $%p " 3339 "rcr_desc_rd_last_p $%p " 3340 
"rcr_desc_rd_last_pp $%p ", 3341 dma_channel, 3342 rbr_vaddrp, 3343 rcrp->rcr_desc_rd_head_p, 3344 rcrp->rcr_desc_rd_head_pp, 3345 rcrp->rcr_desc_last_p, 3346 rcrp->rcr_desc_last_pp)); 3347 3348 /* 3349 * Zero out buffer block ring descriptors. 3350 */ 3351 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3352 rcrp->intr_timeout = nxgep->intr_timeout; 3353 rcrp->intr_threshold = nxgep->intr_threshold; 3354 rcrp->full_hdr_flag = B_FALSE; 3355 rcrp->sw_priv_hdr_len = 0; 3356 3357 cfga_p = &(rcrp->rcr_cfga); 3358 cfgb_p = &(rcrp->rcr_cfgb); 3359 cfga_p->value = 0; 3360 cfgb_p->value = 0; 3361 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3362 cfga_p->value = (rcrp->rcr_addr & 3363 (RCRCFIG_A_STADDR_MASK | 3364 RCRCFIG_A_STADDR_BASE_MASK)); 3365 3366 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3367 RCRCFIG_A_LEN_SHIF); 3368 3369 /* 3370 * Timeout should be set based on the system clock divider. 3371 * The following timeout value of 1 assumes that the 3372 * granularity (1000) is 3 microseconds running at 300MHz. 3373 */ 3374 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3375 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3376 cfgb_p->bits.ldw.entout = 1; 3377 3378 /* Map in the mailbox */ 3379 mboxp = (p_rx_mbox_t) 3380 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3381 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3382 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3383 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3384 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3385 cfig1_p->value = cfig2_p->value = 0; 3386 3387 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3388 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3389 "==> nxge_map_rxdma_channel_cfg_ring: " 3390 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3391 dma_channel, cfig1_p->value, cfig2_p->value, 3392 mboxp->mbox_addr)); 3393 3394 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3395 & 0xfff); 3396 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3397 3398 3399 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3400 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3401 RXDMA_CFIG2_MBADDR_L_MASK); 3402 3403 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3404 3405 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3406 "==> nxge_map_rxdma_channel_cfg_ring: " 3407 "channel %d damaddrp $%p " 3408 "cfg1 0x%016llx cfig2 0x%016llx", 3409 dma_channel, dmaaddrp, 3410 cfig1_p->value, cfig2_p->value)); 3411 3412 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3413 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3414 3415 rbrp->rx_rcr_p = rcrp; 3416 rcrp->rx_rbr_p = rbrp; 3417 *rcr_p = rcrp; 3418 *rx_mbox_p = mboxp; 3419 3420 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3421 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3422 3423 return (status); 3424 } 3425 3426 /*ARGSUSED*/ 3427 static void 3428 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3429 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3430 { 3431 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3432 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3433 rcr_p->rdc)); 3434 3435 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3436 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3437 3438 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3439 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3440 } 3441 3442 static nxge_status_t 3443 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3444 p_nxge_dma_common_t *dma_buf_p, 3445 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3446 { 3447 p_rx_rbr_ring_t rbrp; 3448 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3449 
p_rx_msg_t *rx_msg_ring; 3450 p_rx_msg_t rx_msg_p; 3451 p_mblk_t mblk_p; 3452 3453 rxring_info_t *ring_info; 3454 nxge_status_t status = NXGE_OK; 3455 int i, j, index; 3456 uint32_t size, bsize, nblocks, nmsgs; 3457 3458 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3459 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3460 channel)); 3461 3462 dma_bufp = tmp_bufp = *dma_buf_p; 3463 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3464 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3465 "chunks bufp 0x%016llx", 3466 channel, num_chunks, dma_bufp)); 3467 3468 nmsgs = 0; 3469 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3470 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3471 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3472 "bufp 0x%016llx nblocks %d nmsgs %d", 3473 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3474 nmsgs += tmp_bufp->nblocks; 3475 } 3476 if (!nmsgs) { 3477 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3478 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3479 "no msg blocks", 3480 channel)); 3481 status = NXGE_ERROR; 3482 goto nxge_map_rxdma_channel_buf_ring_exit; 3483 } 3484 3485 rbrp = (p_rx_rbr_ring_t) 3486 KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 3487 3488 size = nmsgs * sizeof (p_rx_msg_t); 3489 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3490 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3491 KM_SLEEP); 3492 3493 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3494 (void *)nxgep->interrupt_cookie); 3495 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3496 (void *)nxgep->interrupt_cookie); 3497 rbrp->rdc = channel; 3498 rbrp->num_blocks = num_chunks; 3499 rbrp->tnblocks = nmsgs; 3500 rbrp->rbb_max = nmsgs; 3501 rbrp->rbr_max_size = nmsgs; 3502 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3503 3504 /* 3505 * Buffer sizes suggested by NIU architect. 3506 * 256, 512 and 2K. 3507 */ 3508 3509 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3510 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3511 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3512 3513 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3514 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3515 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3516 3517 rbrp->block_size = nxgep->rx_default_block_size; 3518 3519 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3520 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3521 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3522 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3523 } else { 3524 if (rbrp->block_size >= 0x2000) { 3525 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3526 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3527 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3528 } else { 3529 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3530 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3531 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3532 } 3533 } 3534 3535 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3536 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3537 "actual rbr max %d rbb_max %d nmsgs %d " 3538 "rbrp->block_size %d default_block_size %d " 3539 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3540 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3541 rbrp->block_size, nxgep->rx_default_block_size, 3542 nxge_rbr_size, nxge_rbr_spare_size)); 3543 3544 /* Map in buffers from the buffer pool. 
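 * For each DMA chunk the loop below records its DVMA address, kernel
 * address, size and starting block index in ring_info; this bookkeeping
 * (completed by nxge_rxbuf_index_info_init()) is what the receive path
 * later uses, via nxge_rxbuf_pp_to_vp(), to translate a hardware packet
 * buffer address from a completion entry back to a kernel virtual
 * address and message index.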
*/ 3545 index = 0; 3546 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3547 bsize = dma_bufp->block_size; 3548 nblocks = dma_bufp->nblocks; 3549 #if defined(__i386) 3550 ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3551 #else 3552 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3553 #endif 3554 ring_info->buffer[i].buf_index = i; 3555 ring_info->buffer[i].buf_size = dma_bufp->alength; 3556 ring_info->buffer[i].start_index = index; 3557 #if defined(__i386) 3558 ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3559 #else 3560 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3561 #endif 3562 3563 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3564 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3565 "chunk %d" 3566 " nblocks %d chunk_size %x block_size 0x%x " 3567 "dma_bufp $%p", channel, i, 3568 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3569 dma_bufp)); 3570 3571 for (j = 0; j < nblocks; j++) { 3572 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3573 dma_bufp)) == NULL) { 3574 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3575 "allocb failed (index %d i %d j %d)", 3576 index, i, j)); 3577 goto nxge_map_rxdma_channel_buf_ring_fail1; 3578 } 3579 rx_msg_ring[index] = rx_msg_p; 3580 rx_msg_p->block_index = index; 3581 rx_msg_p->shifted_addr = (uint32_t) 3582 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3583 RBR_BKADDR_SHIFT)); 3584 3585 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3586 "index %d j %d rx_msg_p $%p mblk %p", 3587 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3588 3589 mblk_p = rx_msg_p->rx_mblk_p; 3590 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3591 index++; 3592 rx_msg_p->buf_dma.dma_channel = channel; 3593 } 3594 } 3595 if (i < rbrp->num_blocks) { 3596 goto nxge_map_rxdma_channel_buf_ring_fail1; 3597 } 3598 3599 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3600 "nxge_map_rxdma_channel_buf_ring: done buf init " 3601 "channel %d msg block entries %d", 3602 channel, index)); 3603 ring_info->block_size_mask = bsize - 1; 3604 rbrp->rx_msg_ring = rx_msg_ring; 3605 rbrp->dma_bufp = dma_buf_p; 3606 rbrp->ring_info = ring_info; 3607 3608 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3609 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3610 " nxge_map_rxdma_channel_buf_ring: " 3611 "channel %d done buf info init", channel)); 3612 3613 *rbr_p = rbrp; 3614 goto nxge_map_rxdma_channel_buf_ring_exit; 3615 3616 nxge_map_rxdma_channel_buf_ring_fail1: 3617 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3618 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3619 channel, status)); 3620 3621 index--; 3622 for (; index >= 0; index--) { 3623 rx_msg_p = rx_msg_ring[index]; 3624 if (rx_msg_p != NULL) { 3625 freeb(rx_msg_p->rx_mblk_p); 3626 rx_msg_ring[index] = NULL; 3627 } 3628 } 3629 nxge_map_rxdma_channel_buf_ring_fail: 3630 MUTEX_DESTROY(&rbrp->post_lock); 3631 MUTEX_DESTROY(&rbrp->lock); 3632 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3633 KMEM_FREE(rx_msg_ring, size); 3634 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3635 3636 status = NXGE_ERROR; 3637 3638 nxge_map_rxdma_channel_buf_ring_exit: 3639 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3640 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3641 3642 return (status); 3643 } 3644 3645 /*ARGSUSED*/ 3646 static void 3647 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3648 p_rx_rbr_ring_t rbr_p) 3649 { 3650 p_rx_msg_t *rx_msg_ring; 3651 p_rx_msg_t rx_msg_p; 3652 rxring_info_t *ring_info; 3653 int i; 3654 uint32_t size; 3655 #ifdef NXGE_DEBUG 3656 int num_chunks; 3657 #endif 3658 3659 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3660 "==> 
nxge_unmap_rxdma_channel_buf_ring")); 3661 if (rbr_p == NULL) { 3662 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3663 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3664 return; 3665 } 3666 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3667 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 3668 rbr_p->rdc)); 3669 3670 rx_msg_ring = rbr_p->rx_msg_ring; 3671 ring_info = rbr_p->ring_info; 3672 3673 if (rx_msg_ring == NULL || ring_info == NULL) { 3674 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3675 "<== nxge_unmap_rxdma_channel_buf_ring: " 3676 "rx_msg_ring $%p ring_info $%p", 3677 rx_msg_p, ring_info)); 3678 return; 3679 } 3680 3681 #ifdef NXGE_DEBUG 3682 num_chunks = rbr_p->num_blocks; 3683 #endif 3684 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3685 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3686 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3687 "tnblocks %d (max %d) size ptrs %d ", 3688 rbr_p->rdc, num_chunks, 3689 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3690 3691 for (i = 0; i < rbr_p->tnblocks; i++) { 3692 rx_msg_p = rx_msg_ring[i]; 3693 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3694 " nxge_unmap_rxdma_channel_buf_ring: " 3695 "rx_msg_p $%p", 3696 rx_msg_p)); 3697 if (rx_msg_p != NULL) { 3698 freeb(rx_msg_p->rx_mblk_p); 3699 rx_msg_ring[i] = NULL; 3700 } 3701 } 3702 3703 MUTEX_DESTROY(&rbr_p->post_lock); 3704 MUTEX_DESTROY(&rbr_p->lock); 3705 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3706 KMEM_FREE(rx_msg_ring, size); 3707 KMEM_FREE(rbr_p, sizeof (rx_rbr_ring_t)); 3708 3709 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3710 "<== nxge_unmap_rxdma_channel_buf_ring")); 3711 } 3712 3713 static nxge_status_t 3714 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3715 { 3716 nxge_status_t status = NXGE_OK; 3717 3718 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3719 3720 /* 3721 * Load the sharable parameters by writing to the 3722 * function zero control registers. These FZC registers 3723 * should be initialized only once for the entire chip. 3724 */ 3725 (void) nxge_init_fzc_rx_common(nxgep); 3726 3727 /* 3728 * Initialize the RXDMA port specific FZC control configurations. 3729 * These FZC registers are pertaining to each port. 
3730 */ 3731 (void) nxge_init_fzc_rxdma_port(nxgep); 3732 3733 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3734 3735 return (status); 3736 } 3737 3738 /*ARGSUSED*/ 3739 static void 3740 nxge_rxdma_hw_stop_common(p_nxge_t nxgep) 3741 { 3742 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3743 3744 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3745 } 3746 3747 static nxge_status_t 3748 nxge_rxdma_hw_start(p_nxge_t nxgep) 3749 { 3750 int i, ndmas; 3751 uint16_t channel; 3752 p_rx_rbr_rings_t rx_rbr_rings; 3753 p_rx_rbr_ring_t *rbr_rings; 3754 p_rx_rcr_rings_t rx_rcr_rings; 3755 p_rx_rcr_ring_t *rcr_rings; 3756 p_rx_mbox_areas_t rx_mbox_areas_p; 3757 p_rx_mbox_t *rx_mbox_p; 3758 nxge_status_t status = NXGE_OK; 3759 3760 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3761 3762 rx_rbr_rings = nxgep->rx_rbr_rings; 3763 rx_rcr_rings = nxgep->rx_rcr_rings; 3764 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3765 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3766 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3767 return (NXGE_ERROR); 3768 } 3769 ndmas = rx_rbr_rings->ndmas; 3770 if (ndmas == 0) { 3771 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3772 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3773 return (NXGE_ERROR); 3774 } 3775 3776 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3777 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3778 3779 rbr_rings = rx_rbr_rings->rbr_rings; 3780 rcr_rings = rx_rcr_rings->rcr_rings; 3781 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3782 if (rx_mbox_areas_p) { 3783 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3784 } 3785 3786 for (i = 0; i < ndmas; i++) { 3787 channel = rbr_rings[i]->rdc; 3788 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3789 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3790 ndmas, channel)); 3791 status = nxge_rxdma_start_channel(nxgep, channel, 3792 (p_rx_rbr_ring_t)rbr_rings[i], 3793 (p_rx_rcr_ring_t)rcr_rings[i], 3794 (p_rx_mbox_t)rx_mbox_p[i]); 3795 if (status != NXGE_OK) { 3796 goto nxge_rxdma_hw_start_fail1; 3797 } 3798 } 3799 3800 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3801 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3802 rx_rbr_rings, rx_rcr_rings)); 3803 3804 goto nxge_rxdma_hw_start_exit; 3805 3806 nxge_rxdma_hw_start_fail1: 3807 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3808 "==> nxge_rxdma_hw_start: disable " 3809 "(status 0x%x channel %d i %d)", status, channel, i)); 3810 for (; i >= 0; i--) { 3811 channel = rbr_rings[i]->rdc; 3812 (void) nxge_rxdma_stop_channel(nxgep, channel); 3813 } 3814 3815 nxge_rxdma_hw_start_exit: 3816 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3817 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3818 3819 return (status); 3820 } 3821 3822 static void 3823 nxge_rxdma_hw_stop(p_nxge_t nxgep) 3824 { 3825 int i, ndmas; 3826 uint16_t channel; 3827 p_rx_rbr_rings_t rx_rbr_rings; 3828 p_rx_rbr_ring_t *rbr_rings; 3829 p_rx_rcr_rings_t rx_rcr_rings; 3830 3831 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 3832 3833 rx_rbr_rings = nxgep->rx_rbr_rings; 3834 rx_rcr_rings = nxgep->rx_rcr_rings; 3835 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3836 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3837 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 3838 return; 3839 } 3840 ndmas = rx_rbr_rings->ndmas; 3841 if (!ndmas) { 3842 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3843 "<== nxge_rxdma_hw_stop: no dma channel allocated")); 3844 return; 3845 } 3846 3847 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3848 "==> nxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3849 3850 rbr_rings = 
rx_rbr_rings->rbr_rings; 3851 3852 for (i = 0; i < ndmas; i++) { 3853 channel = rbr_rings[i]->rdc; 3854 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3855 "==> nxge_rxdma_hw_stop (ndmas %d) channel %d", 3856 ndmas, channel)); 3857 (void) nxge_rxdma_stop_channel(nxgep, channel); 3858 } 3859 3860 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3861 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3862 rx_rbr_rings, rx_rcr_rings)); 3863 3864 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 3865 } 3866 3867 3868 static nxge_status_t 3869 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 3870 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3871 3872 { 3873 npi_handle_t handle; 3874 npi_status_t rs = NPI_SUCCESS; 3875 rx_dma_ctl_stat_t cs; 3876 rx_dma_ent_msk_t ent_mask; 3877 nxge_status_t status = NXGE_OK; 3878 3879 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 3880 3881 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3882 3883 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 3884 "npi handle addr $%p acc $%p", 3885 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 3886 3887 /* Reset RXDMA channel */ 3888 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 3889 if (rs != NPI_SUCCESS) { 3890 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3891 "==> nxge_rxdma_start_channel: " 3892 "reset rxdma failed (0x%08x channel %d)", 3893 status, channel)); 3894 return (NXGE_ERROR | rs); 3895 } 3896 3897 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3898 "==> nxge_rxdma_start_channel: reset done: channel %d", 3899 channel)); 3900 3901 /* 3902 * Initialize the RXDMA channel specific FZC control 3903 * configurations. These FZC registers are pertaining 3904 * to each RX channel (logical pages). 3905 */ 3906 status = nxge_init_fzc_rxdma_channel(nxgep, 3907 channel, rbr_p, rcr_p, mbox_p); 3908 if (status != NXGE_OK) { 3909 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3910 "==> nxge_rxdma_start_channel: " 3911 "init fzc rxdma failed (0x%08x channel %d)", 3912 status, channel)); 3913 return (status); 3914 } 3915 3916 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3917 "==> nxge_rxdma_start_channel: fzc done")); 3918 3919 /* 3920 * Zero out the shadow and prefetch ram. 3921 */ 3922 3923 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3924 "ram done")); 3925 3926 /* Set up the interrupt event masks. 
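 * RBREMPTY is masked here while the ring is being primed; once the
 * channel has been enabled further below, the mask is rewritten so that
 * only the WRED drop and port drop packet events remain suppressed.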
*/ 3927 ent_mask.value = 0; 3928 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 3929 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 3930 &ent_mask); 3931 if (rs != NPI_SUCCESS) { 3932 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3933 "==> nxge_rxdma_start_channel: " 3934 "init rxdma event masks failed (0x%08x channel %d)", 3935 status, channel)); 3936 return (NXGE_ERROR | rs); 3937 } 3938 3939 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3940 "event done: channel %d (mask 0x%016llx)", 3941 channel, ent_mask.value)); 3942 3943 /* Initialize the receive DMA control and status register */ 3944 cs.value = 0; 3945 cs.bits.hdw.mex = 1; 3946 cs.bits.hdw.rcrthres = 1; 3947 cs.bits.hdw.rcrto = 1; 3948 cs.bits.hdw.rbr_empty = 1; 3949 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 3950 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3951 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 3952 if (status != NXGE_OK) { 3953 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3954 "==> nxge_rxdma_start_channel: " 3955 "init rxdma control register failed (0x%08x channel %d", 3956 status, channel)); 3957 return (status); 3958 } 3959 3960 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3961 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3962 3963 /* 3964 * Load RXDMA descriptors, buffers, mailbox, 3965 * initialise the receive DMA channels and 3966 * enable each DMA channel. 3967 */ 3968 status = nxge_enable_rxdma_channel(nxgep, 3969 channel, rbr_p, rcr_p, mbox_p); 3970 3971 if (status != NXGE_OK) { 3972 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3973 " nxge_rxdma_start_channel: " 3974 " init enable rxdma failed (0x%08x channel %d)", 3975 status, channel)); 3976 return (status); 3977 } 3978 3979 ent_mask.value = 0; 3980 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 3981 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 3982 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 3983 &ent_mask); 3984 if (rs != NPI_SUCCESS) { 3985 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3986 "==> nxge_rxdma_start_channel: " 3987 "init rxdma event masks failed (0x%08x channel %d)", 3988 status, channel)); 3989 return (NXGE_ERROR | rs); 3990 } 3991 3992 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3993 "control done - channel %d cs 0x%016llx", channel, cs.value)); 3994 3995 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3996 "==> nxge_rxdma_start_channel: enable done")); 3997 3998 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 3999 4000 return (NXGE_OK); 4001 } 4002 4003 static nxge_status_t 4004 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 4005 { 4006 npi_handle_t handle; 4007 npi_status_t rs = NPI_SUCCESS; 4008 rx_dma_ctl_stat_t cs; 4009 rx_dma_ent_msk_t ent_mask; 4010 nxge_status_t status = NXGE_OK; 4011 4012 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 4013 4014 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4015 4016 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 4017 "npi handle addr $%p acc $%p", 4018 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 4019 4020 /* Reset RXDMA channel */ 4021 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4022 if (rs != NPI_SUCCESS) { 4023 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4024 " nxge_rxdma_stop_channel: " 4025 " reset rxdma failed (0x%08x channel %d)", 4026 rs, channel)); 4027 return (NXGE_ERROR | rs); 4028 } 4029 4030 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4031 "==> nxge_rxdma_stop_channel: reset done")); 4032 4033 /* Set up the interrupt event masks. 
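 * All RDC events are masked here (RX_DMA_ENT_MSK_ALL) so that the
 * channel raises no further interrupts while it is being shut down.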
*/ 4034 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4035 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 4036 &ent_mask); 4037 if (rs != NPI_SUCCESS) { 4038 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4039 "==> nxge_rxdma_stop_channel: " 4040 "set rxdma event masks failed (0x%08x channel %d)", 4041 rs, channel)); 4042 return (NXGE_ERROR | rs); 4043 } 4044 4045 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4046 "==> nxge_rxdma_stop_channel: event done")); 4047 4048 /* Initialize the receive DMA control and status register */ 4049 cs.value = 0; 4050 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, 4051 &cs); 4052 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 4053 " to default (all 0s) 0x%08x", cs.value)); 4054 if (status != NXGE_OK) { 4055 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4056 " nxge_rxdma_stop_channel: init rxdma" 4057 " control register failed (0x%08x channel %d", 4058 status, channel)); 4059 return (status); 4060 } 4061 4062 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4063 "==> nxge_rxdma_stop_channel: control done")); 4064 4065 /* disable dma channel */ 4066 status = nxge_disable_rxdma_channel(nxgep, channel); 4067 4068 if (status != NXGE_OK) { 4069 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4070 " nxge_rxdma_stop_channel: " 4071 " init enable rxdma failed (0x%08x channel %d)", 4072 status, channel)); 4073 return (status); 4074 } 4075 4076 NXGE_DEBUG_MSG((nxgep, 4077 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 4078 4079 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 4080 4081 return (NXGE_OK); 4082 } 4083 4084 nxge_status_t 4085 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 4086 { 4087 npi_handle_t handle; 4088 p_nxge_rdc_sys_stats_t statsp; 4089 rx_ctl_dat_fifo_stat_t stat; 4090 uint32_t zcp_err_status; 4091 uint32_t ipp_err_status; 4092 nxge_status_t status = NXGE_OK; 4093 npi_status_t rs = NPI_SUCCESS; 4094 boolean_t my_err = B_FALSE; 4095 4096 handle = nxgep->npi_handle; 4097 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 4098 4099 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 4100 4101 if (rs != NPI_SUCCESS) 4102 return (NXGE_ERROR | rs); 4103 4104 if (stat.bits.ldw.id_mismatch) { 4105 statsp->id_mismatch++; 4106 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 4107 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 4108 /* Global fatal error encountered */ 4109 } 4110 4111 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 4112 switch (nxgep->mac.portnum) { 4113 case 0: 4114 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 4115 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 4116 my_err = B_TRUE; 4117 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4118 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4119 } 4120 break; 4121 case 1: 4122 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 4123 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 4124 my_err = B_TRUE; 4125 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4126 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4127 } 4128 break; 4129 case 2: 4130 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 4131 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 4132 my_err = B_TRUE; 4133 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4134 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4135 } 4136 break; 4137 case 3: 4138 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 4139 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 4140 my_err = B_TRUE; 4141 zcp_err_status = stat.bits.ldw.zcp_eop_err; 4142 ipp_err_status = stat.bits.ldw.ipp_eop_err; 4143 } 4144 break; 4145 default: 4146 return (NXGE_ERROR); 4147 
}
4148 }
4149
4150 if (my_err) {
4151 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
4152 zcp_err_status);
4153 if (status != NXGE_OK)
4154 return (status);
4155 }
4156
4157 return (NXGE_OK);
4158 }
4159
4160 static nxge_status_t
4161 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
4162 uint32_t zcp_status)
4163 {
4164 boolean_t rxport_fatal = B_FALSE;
4165 p_nxge_rdc_sys_stats_t statsp;
4166 nxge_status_t status = NXGE_OK;
4167 uint8_t portn;
4168
4169 portn = nxgep->mac.portnum;
4170 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
4171
4172 if (ipp_status & (0x1 << portn)) {
4173 statsp->ipp_eop_err++;
4174 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4175 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
4176 rxport_fatal = B_TRUE;
4177 }
4178
4179 if (zcp_status & (0x1 << portn)) {
4180 statsp->zcp_eop_err++;
4181 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
4182 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
4183 rxport_fatal = B_TRUE;
4184 }
4185
4186 if (rxport_fatal) {
4187 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4188 " nxge_rxdma_handle_port_errors: "
4189 " fatal error on Port #%d\n",
4190 portn));
4191 status = nxge_rx_port_fatal_err_recover(nxgep);
4192 if (status == NXGE_OK) {
4193 FM_SERVICE_RESTORED(nxgep);
4194 }
4195 }
4196
4197 return (status);
4198 }
4199
4200 static nxge_status_t
4201 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
4202 {
4203 npi_handle_t handle;
4204 npi_status_t rs = NPI_SUCCESS;
4205 nxge_status_t status = NXGE_OK;
4206 p_rx_rbr_ring_t rbrp;
4207 p_rx_rcr_ring_t rcrp;
4208 p_rx_mbox_t mboxp;
4209 rx_dma_ent_msk_t ent_mask;
4210 p_nxge_dma_common_t dmap;
4211 int ring_idx;
4212 uint32_t ref_cnt;
4213 p_rx_msg_t rx_msg_p;
4214 int i;
4215 uint32_t nxge_port_rcr_size;
4216
4217 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
4218 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
4219 "Recovering from RxDMAChannel#%d error...", channel));
4220
4221 /*
4222 * Stop the DMA channel and wait for the stop-done
4223 * indication. If the stop-done bit is not set,
4224 * report an error.
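 * The recovery path below then masks all RDC events, resets the
 * channel, reinitializes the RBR/RCR descriptor state, marks any
 * re-postable buffers as free, and finally restarts the channel.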
4225 */ 4226 4227 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4228 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 4229 4230 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 4231 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 4232 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 4233 4234 MUTEX_ENTER(&rcrp->lock); 4235 MUTEX_ENTER(&rbrp->lock); 4236 MUTEX_ENTER(&rbrp->post_lock); 4237 4238 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 4239 4240 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 4241 if (rs != NPI_SUCCESS) { 4242 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4243 "nxge_disable_rxdma_channel:failed")); 4244 goto fail; 4245 } 4246 4247 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 4248 4249 /* Disable interrupt */ 4250 ent_mask.value = RX_DMA_ENT_MSK_ALL; 4251 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 4252 if (rs != NPI_SUCCESS) { 4253 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4254 "nxge_rxdma_stop_channel: " 4255 "set rxdma event masks failed (channel %d)", 4256 channel)); 4257 } 4258 4259 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 4260 4261 /* Reset RXDMA channel */ 4262 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 4263 if (rs != NPI_SUCCESS) { 4264 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4265 "nxge_rxdma_fatal_err_recover: " 4266 " reset rxdma failed (channel %d)", channel)); 4267 goto fail; 4268 } 4269 4270 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 4271 4272 mboxp = 4273 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 4274 4275 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 4276 rbrp->rbr_rd_index = 0; 4277 4278 rcrp->comp_rd_index = 0; 4279 rcrp->comp_wt_index = 0; 4280 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 4281 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4282 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4283 #if defined(__i386) 4284 (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4285 #else 4286 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4287 #endif 4288 4289 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 4290 (nxge_port_rcr_size - 1); 4291 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 4292 (nxge_port_rcr_size - 1); 4293 4294 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 4295 bzero((caddr_t)dmap->kaddrp, dmap->alength); 4296 4297 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 4298 4299 for (i = 0; i < rbrp->rbr_max_size; i++) { 4300 rx_msg_p = rbrp->rx_msg_ring[i]; 4301 ref_cnt = rx_msg_p->ref_cnt; 4302 if (ref_cnt != 1) { 4303 if (rx_msg_p->cur_usage_cnt != 4304 rx_msg_p->max_usage_cnt) { 4305 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4306 "buf[%d]: cur_usage_cnt = %d " 4307 "max_usage_cnt = %d\n", i, 4308 rx_msg_p->cur_usage_cnt, 4309 rx_msg_p->max_usage_cnt)); 4310 } else { 4311 /* Buffer can be re-posted */ 4312 rx_msg_p->free = B_TRUE; 4313 rx_msg_p->cur_usage_cnt = 0; 4314 rx_msg_p->max_usage_cnt = 0xbaddcafe; 4315 rx_msg_p->pkt_buf_size = 0; 4316 } 4317 } 4318 } 4319 4320 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 4321 4322 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 4323 if (status != NXGE_OK) { 4324 goto fail; 4325 } 4326 4327 MUTEX_EXIT(&rbrp->post_lock); 4328 MUTEX_EXIT(&rbrp->lock); 4329 MUTEX_EXIT(&rcrp->lock); 4330 4331 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4332 "Recovery Successful, RxDMAChannel#%d Restored", 4333 channel)); 4334 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 4335 4336 return (NXGE_OK); 4337 fail: 4338 
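/* Release the ring locks taken above before reporting the failure. */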
MUTEX_EXIT(&rbrp->post_lock); 4339 MUTEX_EXIT(&rbrp->lock); 4340 MUTEX_EXIT(&rcrp->lock); 4341 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4342 4343 return (NXGE_ERROR | rs); 4344 } 4345 4346 nxge_status_t 4347 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 4348 { 4349 nxge_status_t status = NXGE_OK; 4350 p_nxge_dma_common_t *dma_buf_p; 4351 uint16_t channel; 4352 int ndmas; 4353 int i; 4354 4355 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 4356 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4357 "Recovering from RxPort error...")); 4358 /* Disable RxMAC */ 4359 4360 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n")); 4361 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 4362 goto fail; 4363 4364 NXGE_DELAY(1000); 4365 4366 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels...")); 4367 4368 ndmas = nxgep->rx_buf_pool_p->ndmas; 4369 dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p; 4370 4371 for (i = 0; i < ndmas; i++) { 4372 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 4373 if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) { 4374 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4375 "Could not recover channel %d", 4376 channel)); 4377 } 4378 } 4379 4380 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP...")); 4381 4382 /* Reset IPP */ 4383 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 4384 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4385 "nxge_rx_port_fatal_err_recover: " 4386 "Failed to reset IPP")); 4387 goto fail; 4388 } 4389 4390 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 4391 4392 /* Reset RxMAC */ 4393 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 4394 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4395 "nxge_rx_port_fatal_err_recover: " 4396 "Failed to reset RxMAC")); 4397 goto fail; 4398 } 4399 4400 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 4401 4402 /* Re-Initialize IPP */ 4403 if (nxge_ipp_init(nxgep) != NXGE_OK) { 4404 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4405 "nxge_rx_port_fatal_err_recover: " 4406 "Failed to init IPP")); 4407 goto fail; 4408 } 4409 4410 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 4411 4412 /* Re-Initialize RxMAC */ 4413 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 4414 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4415 "nxge_rx_port_fatal_err_recover: " 4416 "Failed to reset RxMAC")); 4417 goto fail; 4418 } 4419 4420 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 4421 4422 /* Re-enable RxMAC */ 4423 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 4424 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4425 "nxge_rx_port_fatal_err_recover: " 4426 "Failed to enable RxMAC")); 4427 goto fail; 4428 } 4429 4430 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4431 "Recovery Successful, RxPort Restored")); 4432 4433 return (NXGE_OK); 4434 fail: 4435 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 4436 return (status); 4437 } 4438 4439 void 4440 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 4441 { 4442 rx_dma_ctl_stat_t cs; 4443 rx_ctl_dat_fifo_stat_t cdfs; 4444 4445 switch (err_id) { 4446 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 4447 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 4448 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 4449 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 4450 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 4451 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 4452 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 4453 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 4454 case NXGE_FM_EREPORT_RDMC_RCRINCON: 4455 case NXGE_FM_EREPORT_RDMC_RCRFULL: 4456 case NXGE_FM_EREPORT_RDMC_RBRFULL: 4457 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 4458 case 
NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 4459 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 4460 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4461 chan, &cs.value); 4462 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 4463 cs.bits.hdw.rcr_ack_err = 1; 4464 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 4465 cs.bits.hdw.dc_fifo_err = 1; 4466 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 4467 cs.bits.hdw.rcr_sha_par = 1; 4468 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 4469 cs.bits.hdw.rbr_pre_par = 1; 4470 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 4471 cs.bits.hdw.rbr_tmout = 1; 4472 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 4473 cs.bits.hdw.rsp_cnt_err = 1; 4474 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 4475 cs.bits.hdw.byte_en_bus = 1; 4476 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 4477 cs.bits.hdw.rsp_dat_err = 1; 4478 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 4479 cs.bits.hdw.config_err = 1; 4480 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 4481 cs.bits.hdw.rcrincon = 1; 4482 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 4483 cs.bits.hdw.rcrfull = 1; 4484 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 4485 cs.bits.hdw.rbrfull = 1; 4486 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 4487 cs.bits.hdw.rbrlogpage = 1; 4488 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 4489 cs.bits.hdw.cfiglogpage = 1; 4490 #if defined(__i386) 4491 cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4492 cs.value); 4493 #else 4494 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 4495 cs.value); 4496 #endif 4497 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 4498 chan, cs.value); 4499 break; 4500 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 4501 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 4502 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 4503 cdfs.value = 0; 4504 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 4505 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 4506 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 4507 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 4508 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 4509 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4510 #if defined(__i386) 4511 cmn_err(CE_NOTE, 4512 "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4513 cdfs.value); 4514 #else 4515 cmn_err(CE_NOTE, 4516 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4517 cdfs.value); 4518 #endif 4519 RXDMA_REG_WRITE64(nxgep->npi_handle, 4520 RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value); 4521 break; 4522 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 4523 break; 4524 case NXGE_FM_EREPORT_RDMC_COMPLETION_ERR: 4525 break; 4526 } 4527 } 4528 4529 4530 static uint16_t 4531 nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb) 4532 { 4533 uint16_t sz = RBR_BKSIZE_8K_BYTES; 4534 4535 switch (bufsz_type) { 4536 case RCR_PKTBUFSZ_0: 4537 switch (rbr_cfgb.bits.ldw.bufsz0) { 4538 case RBR_BUFSZ0_256B: 4539 sz = RBR_BUFSZ0_256_BYTES; 4540 break; 4541 case RBR_BUFSZ0_512B: 4542 sz = RBR_BUFSZ0_512B_BYTES; 4543 break; 4544 case RBR_BUFSZ0_1K: 4545 sz = RBR_BUFSZ0_1K_BYTES; 4546 break; 4547 case RBR_BUFSZ0_2K: 4548 sz = RBR_BUFSZ0_2K_BYTES; 4549 break; 4550 default: 4551 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4552 "nxge_get_pktbug_size: bad bufsz0")); 4553 break; 4554 } 4555 break; 4556 case RCR_PKTBUFSZ_1: 4557 switch (rbr_cfgb.bits.ldw.bufsz1) { 4558 case RBR_BUFSZ1_1K: 4559 sz = RBR_BUFSZ1_1K_BYTES; 4560 break; 4561 case RBR_BUFSZ1_2K: 4562 sz = 
RBR_BUFSZ1_2K_BYTES; 4563 break; 4564 case RBR_BUFSZ1_4K: 4565 sz = RBR_BUFSZ1_4K_BYTES; 4566 break; 4567 case RBR_BUFSZ1_8K: 4568 sz = RBR_BUFSZ1_8K_BYTES; 4569 break; 4570 default: 4571 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4572 "nxge_get_pktbug_size: bad bufsz1")); 4573 break; 4574 } 4575 break; 4576 case RCR_PKTBUFSZ_2: 4577 switch (rbr_cfgb.bits.ldw.bufsz2) { 4578 case RBR_BUFSZ2_2K: 4579 sz = RBR_BUFSZ2_2K_BYTES; 4580 break; 4581 case RBR_BUFSZ2_4K: 4582 sz = RBR_BUFSZ2_4K_BYTES; 4583 break; 4584 case RBR_BUFSZ2_8K: 4585 sz = RBR_BUFSZ2_8K_BYTES; 4586 break; 4587 case RBR_BUFSZ2_16K: 4588 sz = RBR_BUFSZ2_16K_BYTES; 4589 break; 4590 default: 4591 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4592 "nxge_get_pktbug_size: bad bufsz2")); 4593 break; 4594 } 4595 break; 4596 case RCR_SINGLE_BLOCK: 4597 switch (rbr_cfgb.bits.ldw.bksize) { 4598 case BKSIZE_4K: 4599 sz = RBR_BKSIZE_4K_BYTES; 4600 break; 4601 case BKSIZE_8K: 4602 sz = RBR_BKSIZE_8K_BYTES; 4603 break; 4604 case BKSIZE_16K: 4605 sz = RBR_BKSIZE_16K_BYTES; 4606 break; 4607 case BKSIZE_32K: 4608 sz = RBR_BKSIZE_32K_BYTES; 4609 break; 4610 default: 4611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4612 "nxge_get_pktbug_size: bad bksize")); 4613 break; 4614 } 4615 break; 4616 default: 4617 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4618 "nxge_get_pktbug_size: bad bufsz_type")); 4619 break; 4620 } 4621 return (sz); 4622 } 4623
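/*
 * Minimal usage sketch for nxge_get_pktbuf_size() above (illustrative
 * only; the local names cfgb and sz are examples, and the exact byte
 * values come from the nxge header definitions):
 *
 *	rbr_cfig_b_t cfgb;	(the channel's RBR configuration-B value)
 *	uint16_t sz;
 *
 *	sz = nxge_get_pktbuf_size(nxgep, RCR_PKTBUFSZ_1, cfgb);
 *
 * With cfgb.bits.ldw.bufsz1 == RBR_BUFSZ1_4K this returns
 * RBR_BUFSZ1_4K_BYTES (a 4 KB buffer); an unrecognized bufsz_type or
 * encoding falls back to the default of RBR_BKSIZE_8K_BYTES.
 */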