1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/nxge/nxge_impl.h> 29 #include <sys/nxge/nxge_rxdma.h> 30 31 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 32 (rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid) 33 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 34 (rdc + nxgep->pt_config.hw_config.start_rdc) 35 36 /* 37 * Globals: tunable parameters (/etc/system or adb) 38 * 39 */ 40 extern uint32_t nxge_rbr_size; 41 extern uint32_t nxge_rcr_size; 42 extern uint32_t nxge_rbr_spare_size; 43 44 extern uint32_t nxge_mblks_pending; 45 46 /* 47 * Tunable to reduce the amount of time spent in the 48 * ISR doing Rx Processing. 49 */ 50 extern uint32_t nxge_max_rx_pkts; 51 boolean_t nxge_jumbo_enable; 52 53 /* 54 * Tunables to manage the receive buffer blocks. 55 * 56 * nxge_rx_threshold_hi: copy all buffers. 57 * nxge_rx_bcopy_size_type: receive buffer block size type. 58 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
59 */ 60 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 61 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 62 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 63 64 static nxge_status_t nxge_map_rxdma(p_nxge_t); 65 static void nxge_unmap_rxdma(p_nxge_t); 66 67 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 68 static void nxge_rxdma_hw_stop_common(p_nxge_t); 69 70 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t); 71 static void nxge_rxdma_hw_stop(p_nxge_t); 72 73 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 74 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 75 uint32_t, 76 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 77 p_rx_mbox_t *); 78 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 79 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 80 81 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 82 uint16_t, 83 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 84 p_rx_rcr_ring_t *, p_rx_mbox_t *); 85 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 86 p_rx_rcr_ring_t, p_rx_mbox_t); 87 88 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 89 uint16_t, 90 p_nxge_dma_common_t *, 91 p_rx_rbr_ring_t *, uint32_t); 92 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 93 p_rx_rbr_ring_t); 94 95 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 96 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 97 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 98 99 mblk_t * 100 nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t, 101 p_rx_rcr_ring_t *, rx_dma_ctl_stat_t); 102 103 static void nxge_receive_packet(p_nxge_t, 104 p_rx_rcr_ring_t, 105 p_rcr_entry_t, 106 boolean_t *, 107 mblk_t **, mblk_t **); 108 109 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 110 111 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 112 static void nxge_freeb(p_rx_msg_t); 113 static void nxge_rx_pkts_vring(p_nxge_t, uint_t, 114 p_nxge_ldv_t, rx_dma_ctl_stat_t); 115 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t, 116 p_nxge_ldv_t, rx_dma_ctl_stat_t); 117 118 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 119 uint32_t, uint32_t); 120 121 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 122 p_rx_rbr_ring_t); 123 124 125 static nxge_status_t 126 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 127 128 nxge_status_t 129 nxge_rx_port_fatal_err_recover(p_nxge_t); 130 131 static uint16_t 132 nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb); 133 134 nxge_status_t 135 nxge_init_rxdma_channels(p_nxge_t nxgep) 136 { 137 nxge_status_t status = NXGE_OK; 138 139 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 140 141 status = nxge_map_rxdma(nxgep); 142 if (status != NXGE_OK) { 143 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 144 "<== nxge_init_rxdma: status 0x%x", status)); 145 return (status); 146 } 147 148 status = nxge_rxdma_hw_start_common(nxgep); 149 if (status != NXGE_OK) { 150 nxge_unmap_rxdma(nxgep); 151 } 152 153 status = nxge_rxdma_hw_start(nxgep); 154 if (status != NXGE_OK) { 155 nxge_unmap_rxdma(nxgep); 156 } 157 158 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 159 "<== nxge_init_rxdma_channels: status 0x%x", status)); 160 161 return (status); 162 } 163 164 void 165 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 166 { 167 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 168 169 nxge_rxdma_hw_stop(nxgep); 170 nxge_rxdma_hw_stop_common(nxgep); 171 nxge_unmap_rxdma(nxgep); 172 173 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 174 "<== 
nxge_uinit_rxdma_channels")); 175 } 176 177 nxge_status_t 178 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 179 { 180 npi_handle_t handle; 181 npi_status_t rs = NPI_SUCCESS; 182 nxge_status_t status = NXGE_OK; 183 184 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 185 186 handle = NXGE_DEV_NPI_HANDLE(nxgep); 187 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 188 189 if (rs != NPI_SUCCESS) { 190 status = NXGE_ERROR | rs; 191 } 192 193 return (status); 194 } 195 196 void 197 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 198 { 199 int i, ndmas; 200 uint16_t channel; 201 p_rx_rbr_rings_t rx_rbr_rings; 202 p_rx_rbr_ring_t *rbr_rings; 203 npi_handle_t handle; 204 205 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 206 207 handle = NXGE_DEV_NPI_HANDLE(nxgep); 208 (void) npi_rxdma_dump_fzc_regs(handle); 209 210 rx_rbr_rings = nxgep->rx_rbr_rings; 211 if (rx_rbr_rings == NULL) { 212 NXGE_DEBUG_MSG((nxgep, RX_CTL, 213 "<== nxge_rxdma_regs_dump_channels: " 214 "NULL ring pointer")); 215 return; 216 } 217 if (rx_rbr_rings->rbr_rings == NULL) { 218 NXGE_DEBUG_MSG((nxgep, RX_CTL, 219 "<== nxge_rxdma_regs_dump_channels: " 220 " NULL rbr rings pointer")); 221 return; 222 } 223 224 ndmas = rx_rbr_rings->ndmas; 225 if (!ndmas) { 226 NXGE_DEBUG_MSG((nxgep, RX_CTL, 227 "<== nxge_rxdma_regs_dump_channels: no channel")); 228 return; 229 } 230 231 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 232 "==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas)); 233 234 rbr_rings = rx_rbr_rings->rbr_rings; 235 for (i = 0; i < ndmas; i++) { 236 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 237 continue; 238 } 239 channel = rbr_rings[i]->rdc; 240 (void) nxge_dump_rxdma_channel(nxgep, channel); 241 } 242 243 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 244 245 } 246 247 nxge_status_t 248 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 249 { 250 npi_handle_t handle; 251 npi_status_t rs = NPI_SUCCESS; 252 nxge_status_t status = NXGE_OK; 253 254 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 255 256 handle = NXGE_DEV_NPI_HANDLE(nxgep); 257 rs = npi_rxdma_dump_rdc_regs(handle, channel); 258 259 if (rs != NPI_SUCCESS) { 260 status = NXGE_ERROR | rs; 261 } 262 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 263 return (status); 264 } 265 266 nxge_status_t 267 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 268 p_rx_dma_ent_msk_t mask_p) 269 { 270 npi_handle_t handle; 271 npi_status_t rs = NPI_SUCCESS; 272 nxge_status_t status = NXGE_OK; 273 274 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 275 "<== nxge_init_rxdma_channel_event_mask")); 276 277 handle = NXGE_DEV_NPI_HANDLE(nxgep); 278 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 279 if (rs != NPI_SUCCESS) { 280 status = NXGE_ERROR | rs; 281 } 282 283 return (status); 284 } 285 286 nxge_status_t 287 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 288 p_rx_dma_ctl_stat_t cs_p) 289 { 290 npi_handle_t handle; 291 npi_status_t rs = NPI_SUCCESS; 292 nxge_status_t status = NXGE_OK; 293 294 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 295 "<== nxge_init_rxdma_channel_cntl_stat")); 296 297 handle = NXGE_DEV_NPI_HANDLE(nxgep); 298 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 299 300 if (rs != NPI_SUCCESS) { 301 status = NXGE_ERROR | rs; 302 } 303 304 return (status); 305 } 306 307 nxge_status_t 308 nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp, 309 uint8_t rdc) 310 { 311 npi_handle_t handle; 312 npi_status_t rs = 
NPI_SUCCESS; 313 p_nxge_dma_pt_cfg_t p_dma_cfgp; 314 p_nxge_rdc_grp_t rdc_grp_p; 315 uint8_t actual_rdcgrp, actual_rdc; 316 317 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 318 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 319 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 320 321 handle = NXGE_DEV_NPI_HANDLE(nxgep); 322 323 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 324 rdc_grp_p->rdc[0] = rdc; 325 326 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 327 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 328 329 rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp, 330 actual_rdc); 331 332 if (rs != NPI_SUCCESS) { 333 return (NXGE_ERROR | rs); 334 } 335 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 336 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 337 return (NXGE_OK); 338 } 339 340 nxge_status_t 341 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 342 { 343 npi_handle_t handle; 344 345 uint8_t actual_rdc; 346 npi_status_t rs = NPI_SUCCESS; 347 348 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 349 " ==> nxge_rxdma_cfg_port_default_rdc")); 350 351 handle = NXGE_DEV_NPI_HANDLE(nxgep); 352 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 353 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 354 355 356 if (rs != NPI_SUCCESS) { 357 return (NXGE_ERROR | rs); 358 } 359 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 360 " <== nxge_rxdma_cfg_port_default_rdc")); 361 362 return (NXGE_OK); 363 } 364 365 nxge_status_t 366 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 367 uint16_t pkts) 368 { 369 npi_status_t rs = NPI_SUCCESS; 370 npi_handle_t handle; 371 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 372 " ==> nxge_rxdma_cfg_rcr_threshold")); 373 handle = NXGE_DEV_NPI_HANDLE(nxgep); 374 375 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 376 377 if (rs != NPI_SUCCESS) { 378 return (NXGE_ERROR | rs); 379 } 380 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 381 return (NXGE_OK); 382 } 383 384 nxge_status_t 385 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 386 uint16_t tout, uint8_t enable) 387 { 388 npi_status_t rs = NPI_SUCCESS; 389 npi_handle_t handle; 390 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 391 handle = NXGE_DEV_NPI_HANDLE(nxgep); 392 if (enable == 0) { 393 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 394 } else { 395 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 396 tout); 397 } 398 399 if (rs != NPI_SUCCESS) { 400 return (NXGE_ERROR | rs); 401 } 402 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 403 return (NXGE_OK); 404 } 405 406 nxge_status_t 407 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 408 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 409 { 410 npi_handle_t handle; 411 rdc_desc_cfg_t rdc_desc; 412 p_rcrcfig_b_t cfgb_p; 413 npi_status_t rs = NPI_SUCCESS; 414 415 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 416 handle = NXGE_DEV_NPI_HANDLE(nxgep); 417 /* 418 * Use configuration data composed at init time. 419 * Write to hardware the receive ring configurations. 
420 */ 421 rdc_desc.mbox_enable = 1; 422 rdc_desc.mbox_addr = mbox_p->mbox_addr; 423 NXGE_DEBUG_MSG((nxgep, RX_CTL, 424 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 425 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 426 427 rdc_desc.rbr_len = rbr_p->rbb_max; 428 rdc_desc.rbr_addr = rbr_p->rbr_addr; 429 430 switch (nxgep->rx_bksize_code) { 431 case RBR_BKSIZE_4K: 432 rdc_desc.page_size = SIZE_4KB; 433 break; 434 case RBR_BKSIZE_8K: 435 rdc_desc.page_size = SIZE_8KB; 436 break; 437 case RBR_BKSIZE_16K: 438 rdc_desc.page_size = SIZE_16KB; 439 break; 440 case RBR_BKSIZE_32K: 441 rdc_desc.page_size = SIZE_32KB; 442 break; 443 } 444 445 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 446 rdc_desc.valid0 = 1; 447 448 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 449 rdc_desc.valid1 = 1; 450 451 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 452 rdc_desc.valid2 = 1; 453 454 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 455 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 456 457 rdc_desc.rcr_len = rcr_p->comp_size; 458 rdc_desc.rcr_addr = rcr_p->rcr_addr; 459 460 cfgb_p = &(rcr_p->rcr_cfgb); 461 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 462 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 463 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 464 465 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 466 "rbr_len qlen %d pagesize code %d rcr_len %d", 467 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 468 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 469 "size 0 %d size 1 %d size 2 %d", 470 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 471 rbr_p->npi_pkt_buf_size2)); 472 473 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 474 if (rs != NPI_SUCCESS) { 475 return (NXGE_ERROR | rs); 476 } 477 478 /* 479 * Enable the timeout and threshold. 480 */ 481 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 482 rdc_desc.rcr_threshold); 483 if (rs != NPI_SUCCESS) { 484 return (NXGE_ERROR | rs); 485 } 486 487 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 488 rdc_desc.rcr_timeout); 489 if (rs != NPI_SUCCESS) { 490 return (NXGE_ERROR | rs); 491 } 492 493 /* Enable the DMA */ 494 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 495 if (rs != NPI_SUCCESS) { 496 return (NXGE_ERROR | rs); 497 } 498 499 /* Kick the DMA engine. 
*/ 500 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 501 /* Clear the rbr empty bit */ 502 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 503 504 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 505 506 return (NXGE_OK); 507 } 508 509 nxge_status_t 510 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 511 { 512 npi_handle_t handle; 513 npi_status_t rs = NPI_SUCCESS; 514 515 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 516 handle = NXGE_DEV_NPI_HANDLE(nxgep); 517 518 /* disable the DMA */ 519 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 520 if (rs != NPI_SUCCESS) { 521 NXGE_DEBUG_MSG((nxgep, RX_CTL, 522 "<== nxge_disable_rxdma_channel:failed (0x%x)", 523 rs)); 524 return (NXGE_ERROR | rs); 525 } 526 527 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 528 return (NXGE_OK); 529 } 530 531 nxge_status_t 532 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 533 { 534 npi_handle_t handle; 535 nxge_status_t status = NXGE_OK; 536 537 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 538 "<== nxge_init_rxdma_channel_rcrflush")); 539 540 handle = NXGE_DEV_NPI_HANDLE(nxgep); 541 npi_rxdma_rdc_rcr_flush(handle, channel); 542 543 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 544 "<== nxge_init_rxdma_channel_rcrflsh")); 545 return (status); 546 547 } 548 549 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 550 551 #define TO_LEFT -1 552 #define TO_RIGHT 1 553 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 554 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 555 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 556 #define NO_HINT 0xffffffff 557 558 /*ARGSUSED*/ 559 nxge_status_t 560 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 561 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 562 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 563 { 564 int bufsize; 565 uint64_t pktbuf_pp; 566 uint64_t dvma_addr; 567 rxring_info_t *ring_info; 568 int base_side, end_side; 569 int r_index, l_index, anchor_index; 570 int found, search_done; 571 uint32_t offset, chunk_size, block_size, page_size_mask; 572 uint32_t chunk_index, block_index, total_index; 573 int max_iterations, iteration; 574 rxbuf_index_info_t *bufinfo; 575 576 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 577 578 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 579 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 580 pkt_buf_addr_pp, 581 pktbufsz_type)); 582 583 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 584 585 switch (pktbufsz_type) { 586 case 0: 587 bufsize = rbr_p->pkt_buf_size0; 588 break; 589 case 1: 590 bufsize = rbr_p->pkt_buf_size1; 591 break; 592 case 2: 593 bufsize = rbr_p->pkt_buf_size2; 594 break; 595 case RCR_SINGLE_BLOCK: 596 bufsize = 0; 597 anchor_index = 0; 598 break; 599 default: 600 return (NXGE_ERROR); 601 } 602 603 if (rbr_p->num_blocks == 1) { 604 anchor_index = 0; 605 ring_info = rbr_p->ring_info; 606 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 607 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 608 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 609 "buf_pp $%p btype %d anchor_index %d " 610 "bufinfo $%p", 611 pkt_buf_addr_pp, 612 pktbufsz_type, 613 anchor_index, 614 bufinfo)); 615 616 goto found_index; 617 } 618 619 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 620 "==> nxge_rxbuf_pp_to_vp: " 621 "buf_pp $%p btype %d anchor_index %d", 622 pkt_buf_addr_pp, 623 pktbufsz_type, 624 anchor_index)); 625 626 ring_info = rbr_p->ring_info; 627 found = B_FALSE; 628 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 629 iteration = 0; 630 max_iterations = ring_info->max_iterations; 631 /* 632 * 
First check if this block has been seen 633 * recently. This is indicated by a hint which 634 * is initialized when the first buffer of the block 635 * is seen. The hint is reset when the last buffer of 636 * the block has been processed. 637 * As three block sizes are supported, three hints 638 * are kept. The idea behind the hints is that once 639 * the hardware uses a block for a buffer of that 640 * size, it will use it exclusively for that size 641 * and will use it until it is exhausted. It is assumed 642 * that there would a single block being used for the same 643 * buffer sizes at any given time. 644 */ 645 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 646 anchor_index = ring_info->hint[pktbufsz_type]; 647 dvma_addr = bufinfo[anchor_index].dvma_addr; 648 chunk_size = bufinfo[anchor_index].buf_size; 649 if ((pktbuf_pp >= dvma_addr) && 650 (pktbuf_pp < (dvma_addr + chunk_size))) { 651 found = B_TRUE; 652 /* 653 * check if this is the last buffer in the block 654 * If so, then reset the hint for the size; 655 */ 656 657 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 658 ring_info->hint[pktbufsz_type] = NO_HINT; 659 } 660 } 661 662 if (found == B_FALSE) { 663 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 664 "==> nxge_rxbuf_pp_to_vp: (!found)" 665 "buf_pp $%p btype %d anchor_index %d", 666 pkt_buf_addr_pp, 667 pktbufsz_type, 668 anchor_index)); 669 670 /* 671 * This is the first buffer of the block of this 672 * size. Need to search the whole information 673 * array. 674 * the search algorithm uses a binary tree search 675 * algorithm. It assumes that the information is 676 * already sorted with increasing order 677 * info[0] < info[1] < info[2] .... < info[n-1] 678 * where n is the size of the information array 679 */ 680 r_index = rbr_p->num_blocks - 1; 681 l_index = 0; 682 search_done = B_FALSE; 683 anchor_index = MID_INDEX(r_index, l_index); 684 while (search_done == B_FALSE) { 685 if ((r_index == l_index) || 686 (iteration >= max_iterations)) 687 search_done = B_TRUE; 688 end_side = TO_RIGHT; /* to the right */ 689 base_side = TO_LEFT; /* to the left */ 690 /* read the DVMA address information and sort it */ 691 dvma_addr = bufinfo[anchor_index].dvma_addr; 692 chunk_size = bufinfo[anchor_index].buf_size; 693 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 694 "==> nxge_rxbuf_pp_to_vp: (searching)" 695 "buf_pp $%p btype %d " 696 "anchor_index %d chunk_size %d dvmaaddr $%p", 697 pkt_buf_addr_pp, 698 pktbufsz_type, 699 anchor_index, 700 chunk_size, 701 dvma_addr)); 702 703 if (pktbuf_pp >= dvma_addr) 704 base_side = TO_RIGHT; /* to the right */ 705 if (pktbuf_pp < (dvma_addr + chunk_size)) 706 end_side = TO_LEFT; /* to the left */ 707 708 switch (base_side + end_side) { 709 case IN_MIDDLE: 710 /* found */ 711 found = B_TRUE; 712 search_done = B_TRUE; 713 if ((pktbuf_pp + bufsize) < 714 (dvma_addr + chunk_size)) 715 ring_info->hint[pktbufsz_type] = 716 bufinfo[anchor_index].buf_index; 717 break; 718 case BOTH_RIGHT: 719 /* not found: go to the right */ 720 l_index = anchor_index + 1; 721 anchor_index = 722 MID_INDEX(r_index, l_index); 723 break; 724 725 case BOTH_LEFT: 726 /* not found: go to the left */ 727 r_index = anchor_index - 1; 728 anchor_index = MID_INDEX(r_index, 729 l_index); 730 break; 731 default: /* should not come here */ 732 return (NXGE_ERROR); 733 } 734 iteration++; 735 } 736 737 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 738 "==> nxge_rxbuf_pp_to_vp: (search done)" 739 "buf_pp $%p btype %d anchor_index %d", 740 pkt_buf_addr_pp, 741 pktbufsz_type, 742 anchor_index)); 743 } 744 745 if (found == 
B_FALSE) { 746 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 747 "==> nxge_rxbuf_pp_to_vp: (search failed)" 748 "buf_pp $%p btype %d anchor_index %d", 749 pkt_buf_addr_pp, 750 pktbufsz_type, 751 anchor_index)); 752 return (NXGE_ERROR); 753 } 754 755 found_index: 756 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 757 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 758 "buf_pp $%p btype %d bufsize %d anchor_index %d", 759 pkt_buf_addr_pp, 760 pktbufsz_type, 761 bufsize, 762 anchor_index)); 763 764 /* index of the first block in this chunk */ 765 chunk_index = bufinfo[anchor_index].start_index; 766 dvma_addr = bufinfo[anchor_index].dvma_addr; 767 page_size_mask = ring_info->block_size_mask; 768 769 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 770 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 771 "buf_pp $%p btype %d bufsize %d " 772 "anchor_index %d chunk_index %d dvma $%p", 773 pkt_buf_addr_pp, 774 pktbufsz_type, 775 bufsize, 776 anchor_index, 777 chunk_index, 778 dvma_addr)); 779 780 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 781 block_size = rbr_p->block_size; /* System block(page) size */ 782 783 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 784 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 785 "buf_pp $%p btype %d bufsize %d " 786 "anchor_index %d chunk_index %d dvma $%p " 787 "offset %d block_size %d", 788 pkt_buf_addr_pp, 789 pktbufsz_type, 790 bufsize, 791 anchor_index, 792 chunk_index, 793 dvma_addr, 794 offset, 795 block_size)); 796 797 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 798 799 block_index = (offset / block_size); /* index within chunk */ 800 total_index = chunk_index + block_index; 801 802 803 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 804 "==> nxge_rxbuf_pp_to_vp: " 805 "total_index %d dvma_addr $%p " 806 "offset %d block_size %d " 807 "block_index %d ", 808 total_index, dvma_addr, 809 offset, block_size, 810 block_index)); 811 812 *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr 813 + offset); 814 815 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 816 "==> nxge_rxbuf_pp_to_vp: " 817 "total_index %d dvma_addr $%p " 818 "offset %d block_size %d " 819 "block_index %d " 820 "*pkt_buf_addr_p $%p", 821 total_index, dvma_addr, 822 offset, block_size, 823 block_index, 824 *pkt_buf_addr_p)); 825 826 827 *msg_index = total_index; 828 *bufoffset = (offset & page_size_mask); 829 830 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 831 "==> nxge_rxbuf_pp_to_vp: get msg index: " 832 "msg_index %d bufoffset_index %d", 833 *msg_index, 834 *bufoffset)); 835 836 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 837 838 return (NXGE_OK); 839 } 840 841 /* 842 * used by quick sort (qsort) function 843 * to perform comparison 844 */ 845 static int 846 nxge_sort_compare(const void *p1, const void *p2) 847 { 848 849 rxbuf_index_info_t *a, *b; 850 851 a = (rxbuf_index_info_t *)p1; 852 b = (rxbuf_index_info_t *)p2; 853 854 if (a->dvma_addr > b->dvma_addr) 855 return (1); 856 if (a->dvma_addr < b->dvma_addr) 857 return (-1); 858 return (0); 859 } 860 861 862 863 /* 864 * grabbed this sort implementation from common/syscall/avl.c 865 * 866 */ 867 /* 868 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 
869 * v = Ptr to array/vector of objs 870 * n = # objs in the array 871 * s = size of each obj (must be multiples of a word size) 872 * f = ptr to function to compare two objs 873 * returns (-1 = less than, 0 = equal, 1 = greater than 874 */ 875 void 876 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 877 { 878 int g, i, j, ii; 879 unsigned int *p1, *p2; 880 unsigned int tmp; 881 882 /* No work to do */ 883 if (v == NULL || n <= 1) 884 return; 885 /* Sanity check on arguments */ 886 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 887 ASSERT(s > 0); 888 889 for (g = n / 2; g > 0; g /= 2) { 890 for (i = g; i < n; i++) { 891 for (j = i - g; j >= 0 && 892 (*f)(v + j * s, v + (j + g) * s) == 1; 893 j -= g) { 894 p1 = (unsigned *)(v + j * s); 895 p2 = (unsigned *)(v + (j + g) * s); 896 for (ii = 0; ii < s / 4; ii++) { 897 tmp = *p1; 898 *p1++ = *p2; 899 *p2++ = tmp; 900 } 901 } 902 } 903 } 904 } 905 906 /* 907 * Initialize data structures required for rxdma 908 * buffer dvma->vmem address lookup 909 */ 910 /*ARGSUSED*/ 911 static nxge_status_t 912 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 913 { 914 915 int index; 916 rxring_info_t *ring_info; 917 int max_iteration = 0, max_index = 0; 918 919 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 920 921 ring_info = rbrp->ring_info; 922 ring_info->hint[0] = NO_HINT; 923 ring_info->hint[1] = NO_HINT; 924 ring_info->hint[2] = NO_HINT; 925 max_index = rbrp->num_blocks; 926 927 /* read the DVMA address information and sort it */ 928 /* do init of the information array */ 929 930 931 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 932 " nxge_rxbuf_index_info_init Sort ptrs")); 933 934 /* sort the array */ 935 nxge_ksort((void *)ring_info->buffer, max_index, 936 sizeof (rxbuf_index_info_t), nxge_sort_compare); 937 938 939 940 for (index = 0; index < max_index; index++) { 941 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 942 " nxge_rxbuf_index_info_init: sorted chunk %d " 943 " ioaddr $%p kaddr $%p size %x", 944 index, ring_info->buffer[index].dvma_addr, 945 ring_info->buffer[index].kaddr, 946 ring_info->buffer[index].buf_size)); 947 } 948 949 max_iteration = 0; 950 while (max_index >= (1ULL << max_iteration)) 951 max_iteration++; 952 ring_info->max_iterations = max_iteration + 1; 953 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 954 " nxge_rxbuf_index_info_init Find max iter %d", 955 ring_info->max_iterations)); 956 957 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 958 return (NXGE_OK); 959 } 960 961 /* ARGSUSED */ 962 void 963 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 964 { 965 #ifdef NXGE_DEBUG 966 967 uint32_t bptr; 968 uint64_t pp; 969 970 bptr = entry_p->bits.hdw.pkt_buf_addr; 971 972 NXGE_DEBUG_MSG((nxgep, RX_CTL, 973 "\trcr entry $%p " 974 "\trcr entry 0x%0llx " 975 "\trcr entry 0x%08x " 976 "\trcr entry 0x%08x " 977 "\tvalue 0x%0llx\n" 978 "\tmulti = %d\n" 979 "\tpkt_type = 0x%x\n" 980 "\tzero_copy = %d\n" 981 "\tnoport = %d\n" 982 "\tpromis = %d\n" 983 "\terror = 0x%04x\n" 984 "\tdcf_err = 0x%01x\n" 985 "\tl2_len = %d\n" 986 "\tpktbufsize = %d\n" 987 "\tpkt_buf_addr = $%p\n" 988 "\tpkt_buf_addr (<< 6) = $%p\n", 989 entry_p, 990 *(int64_t *)entry_p, 991 *(int32_t *)entry_p, 992 *(int32_t *)((char *)entry_p + 32), 993 entry_p->value, 994 entry_p->bits.hdw.multi, 995 entry_p->bits.hdw.pkt_type, 996 entry_p->bits.hdw.zero_copy, 997 entry_p->bits.hdw.noport, 998 entry_p->bits.hdw.promis, 999 entry_p->bits.hdw.error, 1000 entry_p->bits.hdw.dcf_err, 1001 entry_p->bits.hdw.l2_len, 1002 entry_p->bits.hdw.pktbufsz, 
1003 bptr, 1004 entry_p->bits.ldw.pkt_buf_addr)); 1005 1006 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 1007 RCR_PKT_BUF_ADDR_SHIFT; 1008 1009 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 1010 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 1011 #endif 1012 } 1013 1014 void 1015 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 1016 { 1017 npi_handle_t handle; 1018 rbr_stat_t rbr_stat; 1019 addr44_t hd_addr; 1020 addr44_t tail_addr; 1021 uint16_t qlen; 1022 1023 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1024 "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 1025 1026 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1027 1028 /* RBR head */ 1029 hd_addr.addr = 0; 1030 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1031 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 1032 (void *)hd_addr.addr); 1033 1034 /* RBR stats */ 1035 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 1036 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 1037 1038 /* RCR tail */ 1039 tail_addr.addr = 0; 1040 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1041 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 1042 (void *)tail_addr.addr); 1043 1044 /* RCR qlen */ 1045 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 1046 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 1047 1048 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1049 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 1050 } 1051 1052 void 1053 nxge_rxdma_stop(p_nxge_t nxgep) 1054 { 1055 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 1056 1057 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1058 (void) nxge_rx_mac_disable(nxgep); 1059 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 1060 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 1061 } 1062 1063 void 1064 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 1065 { 1066 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 1067 1068 (void) nxge_rxdma_stop(nxgep); 1069 (void) nxge_uninit_rxdma_channels(nxgep); 1070 (void) nxge_init_rxdma_channels(nxgep); 1071 1072 #ifndef AXIS_DEBUG_LB 1073 (void) nxge_xcvr_init(nxgep); 1074 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 1075 #endif 1076 (void) nxge_rx_mac_enable(nxgep); 1077 1078 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 1079 } 1080 1081 nxge_status_t 1082 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1083 { 1084 int i, ndmas; 1085 uint16_t channel; 1086 p_rx_rbr_rings_t rx_rbr_rings; 1087 p_rx_rbr_ring_t *rbr_rings; 1088 npi_handle_t handle; 1089 npi_status_t rs = NPI_SUCCESS; 1090 nxge_status_t status = NXGE_OK; 1091 1092 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1093 "==> nxge_rxdma_hw_mode: mode %d", enable)); 1094 1095 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1096 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1097 "<== nxge_rxdma_mode: not initialized")); 1098 return (NXGE_ERROR); 1099 } 1100 1101 rx_rbr_rings = nxgep->rx_rbr_rings; 1102 if (rx_rbr_rings == NULL) { 1103 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1104 "<== nxge_rxdma_mode: NULL ring pointer")); 1105 return (NXGE_ERROR); 1106 } 1107 if (rx_rbr_rings->rbr_rings == NULL) { 1108 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1109 "<== nxge_rxdma_mode: NULL rbr rings pointer")); 1110 return (NXGE_ERROR); 1111 } 1112 1113 ndmas = rx_rbr_rings->ndmas; 1114 if (!ndmas) { 1115 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1116 "<== nxge_rxdma_mode: no channel")); 1117 return (NXGE_ERROR); 1118 } 1119 1120 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1121 "==> nxge_rxdma_mode (ndmas %d)", ndmas)); 1122 1123 rbr_rings = rx_rbr_rings->rbr_rings; 1124 1125 handle = 
NXGE_DEV_NPI_HANDLE(nxgep); 1126 for (i = 0; i < ndmas; i++) { 1127 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 1128 continue; 1129 } 1130 channel = rbr_rings[i]->rdc; 1131 if (enable) { 1132 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1133 "==> nxge_rxdma_hw_mode: channel %d (enable)", 1134 channel)); 1135 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 1136 } else { 1137 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1138 "==> nxge_rxdma_hw_mode: channel %d (disable)", 1139 channel)); 1140 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 1141 } 1142 } 1143 1144 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1145 1146 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1147 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 1148 1149 return (status); 1150 } 1151 1152 void 1153 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1154 { 1155 npi_handle_t handle; 1156 1157 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1158 "==> nxge_rxdma_enable_channel: channel %d", channel)); 1159 1160 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1161 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 1162 1163 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 1164 } 1165 1166 void 1167 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1168 { 1169 npi_handle_t handle; 1170 1171 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1172 "==> nxge_rxdma_disable_channel: channel %d", channel)); 1173 1174 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1175 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 1176 1177 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 1178 } 1179 1180 void 1181 nxge_hw_start_rx(p_nxge_t nxgep) 1182 { 1183 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 1184 1185 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 1186 (void) nxge_rx_mac_enable(nxgep); 1187 1188 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 1189 } 1190 1191 /*ARGSUSED*/ 1192 void 1193 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 1194 { 1195 int i, ndmas; 1196 uint16_t rdc; 1197 p_rx_rbr_rings_t rx_rbr_rings; 1198 p_rx_rbr_ring_t *rbr_rings; 1199 p_rx_rcr_rings_t rx_rcr_rings; 1200 1201 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 1202 1203 rx_rbr_rings = nxgep->rx_rbr_rings; 1204 if (rx_rbr_rings == NULL) { 1205 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1206 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 1207 return; 1208 } 1209 ndmas = rx_rbr_rings->ndmas; 1210 if (!ndmas) { 1211 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1212 "<== nxge_fixup_rxdma_rings: no channel")); 1213 return; 1214 } 1215 1216 rx_rcr_rings = nxgep->rx_rcr_rings; 1217 if (rx_rcr_rings == NULL) { 1218 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1219 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 1220 return; 1221 } 1222 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1223 "==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas)); 1224 1225 nxge_rxdma_hw_stop(nxgep); 1226 1227 rbr_rings = rx_rbr_rings->rbr_rings; 1228 for (i = 0; i < ndmas; i++) { 1229 rdc = rbr_rings[i]->rdc; 1230 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1231 "==> nxge_fixup_rxdma_rings: channel %d " 1232 "ring $%px", rdc, rbr_rings[i])); 1233 (void) nxge_rxdma_fixup_channel(nxgep, rdc, i); 1234 } 1235 1236 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 1237 } 1238 1239 void 1240 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1241 { 1242 int i; 1243 1244 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 1245 i = nxge_rxdma_get_ring_index(nxgep, channel); 1246 if (i < 0) { 1247 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1248 "<== nxge_rxdma_fix_channel: no entry found")); 1249 return; 1250 } 1251 1252 
nxge_rxdma_fixup_channel(nxgep, channel, i); 1253 1254 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_txdma_fix_channel")); 1255 } 1256 1257 void 1258 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 1259 { 1260 int ndmas; 1261 p_rx_rbr_rings_t rx_rbr_rings; 1262 p_rx_rbr_ring_t *rbr_rings; 1263 p_rx_rcr_rings_t rx_rcr_rings; 1264 p_rx_rcr_ring_t *rcr_rings; 1265 p_rx_mbox_areas_t rx_mbox_areas_p; 1266 p_rx_mbox_t *rx_mbox_p; 1267 p_nxge_dma_pool_t dma_buf_poolp; 1268 p_nxge_dma_pool_t dma_cntl_poolp; 1269 p_rx_rbr_ring_t rbrp; 1270 p_rx_rcr_ring_t rcrp; 1271 p_rx_mbox_t mboxp; 1272 p_nxge_dma_common_t dmap; 1273 nxge_status_t status = NXGE_OK; 1274 1275 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 1276 1277 (void) nxge_rxdma_stop_channel(nxgep, channel); 1278 1279 dma_buf_poolp = nxgep->rx_buf_pool_p; 1280 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 1281 1282 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 1283 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1284 "<== nxge_rxdma_fixup_channel: buf not allocated")); 1285 return; 1286 } 1287 1288 ndmas = dma_buf_poolp->ndmas; 1289 if (!ndmas) { 1290 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1291 "<== nxge_rxdma_fixup_channel: no dma allocated")); 1292 return; 1293 } 1294 1295 rx_rbr_rings = nxgep->rx_rbr_rings; 1296 rx_rcr_rings = nxgep->rx_rcr_rings; 1297 rbr_rings = rx_rbr_rings->rbr_rings; 1298 rcr_rings = rx_rcr_rings->rcr_rings; 1299 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 1300 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 1301 1302 /* Reinitialize the receive block and completion rings */ 1303 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 1304 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 1305 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 1306 1307 1308 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 1309 rbrp->rbr_rd_index = 0; 1310 rcrp->comp_rd_index = 0; 1311 rcrp->comp_wt_index = 0; 1312 1313 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 1314 bzero((caddr_t)dmap->kaddrp, dmap->alength); 1315 1316 status = nxge_rxdma_start_channel(nxgep, channel, 1317 rbrp, rcrp, mboxp); 1318 if (status != NXGE_OK) { 1319 goto nxge_rxdma_fixup_channel_fail; 1320 } 1321 if (status != NXGE_OK) { 1322 goto nxge_rxdma_fixup_channel_fail; 1323 } 1324 1325 nxge_rxdma_fixup_channel_fail: 1326 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1327 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 1328 1329 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 1330 } 1331 1332 int 1333 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 1334 { 1335 int i, ndmas; 1336 uint16_t rdc; 1337 p_rx_rbr_rings_t rx_rbr_rings; 1338 p_rx_rbr_ring_t *rbr_rings; 1339 1340 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1341 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 1342 1343 rx_rbr_rings = nxgep->rx_rbr_rings; 1344 if (rx_rbr_rings == NULL) { 1345 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1346 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 1347 return (-1); 1348 } 1349 ndmas = rx_rbr_rings->ndmas; 1350 if (!ndmas) { 1351 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1352 "<== nxge_rxdma_get_ring_index: no channel")); 1353 return (-1); 1354 } 1355 1356 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1357 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 1358 1359 rbr_rings = rx_rbr_rings->rbr_rings; 1360 for (i = 0; i < ndmas; i++) { 1361 rdc = rbr_rings[i]->rdc; 1362 if (channel == rdc) { 1363 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1364 "==> nxge_rxdma_get_rbr_ring: " 1365 "channel %d (index %d) " 1366 "ring %d", channel, i, 1367 rbr_rings[i])); 1368 return (i); 1369 } 1370 } 1371 1372 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 1373 "<== nxge_rxdma_get_rbr_ring_index: not found")); 1374 1375 return (-1); 1376 } 1377 1378 p_rx_rbr_ring_t 1379 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 1380 { 1381 int i, ndmas; 1382 uint16_t rdc; 1383 p_rx_rbr_rings_t rx_rbr_rings; 1384 p_rx_rbr_ring_t *rbr_rings; 1385 1386 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1387 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 1388 1389 rx_rbr_rings = nxgep->rx_rbr_rings; 1390 if (rx_rbr_rings == NULL) { 1391 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1392 "<== nxge_rxdma_get_rbr_ring: NULL ring pointer")); 1393 return (NULL); 1394 } 1395 ndmas = rx_rbr_rings->ndmas; 1396 if (!ndmas) { 1397 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1398 "<== nxge_rxdma_get_rbr_ring: no channel")); 1399 return (NULL); 1400 } 1401 1402 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1403 "==> nxge_rxdma_get_ring (ndmas %d)", ndmas)); 1404 1405 rbr_rings = rx_rbr_rings->rbr_rings; 1406 for (i = 0; i < ndmas; i++) { 1407 rdc = rbr_rings[i]->rdc; 1408 if (channel == rdc) { 1409 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1410 "==> nxge_rxdma_get_rbr_ring: channel %d " 1411 "ring $%p", channel, rbr_rings[i])); 1412 return (rbr_rings[i]); 1413 } 1414 } 1415 1416 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1417 "<== nxge_rxdma_get_rbr_ring: not found")); 1418 1419 return (NULL); 1420 } 1421 1422 p_rx_rcr_ring_t 1423 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 1424 { 1425 int i, ndmas; 1426 uint16_t rdc; 1427 p_rx_rcr_rings_t rx_rcr_rings; 1428 p_rx_rcr_ring_t *rcr_rings; 1429 1430 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1431 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 1432 1433 rx_rcr_rings = nxgep->rx_rcr_rings; 1434 if (rx_rcr_rings == NULL) { 1435 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1436 "<== nxge_rxdma_get_rcr_ring: NULL ring pointer")); 1437 return (NULL); 1438 } 1439 ndmas = rx_rcr_rings->ndmas; 1440 if (!ndmas) { 1441 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1442 "<== nxge_rxdma_get_rcr_ring: no channel")); 1443 return (NULL); 1444 } 1445 1446 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1447 "==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas)); 1448 1449 rcr_rings = rx_rcr_rings->rcr_rings; 1450 for (i = 0; i < ndmas; i++) { 1451 rdc = rcr_rings[i]->rdc; 1452 if (channel == rdc) { 1453 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1454 "==> nxge_rxdma_get_rcr_ring: channel %d " 1455 "ring $%p", channel, rcr_rings[i])); 1456 return (rcr_rings[i]); 1457 } 1458 } 1459 1460 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1461 "<== nxge_rxdma_get_rcr_ring: not found")); 1462 1463 return (NULL); 1464 } 1465 1466 /* 1467 * Static functions start here. 
1468 */ 1469 static p_rx_msg_t 1470 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 1471 { 1472 p_rx_msg_t nxge_mp = NULL; 1473 p_nxge_dma_common_t dmamsg_p; 1474 uchar_t *buffer; 1475 1476 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 1477 if (nxge_mp == NULL) { 1478 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1479 "Allocation of a rx msg failed.")); 1480 goto nxge_allocb_exit; 1481 } 1482 1483 nxge_mp->use_buf_pool = B_FALSE; 1484 if (dmabuf_p) { 1485 nxge_mp->use_buf_pool = B_TRUE; 1486 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 1487 *dmamsg_p = *dmabuf_p; 1488 dmamsg_p->nblocks = 1; 1489 dmamsg_p->block_size = size; 1490 dmamsg_p->alength = size; 1491 buffer = (uchar_t *)dmabuf_p->kaddrp; 1492 1493 dmabuf_p->kaddrp = (void *) 1494 ((char *)dmabuf_p->kaddrp + size); 1495 dmabuf_p->ioaddr_pp = (void *) 1496 ((char *)dmabuf_p->ioaddr_pp + size); 1497 dmabuf_p->alength -= size; 1498 dmabuf_p->offset += size; 1499 dmabuf_p->dma_cookie.dmac_laddress += size; 1500 dmabuf_p->dma_cookie.dmac_size -= size; 1501 1502 } else { 1503 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 1504 if (buffer == NULL) { 1505 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 1506 "Allocation of a receive page failed.")); 1507 goto nxge_allocb_fail1; 1508 } 1509 } 1510 1511 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 1512 if (nxge_mp->rx_mblk_p == NULL) { 1513 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 1514 goto nxge_allocb_fail2; 1515 } 1516 1517 nxge_mp->buffer = buffer; 1518 nxge_mp->block_size = size; 1519 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 1520 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 1521 nxge_mp->ref_cnt = 1; 1522 nxge_mp->free = B_TRUE; 1523 nxge_mp->rx_use_bcopy = B_FALSE; 1524 1525 atomic_inc_32(&nxge_mblks_pending); 1526 1527 goto nxge_allocb_exit; 1528 1529 nxge_allocb_fail2: 1530 if (!nxge_mp->use_buf_pool) { 1531 KMEM_FREE(buffer, size); 1532 } 1533 1534 nxge_allocb_fail1: 1535 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 1536 nxge_mp = NULL; 1537 1538 nxge_allocb_exit: 1539 return (nxge_mp); 1540 } 1541 1542 p_mblk_t 1543 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1544 { 1545 p_mblk_t mp; 1546 1547 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 1548 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 1549 "offset = 0x%08X " 1550 "size = 0x%08X", 1551 nxge_mp, offset, size)); 1552 1553 mp = desballoc(&nxge_mp->buffer[offset], size, 1554 0, &nxge_mp->freeb); 1555 if (mp == NULL) { 1556 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1557 goto nxge_dupb_exit; 1558 } 1559 atomic_inc_32(&nxge_mp->ref_cnt); 1560 atomic_inc_32(&nxge_mblks_pending); 1561 1562 1563 nxge_dupb_exit: 1564 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1565 nxge_mp)); 1566 return (mp); 1567 } 1568 1569 p_mblk_t 1570 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 1571 { 1572 p_mblk_t mp; 1573 uchar_t *dp; 1574 1575 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 1576 if (mp == NULL) { 1577 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 1578 goto nxge_dupb_bcopy_exit; 1579 } 1580 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 1581 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 1582 mp->b_wptr = dp + size; 1583 1584 nxge_dupb_bcopy_exit: 1585 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 1586 nxge_mp)); 1587 return (mp); 1588 } 1589 1590 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 1591 p_rx_msg_t rx_msg_p); 1592 1593 void 1594 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t 
rx_msg_p) 1595 { 1596 1597 npi_handle_t handle; 1598 1599 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 1600 1601 /* Reuse this buffer */ 1602 rx_msg_p->free = B_FALSE; 1603 rx_msg_p->cur_usage_cnt = 0; 1604 rx_msg_p->max_usage_cnt = 0; 1605 rx_msg_p->pkt_buf_size = 0; 1606 1607 if (rx_rbr_p->rbr_use_bcopy) { 1608 rx_msg_p->rx_use_bcopy = B_FALSE; 1609 atomic_dec_32(&rx_rbr_p->rbr_consumed); 1610 } 1611 1612 /* 1613 * Get the rbr header pointer and its offset index. 1614 */ 1615 MUTEX_ENTER(&rx_rbr_p->post_lock); 1616 1617 1618 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 1619 rx_rbr_p->rbr_wrap_mask); 1620 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 1621 MUTEX_EXIT(&rx_rbr_p->post_lock); 1622 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1623 npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1); 1624 1625 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1626 "<== nxge_post_page (channel %d post_next_index %d)", 1627 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 1628 1629 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 1630 } 1631 1632 void 1633 nxge_freeb(p_rx_msg_t rx_msg_p) 1634 { 1635 size_t size; 1636 uchar_t *buffer = NULL; 1637 int ref_cnt; 1638 boolean_t free_state = B_FALSE; 1639 1640 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 1641 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 1642 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 1643 rx_msg_p, nxge_mblks_pending)); 1644 1645 atomic_dec_32(&nxge_mblks_pending); 1646 /* 1647 * First we need to get the free state, then 1648 * atomic decrement the reference count to prevent 1649 * the race condition with the interrupt thread that 1650 * is processing a loaned up buffer block. 1651 */ 1652 free_state = rx_msg_p->free; 1653 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 1654 if (!ref_cnt) { 1655 buffer = rx_msg_p->buffer; 1656 size = rx_msg_p->block_size; 1657 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 1658 "will free: rx_msg_p = $%p (block pending %d)", 1659 rx_msg_p, nxge_mblks_pending)); 1660 1661 if (!rx_msg_p->use_buf_pool) { 1662 KMEM_FREE(buffer, size); 1663 } 1664 1665 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1666 return; 1667 } 1668 1669 /* 1670 * Repost buffer. 1671 */ 1672 if (free_state && (ref_cnt == 1)) { 1673 NXGE_DEBUG_MSG((NULL, RX_CTL, 1674 "nxge_freeb: post page $%p:", rx_msg_p)); 1675 nxge_post_page(rx_msg_p->nxgep, rx_msg_p->rx_rbr_p, 1676 rx_msg_p); 1677 } 1678 1679 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 1680 } 1681 1682 uint_t 1683 nxge_rx_intr(void *arg1, void *arg2) 1684 { 1685 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1686 p_nxge_t nxgep = (p_nxge_t)arg2; 1687 p_nxge_ldg_t ldgp; 1688 uint8_t channel; 1689 npi_handle_t handle; 1690 rx_dma_ctl_stat_t cs; 1691 1692 #ifdef NXGE_DEBUG 1693 rxdma_cfig1_t cfg; 1694 #endif 1695 uint_t serviced = DDI_INTR_UNCLAIMED; 1696 1697 if (ldvp == NULL) { 1698 NXGE_DEBUG_MSG((NULL, INT_CTL, 1699 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 1700 nxgep, ldvp)); 1701 1702 return (DDI_INTR_CLAIMED); 1703 } 1704 1705 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1706 nxgep = ldvp->nxgep; 1707 } 1708 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1709 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 1710 nxgep, ldvp)); 1711 1712 /* 1713 * This interrupt handler is for a specific 1714 * receive dma channel. 1715 */ 1716 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1717 /* 1718 * Get the control and status for this channel. 
1719 */ 1720 channel = ldvp->channel; 1721 ldgp = ldvp->ldgp; 1722 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 1723 1724 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 1725 "cs 0x%016llx rcrto 0x%x rcrthres %x", 1726 channel, 1727 cs.value, 1728 cs.bits.hdw.rcrto, 1729 cs.bits.hdw.rcrthres)); 1730 1731 nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs); 1732 serviced = DDI_INTR_CLAIMED; 1733 1734 /* error events. */ 1735 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1736 (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1737 } 1738 1739 nxge_intr_exit: 1740 1741 1742 /* 1743 * Enable the mailbox update interrupt if we want 1744 * to use mailbox. We probably don't need to use 1745 * mailbox as it only saves us one pio read. 1746 * Also write 1 to rcrthres and rcrto to clear 1747 * these two edge triggered bits. 1748 */ 1749 1750 cs.value &= RX_DMA_CTL_STAT_WR1C; 1751 cs.bits.hdw.mex = 1; 1752 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1753 cs.value); 1754 1755 /* 1756 * Rearm this logical group if this is a single device 1757 * group. 1758 */ 1759 if (ldgp->nldvs == 1) { 1760 ldgimgm_t mgm; 1761 mgm.value = 0; 1762 mgm.bits.ldw.arm = 1; 1763 mgm.bits.ldw.timer = ldgp->ldg_timer; 1764 NXGE_REG_WR64(handle, 1765 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1766 mgm.value); 1767 } 1768 1769 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 1770 serviced)); 1771 return (serviced); 1772 } 1773 1774 /* 1775 * Process the packets received in the specified logical device 1776 * and pass up a chain of message blocks to the upper layer. 1777 */ 1778 static void 1779 nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp, 1780 rx_dma_ctl_stat_t cs) 1781 { 1782 p_mblk_t mp; 1783 p_rx_rcr_ring_t rcrp; 1784 1785 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 1786 if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) { 1787 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1788 "<== nxge_rx_pkts_vring: no mp")); 1789 return; 1790 } 1791 1792 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 1793 mp)); 1794 1795 #ifdef NXGE_DEBUG 1796 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1797 "==> nxge_rx_pkts_vring:calling mac_rx " 1798 "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 1799 "mac_handle $%p", 1800 mp->b_wptr - mp->b_rptr, 1801 mp, mp->b_cont, mp->b_next, 1802 rcrp, rcrp->rcr_mac_handle)); 1803 1804 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1805 "==> nxge_rx_pkts_vring: dump packets " 1806 "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 1807 mp, 1808 mp->b_rptr, 1809 mp->b_wptr, 1810 nxge_dump_packet((char *)mp->b_rptr, 1811 mp->b_wptr - mp->b_rptr))); 1812 if (mp->b_cont) { 1813 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1814 "==> nxge_rx_pkts_vring: dump b_cont packets " 1815 "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 1816 mp->b_cont, 1817 mp->b_cont->b_rptr, 1818 mp->b_cont->b_wptr, 1819 nxge_dump_packet((char *)mp->b_cont->b_rptr, 1820 mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 1821 } 1822 if (mp->b_next) { 1823 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1824 "==> nxge_rx_pkts_vring: dump next packets " 1825 "(b_rptr $%p): %s", 1826 mp->b_next->b_rptr, 1827 nxge_dump_packet((char *)mp->b_next->b_rptr, 1828 mp->b_next->b_wptr - mp->b_next->b_rptr))); 1829 } 1830 #endif 1831 1832 mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 1833 } 1834 1835 1836 /* 1837 * This routine is the main packet receive processing function. 1838 * It gets the packet type, error code, and buffer related 1839 * information from the receive completion entry. 
1840 * How many completion entries to process is based on the number of packets 1841 * queued by the hardware, a hardware maintained tail pointer 1842 * and a configurable receive packet count. 1843 * 1844 * A chain of message blocks will be created as result of processing 1845 * the completion entries. This chain of message blocks will be returned and 1846 * a hardware control status register will be updated with the number of 1847 * packets were removed from the hardware queue. 1848 * 1849 */ 1850 mblk_t * 1851 nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp, 1852 p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs) 1853 { 1854 npi_handle_t handle; 1855 uint8_t channel; 1856 p_rx_rcr_rings_t rx_rcr_rings; 1857 p_rx_rcr_ring_t rcr_p; 1858 uint32_t comp_rd_index; 1859 p_rcr_entry_t rcr_desc_rd_head_p; 1860 p_rcr_entry_t rcr_desc_rd_head_pp; 1861 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 1862 uint16_t qlen, nrcr_read, npkt_read; 1863 uint32_t qlen_hw; 1864 boolean_t multi; 1865 rcrcfig_b_t rcr_cfg_b; 1866 #if defined(_BIG_ENDIAN) 1867 npi_status_t rs = NPI_SUCCESS; 1868 #endif 1869 1870 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d " 1871 "channel %d", vindex, ldvp->channel)); 1872 1873 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1874 return (NULL); 1875 } 1876 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1877 rx_rcr_rings = nxgep->rx_rcr_rings; 1878 rcr_p = rx_rcr_rings->rcr_rings[vindex]; 1879 channel = rcr_p->rdc; 1880 if (channel != ldvp->channel) { 1881 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d " 1882 "channel %d, and rcr channel %d not matched.", 1883 vindex, ldvp->channel, channel)); 1884 return (NULL); 1885 } 1886 1887 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1888 "==> nxge_rx_pkts: START: rcr channel %d " 1889 "head_p $%p head_pp $%p index %d ", 1890 channel, rcr_p->rcr_desc_rd_head_p, 1891 rcr_p->rcr_desc_rd_head_pp, 1892 rcr_p->comp_rd_index)); 1893 1894 1895 #if !defined(_BIG_ENDIAN) 1896 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 1897 #else 1898 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 1899 if (rs != NPI_SUCCESS) { 1900 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d " 1901 "channel %d, get qlen failed 0x%08x", 1902 vindex, ldvp->channel, rs)); 1903 return (NULL); 1904 } 1905 #endif 1906 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 1907 "qlen %d", channel, qlen)); 1908 1909 1910 1911 if (!qlen) { 1912 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1913 "==> nxge_rx_pkts:rcr channel %d " 1914 "qlen %d (no pkts)", channel, qlen)); 1915 1916 return (NULL); 1917 } 1918 1919 comp_rd_index = rcr_p->comp_rd_index; 1920 1921 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 1922 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 1923 nrcr_read = npkt_read = 0; 1924 1925 /* 1926 * Number of packets queued 1927 * (The jumbo or multi packet will be counted as only one 1928 * packets and it may take up more than one completion entry). 1929 */ 1930 qlen_hw = (qlen < nxge_max_rx_pkts) ? 1931 qlen : nxge_max_rx_pkts; 1932 head_mp = NULL; 1933 tail_mp = &head_mp; 1934 nmp = mp_cont = NULL; 1935 multi = B_FALSE; 1936 1937 while (qlen_hw) { 1938 1939 #ifdef NXGE_DEBUG 1940 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 1941 #endif 1942 /* 1943 * Process one completion ring entry. 
1944 */ 1945 nxge_receive_packet(nxgep, 1946 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 1947 1948 /* 1949 * message chaining modes 1950 */ 1951 if (nmp) { 1952 nmp->b_next = NULL; 1953 if (!multi && !mp_cont) { /* frame fits a partition */ 1954 *tail_mp = nmp; 1955 tail_mp = &nmp->b_next; 1956 nmp = NULL; 1957 } else if (multi && !mp_cont) { /* first segment */ 1958 *tail_mp = nmp; 1959 tail_mp = &nmp->b_cont; 1960 } else if (multi && mp_cont) { /* mid of multi segs */ 1961 *tail_mp = mp_cont; 1962 tail_mp = &mp_cont->b_cont; 1963 } else if (!multi && mp_cont) { /* last segment */ 1964 *tail_mp = mp_cont; 1965 tail_mp = &nmp->b_next; 1966 nmp = NULL; 1967 } 1968 } 1969 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1970 "==> nxge_rx_pkts: loop: rcr channel %d " 1971 "before updating: multi %d " 1972 "nrcr_read %d " 1973 "npk read %d " 1974 "head_pp $%p index %d ", 1975 channel, 1976 multi, 1977 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 1978 comp_rd_index)); 1979 1980 if (!multi) { 1981 qlen_hw--; 1982 npkt_read++; 1983 } 1984 1985 /* 1986 * Update the next read entry. 1987 */ 1988 comp_rd_index = NEXT_ENTRY(comp_rd_index, 1989 rcr_p->comp_wrap_mask); 1990 1991 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 1992 rcr_p->rcr_desc_first_p, 1993 rcr_p->rcr_desc_last_p); 1994 1995 nrcr_read++; 1996 1997 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1998 "<== nxge_rx_pkts: (SAM, process one packet) " 1999 "nrcr_read %d", 2000 nrcr_read)); 2001 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2002 "==> nxge_rx_pkts: loop: rcr channel %d " 2003 "multi %d " 2004 "nrcr_read %d " 2005 "npk read %d " 2006 "head_pp $%p index %d ", 2007 channel, 2008 multi, 2009 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 2010 comp_rd_index)); 2011 2012 } 2013 2014 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 2015 rcr_p->comp_rd_index = comp_rd_index; 2016 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 2017 2018 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 2019 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2020 rcr_p->intr_timeout = nxgep->intr_timeout; 2021 rcr_p->intr_threshold = nxgep->intr_threshold; 2022 rcr_cfg_b.value = 0x0ULL; 2023 if (rcr_p->intr_timeout) 2024 rcr_cfg_b.bits.ldw.entout = 1; 2025 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 2026 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2027 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 2028 channel, rcr_cfg_b.value); 2029 } 2030 2031 cs.bits.ldw.pktread = npkt_read; 2032 cs.bits.ldw.ptrread = nrcr_read; 2033 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 2034 channel, cs.value); 2035 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2036 "==> nxge_rx_pkts: EXIT: rcr channel %d " 2037 "head_pp $%p index %016llx ", 2038 channel, 2039 rcr_p->rcr_desc_rd_head_pp, 2040 rcr_p->comp_rd_index)); 2041 /* 2042 * Update RCR buffer pointer read and number of packets 2043 * read. 
2044 */ 2045 2046 *rcrp = rcr_p; 2047 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 2048 return (head_mp); 2049 } 2050 2051 void 2052 nxge_receive_packet(p_nxge_t nxgep, 2053 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 2054 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 2055 { 2056 p_mblk_t nmp = NULL; 2057 uint64_t multi; 2058 uint64_t dcf_err; 2059 uint8_t channel; 2060 2061 boolean_t first_entry = B_TRUE; 2062 boolean_t is_tcp_udp = B_FALSE; 2063 boolean_t buffer_free = B_FALSE; 2064 boolean_t error_send_up = B_FALSE; 2065 uint8_t error_type; 2066 uint16_t l2_len; 2067 uint16_t skip_len; 2068 uint8_t pktbufsz_type; 2069 uint16_t pktbufsz; 2070 uint64_t rcr_entry; 2071 uint64_t *pkt_buf_addr_pp; 2072 uint64_t *pkt_buf_addr_p; 2073 uint32_t buf_offset; 2074 uint32_t bsize; 2075 uint32_t error_disp_cnt; 2076 uint32_t msg_index; 2077 p_rx_rbr_ring_t rx_rbr_p; 2078 p_rx_msg_t *rx_msg_ring_p; 2079 p_rx_msg_t rx_msg_p; 2080 uint16_t sw_offset_bytes = 0, hdr_size = 0; 2081 nxge_status_t status = NXGE_OK; 2082 boolean_t is_valid = B_FALSE; 2083 p_nxge_rx_ring_stats_t rdc_stats; 2084 uint32_t bytes_read; 2085 uint64_t pkt_type; 2086 uint64_t frag; 2087 #ifdef NXGE_DEBUG 2088 int dump_len; 2089 #endif 2090 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 2091 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 2092 2093 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 2094 2095 multi = (rcr_entry & RCR_MULTI_MASK); 2096 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 2097 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 2098 2099 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 2100 frag = (rcr_entry & RCR_FRAG_MASK); 2101 2102 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 2103 2104 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 2105 RCR_PKTBUFSZ_SHIFT); 2106 2107 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 2108 RCR_PKT_BUF_ADDR_SHIFT); 2109 2110 channel = rcr_p->rdc; 2111 2112 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2113 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2114 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2115 "error_type 0x%x pkt_type 0x%x " 2116 "pktbufsz_type %d ", 2117 rcr_desc_rd_head_p, 2118 rcr_entry, pkt_buf_addr_pp, l2_len, 2119 multi, 2120 error_type, 2121 pkt_type, 2122 pktbufsz_type)); 2123 2124 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2125 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 2126 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 2127 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 2128 rcr_entry, pkt_buf_addr_pp, l2_len, 2129 multi, 2130 error_type, 2131 pkt_type)); 2132 2133 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2134 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2135 "full pkt_buf_addr_pp $%p l2_len %d", 2136 rcr_entry, pkt_buf_addr_pp, l2_len)); 2137 2138 /* get the stats ptr */ 2139 rdc_stats = rcr_p->rdc_stats; 2140 2141 if (!l2_len) { 2142 2143 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2144 "<== nxge_receive_packet: failed: l2 length is 0.")); 2145 return; 2146 } 2147 2148 /* Hardware sends us 4 bytes of CRC as no stripping is done. 
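 * The L2 length reported in the completion entry therefore includes
 * the 4-byte FCS (ETHERFCSL), which is trimmed just below; e.g. a
 * minimum-size 64-byte Ethernet frame passes 60 bytes up the stack.
 * The packet buffer address in the entry is likewise stored in a
 * compressed form and is rebuilt below by shifting it left
 * RCR_PKT_BUF_ADDR_SHIFT_FULL (6) bits; for illustration, a stored
 * value of 0x1234 corresponds to the I/O address 0x48d00.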
*/ 2149 l2_len -= ETHERFCSL; 2150 2151 /* shift 6 bits to get the full io address */ 2152 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 2153 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2154 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2155 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 2156 "full pkt_buf_addr_pp $%p l2_len %d", 2157 rcr_entry, pkt_buf_addr_pp, l2_len)); 2158 2159 rx_rbr_p = rcr_p->rx_rbr_p; 2160 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 2161 2162 if (first_entry) { 2163 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 2164 RXDMA_HDR_SIZE_DEFAULT); 2165 2166 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2167 "==> nxge_receive_packet: first entry 0x%016llx " 2168 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 2169 rcr_entry, pkt_buf_addr_pp, l2_len, 2170 hdr_size)); 2171 } 2172 2173 MUTEX_ENTER(&rcr_p->lock); 2174 MUTEX_ENTER(&rx_rbr_p->lock); 2175 2176 bytes_read = rcr_p->rcvd_pkt_bytes; 2177 2178 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2179 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 2180 "full pkt_buf_addr_pp $%p l2_len %d", 2181 rcr_entry, pkt_buf_addr_pp, l2_len)); 2182 2183 /* 2184 * Packet buffer address in the completion entry points 2185 * to the starting buffer address (offset 0). 2186 * Use the starting buffer address to locate the corresponding 2187 * kernel address. 2188 */ 2189 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 2190 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 2191 &buf_offset, 2192 &msg_index); 2193 2194 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2195 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 2196 "full pkt_buf_addr_pp $%p l2_len %d", 2197 rcr_entry, pkt_buf_addr_pp, l2_len)); 2198 2199 if (status != NXGE_OK) { 2200 MUTEX_EXIT(&rx_rbr_p->lock); 2201 MUTEX_EXIT(&rcr_p->lock); 2202 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2203 "<== nxge_receive_packet: found vaddr failed %d", 2204 status)); 2205 return; 2206 } 2207 2208 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2209 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 2210 "full pkt_buf_addr_pp $%p l2_len %d", 2211 rcr_entry, pkt_buf_addr_pp, l2_len)); 2212 2213 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2214 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2215 "full pkt_buf_addr_pp $%p l2_len %d", 2216 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2217 2218 rx_msg_p = rx_msg_ring_p[msg_index]; 2219 2220 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2221 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 2222 "full pkt_buf_addr_pp $%p l2_len %d", 2223 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 2224 2225 switch (pktbufsz_type) { 2226 case RCR_PKTBUFSZ_0: 2227 bsize = rx_rbr_p->pkt_buf_size0_bytes; 2228 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2229 "==> nxge_receive_packet: 0 buf %d", bsize)); 2230 break; 2231 case RCR_PKTBUFSZ_1: 2232 bsize = rx_rbr_p->pkt_buf_size1_bytes; 2233 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2234 "==> nxge_receive_packet: 1 buf %d", bsize)); 2235 break; 2236 case RCR_PKTBUFSZ_2: 2237 bsize = rx_rbr_p->pkt_buf_size2_bytes; 2238 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2239 "==> nxge_receive_packet: 2 buf %d", bsize)); 2240 break; 2241 case RCR_SINGLE_BLOCK: 2242 bsize = rx_msg_p->block_size; 2243 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2244 "==> nxge_receive_packet: single %d", bsize)); 2245 2246 break; 2247 default: 2248 MUTEX_EXIT(&rx_rbr_p->lock); 2249 MUTEX_EXIT(&rcr_p->lock); 2250 return; 2251 } 2252 2253 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 2254 (buf_offset + sw_offset_bytes), 2255 (hdr_size + l2_len), 2256 DDI_DMA_SYNC_FORCPU); 2257 2258 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2259 "==> nxge_receive_packet: after first dump:usage count")); 2260 2261 if 
(rx_msg_p->cur_usage_cnt == 0) { 2262 if (rx_rbr_p->rbr_use_bcopy) { 2263 atomic_inc_32(&rx_rbr_p->rbr_consumed); 2264 if (rx_rbr_p->rbr_consumed < 2265 rx_rbr_p->rbr_threshold_hi) { 2266 if (rx_rbr_p->rbr_threshold_lo == 0 || 2267 ((rx_rbr_p->rbr_consumed >= 2268 rx_rbr_p->rbr_threshold_lo) && 2269 (rx_rbr_p->rbr_bufsize_type >= 2270 pktbufsz_type))) { 2271 rx_msg_p->rx_use_bcopy = B_TRUE; 2272 } 2273 } else { 2274 rx_msg_p->rx_use_bcopy = B_TRUE; 2275 } 2276 } 2277 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2278 "==> nxge_receive_packet: buf %d (new block) ", 2279 bsize)); 2280 2281 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 2282 rx_msg_p->pkt_buf_size = bsize; 2283 rx_msg_p->cur_usage_cnt = 1; 2284 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 2285 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2286 "==> nxge_receive_packet: buf %d " 2287 "(single block) ", 2288 bsize)); 2289 /* 2290 * Buffer can be reused once the free function 2291 * is called. 2292 */ 2293 rx_msg_p->max_usage_cnt = 1; 2294 buffer_free = B_TRUE; 2295 } else { 2296 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 2297 if (rx_msg_p->max_usage_cnt == 1) { 2298 buffer_free = B_TRUE; 2299 } 2300 } 2301 } else { 2302 rx_msg_p->cur_usage_cnt++; 2303 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 2304 buffer_free = B_TRUE; 2305 } 2306 } 2307 2308 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2309 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 2310 msg_index, l2_len, 2311 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 2312 2313 if ((error_type) || (dcf_err)) { 2314 rdc_stats->ierrors++; 2315 if (dcf_err) { 2316 rdc_stats->dcf_err++; 2317 #ifdef NXGE_DEBUG 2318 if (!rdc_stats->dcf_err) { 2319 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2320 "nxge_receive_packet: channel %d dcf_err rcr" 2321 " 0x%llx", channel, rcr_entry)); 2322 } 2323 #endif 2324 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2325 NXGE_FM_EREPORT_RDMC_DCF_ERR); 2326 } else { 2327 /* Update error stats */ 2328 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2329 rdc_stats->errlog.compl_err_type = error_type; 2330 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 2331 NXGE_FM_EREPORT_RDMC_COMPLETION_ERR); 2332 2333 switch (error_type) { 2334 case RCR_L2_ERROR: 2335 rdc_stats->l2_err++; 2336 if (rdc_stats->l2_err < 2337 error_disp_cnt) 2338 NXGE_ERROR_MSG((nxgep, 2339 NXGE_ERR_CTL, 2340 " nxge_receive_packet:" 2341 " channel %d RCR L2_ERROR", 2342 channel)); 2343 break; 2344 case RCR_L4_CSUM_ERROR: 2345 error_send_up = B_TRUE; 2346 rdc_stats->l4_cksum_err++; 2347 if (rdc_stats->l4_cksum_err < 2348 error_disp_cnt) 2349 NXGE_ERROR_MSG((nxgep, 2350 NXGE_ERR_CTL, 2351 " nxge_receive_packet:" 2352 " channel %d" 2353 " RCR L4_CSUM_ERROR", 2354 channel)); 2355 break; 2356 case RCR_FFLP_SOFT_ERROR: 2357 error_send_up = B_TRUE; 2358 rdc_stats->fflp_soft_err++; 2359 if (rdc_stats->fflp_soft_err < 2360 error_disp_cnt) 2361 NXGE_ERROR_MSG((nxgep, 2362 NXGE_ERR_CTL, 2363 " nxge_receive_packet:" 2364 " channel %d" 2365 " RCR FFLP_SOFT_ERROR", 2366 channel)); 2367 break; 2368 case RCR_ZCP_SOFT_ERROR: 2369 error_send_up = B_TRUE; 2370 rdc_stats->fflp_soft_err++; 2371 if (rdc_stats->zcp_soft_err < 2372 error_disp_cnt) 2373 NXGE_ERROR_MSG((nxgep, 2374 NXGE_ERR_CTL, 2375 " nxge_receive_packet:" 2376 " Channel %d" 2377 " RCR ZCP_SOFT_ERROR", 2378 channel)); 2379 break; 2380 default: 2381 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2382 " nxge_receive_packet:" 2383 " Channel %d" 2384 " RCR entry 0x%llx" 2385 " error 0x%x", 2386 rcr_entry, channel, 2387 error_type)); 2388 break; 2389 } 2390 } 2391 2392 /* 2393 * 
Update and repost buffer block if max usage 2394 * count is reached. 2395 */ 2396 if (error_send_up == B_FALSE) { 2397 atomic_inc_32(&rx_msg_p->ref_cnt); 2398 atomic_inc_32(&nxge_mblks_pending); 2399 if (buffer_free == B_TRUE) { 2400 rx_msg_p->free = B_TRUE; 2401 } 2402 2403 MUTEX_EXIT(&rx_rbr_p->lock); 2404 MUTEX_EXIT(&rcr_p->lock); 2405 nxge_freeb(rx_msg_p); 2406 return; 2407 } 2408 } 2409 2410 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2411 "==> nxge_receive_packet: DMA sync second ")); 2412 2413 skip_len = sw_offset_bytes + hdr_size; 2414 if (!rx_msg_p->rx_use_bcopy) { 2415 /* 2416 * For loaned up buffers, the driver reference count 2417 * will be incremented first and then the free state. 2418 */ 2419 nmp = nxge_dupb(rx_msg_p, buf_offset, bsize); 2420 } else { 2421 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, l2_len); 2422 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2423 "==> nxge_receive_packet: use bcopy " 2424 "rbr consumed %d " 2425 "pktbufsz_type %d " 2426 "offset %d " 2427 "hdr_size %d l2_len %d " 2428 "nmp->b_rptr $%p", 2429 rx_rbr_p->rbr_consumed, 2430 pktbufsz_type, 2431 buf_offset, hdr_size, l2_len, 2432 nmp->b_rptr)); 2433 } 2434 if (nmp != NULL) { 2435 pktbufsz = nxge_get_pktbuf_size(nxgep, pktbufsz_type, 2436 rx_rbr_p->rbr_cfgb); 2437 if (!rx_msg_p->rx_use_bcopy) { 2438 if (first_entry) { 2439 bytes_read = 0; 2440 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2441 if (l2_len > pktbufsz - skip_len) 2442 nmp->b_wptr = &nmp->b_rptr[pktbufsz 2443 - skip_len]; 2444 else 2445 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2446 } else { 2447 if (l2_len - bytes_read > pktbufsz) 2448 nmp->b_wptr = &nmp->b_rptr[pktbufsz]; 2449 else 2450 nmp->b_wptr = 2451 &nmp->b_rptr[l2_len - bytes_read]; 2452 } 2453 bytes_read += nmp->b_wptr - nmp->b_rptr; 2454 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2455 "==> nxge_receive_packet after dupb: " 2456 "rbr consumed %d " 2457 "pktbufsz_type %d " 2458 "nmp $%p rptr $%p wptr $%p " 2459 "buf_offset %d bzise %d l2_len %d skip_len %d", 2460 rx_rbr_p->rbr_consumed, 2461 pktbufsz_type, 2462 nmp, nmp->b_rptr, nmp->b_wptr, 2463 buf_offset, bsize, l2_len, skip_len)); 2464 } 2465 } else { 2466 cmn_err(CE_WARN, "!nxge_receive_packet: " 2467 "update stats (error)"); 2468 atomic_inc_32(&rx_msg_p->ref_cnt); 2469 atomic_inc_32(&nxge_mblks_pending); 2470 if (buffer_free == B_TRUE) { 2471 rx_msg_p->free = B_TRUE; 2472 } 2473 MUTEX_EXIT(&rx_rbr_p->lock); 2474 MUTEX_EXIT(&rcr_p->lock); 2475 nxge_freeb(rx_msg_p); 2476 return; 2477 } 2478 2479 rcr_p->rcvd_pkt_bytes = bytes_read; 2480 2481 if (buffer_free == B_TRUE) { 2482 rx_msg_p->free = B_TRUE; 2483 } 2484 2485 /* 2486 * ERROR, FRAG and PKT_TYPE are only reported 2487 * in the first entry. 2488 * If a packet is not fragmented and no error bit is set, then 2489 * L4 checksum is OK. 2490 */ 2491 is_valid = (nmp != NULL); 2492 rdc_stats->ibytes += l2_len; 2493 rdc_stats->ipackets++; 2494 MUTEX_EXIT(&rx_rbr_p->lock); 2495 MUTEX_EXIT(&rcr_p->lock); 2496 2497 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 2498 atomic_inc_32(&rx_msg_p->ref_cnt); 2499 atomic_inc_32(&nxge_mblks_pending); 2500 nxge_freeb(rx_msg_p); 2501 } 2502 2503 if (is_valid) { 2504 nmp->b_cont = NULL; 2505 if (first_entry) { 2506 *mp = nmp; 2507 *mp_cont = NULL; 2508 } else 2509 *mp_cont = nmp; 2510 } 2511 2512 /* 2513 * Update stats and hardware checksuming. 2514 */ 2515 if (is_valid && !multi) { 2516 2517 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 2518 pkt_type == RCR_PKT_IS_UDP) ? 
2519 B_TRUE: B_FALSE); 2520 2521 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 2522 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 2523 is_valid, multi, is_tcp_udp, frag, error_type)); 2524 2525 if (is_tcp_udp && !frag && !error_type) { 2526 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 2527 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 2528 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2529 "==> nxge_receive_packet: Full tcp/udp cksum " 2530 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 2531 "error %d", 2532 is_valid, multi, is_tcp_udp, frag, error_type)); 2533 } 2534 } 2535 2536 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 2537 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 2538 2539 *multi_p = (multi == RCR_MULTI_MASK); 2540 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 2541 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 2542 *multi_p, nmp, *mp, *mp_cont)); 2543 } 2544 2545 /*ARGSUSED*/ 2546 static nxge_status_t 2547 nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, 2548 rx_dma_ctl_stat_t cs) 2549 { 2550 p_nxge_rx_ring_stats_t rdc_stats; 2551 npi_handle_t handle; 2552 npi_status_t rs; 2553 boolean_t rxchan_fatal = B_FALSE; 2554 boolean_t rxport_fatal = B_FALSE; 2555 uint8_t channel; 2556 uint8_t portn; 2557 nxge_status_t status = NXGE_OK; 2558 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 2559 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 2560 2561 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2562 channel = ldvp->channel; 2563 portn = nxgep->mac.portnum; 2564 rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index]; 2565 2566 if (cs.bits.hdw.rbr_tmout) { 2567 rdc_stats->rx_rbr_tmout++; 2568 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2569 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 2570 rxchan_fatal = B_TRUE; 2571 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2572 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 2573 } 2574 if (cs.bits.hdw.rsp_cnt_err) { 2575 rdc_stats->rsp_cnt_err++; 2576 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2577 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 2578 rxchan_fatal = B_TRUE; 2579 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2580 "==> nxge_rx_err_evnts(channel %d): " 2581 "rsp_cnt_err", channel)); 2582 } 2583 if (cs.bits.hdw.byte_en_bus) { 2584 rdc_stats->byte_en_bus++; 2585 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2586 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 2587 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2588 "==> nxge_rx_err_evnts(channel %d): " 2589 "fatal error: byte_en_bus", channel)); 2590 rxchan_fatal = B_TRUE; 2591 } 2592 if (cs.bits.hdw.rsp_dat_err) { 2593 rdc_stats->rsp_dat_err++; 2594 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2595 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 2596 rxchan_fatal = B_TRUE; 2597 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2598 "==> nxge_rx_err_evnts(channel %d): " 2599 "fatal error: rsp_dat_err", channel)); 2600 } 2601 if (cs.bits.hdw.rcr_ack_err) { 2602 rdc_stats->rcr_ack_err++; 2603 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2604 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 2605 rxchan_fatal = B_TRUE; 2606 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2607 "==> nxge_rx_err_evnts(channel %d): " 2608 "fatal error: rcr_ack_err", channel)); 2609 } 2610 if (cs.bits.hdw.dc_fifo_err) { 2611 rdc_stats->dc_fifo_err++; 2612 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2613 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 2614 /* This is not a fatal error! 
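 * It is handled as a port-level problem rather than a channel-fatal
 * one: rxport_fatal is set below, so recovery goes through
 * nxge_ipp_fatal_err_recover() for the whole port instead of
 * nxge_rxdma_fatal_err_recover() for this channel, i.e.
 *
 *	rxport_fatal -> nxge_ipp_fatal_err_recover(nxgep)
 *	rxchan_fatal -> nxge_rxdma_fatal_err_recover(nxgep, channel)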
*/ 2615 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2616 "==> nxge_rx_err_evnts(channel %d): " 2617 "dc_fifo_err", channel)); 2618 rxport_fatal = B_TRUE; 2619 } 2620 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 2621 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 2622 &rdc_stats->errlog.pre_par, 2623 &rdc_stats->errlog.sha_par)) 2624 != NPI_SUCCESS) { 2625 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2626 "==> nxge_rx_err_evnts(channel %d): " 2627 "rcr_sha_par: get perr", channel)); 2628 return (NXGE_ERROR | rs); 2629 } 2630 if (cs.bits.hdw.rcr_sha_par) { 2631 rdc_stats->rcr_sha_par++; 2632 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2633 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 2634 rxchan_fatal = B_TRUE; 2635 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2636 "==> nxge_rx_err_evnts(channel %d): " 2637 "fatal error: rcr_sha_par", channel)); 2638 } 2639 if (cs.bits.hdw.rbr_pre_par) { 2640 rdc_stats->rbr_pre_par++; 2641 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2642 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 2643 rxchan_fatal = B_TRUE; 2644 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2645 "==> nxge_rx_err_evnts(channel %d): " 2646 "fatal error: rbr_pre_par", channel)); 2647 } 2648 } 2649 if (cs.bits.hdw.port_drop_pkt) { 2650 rdc_stats->port_drop_pkt++; 2651 if (rdc_stats->port_drop_pkt < error_disp_cnt) 2652 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2653 "==> nxge_rx_err_evnts (channel %d): " 2654 "port_drop_pkt", channel)); 2655 } 2656 if (cs.bits.hdw.wred_drop) { 2657 rdc_stats->wred_drop++; 2658 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2659 "==> nxge_rx_err_evnts(channel %d): " 2660 "wred_drop", channel)); 2661 } 2662 if (cs.bits.hdw.rbr_pre_empty) { 2663 rdc_stats->rbr_pre_empty++; 2664 if (rdc_stats->rbr_pre_empty < error_disp_cnt) 2665 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2666 "==> nxge_rx_err_evnts(channel %d): " 2667 "rbr_pre_empty", channel)); 2668 } 2669 if (cs.bits.hdw.rcr_shadow_full) { 2670 rdc_stats->rcr_shadow_full++; 2671 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2672 "==> nxge_rx_err_evnts(channel %d): " 2673 "rcr_shadow_full", channel)); 2674 } 2675 if (cs.bits.hdw.config_err) { 2676 rdc_stats->config_err++; 2677 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2678 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 2679 rxchan_fatal = B_TRUE; 2680 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2681 "==> nxge_rx_err_evnts(channel %d): " 2682 "config error", channel)); 2683 } 2684 if (cs.bits.hdw.rcrincon) { 2685 rdc_stats->rcrincon++; 2686 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2687 NXGE_FM_EREPORT_RDMC_RCRINCON); 2688 rxchan_fatal = B_TRUE; 2689 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2690 "==> nxge_rx_err_evnts(channel %d): " 2691 "fatal error: rcrincon error", channel)); 2692 } 2693 if (cs.bits.hdw.rcrfull) { 2694 rdc_stats->rcrfull++; 2695 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2696 NXGE_FM_EREPORT_RDMC_RCRFULL); 2697 rxchan_fatal = B_TRUE; 2698 if (rdc_stats->rcrfull < error_disp_cnt) 2699 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2700 "==> nxge_rx_err_evnts(channel %d): " 2701 "fatal error: rcrfull error", channel)); 2702 } 2703 if (cs.bits.hdw.rbr_empty) { 2704 rdc_stats->rbr_empty++; 2705 if (rdc_stats->rbr_empty < error_disp_cnt) 2706 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2707 "==> nxge_rx_err_evnts(channel %d): " 2708 "rbr empty error", channel)); 2709 } 2710 if (cs.bits.hdw.rbrfull) { 2711 rdc_stats->rbrfull++; 2712 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2713 NXGE_FM_EREPORT_RDMC_RBRFULL); 2714 rxchan_fatal = B_TRUE; 2715 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2716 "==> nxge_rx_err_evnts(channel %d): " 2717 "fatal error: rbr_full 
error", channel)); 2718 } 2719 if (cs.bits.hdw.rbrlogpage) { 2720 rdc_stats->rbrlogpage++; 2721 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2722 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 2723 rxchan_fatal = B_TRUE; 2724 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2725 "==> nxge_rx_err_evnts(channel %d): " 2726 "fatal error: rbr logical page error", channel)); 2727 } 2728 if (cs.bits.hdw.cfiglogpage) { 2729 rdc_stats->cfiglogpage++; 2730 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 2731 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 2732 rxchan_fatal = B_TRUE; 2733 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2734 "==> nxge_rx_err_evnts(channel %d): " 2735 "fatal error: cfig logical page error", channel)); 2736 } 2737 2738 if (rxport_fatal) { 2739 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2740 " nxge_rx_err_evnts: " 2741 " fatal error on Port #%d\n", 2742 portn)); 2743 status = nxge_ipp_fatal_err_recover(nxgep); 2744 if (status == NXGE_OK) { 2745 FM_SERVICE_RESTORED(nxgep); 2746 } 2747 } 2748 2749 if (rxchan_fatal) { 2750 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2751 " nxge_rx_err_evnts: " 2752 " fatal error on Channel #%d\n", 2753 channel)); 2754 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 2755 if (status == NXGE_OK) { 2756 FM_SERVICE_RESTORED(nxgep); 2757 } 2758 } 2759 2760 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 2761 2762 return (status); 2763 } 2764 2765 static nxge_status_t 2766 nxge_map_rxdma(p_nxge_t nxgep) 2767 { 2768 int i, ndmas; 2769 uint16_t channel; 2770 p_rx_rbr_rings_t rx_rbr_rings; 2771 p_rx_rbr_ring_t *rbr_rings; 2772 p_rx_rcr_rings_t rx_rcr_rings; 2773 p_rx_rcr_ring_t *rcr_rings; 2774 p_rx_mbox_areas_t rx_mbox_areas_p; 2775 p_rx_mbox_t *rx_mbox_p; 2776 p_nxge_dma_pool_t dma_buf_poolp; 2777 p_nxge_dma_pool_t dma_cntl_poolp; 2778 p_nxge_dma_common_t *dma_buf_p; 2779 p_nxge_dma_common_t *dma_cntl_p; 2780 uint32_t *num_chunks; 2781 nxge_status_t status = NXGE_OK; 2782 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2783 p_nxge_dma_common_t t_dma_buf_p; 2784 p_nxge_dma_common_t t_dma_cntl_p; 2785 #endif 2786 2787 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 2788 2789 dma_buf_poolp = nxgep->rx_buf_pool_p; 2790 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2791 2792 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2793 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2794 "<== nxge_map_rxdma: buf not allocated")); 2795 return (NXGE_ERROR); 2796 } 2797 2798 ndmas = dma_buf_poolp->ndmas; 2799 if (!ndmas) { 2800 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2801 "<== nxge_map_rxdma: no dma allocated")); 2802 return (NXGE_ERROR); 2803 } 2804 2805 num_chunks = dma_buf_poolp->num_chunks; 2806 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2807 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 2808 2809 rx_rbr_rings = (p_rx_rbr_rings_t) 2810 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 2811 rbr_rings = (p_rx_rbr_ring_t *) 2812 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 2813 rx_rcr_rings = (p_rx_rcr_rings_t) 2814 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 2815 rcr_rings = (p_rx_rcr_ring_t *) 2816 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 2817 rx_mbox_areas_p = (p_rx_mbox_areas_t) 2818 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 2819 rx_mbox_p = (p_rx_mbox_t *) 2820 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 2821 2822 /* 2823 * Timeout should be set based on the system clock divider. 2824 * The following timeout value of 1 assumes that the 2825 * granularity (1000) is 3 microseconds running at 300MHz. 
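 *
 * The per-instance intr_threshold (packet count) and intr_timeout
 * values set here are later programmed into each completion ring's
 * RCRCFIG_B register (pthres/timeout fields, with entout enabling
 * the timer), and nxge_rx_pkts() re-writes RCRCFIG_B whenever these
 * values change, roughly:
 *
 *	rcr_cfg_b.bits.ldw.pthres  = intr_threshold;
 *	rcr_cfg_b.bits.ldw.timeout = intr_timeout;
 *	rcr_cfg_b.bits.ldw.entout  = 1;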
2826 */ 2827 2828 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 2829 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 2830 2831 /* 2832 * Map descriptors from the buffer polls for each dam channel. 2833 */ 2834 for (i = 0; i < ndmas; i++) { 2835 /* 2836 * Set up and prepare buffer blocks, descriptors 2837 * and mailbox. 2838 */ 2839 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2840 status = nxge_map_rxdma_channel(nxgep, channel, 2841 (p_nxge_dma_common_t *)&dma_buf_p[i], 2842 (p_rx_rbr_ring_t *)&rbr_rings[i], 2843 num_chunks[i], 2844 (p_nxge_dma_common_t *)&dma_cntl_p[i], 2845 (p_rx_rcr_ring_t *)&rcr_rings[i], 2846 (p_rx_mbox_t *)&rx_mbox_p[i]); 2847 if (status != NXGE_OK) { 2848 goto nxge_map_rxdma_fail1; 2849 } 2850 rbr_rings[i]->index = (uint16_t)i; 2851 rcr_rings[i]->index = (uint16_t)i; 2852 rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i]; 2853 2854 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2855 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 2856 rbr_rings[i]->hv_set = B_FALSE; 2857 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 2858 t_dma_cntl_p = 2859 (p_nxge_dma_common_t)dma_cntl_p[i]; 2860 2861 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp = 2862 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 2863 rbr_rings[i]->hv_rx_buf_ioaddr_size = 2864 (uint64_t)t_dma_buf_p->orig_alength; 2865 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2866 "==> nxge_map_rxdma_channel: " 2867 "channel %d " 2868 "data buf base io $%p ($%p) " 2869 "size 0x%llx (%d 0x%x)", 2870 channel, 2871 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp, 2872 t_dma_cntl_p->ioaddr_pp, 2873 rbr_rings[i]->hv_rx_buf_ioaddr_size, 2874 t_dma_buf_p->orig_alength, 2875 t_dma_buf_p->orig_alength)); 2876 2877 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp = 2878 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 2879 rbr_rings[i]->hv_rx_cntl_ioaddr_size = 2880 (uint64_t)t_dma_cntl_p->orig_alength; 2881 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2882 "==> nxge_map_rxdma_channel: " 2883 "channel %d " 2884 "cntl base io $%p ($%p) " 2885 "size 0x%llx (%d 0x%x)", 2886 channel, 2887 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp, 2888 t_dma_cntl_p->ioaddr_pp, 2889 rbr_rings[i]->hv_rx_cntl_ioaddr_size, 2890 t_dma_cntl_p->orig_alength, 2891 t_dma_cntl_p->orig_alength)); 2892 } 2893 2894 #endif /* sun4v and NIU_LP_WORKAROUND */ 2895 } 2896 2897 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 2898 rx_rbr_rings->rbr_rings = rbr_rings; 2899 nxgep->rx_rbr_rings = rx_rbr_rings; 2900 rx_rcr_rings->rcr_rings = rcr_rings; 2901 nxgep->rx_rcr_rings = rx_rcr_rings; 2902 2903 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 2904 nxgep->rx_mbox_areas_p = rx_mbox_areas_p; 2905 2906 goto nxge_map_rxdma_exit; 2907 2908 nxge_map_rxdma_fail1: 2909 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2910 "==> nxge_map_rxdma: unmap rbr,rcr " 2911 "(status 0x%x channel %d i %d)", 2912 status, channel, i)); 2913 i--; 2914 for (; i >= 0; i--) { 2915 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2916 nxge_unmap_rxdma_channel(nxgep, channel, 2917 rbr_rings[i], 2918 rcr_rings[i], 2919 rx_mbox_p[i]); 2920 } 2921 2922 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2923 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2924 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 2925 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 2926 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 2927 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 2928 2929 nxge_map_rxdma_exit: 2930 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2931 "<== nxge_map_rxdma: " 2932 "(status 0x%x channel %d)", 2933 status, channel)); 
2934 2935 return (status); 2936 } 2937 2938 static void 2939 nxge_unmap_rxdma(p_nxge_t nxgep) 2940 { 2941 int i, ndmas; 2942 uint16_t channel; 2943 p_rx_rbr_rings_t rx_rbr_rings; 2944 p_rx_rbr_ring_t *rbr_rings; 2945 p_rx_rcr_rings_t rx_rcr_rings; 2946 p_rx_rcr_ring_t *rcr_rings; 2947 p_rx_mbox_areas_t rx_mbox_areas_p; 2948 p_rx_mbox_t *rx_mbox_p; 2949 p_nxge_dma_pool_t dma_buf_poolp; 2950 p_nxge_dma_pool_t dma_cntl_poolp; 2951 p_nxge_dma_common_t *dma_buf_p; 2952 2953 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma")); 2954 2955 dma_buf_poolp = nxgep->rx_buf_pool_p; 2956 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 2957 2958 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 2959 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2960 "<== nxge_unmap_rxdma: NULL buf pointers")); 2961 return; 2962 } 2963 2964 rx_rbr_rings = nxgep->rx_rbr_rings; 2965 rx_rcr_rings = nxgep->rx_rcr_rings; 2966 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 2967 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2968 "<== nxge_unmap_rxdma: NULL ring pointers")); 2969 return; 2970 } 2971 ndmas = rx_rbr_rings->ndmas; 2972 if (!ndmas) { 2973 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2974 "<== nxge_unmap_rxdma: no channel")); 2975 return; 2976 } 2977 2978 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2979 "==> nxge_unmap_rxdma (ndmas %d)", ndmas)); 2980 rbr_rings = rx_rbr_rings->rbr_rings; 2981 rcr_rings = rx_rcr_rings->rcr_rings; 2982 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 2983 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 2984 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 2985 2986 for (i = 0; i < ndmas; i++) { 2987 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 2988 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2989 "==> nxge_unmap_rxdma (ndmas %d) channel %d", 2990 ndmas, channel)); 2991 (void) nxge_unmap_rxdma_channel(nxgep, channel, 2992 (p_rx_rbr_ring_t)rbr_rings[i], 2993 (p_rx_rcr_ring_t)rcr_rings[i], 2994 (p_rx_mbox_t)rx_mbox_p[i]); 2995 } 2996 2997 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 2998 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 2999 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 3000 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 3001 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 3002 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 3003 3004 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3005 "<== nxge_unmap_rxdma")); 3006 } 3007 3008 nxge_status_t 3009 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3010 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 3011 uint32_t num_chunks, 3012 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 3013 p_rx_mbox_t *rx_mbox_p) 3014 { 3015 int status = NXGE_OK; 3016 3017 /* 3018 * Set up and prepare buffer blocks, descriptors 3019 * and mailbox. 3020 */ 3021 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3022 "==> nxge_map_rxdma_channel (channel %d)", channel)); 3023 /* 3024 * Receive buffer blocks 3025 */ 3026 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 3027 dma_buf_p, rbr_p, num_chunks); 3028 if (status != NXGE_OK) { 3029 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3030 "==> nxge_map_rxdma_channel (channel %d): " 3031 "map buffer failed 0x%x", channel, status)); 3032 goto nxge_map_rxdma_channel_exit; 3033 } 3034 3035 /* 3036 * Receive block ring, completion ring and mailbox. 
3037 */ 3038 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 3039 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 3040 if (status != NXGE_OK) { 3041 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3042 "==> nxge_map_rxdma_channel (channel %d): " 3043 "map config failed 0x%x", channel, status)); 3044 goto nxge_map_rxdma_channel_fail2; 3045 } 3046 3047 goto nxge_map_rxdma_channel_exit; 3048 3049 nxge_map_rxdma_channel_fail3: 3050 /* Free rbr, rcr */ 3051 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3052 "==> nxge_map_rxdma_channel: free rbr/rcr " 3053 "(status 0x%x channel %d)", 3054 status, channel)); 3055 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3056 *rcr_p, *rx_mbox_p); 3057 3058 nxge_map_rxdma_channel_fail2: 3059 /* Free buffer blocks */ 3060 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3061 "==> nxge_map_rxdma_channel: free rx buffers" 3062 "(nxgep 0x%x status 0x%x channel %d)", 3063 nxgep, status, channel)); 3064 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 3065 3066 status = NXGE_ERROR; 3067 3068 nxge_map_rxdma_channel_exit: 3069 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3070 "<== nxge_map_rxdma_channel: " 3071 "(nxgep 0x%x status 0x%x channel %d)", 3072 nxgep, status, channel)); 3073 3074 return (status); 3075 } 3076 3077 /*ARGSUSED*/ 3078 static void 3079 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 3080 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3081 { 3082 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3083 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 3084 3085 /* 3086 * unmap receive block ring, completion ring and mailbox. 3087 */ 3088 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 3089 rcr_p, rx_mbox_p); 3090 3091 /* unmap buffer blocks */ 3092 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 3093 3094 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 3095 } 3096 3097 /*ARGSUSED*/ 3098 static nxge_status_t 3099 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 3100 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 3101 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 3102 { 3103 p_rx_rbr_ring_t rbrp; 3104 p_rx_rcr_ring_t rcrp; 3105 p_rx_mbox_t mboxp; 3106 p_nxge_dma_common_t cntl_dmap; 3107 p_nxge_dma_common_t dmap; 3108 p_rx_msg_t *rx_msg_ring; 3109 p_rx_msg_t rx_msg_p; 3110 p_rbr_cfig_a_t rcfga_p; 3111 p_rbr_cfig_b_t rcfgb_p; 3112 p_rcrcfig_a_t cfga_p; 3113 p_rcrcfig_b_t cfgb_p; 3114 p_rxdma_cfig1_t cfig1_p; 3115 p_rxdma_cfig2_t cfig2_p; 3116 p_rbr_kick_t kick_p; 3117 uint32_t dmaaddrp; 3118 uint32_t *rbr_vaddrp; 3119 uint32_t bkaddr; 3120 nxge_status_t status = NXGE_OK; 3121 int i; 3122 uint32_t nxge_port_rcr_size; 3123 3124 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3125 "==> nxge_map_rxdma_channel_cfg_ring")); 3126 3127 cntl_dmap = *dma_cntl_p; 3128 3129 /* Map in the receive block ring */ 3130 rbrp = *rbr_p; 3131 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 3132 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 3133 /* 3134 * Zero out buffer block ring descriptors. 
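 * The ring was sized just above as rbb_max four-byte entries.  Each
 * entry holds a receive buffer block DMA address shifted right by
 * RBR_BKADDR_SHIFT (only the block-aligned upper bits are kept); the
 * loop further below fills the ring roughly as:
 *
 *	for (i = 0; i < rbrp->tnblocks; i++)
 *		*rbr_vaddrp++ = (uint32_t)
 *		    (rx_msg_ring[i]->buf_dma.dma_cookie.dmac_laddress >>
 *		    RBR_BKADDR_SHIFT);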
3135 */ 3136 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3137 3138 rcfga_p = &(rbrp->rbr_cfga); 3139 rcfgb_p = &(rbrp->rbr_cfgb); 3140 kick_p = &(rbrp->rbr_kick); 3141 rcfga_p->value = 0; 3142 rcfgb_p->value = 0; 3143 kick_p->value = 0; 3144 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 3145 rcfga_p->value = (rbrp->rbr_addr & 3146 (RBR_CFIG_A_STDADDR_MASK | 3147 RBR_CFIG_A_STDADDR_BASE_MASK)); 3148 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 3149 3150 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 3151 rcfgb_p->bits.ldw.vld0 = 1; 3152 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 3153 rcfgb_p->bits.ldw.vld1 = 1; 3154 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 3155 rcfgb_p->bits.ldw.vld2 = 1; 3156 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 3157 3158 /* 3159 * For each buffer block, enter receive block address to the ring. 3160 */ 3161 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 3162 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 3163 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3164 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 3165 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 3166 3167 rx_msg_ring = rbrp->rx_msg_ring; 3168 for (i = 0; i < rbrp->tnblocks; i++) { 3169 rx_msg_p = rx_msg_ring[i]; 3170 rx_msg_p->nxgep = nxgep; 3171 rx_msg_p->rx_rbr_p = rbrp; 3172 bkaddr = (uint32_t) 3173 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 3174 >> RBR_BKADDR_SHIFT)); 3175 rx_msg_p->free = B_FALSE; 3176 rx_msg_p->max_usage_cnt = 0xbaddcafe; 3177 3178 *rbr_vaddrp++ = bkaddr; 3179 } 3180 3181 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 3182 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 3183 3184 rbrp->rbr_rd_index = 0; 3185 3186 rbrp->rbr_consumed = 0; 3187 rbrp->rbr_use_bcopy = B_TRUE; 3188 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 3189 /* 3190 * Do bcopy on packets greater than bcopy size once 3191 * the lo threshold is reached. 3192 * This lo threshold should be less than the hi threshold. 3193 * 3194 * Do bcopy on every packet once the hi threshold is reached. 
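 *
 * Both thresholds below are expressed as a fraction of rbb_max:
 * NXGE_RX_COPY_N yields rbb_max * N / NXGE_RX_BCOPY_SCALE,
 * NXGE_RX_COPY_NONE disables bcopy (threshold = rbb_max), and
 * NXGE_RX_COPY_ALL copies every packet (threshold = 0).  For
 * illustration, assuming NXGE_RX_BCOPY_SCALE is 8, a ring with
 * rbb_max = 2048 and nxge_rx_threshold_hi = NXGE_RX_COPY_6 starts
 * copying every packet once 2048 * 6 / 8 = 1536 buffers are consumed.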
3195 */ 3196 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 3197 /* default it to use hi */ 3198 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 3199 } 3200 3201 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 3202 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 3203 } 3204 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 3205 3206 switch (nxge_rx_threshold_hi) { 3207 default: 3208 case NXGE_RX_COPY_NONE: 3209 /* Do not do bcopy at all */ 3210 rbrp->rbr_use_bcopy = B_FALSE; 3211 rbrp->rbr_threshold_hi = rbrp->rbb_max; 3212 break; 3213 3214 case NXGE_RX_COPY_1: 3215 case NXGE_RX_COPY_2: 3216 case NXGE_RX_COPY_3: 3217 case NXGE_RX_COPY_4: 3218 case NXGE_RX_COPY_5: 3219 case NXGE_RX_COPY_6: 3220 case NXGE_RX_COPY_7: 3221 rbrp->rbr_threshold_hi = 3222 rbrp->rbb_max * 3223 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 3224 break; 3225 3226 case NXGE_RX_COPY_ALL: 3227 rbrp->rbr_threshold_hi = 0; 3228 break; 3229 } 3230 3231 switch (nxge_rx_threshold_lo) { 3232 default: 3233 case NXGE_RX_COPY_NONE: 3234 /* Do not do bcopy at all */ 3235 if (rbrp->rbr_use_bcopy) { 3236 rbrp->rbr_use_bcopy = B_FALSE; 3237 } 3238 rbrp->rbr_threshold_lo = rbrp->rbb_max; 3239 break; 3240 3241 case NXGE_RX_COPY_1: 3242 case NXGE_RX_COPY_2: 3243 case NXGE_RX_COPY_3: 3244 case NXGE_RX_COPY_4: 3245 case NXGE_RX_COPY_5: 3246 case NXGE_RX_COPY_6: 3247 case NXGE_RX_COPY_7: 3248 rbrp->rbr_threshold_lo = 3249 rbrp->rbb_max * 3250 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 3251 break; 3252 3253 case NXGE_RX_COPY_ALL: 3254 rbrp->rbr_threshold_lo = 0; 3255 break; 3256 } 3257 3258 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3259 "nxge_map_rxdma_channel_cfg_ring: channel %d " 3260 "rbb_max %d " 3261 "rbrp->rbr_bufsize_type %d " 3262 "rbb_threshold_hi %d " 3263 "rbb_threshold_lo %d", 3264 dma_channel, 3265 rbrp->rbb_max, 3266 rbrp->rbr_bufsize_type, 3267 rbrp->rbr_threshold_hi, 3268 rbrp->rbr_threshold_lo)); 3269 3270 rbrp->page_valid.value = 0; 3271 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 3272 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 3273 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 3274 rbrp->page_hdl.value = 0; 3275 3276 rbrp->page_valid.bits.ldw.page0 = 1; 3277 rbrp->page_valid.bits.ldw.page1 = 1; 3278 3279 /* Map in the receive completion ring */ 3280 rcrp = (p_rx_rcr_ring_t) 3281 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 3282 rcrp->rdc = dma_channel; 3283 3284 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 3285 rcrp->comp_size = nxge_port_rcr_size; 3286 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 3287 3288 rcrp->max_receive_pkts = nxge_max_rx_pkts; 3289 3290 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 3291 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 3292 sizeof (rcr_entry_t)); 3293 rcrp->comp_rd_index = 0; 3294 rcrp->comp_wt_index = 0; 3295 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 3296 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3297 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3298 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3299 3300 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 3301 (nxge_port_rcr_size - 1); 3302 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 3303 (nxge_port_rcr_size - 1); 3304 3305 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3306 "==> nxge_map_rxdma_channel_cfg_ring: " 3307 "channel %d " 3308 "rbr_vaddrp $%p " 3309 "rcr_desc_rd_head_p $%p " 3310 "rcr_desc_rd_head_pp $%p " 3311 "rcr_desc_rd_last_p $%p " 3312 "rcr_desc_rd_last_pp $%p ", 3313 dma_channel, 3314 rbr_vaddrp, 3315 rcrp->rcr_desc_rd_head_p, 3316 rcrp->rcr_desc_rd_head_pp, 3317 
rcrp->rcr_desc_last_p, 3318 rcrp->rcr_desc_last_pp)); 3319 3320 /* 3321 * Zero out buffer block ring descriptors. 3322 */ 3323 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3324 rcrp->intr_timeout = nxgep->intr_timeout; 3325 rcrp->intr_threshold = nxgep->intr_threshold; 3326 rcrp->full_hdr_flag = B_FALSE; 3327 rcrp->sw_priv_hdr_len = 0; 3328 3329 cfga_p = &(rcrp->rcr_cfga); 3330 cfgb_p = &(rcrp->rcr_cfgb); 3331 cfga_p->value = 0; 3332 cfgb_p->value = 0; 3333 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 3334 cfga_p->value = (rcrp->rcr_addr & 3335 (RCRCFIG_A_STADDR_MASK | 3336 RCRCFIG_A_STADDR_BASE_MASK)); 3337 3338 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 3339 RCRCFIG_A_LEN_SHIF); 3340 3341 /* 3342 * Timeout should be set based on the system clock divider. 3343 * The following timeout value of 1 assumes that the 3344 * granularity (1000) is 3 microseconds running at 300MHz. 3345 */ 3346 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 3347 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 3348 cfgb_p->bits.ldw.entout = 1; 3349 3350 /* Map in the mailbox */ 3351 mboxp = (p_rx_mbox_t) 3352 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 3353 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 3354 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 3355 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 3356 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 3357 cfig1_p->value = cfig2_p->value = 0; 3358 3359 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 3360 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3361 "==> nxge_map_rxdma_channel_cfg_ring: " 3362 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 3363 dma_channel, cfig1_p->value, cfig2_p->value, 3364 mboxp->mbox_addr)); 3365 3366 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 3367 & 0xfff); 3368 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 3369 3370 3371 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 3372 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 3373 RXDMA_CFIG2_MBADDR_L_MASK); 3374 3375 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 3376 3377 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3378 "==> nxge_map_rxdma_channel_cfg_ring: " 3379 "channel %d damaddrp $%p " 3380 "cfg1 0x%016llx cfig2 0x%016llx", 3381 dma_channel, dmaaddrp, 3382 cfig1_p->value, cfig2_p->value)); 3383 3384 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 3385 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 3386 3387 rbrp->rx_rcr_p = rcrp; 3388 rcrp->rx_rbr_p = rbrp; 3389 *rcr_p = rcrp; 3390 *rx_mbox_p = mboxp; 3391 3392 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3393 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 3394 3395 return (status); 3396 } 3397 3398 /*ARGSUSED*/ 3399 static void 3400 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 3401 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 3402 { 3403 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3404 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 3405 rcr_p->rdc)); 3406 3407 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 3408 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 3409 3410 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3411 "<== nxge_unmap_rxdma_channel_cfg_ring")); 3412 } 3413 3414 static nxge_status_t 3415 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 3416 p_nxge_dma_common_t *dma_buf_p, 3417 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 3418 { 3419 p_rx_rbr_ring_t rbrp; 3420 p_nxge_dma_common_t dma_bufp, tmp_bufp; 3421 p_rx_msg_t *rx_msg_ring; 3422 p_rx_msg_t rx_msg_p; 3423 p_mblk_t mblk_p; 3424 3425 rxring_info_t *ring_info; 3426 nxge_status_t status = 
NXGE_OK; 3427 int i, j, index; 3428 uint32_t size, bsize, nblocks, nmsgs; 3429 3430 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3431 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 3432 channel)); 3433 3434 dma_bufp = tmp_bufp = *dma_buf_p; 3435 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3436 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 3437 "chunks bufp 0x%016llx", 3438 channel, num_chunks, dma_bufp)); 3439 3440 nmsgs = 0; 3441 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 3442 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3443 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3444 "bufp 0x%016llx nblocks %d nmsgs %d", 3445 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 3446 nmsgs += tmp_bufp->nblocks; 3447 } 3448 if (!nmsgs) { 3449 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3450 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 3451 "no msg blocks", 3452 channel)); 3453 status = NXGE_ERROR; 3454 goto nxge_map_rxdma_channel_buf_ring_exit; 3455 } 3456 3457 rbrp = (p_rx_rbr_ring_t) 3458 KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 3459 3460 size = nmsgs * sizeof (p_rx_msg_t); 3461 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 3462 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 3463 KM_SLEEP); 3464 3465 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 3466 (void *)nxgep->interrupt_cookie); 3467 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 3468 (void *)nxgep->interrupt_cookie); 3469 rbrp->rdc = channel; 3470 rbrp->num_blocks = num_chunks; 3471 rbrp->tnblocks = nmsgs; 3472 rbrp->rbb_max = nmsgs; 3473 rbrp->rbr_max_size = nmsgs; 3474 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 3475 3476 /* 3477 * Buffer sizes suggested by NIU architect. 3478 * 256, 512 and 2K. 3479 */ 3480 3481 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 3482 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 3483 rbrp->npi_pkt_buf_size0 = SIZE_256B; 3484 3485 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 3486 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 3487 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 3488 3489 rbrp->block_size = nxgep->rx_default_block_size; 3490 3491 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 3492 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 3493 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 3494 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 3495 } else { 3496 if (rbrp->block_size >= 0x2000) { 3497 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 3498 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 3499 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 3500 } else { 3501 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 3502 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 3503 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 3504 } 3505 } 3506 3507 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3508 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 3509 "actual rbr max %d rbb_max %d nmsgs %d " 3510 "rbrp->block_size %d default_block_size %d " 3511 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 3512 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 3513 rbrp->block_size, nxgep->rx_default_block_size, 3514 nxge_rbr_size, nxge_rbr_spare_size)); 3515 3516 /* Map in buffers from the buffer pool. 
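 * For every DMA chunk the loop below records its DVMA address, kernel
 * address, length and starting block index in ring_info->buffer[];
 * nxge_rxbuf_index_info_init() then builds the lookup information
 * that lets nxge_rxbuf_pp_to_vp() translate a packet buffer address
 * from a completion entry back to an rx_msg block and offset.  Each
 * rx_msg block is block_size bytes and may be handed out as several
 * packet buffers; in nxge_receive_packet() the reuse count is
 * block_size / bsize, e.g. (illustrative) an 8 KB block posted as
 * 2 KB buffers is used four times before it can be reposted.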
*/ 3517 index = 0; 3518 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 3519 bsize = dma_bufp->block_size; 3520 nblocks = dma_bufp->nblocks; 3521 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3522 ring_info->buffer[i].buf_index = i; 3523 ring_info->buffer[i].buf_size = dma_bufp->alength; 3524 ring_info->buffer[i].start_index = index; 3525 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3526 3527 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3528 " nxge_map_rxdma_channel_buf_ring: map channel %d " 3529 "chunk %d" 3530 " nblocks %d chunk_size %x block_size 0x%x " 3531 "dma_bufp $%p", channel, i, 3532 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3533 dma_bufp)); 3534 3535 for (j = 0; j < nblocks; j++) { 3536 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 3537 dma_bufp)) == NULL) { 3538 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3539 "allocb failed (index %d i %d j %d)", 3540 index, i, j)); 3541 goto nxge_map_rxdma_channel_buf_ring_fail1; 3542 } 3543 rx_msg_ring[index] = rx_msg_p; 3544 rx_msg_p->block_index = index; 3545 rx_msg_p->shifted_addr = (uint32_t) 3546 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 3547 RBR_BKADDR_SHIFT)); 3548 3549 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3550 "index %d j %d rx_msg_p $%p mblk %p", 3551 index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 3552 3553 mblk_p = rx_msg_p->rx_mblk_p; 3554 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3555 index++; 3556 rx_msg_p->buf_dma.dma_channel = channel; 3557 } 3558 } 3559 if (i < rbrp->num_blocks) { 3560 goto nxge_map_rxdma_channel_buf_ring_fail1; 3561 } 3562 3563 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3564 "nxge_map_rxdma_channel_buf_ring: done buf init " 3565 "channel %d msg block entries %d", 3566 channel, index)); 3567 ring_info->block_size_mask = bsize - 1; 3568 rbrp->rx_msg_ring = rx_msg_ring; 3569 rbrp->dma_bufp = dma_buf_p; 3570 rbrp->ring_info = ring_info; 3571 3572 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 3573 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3574 " nxge_map_rxdma_channel_buf_ring: " 3575 "channel %d done buf info init", channel)); 3576 3577 *rbr_p = rbrp; 3578 goto nxge_map_rxdma_channel_buf_ring_exit; 3579 3580 nxge_map_rxdma_channel_buf_ring_fail1: 3581 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3582 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 3583 channel, status)); 3584 3585 index--; 3586 for (; index >= 0; index--) { 3587 rx_msg_p = rx_msg_ring[index]; 3588 if (rx_msg_p != NULL) { 3589 freeb(rx_msg_p->rx_mblk_p); 3590 rx_msg_ring[index] = NULL; 3591 } 3592 } 3593 nxge_map_rxdma_channel_buf_ring_fail: 3594 MUTEX_DESTROY(&rbrp->post_lock); 3595 MUTEX_DESTROY(&rbrp->lock); 3596 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3597 KMEM_FREE(rx_msg_ring, size); 3598 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 3599 3600 status = NXGE_ERROR; 3601 3602 nxge_map_rxdma_channel_buf_ring_exit: 3603 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3604 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 3605 3606 return (status); 3607 } 3608 3609 /*ARGSUSED*/ 3610 static void 3611 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 3612 p_rx_rbr_ring_t rbr_p) 3613 { 3614 p_rx_msg_t *rx_msg_ring; 3615 p_rx_msg_t rx_msg_p; 3616 rxring_info_t *ring_info; 3617 int i; 3618 uint32_t size; 3619 #ifdef NXGE_DEBUG 3620 int num_chunks; 3621 #endif 3622 3623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3624 "==> nxge_unmap_rxdma_channel_buf_ring")); 3625 if (rbr_p == NULL) { 3626 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3627 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 3628 return; 3629 } 3630 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3631 "==> 
nxge_unmap_rxdma_channel_buf_ring: channel %d", 3632 rbr_p->rdc)); 3633 3634 rx_msg_ring = rbr_p->rx_msg_ring; 3635 ring_info = rbr_p->ring_info; 3636 3637 if (rx_msg_ring == NULL || ring_info == NULL) { 3638 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3639 "<== nxge_unmap_rxdma_channel_buf_ring: " 3640 "rx_msg_ring $%p ring_info $%p", 3641 rx_msg_p, ring_info)); 3642 return; 3643 } 3644 3645 #ifdef NXGE_DEBUG 3646 num_chunks = rbr_p->num_blocks; 3647 #endif 3648 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 3649 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3650 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 3651 "tnblocks %d (max %d) size ptrs %d ", 3652 rbr_p->rdc, num_chunks, 3653 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 3654 3655 for (i = 0; i < rbr_p->tnblocks; i++) { 3656 rx_msg_p = rx_msg_ring[i]; 3657 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3658 " nxge_unmap_rxdma_channel_buf_ring: " 3659 "rx_msg_p $%p", 3660 rx_msg_p)); 3661 if (rx_msg_p != NULL) { 3662 freeb(rx_msg_p->rx_mblk_p); 3663 rx_msg_ring[i] = NULL; 3664 } 3665 } 3666 3667 MUTEX_DESTROY(&rbr_p->post_lock); 3668 MUTEX_DESTROY(&rbr_p->lock); 3669 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 3670 KMEM_FREE(rx_msg_ring, size); 3671 KMEM_FREE(rbr_p, sizeof (rx_rbr_ring_t)); 3672 3673 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3674 "<== nxge_unmap_rxdma_channel_buf_ring")); 3675 } 3676 3677 static nxge_status_t 3678 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 3679 { 3680 nxge_status_t status = NXGE_OK; 3681 3682 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3683 3684 /* 3685 * Load the sharable parameters by writing to the 3686 * function zero control registers. These FZC registers 3687 * should be initialized only once for the entire chip. 3688 */ 3689 (void) nxge_init_fzc_rx_common(nxgep); 3690 3691 /* 3692 * Initialize the RXDMA port specific FZC control configurations. 3693 * These FZC registers are pertaining to each port. 
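 *
 * FZC setup is done at three levels: nxge_init_fzc_rx_common() above
 * is chip-wide and runs once, nxge_init_fzc_rxdma_port() below is
 * per-port, and the per-channel (logical page) FZC configuration is
 * done later from nxge_rxdma_start_channel() via
 * nxge_init_fzc_rxdma_channel().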
3694 */ 3695 (void) nxge_init_fzc_rxdma_port(nxgep); 3696 3697 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 3698 3699 return (status); 3700 } 3701 3702 /*ARGSUSED*/ 3703 static void 3704 nxge_rxdma_hw_stop_common(p_nxge_t nxgep) 3705 { 3706 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3707 3708 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 3709 } 3710 3711 static nxge_status_t 3712 nxge_rxdma_hw_start(p_nxge_t nxgep) 3713 { 3714 int i, ndmas; 3715 uint16_t channel; 3716 p_rx_rbr_rings_t rx_rbr_rings; 3717 p_rx_rbr_ring_t *rbr_rings; 3718 p_rx_rcr_rings_t rx_rcr_rings; 3719 p_rx_rcr_ring_t *rcr_rings; 3720 p_rx_mbox_areas_t rx_mbox_areas_p; 3721 p_rx_mbox_t *rx_mbox_p; 3722 nxge_status_t status = NXGE_OK; 3723 3724 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 3725 3726 rx_rbr_rings = nxgep->rx_rbr_rings; 3727 rx_rcr_rings = nxgep->rx_rcr_rings; 3728 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3729 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3730 "<== nxge_rxdma_hw_start: NULL ring pointers")); 3731 return (NXGE_ERROR); 3732 } 3733 ndmas = rx_rbr_rings->ndmas; 3734 if (ndmas == 0) { 3735 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3736 "<== nxge_rxdma_hw_start: no dma channel allocated")); 3737 return (NXGE_ERROR); 3738 } 3739 3740 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3741 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 3742 3743 rbr_rings = rx_rbr_rings->rbr_rings; 3744 rcr_rings = rx_rcr_rings->rcr_rings; 3745 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 3746 if (rx_mbox_areas_p) { 3747 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 3748 } 3749 3750 for (i = 0; i < ndmas; i++) { 3751 channel = rbr_rings[i]->rdc; 3752 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3753 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 3754 ndmas, channel)); 3755 status = nxge_rxdma_start_channel(nxgep, channel, 3756 (p_rx_rbr_ring_t)rbr_rings[i], 3757 (p_rx_rcr_ring_t)rcr_rings[i], 3758 (p_rx_mbox_t)rx_mbox_p[i]); 3759 if (status != NXGE_OK) { 3760 goto nxge_rxdma_hw_start_fail1; 3761 } 3762 } 3763 3764 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 3765 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3766 rx_rbr_rings, rx_rcr_rings)); 3767 3768 goto nxge_rxdma_hw_start_exit; 3769 3770 nxge_rxdma_hw_start_fail1: 3771 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3772 "==> nxge_rxdma_hw_start: disable " 3773 "(status 0x%x channel %d i %d)", status, channel, i)); 3774 for (; i >= 0; i--) { 3775 channel = rbr_rings[i]->rdc; 3776 (void) nxge_rxdma_stop_channel(nxgep, channel); 3777 } 3778 3779 nxge_rxdma_hw_start_exit: 3780 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3781 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 3782 3783 return (status); 3784 } 3785 3786 static void 3787 nxge_rxdma_hw_stop(p_nxge_t nxgep) 3788 { 3789 int i, ndmas; 3790 uint16_t channel; 3791 p_rx_rbr_rings_t rx_rbr_rings; 3792 p_rx_rbr_ring_t *rbr_rings; 3793 p_rx_rcr_rings_t rx_rcr_rings; 3794 3795 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 3796 3797 rx_rbr_rings = nxgep->rx_rbr_rings; 3798 rx_rcr_rings = nxgep->rx_rcr_rings; 3799 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 3800 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3801 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 3802 return; 3803 } 3804 ndmas = rx_rbr_rings->ndmas; 3805 if (!ndmas) { 3806 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3807 "<== nxge_rxdma_hw_stop: no dma channel allocated")); 3808 return; 3809 } 3810 3811 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3812 "==> nxge_rxdma_hw_stop (ndmas %d)", ndmas)); 3813 3814 rbr_rings = 
rx_rbr_rings->rbr_rings; 3815 3816 for (i = 0; i < ndmas; i++) { 3817 channel = rbr_rings[i]->rdc; 3818 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3819 "==> nxge_rxdma_hw_stop (ndmas %d) channel %d", 3820 ndmas, channel)); 3821 (void) nxge_rxdma_stop_channel(nxgep, channel); 3822 } 3823 3824 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 3825 "rx_rbr_rings 0x%016llx rings 0x%016llx", 3826 rx_rbr_rings, rx_rcr_rings)); 3827 3828 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 3829 } 3830 3831 3832 static nxge_status_t 3833 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 3834 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 3835 3836 { 3837 npi_handle_t handle; 3838 npi_status_t rs = NPI_SUCCESS; 3839 rx_dma_ctl_stat_t cs; 3840 rx_dma_ent_msk_t ent_mask; 3841 nxge_status_t status = NXGE_OK; 3842 3843 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 3844 3845 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3846 3847 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 3848 "npi handle addr $%p acc $%p", 3849 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 3850 3851 /* Reset RXDMA channel */ 3852 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 3853 if (rs != NPI_SUCCESS) { 3854 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3855 "==> nxge_rxdma_start_channel: " 3856 "reset rxdma failed (0x%08x channel %d)", 3857 status, channel)); 3858 return (NXGE_ERROR | rs); 3859 } 3860 3861 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3862 "==> nxge_rxdma_start_channel: reset done: channel %d", 3863 channel)); 3864 3865 /* 3866 * Initialize the RXDMA channel specific FZC control 3867 * configurations. These FZC registers are pertaining 3868 * to each RX channel (logical pages). 3869 */ 3870 status = nxge_init_fzc_rxdma_channel(nxgep, 3871 channel, rbr_p, rcr_p, mbox_p); 3872 if (status != NXGE_OK) { 3873 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3874 "==> nxge_rxdma_start_channel: " 3875 "init fzc rxdma failed (0x%08x channel %d)", 3876 status, channel)); 3877 return (status); 3878 } 3879 3880 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3881 "==> nxge_rxdma_start_channel: fzc done")); 3882 3883 /* 3884 * Zero out the shadow and prefetch ram. 3885 */ 3886 3887 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 3888 "ram done")); 3889 3890 /* Set up the interrupt event masks. 
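 * Only RBREMPTY is masked before the channel is enabled; once
 * nxge_enable_rxdma_channel() succeeds, the mask is rewritten further
 * below to also suppress WRED_DROP and PTDROP_PKT events, and the
 * control/status register is primed with mex, rcrthres, rcrto and
 * rbr_empty set.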
	/* Set up the interrupt event masks. */
	ent_mask.value = 0;
	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "event done: channel %d (mask 0x%016llx)",
	    channel, ent_mask.value));

	/* Initialize the receive DMA control and status register */
	cs.value = 0;
	cs.bits.hdw.mex = 1;
	cs.bits.hdw.rcrthres = 1;
	cs.bits.hdw.rcrto = 1;
	cs.bits.hdw.rbr_empty = 1;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma control register failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	/*
	 * Load RXDMA descriptors, buffers, mailbox,
	 * initialise the receive DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_rxdma_channel(nxgep,
	    channel, rbr_p, rcr_p, mbox_p);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_start_channel: "
		    " init enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	ent_mask.value = 0;
	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: enable done"));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));

	return (NXGE_OK);
}

static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

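	/*
	 * Teardown path: mask every RDC event (RX_DMA_ENT_MSK_ALL) so the
	 * channel cannot post further interrupts, clear the control/status
	 * register back to its default, and then disable the DMA channel.
	 */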
	/* Set up the interrupt event masks. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/* Initialize the receive DMA control and status register */
	cs.value = 0;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel,
	    &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));

	/* Disable the DMA channel */
	status = nxge_disable_rxdma_channel(nxgep, channel);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " disable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep,
	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

	return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_rdc_sys_stats_t	statsp;
	rx_ctl_dat_fifo_stat_t	stat;
	uint32_t		zcp_err_status;
	uint32_t		ipp_err_status;
	nxge_status_t		status = NXGE_OK;
	npi_status_t		rs = NPI_SUCCESS;
	boolean_t		my_err = B_FALSE;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);

	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (stat.bits.ldw.id_mismatch) {
		statsp->id_mismatch++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
		    NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
		/* Global fatal error encountered */
	}

	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
		switch (nxgep->mac.portnum) {
		case 0:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 1:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 2:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 3:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		default:
			return (NXGE_ERROR);
		}
	}

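	/*
	 * The ZCP/IPP end-of-packet error fields are per-port bit vectors
	 * (FIFO_EOP_PORT0..PORT3).  The switch above latches the status
	 * words only when the bit for this instance's port is set, so the
	 * port-level recovery below is attempted only for errors that
	 * actually belong to this port.
	 */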
	if (my_err) {
		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
		    zcp_err_status);
		if (status != NXGE_OK)
			return (status);
	}

	return (NXGE_OK);
}

static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
    uint32_t zcp_status)
{
	boolean_t		rxport_fatal = B_FALSE;
	p_nxge_rdc_sys_stats_t	statsp;
	nxge_status_t		status = NXGE_OK;
	uint8_t			portn;

	portn = nxgep->mac.portnum;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	if (ipp_status & (0x1 << portn)) {
		statsp->ipp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (zcp_status & (0x1 << portn)) {
		statsp->zcp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_handle_port_error: "
		    " fatal error on Port #%d\n",
		    portn));
		status = nxge_rx_port_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	return (status);
}

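/*
 * nxge_rxdma_fatal_err_recover
 *
 * Per-channel fatal error recovery.  As implemented below, the sequence
 * is roughly:
 *
 *	1. take the RCR, RBR and post locks for the ring pair,
 *	2. disable and reset the RDC, masking all of its events,
 *	3. rewind the RBR/RCR indices and zero the completion ring,
 *	4. mark buffers still held by the driver as re-postable,
 *	5. restart the channel via nxge_rxdma_start_channel().
 *
 * Buffers whose usage counts do not match are only reported, not
 * reclaimed; they are presumed to still be loaned out above the driver.
 */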
static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	int			ring_idx;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the DMA channel and wait for the stop-done indication.
	 * If the stop-done bit does not get set, flag an error.
	 */

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];

	MUTEX_ENTER(&rcrp->lock);
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp =
	    (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* Buffer can be re-posted */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored",
	    channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));

	return (NXGE_OK);
fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));

	return (NXGE_ERROR | rs);
}

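/*
 * nxge_rx_port_fatal_err_recover
 *
 * Port-level recovery: the RxMAC is disabled first so no new traffic
 * arrives, every RX DMA channel backed by this port's buffer pool is
 * then run through nxge_rxdma_fatal_err_recover(), and finally the IPP
 * and RxMAC blocks are reset, re-initialized and re-enabled.  A failure
 * of an individual channel is logged but does not abort the rest of
 * the port recovery.
 */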
nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
	nxge_status_t		status = NXGE_OK;
	p_nxge_dma_common_t	*dma_buf_p;
	uint16_t		channel;
	int			ndmas;
	int			i;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));
	/* Disable RxMAC */

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n"));
	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DELAY(1000);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels..."));

	ndmas = nxgep->rx_buf_pool_p->ndmas;
	dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p;

	for (i = 0; i < ndmas; i++) {
		channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel;
		if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "Could not recover channel %d",
			    channel));
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP..."));

	/* Reset IPP */
	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

	/* Reset RxMAC */
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

	/* Re-Initialize IPP */
	if (nxge_ipp_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));

	/* Re-enable RxMAC */
	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to enable RxMAC"));
		goto fail;
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status);
}

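/*
 * nxge_rxdma_inject_err
 *
 * Error-injection hook used for FMA testing.  For RDC-level errors the
 * current RX_DMA_CTL_STAT debug register image is read, the bit that
 * corresponds to err_id is set, and the value is written back through
 * the debug register so the hardware reports the error as if it had
 * occurred on its own.  Port-level errors (ID mismatch, ZCP/IPP EOP)
 * are injected through the RX_CTL_DAT_FIFO_STAT debug register instead,
 * with the bit selected by this instance's port number.
 */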
void
nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
{
	rx_dma_ctl_stat_t	cs;
	rx_ctl_dat_fifo_stat_t	cdfs;

	switch (err_id) {
	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
	case NXGE_FM_EREPORT_RDMC_RCRINCON:
	case NXGE_FM_EREPORT_RDMC_RCRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, &cs.value);
		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
			cs.bits.hdw.rcr_ack_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
			cs.bits.hdw.dc_fifo_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
			cs.bits.hdw.rcr_sha_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
			cs.bits.hdw.rbr_pre_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
			cs.bits.hdw.rbr_tmout = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
			cs.bits.hdw.rsp_cnt_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
			cs.bits.hdw.byte_en_bus = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
			cs.bits.hdw.rsp_dat_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
			cs.bits.hdw.config_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
			cs.bits.hdw.rcrincon = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
			cs.bits.hdw.rcrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
			cs.bits.hdw.rbrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
			cs.bits.hdw.rbrlogpage = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
			cs.bits.hdw.cfiglogpage = 1;
		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, cs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
		cdfs.value = 0;
		if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
		cmn_err(CE_NOTE,
		    "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
		RXDMA_REG_WRITE64(nxgep->npi_handle,
		    RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
		break;
	case NXGE_FM_EREPORT_RDMC_COMPLETION_ERR:
		break;
	}
}

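/*
 * nxge_get_pktbuf_size
 *
 * Translate the packet-buffer-size type reported in an RCR completion
 * entry into a byte count, using the block and buffer sizes that were
 * programmed into the RBR configuration B register for this channel.
 * RCR_SINGLE_BLOCK means the packet consumed an entire block, so the
 * block size itself is returned; otherwise one of the three sub-block
 * buffer sizes (bufsz0/1/2) is decoded.  The 8K block size serves as
 * the fallback if an unexpected encoding is seen.
 */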
static uint16_t
nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb)
{
	uint16_t sz = RBR_BKSIZE_8K_BYTES;

	switch (bufsz_type) {
	case RCR_PKTBUFSZ_0:
		switch (rbr_cfgb.bits.ldw.bufsz0) {
		case RBR_BUFSZ0_256B:
			sz = RBR_BUFSZ0_256_BYTES;
			break;
		case RBR_BUFSZ0_512B:
			sz = RBR_BUFSZ0_512B_BYTES;
			break;
		case RBR_BUFSZ0_1K:
			sz = RBR_BUFSZ0_1K_BYTES;
			break;
		case RBR_BUFSZ0_2K:
			sz = RBR_BUFSZ0_2K_BYTES;
			break;
		default:
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_get_pktbuf_size: bad bufsz0"));
			break;
		}
		break;
	case RCR_PKTBUFSZ_1:
		switch (rbr_cfgb.bits.ldw.bufsz1) {
		case RBR_BUFSZ1_1K:
			sz = RBR_BUFSZ1_1K_BYTES;
			break;
		case RBR_BUFSZ1_2K:
			sz = RBR_BUFSZ1_2K_BYTES;
			break;
		case RBR_BUFSZ1_4K:
			sz = RBR_BUFSZ1_4K_BYTES;
			break;
		case RBR_BUFSZ1_8K:
			sz = RBR_BUFSZ1_8K_BYTES;
			break;
		default:
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_get_pktbuf_size: bad bufsz1"));
			break;
		}
		break;
	case RCR_PKTBUFSZ_2:
		switch (rbr_cfgb.bits.ldw.bufsz2) {
		case RBR_BUFSZ2_2K:
			sz = RBR_BUFSZ2_2K_BYTES;
			break;
		case RBR_BUFSZ2_4K:
			sz = RBR_BUFSZ2_4K_BYTES;
			break;
		case RBR_BUFSZ2_8K:
			sz = RBR_BUFSZ2_8K_BYTES;
			break;
		case RBR_BUFSZ2_16K:
			sz = RBR_BUFSZ2_16K_BYTES;
			break;
		default:
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_get_pktbuf_size: bad bufsz2"));
			break;
		}
		break;
	case RCR_SINGLE_BLOCK:
		switch (rbr_cfgb.bits.ldw.bksize) {
		case BKSIZE_4K:
			sz = RBR_BKSIZE_4K_BYTES;
			break;
		case BKSIZE_8K:
			sz = RBR_BKSIZE_8K_BYTES;
			break;
		case BKSIZE_16K:
			sz = RBR_BKSIZE_16K_BYTES;
			break;
		case BKSIZE_32K:
			sz = RBR_BKSIZE_32K_BYTES;
			break;
		default:
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_get_pktbuf_size: bad bksize"));
			break;
		}
		break;
	default:
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_get_pktbuf_size: bad bufsz_type"));
		break;
	}
	return (sz);
}