/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
    (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define NXGE_ACTUAL_RDC(nxgep, rdc) \
    (rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;
extern uint16_t nxge_rdc_buf_offset;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

extern uint32_t nxge_cksum_offload;
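
/*
 * Example only (the values below are illustrative, not recommendations):
 * the tunables above can be set from /etc/system with lines of the form
 *
 *	set nxge:nxge_rbr_size = 2048
 *	set nxge:nxge_rcr_size = 1024
 *
 * or patched with adb on a running system.  Consult the driver
 * documentation for the supported ranges before changing any of them.
 */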

static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
static void nxge_unmap_rxdma(p_nxge_t, int);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
static void nxge_rxdma_hw_stop(p_nxge_t, int);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

static mblk_t *
nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);

static void nxge_receive_packet(p_nxge_t,
    p_rx_rcr_ring_t,
    p_rcr_entry_t,
    boolean_t *,
    mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
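
/*
 * nxge_init_rxdma_channels
 *
 *	Initialize every RXDMA channel owned by this instance.  For each
 *	logical group bit set in the RX set, every RDC present in that
 *	group's map is added with nxge_grp_dc_add(); if any add fails,
 *	the channels added so far are removed again.
 *	(This summary is derived from the function body below.)
 *
 * Arguments:
 *	nxgep
 *
 * Context:
 *	Service domain or guest domain
 */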
nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int i, count, channel;
    nxge_grp_t *group;
    dc_map_t map;
    int dev_gindex;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

    if (!isLDOMguest(nxgep)) {
        if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
            cmn_err(CE_NOTE, "hw_start_common");
            return (NXGE_ERROR);
        }
    }

    /*
     * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
     * We only have 8 hardware RDC tables, but we may have
     * up to 16 logical (software-defined) groups of RDCS,
     * if we make use of layer 3 & 4 hardware classification.
     */
    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
            map = nxgep->pt_config.rdc_grps[dev_gindex].map;
            for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
                if ((1 << channel) & map) {
                    if ((nxge_grp_dc_add(nxgep,
                        group, VP_BOUND_RX, channel)))
                        goto init_rxdma_channels_exit;
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
    return (NXGE_OK);

init_rxdma_channels_exit:
    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
            map = nxgep->pt_config.rdc_grps[dev_gindex].map;
            for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
                if ((1 << channel) & map) {
                    nxge_grp_dc_remove(nxgep,
                        VP_BOUND_RX, channel);
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
    return (NXGE_ERROR);
}
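
/*
 * nxge_init_rxdma_channel
 *
 *	Initialize a single RXDMA channel: map its descriptor rings and
 *	buffers, bind the channel interrupt when running as an LDOMs
 *	guest (sun4v), start the hardware, and create the per-channel
 *	kstats if they do not already exist.
 *	(This summary is derived from the function body below.)
 *
 * Arguments:
 *	nxge
 *	channel		The channel to initialize.
 *
 * Context:
 *	Service domain or guest domain
 */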
nxge_status_t
nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
{
    nxge_status_t status;

    NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));

    status = nxge_map_rxdma(nxge, channel);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "<== nxge_init_rxdma_channel: status 0x%x", status));
        return (status);
    }

#if defined(sun4v)
    if (isLDOMguest(nxge)) {
        /* set rcr_ring */
        p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];

        status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
        if (status != NXGE_OK) {
            nxge_unmap_rxdma(nxge, channel);
            return (status);
        }
    }
#endif

    status = nxge_rxdma_hw_start(nxge, channel);
    if (status != NXGE_OK) {
        nxge_unmap_rxdma(nxge, channel);
    }

    if (!nxge->statsp->rdc_ksp[channel])
        nxge_setup_rdc_kstats(nxge, channel);

    NXGE_DEBUG_MSG((nxge, MEM2_CTL,
        "<== nxge_init_rxdma_channel: status 0x%x", status));

    return (status);
}

void
nxge_uninit_rxdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "nxge_uninit_rxdma_channels: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
        }
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
}

void
nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
{
    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));

    if (nxgep->statsp->rdc_ksp[channel]) {
        kstat_delete(nxgep->statsp->rdc_ksp[channel]);
        nxgep->statsp->rdc_ksp[channel] = 0;
    }

    nxge_rxdma_hw_stop(nxgep, channel);
    nxge_unmap_rxdma(nxgep, channel);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
}

nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_cfg_rdc_reset(handle, channel);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));

    return (status);
}

void
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));

    if (!isLDOMguest(nxgep)) {
        npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
        (void) npi_rxdma_dump_fzc_regs(handle);
    }

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "nxge_rxdma_regs_dump_channels: "
            "NULL ring pointer(s)"));
        return;
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_regs_dump_channels: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            if (ring) {
                (void) nxge_dump_rxdma_channel(nxgep, rdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump_channels"));
}

nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_dump_rdc_regs(handle, channel);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }
    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
    return (status);
}

nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ent_msk_t mask_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_init_rxdma_channel_event_mask"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ctl_stat_t cs_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_init_rxdma_channel_cntl_stat"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

/*
 * nxge_rxdma_cfg_rdcgrp_default_rdc
 *
 *	Set the default RDC for an RDC Group (Table)
 *
 * Arguments:
 *	nxgep
 *	rdcgrp		The group to modify
 *	rdc		The new default RDC.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_rxdma_cfg_rdc_table_default_rdc()
 *
 * Registers accessed:
 *	RDC_TBL_REG: FZC_ZCP + 0x10000
 *
 * Context:
 *	Service domain
 */
nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(
    p_nxge_t nxgep,
    uint8_t rdcgrp,
    uint8_t rdc)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    p_nxge_dma_pt_cfg_t p_dma_cfgp;
    p_nxge_rdc_grp_t rdc_grp_p;
    uint8_t actual_rdcgrp, actual_rdc;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
    p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /*
     * This has to be rewritten.  Do we even allow this anymore?
     */
    rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
    RDC_MAP_IN(rdc_grp_p->map, rdc);
    rdc_grp_p->def_rdc = rdc;
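
    /*
     * Translate the logical group/RDC numbers used by the software
     * configuration into hardware table/channel numbers.  For example
     * (hypothetical numbers), if def_mac_rxdma_grpid is 2 and start_rdc
     * is 4, logical group 1 and RDC 1 map to hardware RDC table 3 and
     * hardware channel 5; see the NXGE_ACTUAL_RDCGRP and
     * NXGE_ACTUAL_RDC macros at the top of this file.
     */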

    actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
    actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);

    rs = npi_rxdma_cfg_rdc_table_default_rdc(
        handle, actual_rdcgrp, actual_rdc);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
{
    npi_handle_t handle;

    uint8_t actual_rdc;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_port_default_rdc"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    actual_rdc = rdc;	/* XXX Hack! */
    rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " <== nxge_rxdma_cfg_port_default_rdc"));

    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
    uint16_t pkts)
{
    npi_status_t rs = NPI_SUCCESS;
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_rcr_threshold"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
    uint16_t tout, uint8_t enable)
{
    npi_status_t rs = NPI_SUCCESS;
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    if (enable == 0) {
        rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
    } else {
        rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
            tout);
    }

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
    return (NXGE_OK);
}
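
/*
 * nxge_enable_rxdma_channel
 *
 *	Write the receive ring configuration composed at init time
 *	(mailbox, RBR, RCR, buffer sizes, threshold and timeout) to the
 *	hardware, enable the DMA (service domain only), kick the RBR
 *	with the number of available buffers, and clear the RBR empty
 *	bit.  (This summary is derived from the function body below.)
 *
 * Arguments:
 *	nxgep
 *	channel			The channel to enable.
 *	rbr_p, rcr_p, mbox_p	The rings and mailbox of this channel.
 *
 * Context:
 *	Service domain or guest domain
 */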
nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
    npi_handle_t handle;
    rdc_desc_cfg_t rdc_desc;
    p_rcrcfig_b_t cfgb_p;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /*
     * Use configuration data composed at init time.
     * Write to hardware the receive ring configurations.
     */
    rdc_desc.mbox_enable = 1;
    rdc_desc.mbox_addr = mbox_p->mbox_addr;
    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
        mbox_p->mbox_addr, rdc_desc.mbox_addr));

    rdc_desc.rbr_len = rbr_p->rbb_max;
    rdc_desc.rbr_addr = rbr_p->rbr_addr;

    switch (nxgep->rx_bksize_code) {
    case RBR_BKSIZE_4K:
        rdc_desc.page_size = SIZE_4KB;
        break;
    case RBR_BKSIZE_8K:
        rdc_desc.page_size = SIZE_8KB;
        break;
    case RBR_BKSIZE_16K:
        rdc_desc.page_size = SIZE_16KB;
        break;
    case RBR_BKSIZE_32K:
        rdc_desc.page_size = SIZE_32KB;
        break;
    }

    rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
    rdc_desc.valid0 = 1;

    rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
    rdc_desc.valid1 = 1;

    rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
    rdc_desc.valid2 = 1;

    rdc_desc.full_hdr = rcr_p->full_hdr_flag;
    rdc_desc.offset = rcr_p->sw_priv_hdr_len;

    rdc_desc.rcr_len = rcr_p->comp_size;
    rdc_desc.rcr_addr = rcr_p->rcr_addr;

    cfgb_p = &(rcr_p->rcr_cfgb);
    rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;

    /* For now, disable this timeout in a guest domain. */
    if (isLDOMguest(nxgep)) {
        rdc_desc.rcr_timeout = 0;
        rdc_desc.rcr_timeout_enable = 0;
    } else {
        rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
        rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
        "rbr_len qlen %d pagesize code %d rcr_len %d",
        rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
        "size 0 %d size 1 %d size 2 %d",
        rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
        rbr_p->npi_pkt_buf_size2));

    if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
        rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
            &rdc_desc, B_TRUE);
    else
        rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
            &rdc_desc, B_FALSE);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    /*
     * Enable the timeout and threshold.
     */
    rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
        rdc_desc.rcr_threshold);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
        rdc_desc.rcr_timeout);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    if (!isLDOMguest(nxgep)) {
        /* Enable the DMA */
        rs = npi_rxdma_cfg_rdc_enable(handle, channel);
        if (rs != NPI_SUCCESS) {
            return (NXGE_ERROR | rs);
        }
    }

    /* Kick the DMA engine. */
    npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);

    if (!isLDOMguest(nxgep)) {
        /* Clear the rbr empty bit */
        (void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));

    return (NXGE_OK);
}

nxge_status_t
nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /* disable the DMA */
    rs = npi_rxdma_cfg_rdc_disable(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_disable_rxdma_channel:failed (0x%x)",
            rs));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
{
    npi_handle_t handle;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_rxdma_channel_rcrflush"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    npi_rxdma_rdc_rcr_flush(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_rxdma_channel_rcrflush"));
    return (status);
}

#define MID_INDEX(l, r) ((r + l + 1) >> 1)

#define TO_LEFT -1
#define TO_RIGHT 1
#define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
#define BOTH_LEFT (TO_LEFT + TO_LEFT)
#define IN_MIDDLE (TO_RIGHT + TO_LEFT)
#define NO_HINT 0xffffffff
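
/*
 * The macros above implement the interval test used by the binary
 * search in nxge_rxbuf_pp_to_vp() below: base_side becomes TO_RIGHT
 * when the packet address is at or above the start of the anchor
 * chunk, and end_side becomes TO_LEFT when it is below the end of
 * that chunk.  Their sum classifies the address in one step:
 *
 *	IN_MIDDLE  (TO_RIGHT + TO_LEFT):  inside the chunk -- found
 *	BOTH_RIGHT (TO_RIGHT + TO_RIGHT): above the chunk -- search right
 *	BOTH_LEFT  (TO_LEFT + TO_LEFT):   below the chunk -- search left
 */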

/*ARGSUSED*/
nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
    uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
    uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
    int bufsize;
    uint64_t pktbuf_pp;
    uint64_t dvma_addr;
    rxring_info_t *ring_info;
    int base_side, end_side;
    int r_index, l_index, anchor_index;
    int found, search_done;
    uint32_t offset, chunk_size, block_size, page_size_mask;
    uint32_t chunk_index, block_index, total_index;
    int max_iterations, iteration;
    rxbuf_index_info_t *bufinfo;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
        pkt_buf_addr_pp,
        pktbufsz_type));
#if defined(__i386)
    pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
    pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

    switch (pktbufsz_type) {
    case 0:
        bufsize = rbr_p->pkt_buf_size0;
        break;
    case 1:
        bufsize = rbr_p->pkt_buf_size1;
        break;
    case 2:
        bufsize = rbr_p->pkt_buf_size2;
        break;
    case RCR_SINGLE_BLOCK:
        bufsize = 0;
        anchor_index = 0;
        break;
    default:
        return (NXGE_ERROR);
    }

    if (rbr_p->num_blocks == 1) {
        anchor_index = 0;
        ring_info = rbr_p->ring_info;
        bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
            "buf_pp $%p btype %d anchor_index %d "
            "bufinfo $%p",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index,
            bufinfo));

        goto found_index;
    }

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: "
        "buf_pp $%p btype %d anchor_index %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        anchor_index));

    ring_info = rbr_p->ring_info;
    found = B_FALSE;
    bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
    iteration = 0;
    max_iterations = ring_info->max_iterations;
    /*
     * First check if this block has been seen recently.
     * This is indicated by a hint which is initialized when
     * the first buffer of the block is seen.  The hint is
     * reset when the last buffer of the block has been processed.
     * As three block sizes are supported, three hints are kept.
     * The idea behind the hints is that once the hardware uses
     * a block for a buffer of a given size, it will use it
     * exclusively for that size until the block is exhausted.
     * It is assumed that there would be a single block being
     * used for the same buffer size at any given time.
     */
    if (ring_info->hint[pktbufsz_type] != NO_HINT) {
        anchor_index = ring_info->hint[pktbufsz_type];
        dvma_addr = bufinfo[anchor_index].dvma_addr;
        chunk_size = bufinfo[anchor_index].buf_size;
        if ((pktbuf_pp >= dvma_addr) &&
            (pktbuf_pp < (dvma_addr + chunk_size))) {
            found = B_TRUE;
            /*
             * Check if this is the last buffer in the block.
             * If so, then reset the hint for this size.
             */
            if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
                ring_info->hint[pktbufsz_type] = NO_HINT;
        }
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (!found)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));

        /*
         * This is the first buffer of the block of this size.
         * Need to search the whole information array.
         * The search uses a binary search algorithm and assumes
         * that the information is already sorted in increasing
         * order:
         *	info[0] < info[1] < info[2] .... < info[n-1]
         * where n is the size of the information array.
         */
        r_index = rbr_p->num_blocks - 1;
        l_index = 0;
        search_done = B_FALSE;
        anchor_index = MID_INDEX(r_index, l_index);
        while (search_done == B_FALSE) {
            if ((r_index == l_index) ||
                (iteration >= max_iterations))
                search_done = B_TRUE;
            end_side = TO_RIGHT;	/* to the right */
            base_side = TO_LEFT;	/* to the left */
            /* read the DVMA address information and sort it */
            dvma_addr = bufinfo[anchor_index].dvma_addr;
            chunk_size = bufinfo[anchor_index].buf_size;
            NXGE_DEBUG_MSG((nxgep, RX2_CTL,
                "==> nxge_rxbuf_pp_to_vp: (searching)"
                "buf_pp $%p btype %d "
                "anchor_index %d chunk_size %d dvmaaddr $%p",
                pkt_buf_addr_pp,
                pktbufsz_type,
                anchor_index,
                chunk_size,
                dvma_addr));

            if (pktbuf_pp >= dvma_addr)
                base_side = TO_RIGHT;	/* to the right */
            if (pktbuf_pp < (dvma_addr + chunk_size))
                end_side = TO_LEFT;	/* to the left */

            switch (base_side + end_side) {
            case IN_MIDDLE:
                /* found */
                found = B_TRUE;
                search_done = B_TRUE;
                if ((pktbuf_pp + bufsize) <
                    (dvma_addr + chunk_size))
                    ring_info->hint[pktbufsz_type] =
                        bufinfo[anchor_index].buf_index;
                break;
            case BOTH_RIGHT:
                /* not found: go to the right */
                l_index = anchor_index + 1;
                anchor_index = MID_INDEX(r_index, l_index);
                break;

            case BOTH_LEFT:
                /* not found: go to the left */
                r_index = anchor_index - 1;
                anchor_index = MID_INDEX(r_index, l_index);
                break;
            default:	/* should not come here */
                return (NXGE_ERROR);
            }
            iteration++;
        }

        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (search done)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (search failed)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));
        return (NXGE_ERROR);
    }

found_index:
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
        "buf_pp $%p btype %d bufsize %d anchor_index %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index));

    /* index of the first block in this chunk */
    chunk_index = bufinfo[anchor_index].start_index;
    dvma_addr = bufinfo[anchor_index].dvma_addr;
    page_size_mask = ring_info->block_size_mask;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
        "buf_pp $%p btype %d bufsize %d "
        "anchor_index %d chunk_index %d dvma $%p",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index,
        chunk_index,
        dvma_addr));

    offset = pktbuf_pp - dvma_addr;	/* offset within the chunk */
    block_size = rbr_p->block_size;	/* System block(page) size */

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
        "buf_pp $%p btype %d bufsize %d "
        "anchor_index %d chunk_index %d dvma $%p "
        "offset %d block_size %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index,
        chunk_index,
        dvma_addr,
        offset,
        block_size));

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));

    block_index = (offset / block_size);	/* index within chunk */
    total_index = chunk_index + block_index;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: "
        "total_index %d dvma_addr $%p "
        "offset %d block_size %d "
        "block_index %d ",
        total_index, dvma_addr,
        offset, block_size,
        block_index));
#if defined(__i386)
    *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
        (uint32_t)offset);
#else
    *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
        (uint64_t)offset);
#endif

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: "
        "total_index %d dvma_addr $%p "
        "offset %d block_size %d "
        "block_index %d "
        "*pkt_buf_addr_p $%p",
        total_index, dvma_addr,
        offset, block_size,
        block_index,
        *pkt_buf_addr_p));

    *msg_index = total_index;
    *bufoffset = (offset & page_size_mask);

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: get msg index: "
        "msg_index %d bufoffset_index %d",
        *msg_index,
        *bufoffset));

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));

    return (NXGE_OK);
}

/*
 * Comparison function used by the sort routine (nxge_ksort) below to
 * order receive buffer chunks by DVMA address.
 */
static int
nxge_sort_compare(const void *p1, const void *p2)
{
    rxbuf_index_info_t *a, *b;

    a = (rxbuf_index_info_t *)p1;
    b = (rxbuf_index_info_t *)p2;

    if (a->dvma_addr > b->dvma_addr)
        return (1);
    if (a->dvma_addr < b->dvma_addr)
        return (-1);
    return (0);
}

/*
 * grabbed this sort implementation from common/syscall/avl.c
 */
/*
 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
 * v = Ptr to array/vector of objs
 * n = # objs in the array
 * s = size of each obj (must be multiples of a word size)
 * f = ptr to function to compare two objs
 *	returns (-1 = less than, 0 = equal, 1 = greater than)
 */
void
nxge_ksort(caddr_t v, int n, int s, int (*f)())
{
    int g, i, j, ii;
    unsigned int *p1, *p2;
    unsigned int tmp;

    /* No work to do */
    if (v == NULL || n <= 1)
        return;
    /* Sanity check on arguments */
    ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
    ASSERT(s > 0);

    for (g = n / 2; g > 0; g /= 2) {
        for (i = g; i < n; i++) {
            for (j = i - g; j >= 0 &&
                (*f)(v + j * s, v + (j + g) * s) == 1;
                j -= g) {
                p1 = (unsigned *)(v + j * s);
                p2 = (unsigned *)(v + (j + g) * s);
                for (ii = 0; ii < s / 4; ii++) {
                    tmp = *p1;
                    *p1++ = *p2;
                    *p2++ = tmp;
                }
            }
        }
    }
}
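
/*
 * nxge_rxbuf_index_info_init() below sorts the per-chunk DVMA address
 * table and derives max_iterations, the bound used by the binary
 * search in nxge_rxbuf_pp_to_vp().  The loop increments max_iteration
 * until (1 << max_iteration) exceeds the number of chunks, then adds
 * one; for example, with 8 chunks the loop stops at max_iteration = 4
 * (8 >= 1, 2, 4 and 8, but 8 < 16), giving max_iterations = 5.
 */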

/*
 * Initialize data structures required for rxdma
 * buffer dvma->vmem address lookup
 */
/*ARGSUSED*/
static nxge_status_t
nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
{
    int index;
    rxring_info_t *ring_info;
    int max_iteration = 0, max_index = 0;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));

    ring_info = rbrp->ring_info;
    ring_info->hint[0] = NO_HINT;
    ring_info->hint[1] = NO_HINT;
    ring_info->hint[2] = NO_HINT;
    max_index = rbrp->num_blocks;

    /* read the DVMA address information and sort it */
    /* do init of the information array */

    NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
        " nxge_rxbuf_index_info_init Sort ptrs"));

    /* sort the array */
    nxge_ksort((void *)ring_info->buffer, max_index,
        sizeof (rxbuf_index_info_t), nxge_sort_compare);

    for (index = 0; index < max_index; index++) {
        NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
            " nxge_rxbuf_index_info_init: sorted chunk %d "
            " ioaddr $%p kaddr $%p size %x",
            index, ring_info->buffer[index].dvma_addr,
            ring_info->buffer[index].kaddr,
            ring_info->buffer[index].buf_size));
    }

    max_iteration = 0;
    while (max_index >= (1ULL << max_iteration))
        max_iteration++;
    ring_info->max_iterations = max_iteration + 1;
    NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
        " nxge_rxbuf_index_info_init Find max iter %d",
        ring_info->max_iterations));

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
    return (NXGE_OK);
}

/* ARGSUSED */
void
nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
{
#ifdef NXGE_DEBUG
    uint32_t bptr;
    uint64_t pp;

    bptr = entry_p->bits.hdw.pkt_buf_addr;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "\trcr entry $%p "
        "\trcr entry 0x%0llx "
        "\trcr entry 0x%08x "
        "\trcr entry 0x%08x "
        "\tvalue 0x%0llx\n"
        "\tmulti = %d\n"
        "\tpkt_type = 0x%x\n"
        "\tzero_copy = %d\n"
        "\tnoport = %d\n"
        "\tpromis = %d\n"
        "\terror = 0x%04x\n"
        "\tdcf_err = 0x%01x\n"
        "\tl2_len = %d\n"
        "\tpktbufsize = %d\n"
        "\tpkt_buf_addr = $%p\n"
        "\tpkt_buf_addr (<< 6) = $%p\n",
        entry_p,
        *(int64_t *)entry_p,
        *(int32_t *)entry_p,
        *(int32_t *)((char *)entry_p + 32),
        entry_p->value,
        entry_p->bits.hdw.multi,
        entry_p->bits.hdw.pkt_type,
        entry_p->bits.hdw.zero_copy,
        entry_p->bits.hdw.noport,
        entry_p->bits.hdw.promis,
        entry_p->bits.hdw.error,
        entry_p->bits.hdw.dcf_err,
        entry_p->bits.hdw.l2_len,
        entry_p->bits.hdw.pktbufsz,
        bptr,
        entry_p->bits.ldw.pkt_buf_addr));

    pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
        RCR_PKT_BUF_ADDR_SHIFT;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
        pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}

void
nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
{
    npi_handle_t handle;
    rbr_stat_t rbr_stat;
    addr44_t hd_addr;
    addr44_t tail_addr;
    uint16_t qlen;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /* RBR head */
    hd_addr.addr = 0;
    (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
#if defined(__i386)
    printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
        (void *)(uint32_t)hd_addr.addr);
#else
    printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
        (void *)hd_addr.addr);
#endif

    /* RBR stats */
    (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
    printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);

    /* RCR tail */
    tail_addr.addr = 0;
    (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
#if defined(__i386)
    printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
        (void *)(uint32_t)tail_addr.addr);
#else
    printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
        (void *)tail_addr.addr);
#endif

    /* RCR qlen */
    (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
    printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
}
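
/*
 * nxge_rxdma_hw_mode
 *
 *	Enable or disable every RXDMA channel owned by this instance;
 *	called, for example, from nxge_hw_start_rx() below.
 *	(This summary is derived from the function body that follows.)
 *
 * Arguments:
 *	nxgep
 *	enable		Nonzero to enable the channels, zero to disable.
 */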
nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    nxge_status_t status;
    npi_status_t rs;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_hw_mode: mode %d", enable));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_mode: not initialized"));
        return (NXGE_ERROR);
    }

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_rxdma_hw_mode: "
            "NULL ring pointer(s)"));
        return (NXGE_ERROR);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_hw_mode: no channels"));
        return (NULL);
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
            if (ring) {
                if (enable) {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (enable)", rdc));
                    rs = npi_rxdma_cfg_rdc_enable
                        (handle, rdc);
                } else {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (disable)", rdc));
                    rs = npi_rxdma_cfg_rdc_disable
                        (handle, rdc);
                }
            }
        }
    }

    status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "<== nxge_rxdma_hw_mode: status 0x%x", status));

    return (status);
}

void
nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_rxdma_enable_channel: channel %d", channel));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    (void) npi_rxdma_cfg_rdc_enable(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
}

void
nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_rxdma_disable_channel: channel %d", channel));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    (void) npi_rxdma_cfg_rdc_disable(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
}

void
nxge_hw_start_rx(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));

    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_rx_mac_enable(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
}

/*ARGSUSED*/
void
nxge_fixup_rxdma_rings(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_fixup_rxdma_rings: "
            "NULL ring pointer(s)"));
        return;
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_fixup_rxdma_rings: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            if (ring) {
                nxge_rxdma_hw_stop(nxgep, rdc);
                NXGE_DEBUG_MSG((nxgep, RX_CTL,
                    "==> nxge_fixup_rxdma_rings: "
                    "channel %d ring $%px",
                    rdc, ring));
                (void) nxge_rxdma_fix_channel(nxgep, rdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}
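
/*
 * nxge_rxdma_fix_channel
 *
 *	Stop an RXDMA channel, reset its receive block ring and
 *	completion ring indices, zero the completion descriptor area,
 *	and restart the channel.
 *	(This summary is derived from the function body below.)
 *
 * Arguments:
 *	nxgep
 *	channel		The channel to fix up.
 */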
nxge_rxdma_fix_channel(nxgep, rdc); 1339678453a8Sspeer } 1340678453a8Sspeer } 13416f45ec7bSml29623 } 13426f45ec7bSml29623 13436f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 13446f45ec7bSml29623 } 13456f45ec7bSml29623 13466f45ec7bSml29623 void 13476f45ec7bSml29623 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 13486f45ec7bSml29623 { 13496f45ec7bSml29623 int ndmas; 13506f45ec7bSml29623 p_rx_rbr_rings_t rx_rbr_rings; 13516f45ec7bSml29623 p_rx_rbr_ring_t *rbr_rings; 13526f45ec7bSml29623 p_rx_rcr_rings_t rx_rcr_rings; 13536f45ec7bSml29623 p_rx_rcr_ring_t *rcr_rings; 13546f45ec7bSml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 13556f45ec7bSml29623 p_rx_mbox_t *rx_mbox_p; 13566f45ec7bSml29623 p_nxge_dma_pool_t dma_buf_poolp; 13576f45ec7bSml29623 p_nxge_dma_pool_t dma_cntl_poolp; 13586f45ec7bSml29623 p_rx_rbr_ring_t rbrp; 13596f45ec7bSml29623 p_rx_rcr_ring_t rcrp; 13606f45ec7bSml29623 p_rx_mbox_t mboxp; 13616f45ec7bSml29623 p_nxge_dma_common_t dmap; 13626f45ec7bSml29623 nxge_status_t status = NXGE_OK; 13636f45ec7bSml29623 13643587e8e2SMichael Speer NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 13656f45ec7bSml29623 13666f45ec7bSml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 13676f45ec7bSml29623 13686f45ec7bSml29623 dma_buf_poolp = nxgep->rx_buf_pool_p; 13696f45ec7bSml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 13706f45ec7bSml29623 13716f45ec7bSml29623 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 13726f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13733587e8e2SMichael Speer "<== nxge_rxdma_fix_channel: buf not allocated")); 13746f45ec7bSml29623 return; 13756f45ec7bSml29623 } 13766f45ec7bSml29623 13776f45ec7bSml29623 ndmas = dma_buf_poolp->ndmas; 13786f45ec7bSml29623 if (!ndmas) { 13796f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13803587e8e2SMichael Speer "<== nxge_rxdma_fix_channel: no dma allocated")); 13816f45ec7bSml29623 return; 13826f45ec7bSml29623 } 13836f45ec7bSml29623 13846f45ec7bSml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 13856f45ec7bSml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 13866f45ec7bSml29623 rbr_rings = rx_rbr_rings->rbr_rings; 13876f45ec7bSml29623 rcr_rings = rx_rcr_rings->rcr_rings; 13886f45ec7bSml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 13896f45ec7bSml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 13906f45ec7bSml29623 13916f45ec7bSml29623 /* Reinitialize the receive block and completion rings */ 13923587e8e2SMichael Speer rbrp = (p_rx_rbr_ring_t)rbr_rings[channel], 13933587e8e2SMichael Speer rcrp = (p_rx_rcr_ring_t)rcr_rings[channel], 13943587e8e2SMichael Speer mboxp = (p_rx_mbox_t)rx_mbox_p[channel]; 13956f45ec7bSml29623 13966f45ec7bSml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 13976f45ec7bSml29623 rbrp->rbr_rd_index = 0; 13986f45ec7bSml29623 rcrp->comp_rd_index = 0; 13996f45ec7bSml29623 rcrp->comp_wt_index = 0; 14006f45ec7bSml29623 14016f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 14026f45ec7bSml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 14036f45ec7bSml29623 14046f45ec7bSml29623 status = nxge_rxdma_start_channel(nxgep, channel, 14056f45ec7bSml29623 rbrp, rcrp, mboxp); 14066f45ec7bSml29623 if (status != NXGE_OK) { 14073587e8e2SMichael Speer goto nxge_rxdma_fix_channel_fail; 1408da14cebeSEric Cheng } 1409da14cebeSEric Cheng 1410da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14113587e8e2SMichael Speer "<== nxge_rxdma_fix_channel: success (0x%08x)", status)); 14123587e8e2SMichael Speer return; 1413da14cebeSEric Cheng 14143587e8e2SMichael Speer 
nxge_rxdma_fix_channel_fail: 1415da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14163587e8e2SMichael Speer "<== nxge_rxdma_fix_channel: failed (0x%08x)", status)); 14176f45ec7bSml29623 } 14186f45ec7bSml29623 14196f45ec7bSml29623 p_rx_rbr_ring_t 14206f45ec7bSml29623 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 14216f45ec7bSml29623 { 1422678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1423678453a8Sspeer nxge_channel_t rdc; 14246f45ec7bSml29623 14256f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14266f45ec7bSml29623 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 14276f45ec7bSml29623 1428678453a8Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 1429678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1430678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: " 1431678453a8Sspeer "NULL ring pointer(s)")); 14326f45ec7bSml29623 return (NULL); 14336f45ec7bSml29623 } 14346f45ec7bSml29623 1435678453a8Sspeer if (set->owned.map == 0) { 14366f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1437678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 1438678453a8Sspeer return (NULL); 1439678453a8Sspeer } 14406f45ec7bSml29623 1441678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1442678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1443678453a8Sspeer rx_rbr_ring_t *ring = 1444678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 1445678453a8Sspeer if (ring) { 1446678453a8Sspeer if (channel == ring->rdc) { 14476f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1448678453a8Sspeer "==> nxge_rxdma_get_rbr_ring: " 1449678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1450678453a8Sspeer return (ring); 1451678453a8Sspeer } 1452678453a8Sspeer } 14536f45ec7bSml29623 } 14546f45ec7bSml29623 } 14556f45ec7bSml29623 14566f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14576f45ec7bSml29623 "<== nxge_rxdma_get_rbr_ring: not found")); 14586f45ec7bSml29623 14596f45ec7bSml29623 return (NULL); 14606f45ec7bSml29623 } 14616f45ec7bSml29623 14626f45ec7bSml29623 p_rx_rcr_ring_t 14636f45ec7bSml29623 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 14646f45ec7bSml29623 { 1465678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 1466678453a8Sspeer nxge_channel_t rdc; 14676f45ec7bSml29623 14686f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14696f45ec7bSml29623 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 14706f45ec7bSml29623 1471678453a8Sspeer if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 1472678453a8Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 1473678453a8Sspeer "<== nxge_rxdma_get_rcr_ring: " 1474678453a8Sspeer "NULL ring pointer(s)")); 14756f45ec7bSml29623 return (NULL); 14766f45ec7bSml29623 } 14776f45ec7bSml29623 1478678453a8Sspeer if (set->owned.map == 0) { 14796f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1480678453a8Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 1481678453a8Sspeer return (NULL); 1482678453a8Sspeer } 14836f45ec7bSml29623 1484678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 1485678453a8Sspeer if ((1 << rdc) & set->owned.map) { 1486678453a8Sspeer rx_rcr_ring_t *ring = 1487678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc]; 1488678453a8Sspeer if (ring) { 1489678453a8Sspeer if (channel == ring->rdc) { 14906f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 1491678453a8Sspeer "==> nxge_rxdma_get_rcr_ring: " 1492678453a8Sspeer "channel %d ring $%p", rdc, ring)); 1493678453a8Sspeer return (ring); 1494678453a8Sspeer } 1495678453a8Sspeer } 14966f45ec7bSml29623 } 14976f45ec7bSml29623 } 14986f45ec7bSml29623 14996f45ec7bSml29623 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 15006f45ec7bSml29623 "<== nxge_rxdma_get_rcr_ring: not found")); 15016f45ec7bSml29623 15026f45ec7bSml29623 return (NULL); 15036f45ec7bSml29623 } 15046f45ec7bSml29623 15056f45ec7bSml29623 /* 15066f45ec7bSml29623 * Static functions start here. 15076f45ec7bSml29623 */ 15086f45ec7bSml29623 static p_rx_msg_t 15096f45ec7bSml29623 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 15106f45ec7bSml29623 { 15116f45ec7bSml29623 p_rx_msg_t nxge_mp = NULL; 15126f45ec7bSml29623 p_nxge_dma_common_t dmamsg_p; 15136f45ec7bSml29623 uchar_t *buffer; 15146f45ec7bSml29623 15156f45ec7bSml29623 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 15166f45ec7bSml29623 if (nxge_mp == NULL) { 151756d930aeSspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 15186f45ec7bSml29623 "Allocation of a rx msg failed.")); 15196f45ec7bSml29623 goto nxge_allocb_exit; 15206f45ec7bSml29623 } 15216f45ec7bSml29623 15226f45ec7bSml29623 nxge_mp->use_buf_pool = B_FALSE; 15236f45ec7bSml29623 if (dmabuf_p) { 15246f45ec7bSml29623 nxge_mp->use_buf_pool = B_TRUE; 15256f45ec7bSml29623 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 15266f45ec7bSml29623 *dmamsg_p = *dmabuf_p; 15276f45ec7bSml29623 dmamsg_p->nblocks = 1; 15286f45ec7bSml29623 dmamsg_p->block_size = size; 15296f45ec7bSml29623 dmamsg_p->alength = size; 15306f45ec7bSml29623 buffer = (uchar_t *)dmabuf_p->kaddrp; 15316f45ec7bSml29623 15326f45ec7bSml29623 dmabuf_p->kaddrp = (void *) 15336f45ec7bSml29623 ((char *)dmabuf_p->kaddrp + size); 15346f45ec7bSml29623 dmabuf_p->ioaddr_pp = (void *) 15356f45ec7bSml29623 ((char *)dmabuf_p->ioaddr_pp + size); 15366f45ec7bSml29623 dmabuf_p->alength -= size; 15376f45ec7bSml29623 dmabuf_p->offset += size; 15386f45ec7bSml29623 dmabuf_p->dma_cookie.dmac_laddress += size; 15396f45ec7bSml29623 dmabuf_p->dma_cookie.dmac_size -= size; 15406f45ec7bSml29623 15416f45ec7bSml29623 } else { 15426f45ec7bSml29623 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 15436f45ec7bSml29623 if (buffer == NULL) { 154456d930aeSspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 15456f45ec7bSml29623 "Allocation of a receive page failed.")); 15466f45ec7bSml29623 goto nxge_allocb_fail1; 15476f45ec7bSml29623 } 15486f45ec7bSml29623 } 15496f45ec7bSml29623 15506f45ec7bSml29623 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 15516f45ec7bSml29623 if (nxge_mp->rx_mblk_p == NULL) { 155256d930aeSspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 15536f45ec7bSml29623 goto nxge_allocb_fail2; 15546f45ec7bSml29623 } 15556f45ec7bSml29623 15566f45ec7bSml29623 nxge_mp->buffer = buffer; 15576f45ec7bSml29623 nxge_mp->block_size = size; 15586f45ec7bSml29623 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 15596f45ec7bSml29623 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 15606f45ec7bSml29623 nxge_mp->ref_cnt = 1; 15616f45ec7bSml29623 nxge_mp->free = B_TRUE; 15626f45ec7bSml29623 nxge_mp->rx_use_bcopy = B_FALSE; 15636f45ec7bSml29623 15646f45ec7bSml29623 atomic_inc_32(&nxge_mblks_pending); 15656f45ec7bSml29623 15666f45ec7bSml29623 goto nxge_allocb_exit; 15676f45ec7bSml29623 15686f45ec7bSml29623 nxge_allocb_fail2: 15696f45ec7bSml29623 if (!nxge_mp->use_buf_pool) { 15706f45ec7bSml29623 KMEM_FREE(buffer, size); 15716f45ec7bSml29623 } 15726f45ec7bSml29623 15736f45ec7bSml29623 nxge_allocb_fail1: 15746f45ec7bSml29623 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 15756f45ec7bSml29623 nxge_mp = NULL; 15766f45ec7bSml29623 15776f45ec7bSml29623 nxge_allocb_exit: 15786f45ec7bSml29623 return (nxge_mp); 15796f45ec7bSml29623 } 15806f45ec7bSml29623 
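/*
 * nxge_dupb: wrap an existing receive buffer in a new mblk with desballoc()
 * (the zero-copy loan-up path). The buffer's reference count is bumped so
 * that nxge_freeb() knows when the block may be reposted or released.
 */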
15816f45ec7bSml29623 p_mblk_t 15826f45ec7bSml29623 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 15836f45ec7bSml29623 { 15846f45ec7bSml29623 p_mblk_t mp; 15856f45ec7bSml29623 15866f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 15876f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 15886f45ec7bSml29623 "offset = 0x%08X " 15896f45ec7bSml29623 "size = 0x%08X", 15906f45ec7bSml29623 nxge_mp, offset, size)); 15916f45ec7bSml29623 15926f45ec7bSml29623 mp = desballoc(&nxge_mp->buffer[offset], size, 15936f45ec7bSml29623 0, &nxge_mp->freeb); 15946f45ec7bSml29623 if (mp == NULL) { 15956f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 15966f45ec7bSml29623 goto nxge_dupb_exit; 15976f45ec7bSml29623 } 15986f45ec7bSml29623 atomic_inc_32(&nxge_mp->ref_cnt); 15996f45ec7bSml29623 16006f45ec7bSml29623 16016f45ec7bSml29623 nxge_dupb_exit: 16026f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16036f45ec7bSml29623 nxge_mp)); 16046f45ec7bSml29623 return (mp); 16056f45ec7bSml29623 } 16066f45ec7bSml29623 16076f45ec7bSml29623 p_mblk_t 16086f45ec7bSml29623 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 16096f45ec7bSml29623 { 16106f45ec7bSml29623 p_mblk_t mp; 16116f45ec7bSml29623 uchar_t *dp; 16126f45ec7bSml29623 16136f45ec7bSml29623 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 16146f45ec7bSml29623 if (mp == NULL) { 16156f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 16166f45ec7bSml29623 goto nxge_dupb_bcopy_exit; 16176f45ec7bSml29623 } 16186f45ec7bSml29623 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 16196f45ec7bSml29623 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 16206f45ec7bSml29623 mp->b_wptr = dp + size; 16216f45ec7bSml29623 16226f45ec7bSml29623 nxge_dupb_bcopy_exit: 16236f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16246f45ec7bSml29623 nxge_mp)); 16256f45ec7bSml29623 return (mp); 16266f45ec7bSml29623 } 16276f45ec7bSml29623 16286f45ec7bSml29623 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 16296f45ec7bSml29623 p_rx_msg_t rx_msg_p); 16306f45ec7bSml29623 16316f45ec7bSml29623 void 16326f45ec7bSml29623 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 16336f45ec7bSml29623 { 16346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 16356f45ec7bSml29623 16366f45ec7bSml29623 /* Reuse this buffer */ 16376f45ec7bSml29623 rx_msg_p->free = B_FALSE; 16386f45ec7bSml29623 rx_msg_p->cur_usage_cnt = 0; 16396f45ec7bSml29623 rx_msg_p->max_usage_cnt = 0; 16406f45ec7bSml29623 rx_msg_p->pkt_buf_size = 0; 16416f45ec7bSml29623 16426f45ec7bSml29623 if (rx_rbr_p->rbr_use_bcopy) { 16436f45ec7bSml29623 rx_msg_p->rx_use_bcopy = B_FALSE; 16446f45ec7bSml29623 atomic_dec_32(&rx_rbr_p->rbr_consumed); 16456f45ec7bSml29623 } 16466f45ec7bSml29623 16476f45ec7bSml29623 /* 16486f45ec7bSml29623 * Get the rbr header pointer and its offset index. 
16496f45ec7bSml29623 */ 16506f45ec7bSml29623 MUTEX_ENTER(&rx_rbr_p->post_lock); 16516f45ec7bSml29623 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 16526f45ec7bSml29623 rx_rbr_p->rbr_wrap_mask); 16536f45ec7bSml29623 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 16546f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->post_lock); 165530ac2e7bSml29623 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 165630ac2e7bSml29623 rx_rbr_p->rdc, 1); 16576f45ec7bSml29623 16586f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 16596f45ec7bSml29623 "<== nxge_post_page (channel %d post_next_index %d)", 16606f45ec7bSml29623 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 16616f45ec7bSml29623 16626f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 16636f45ec7bSml29623 } 16646f45ec7bSml29623 16656f45ec7bSml29623 void 16666f45ec7bSml29623 nxge_freeb(p_rx_msg_t rx_msg_p) 16676f45ec7bSml29623 { 16686f45ec7bSml29623 size_t size; 16696f45ec7bSml29623 uchar_t *buffer = NULL; 16706f45ec7bSml29623 int ref_cnt; 1671958cea9eSml29623 boolean_t free_state = B_FALSE; 16726f45ec7bSml29623 1673007969e0Stm144005 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 1674007969e0Stm144005 16756f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 16766f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 16776f45ec7bSml29623 "nxge_freeb:rx_msg_p = $%p (block pending %d)", 16786f45ec7bSml29623 rx_msg_p, nxge_mblks_pending)); 16796f45ec7bSml29623 1680958cea9eSml29623 /* 1681958cea9eSml29623 * First we need to get the free state, then 1682958cea9eSml29623 * atomic decrement the reference count to prevent 1683958cea9eSml29623 * the race condition with the interrupt thread that 1684958cea9eSml29623 * is processing a loaned up buffer block. 1685958cea9eSml29623 */ 1686958cea9eSml29623 free_state = rx_msg_p->free; 1687*1a5e258fSJosef 'Jeff' Sipek ref_cnt = atomic_dec_32_nv(&rx_msg_p->ref_cnt); 16886f45ec7bSml29623 if (!ref_cnt) { 168930ac2e7bSml29623 atomic_dec_32(&nxge_mblks_pending); 16906f45ec7bSml29623 buffer = rx_msg_p->buffer; 16916f45ec7bSml29623 size = rx_msg_p->block_size; 16926f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 16936f45ec7bSml29623 "will free: rx_msg_p = $%p (block pending %d)", 169456d930aeSspeer rx_msg_p, nxge_mblks_pending)); 16956f45ec7bSml29623 16966f45ec7bSml29623 if (!rx_msg_p->use_buf_pool) { 16976f45ec7bSml29623 KMEM_FREE(buffer, size); 16986f45ec7bSml29623 } 16996f45ec7bSml29623 17006f45ec7bSml29623 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 1701007969e0Stm144005 17023e82a89eSmisaki if (ring) { 17033e82a89eSmisaki /* 17043e82a89eSmisaki * Decrement the receive buffer ring's reference 17053e82a89eSmisaki * count, too. 17063e82a89eSmisaki */ 1707007969e0Stm144005 atomic_dec_32(&ring->rbr_ref_cnt); 1708007969e0Stm144005 1709007969e0Stm144005 /* 1710678453a8Sspeer * Free the receive buffer ring, if 1711007969e0Stm144005 * 1. all the receive buffers have been freed 1712007969e0Stm144005 * 2. and we are in the proper state (that is, 1713007969e0Stm144005 * we are not UNMAPPING). 1714007969e0Stm144005 */ 1715007969e0Stm144005 if (ring->rbr_ref_cnt == 0 && 1716007969e0Stm144005 ring->rbr_state == RBR_UNMAPPED) { 1717678453a8Sspeer /* 1718678453a8Sspeer * Free receive data buffers, 1719678453a8Sspeer * buffer index information 1720678453a8Sspeer * (rxring_info) and 1721678453a8Sspeer * the message block ring. 
1722678453a8Sspeer */ 1723678453a8Sspeer NXGE_DEBUG_MSG((NULL, RX_CTL, 1724678453a8Sspeer "nxge_freeb:rx_msg_p = $%p " 1725678453a8Sspeer "(block pending %d) free buffers", 1726678453a8Sspeer rx_msg_p, nxge_mblks_pending)); 1727678453a8Sspeer nxge_rxdma_databuf_free(ring); 1728678453a8Sspeer if (ring->ring_info) { 1729678453a8Sspeer KMEM_FREE(ring->ring_info, 1730678453a8Sspeer sizeof (rxring_info_t)); 1731678453a8Sspeer } 1732678453a8Sspeer 1733678453a8Sspeer if (ring->rx_msg_ring) { 1734678453a8Sspeer KMEM_FREE(ring->rx_msg_ring, 1735678453a8Sspeer ring->tnblocks * 1736678453a8Sspeer sizeof (p_rx_msg_t)); 1737678453a8Sspeer } 1738007969e0Stm144005 KMEM_FREE(ring, sizeof (*ring)); 1739007969e0Stm144005 } 17403e82a89eSmisaki } 17416f45ec7bSml29623 return; 17426f45ec7bSml29623 } 17436f45ec7bSml29623 17446f45ec7bSml29623 /* 17456f45ec7bSml29623 * Repost buffer. 17466f45ec7bSml29623 */ 17473e82a89eSmisaki if (free_state && (ref_cnt == 1) && ring) { 17486f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, 17496f45ec7bSml29623 "nxge_freeb: post page $%p:", rx_msg_p)); 1750007969e0Stm144005 if (ring->rbr_state == RBR_POSTING) 1751007969e0Stm144005 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 17526f45ec7bSml29623 } 17536f45ec7bSml29623 17546f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 17556f45ec7bSml29623 } 17566f45ec7bSml29623 17576f45ec7bSml29623 uint_t 17586f45ec7bSml29623 nxge_rx_intr(void *arg1, void *arg2) 17596f45ec7bSml29623 { 17606f45ec7bSml29623 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 17616f45ec7bSml29623 p_nxge_t nxgep = (p_nxge_t)arg2; 17626f45ec7bSml29623 p_nxge_ldg_t ldgp; 17636f45ec7bSml29623 uint8_t channel; 17646f45ec7bSml29623 npi_handle_t handle; 17656f45ec7bSml29623 rx_dma_ctl_stat_t cs; 176663f531d1SSriharsha Basavapatna p_rx_rcr_ring_t rcrp; 176748056c53SMichael Speer mblk_t *mp = NULL; 17686f45ec7bSml29623 17696f45ec7bSml29623 if (ldvp == NULL) { 17706f45ec7bSml29623 NXGE_DEBUG_MSG((NULL, INT_CTL, 17716f45ec7bSml29623 "<== nxge_rx_intr: arg2 $%p arg1 $%p", 17726f45ec7bSml29623 nxgep, ldvp)); 17736f45ec7bSml29623 return (DDI_INTR_CLAIMED); 17746f45ec7bSml29623 } 17756f45ec7bSml29623 17766f45ec7bSml29623 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 17776f45ec7bSml29623 nxgep = ldvp->nxgep; 17786f45ec7bSml29623 } 17791d36aa9eSspeer 17801d36aa9eSspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 17811d36aa9eSspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 17821d36aa9eSspeer NXGE_DEBUG_MSG((nxgep, INT_CTL, 17831d36aa9eSspeer "<== nxge_rx_intr: interface not started or intialized")); 17841d36aa9eSspeer return (DDI_INTR_CLAIMED); 17851d36aa9eSspeer } 17861d36aa9eSspeer 17876f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 17886f45ec7bSml29623 "==> nxge_rx_intr: arg2 $%p arg1 $%p", 17896f45ec7bSml29623 nxgep, ldvp)); 17906f45ec7bSml29623 17916f45ec7bSml29623 /* 1792e759c33aSMichael Speer * Get the PIO handle. 17936f45ec7bSml29623 */ 17946f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1795da14cebeSEric Cheng 1796e759c33aSMichael Speer /* 1797e759c33aSMichael Speer * Get the ring to enable us to process packets. 
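 * There is one RCR ring per RDC; ldvp->vdma_index selects the ring for
 * the channel that raised this interrupt.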
1798e759c33aSMichael Speer */ 179963f531d1SSriharsha Basavapatna rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 1800da14cebeSEric Cheng 1801da14cebeSEric Cheng /* 1802da14cebeSEric Cheng * The RCR ring lock must be held when packets 1803da14cebeSEric Cheng * are being processed and the hardware registers are 1804da14cebeSEric Cheng * being read or written to prevent race condition 1805da14cebeSEric Cheng * among the interrupt thread, the polling thread 1806da14cebeSEric Cheng * (will cause fatal errors such as rcrincon bit set) 1807da14cebeSEric Cheng * and the setting of the poll_flag. 1808da14cebeSEric Cheng */ 180963f531d1SSriharsha Basavapatna MUTEX_ENTER(&rcrp->lock); 1810da14cebeSEric Cheng 18116f45ec7bSml29623 /* 18126f45ec7bSml29623 * Get the control and status for this channel. 18136f45ec7bSml29623 */ 18146f45ec7bSml29623 channel = ldvp->channel; 18156f45ec7bSml29623 ldgp = ldvp->ldgp; 1816da14cebeSEric Cheng 18170dc2366fSVenugopal Iyer if (!isLDOMguest(nxgep) && (!rcrp->started)) { 1818da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, INT_CTL, 1819da14cebeSEric Cheng "<== nxge_rx_intr: channel is not started")); 1820e759c33aSMichael Speer 1821e759c33aSMichael Speer /* 1822e759c33aSMichael Speer * We received an interrupt before the ring is started. 1823e759c33aSMichael Speer */ 1824e759c33aSMichael Speer RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, 1825e759c33aSMichael Speer &cs.value); 1826e759c33aSMichael Speer cs.value &= RX_DMA_CTL_STAT_WR1C; 1827e759c33aSMichael Speer cs.bits.hdw.mex = 1; 1828e759c33aSMichael Speer RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1829e759c33aSMichael Speer cs.value); 1830e759c33aSMichael Speer 1831e759c33aSMichael Speer /* 1832e759c33aSMichael Speer * Rearm this logical group if this is a single device 1833e759c33aSMichael Speer * group. 1834e759c33aSMichael Speer */ 1835e759c33aSMichael Speer if (ldgp->nldvs == 1) { 1836e759c33aSMichael Speer if (isLDOMguest(nxgep)) { 1837e759c33aSMichael Speer nxge_hio_ldgimgn(nxgep, ldgp); 1838e759c33aSMichael Speer } else { 1839e759c33aSMichael Speer ldgimgm_t mgm; 1840e759c33aSMichael Speer 1841e759c33aSMichael Speer mgm.value = 0; 1842e759c33aSMichael Speer mgm.bits.ldw.arm = 1; 1843e759c33aSMichael Speer mgm.bits.ldw.timer = ldgp->ldg_timer; 1844e759c33aSMichael Speer 1845e759c33aSMichael Speer NXGE_REG_WR64(handle, 1846e759c33aSMichael Speer LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1847e759c33aSMichael Speer mgm.value); 1848e759c33aSMichael Speer } 1849e759c33aSMichael Speer } 185063f531d1SSriharsha Basavapatna MUTEX_EXIT(&rcrp->lock); 1851da14cebeSEric Cheng return (DDI_INTR_CLAIMED); 1852da14cebeSEric Cheng } 1853da14cebeSEric Cheng 185463f531d1SSriharsha Basavapatna ASSERT(rcrp->ldgp == ldgp); 185563f531d1SSriharsha Basavapatna ASSERT(rcrp->ldvp == ldvp); 1856da14cebeSEric Cheng 18576f45ec7bSml29623 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 18586f45ec7bSml29623 18596f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 18606f45ec7bSml29623 "cs 0x%016llx rcrto 0x%x rcrthres %x", 18616f45ec7bSml29623 channel, 18626f45ec7bSml29623 cs.value, 18636f45ec7bSml29623 cs.bits.hdw.rcrto, 18646f45ec7bSml29623 cs.bits.hdw.rcrthres)); 18656f45ec7bSml29623 186663f531d1SSriharsha Basavapatna if (!rcrp->poll_flag) { 186763f531d1SSriharsha Basavapatna mp = nxge_rx_pkts(nxgep, rcrp, cs, -1); 186848056c53SMichael Speer } 18696f45ec7bSml29623 18706f45ec7bSml29623 /* error events. 
*/ 18716f45ec7bSml29623 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 1872678453a8Sspeer (void) nxge_rx_err_evnts(nxgep, channel, cs); 18736f45ec7bSml29623 } 18746f45ec7bSml29623 18756f45ec7bSml29623 /* 18766f45ec7bSml29623 * Enable the mailbox update interrupt if we want 18776f45ec7bSml29623 * to use mailbox. We probably don't need to use 18786f45ec7bSml29623 * mailbox as it only saves us one pio read. 18796f45ec7bSml29623 * Also write 1 to rcrthres and rcrto to clear 18806f45ec7bSml29623 * these two edge triggered bits. 18816f45ec7bSml29623 */ 18826f45ec7bSml29623 cs.value &= RX_DMA_CTL_STAT_WR1C; 188363f531d1SSriharsha Basavapatna cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1; 18846f45ec7bSml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 18856f45ec7bSml29623 cs.value); 18866f45ec7bSml29623 18876f45ec7bSml29623 /* 1888da14cebeSEric Cheng * If the polling mode is enabled, disable the interrupt. 1889da14cebeSEric Cheng */ 189063f531d1SSriharsha Basavapatna if (rcrp->poll_flag) { 1891da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1892da14cebeSEric Cheng "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 1893da14cebeSEric Cheng "(disabling interrupts)", channel, ldgp, ldvp)); 189463f531d1SSriharsha Basavapatna 1895da14cebeSEric Cheng /* 1896da14cebeSEric Cheng * Disarm this logical group if this is a single device 18976f45ec7bSml29623 * group. 18986f45ec7bSml29623 */ 18996f45ec7bSml29623 if (ldgp->nldvs == 1) { 190063f531d1SSriharsha Basavapatna if (isLDOMguest(nxgep)) { 190163f531d1SSriharsha Basavapatna ldgp->arm = B_FALSE; 190263f531d1SSriharsha Basavapatna nxge_hio_ldgimgn(nxgep, ldgp); 190363f531d1SSriharsha Basavapatna } else { 19046f45ec7bSml29623 ldgimgm_t mgm; 19056f45ec7bSml29623 mgm.value = 0; 1906da14cebeSEric Cheng mgm.bits.ldw.arm = 0; 1907da14cebeSEric Cheng NXGE_REG_WR64(handle, 190863f531d1SSriharsha Basavapatna LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 190963f531d1SSriharsha Basavapatna mgm.value); 191063f531d1SSriharsha Basavapatna } 1911da14cebeSEric Cheng } 1912da14cebeSEric Cheng } else { 1913da14cebeSEric Cheng /* 191408ac1c49SNicolas Droux * Rearm this logical group if this is a single device 191508ac1c49SNicolas Droux * group. 
1916da14cebeSEric Cheng */ 1917da14cebeSEric Cheng if (ldgp->nldvs == 1) { 1918678453a8Sspeer if (isLDOMguest(nxgep)) { 1919678453a8Sspeer nxge_hio_ldgimgn(nxgep, ldgp); 1920678453a8Sspeer } else { 1921da14cebeSEric Cheng ldgimgm_t mgm; 1922da14cebeSEric Cheng 1923da14cebeSEric Cheng mgm.value = 0; 1924da14cebeSEric Cheng mgm.bits.ldw.arm = 1; 1925da14cebeSEric Cheng mgm.bits.ldw.timer = ldgp->ldg_timer; 1926da14cebeSEric Cheng 19276f45ec7bSml29623 NXGE_REG_WR64(handle, 19286f45ec7bSml29623 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 19296f45ec7bSml29623 mgm.value); 19306f45ec7bSml29623 } 1931678453a8Sspeer } 19326f45ec7bSml29623 1933da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 1934da14cebeSEric Cheng "==> nxge_rx_intr: rdc %d ldgp $%p " 1935da14cebeSEric Cheng "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 1936da14cebeSEric Cheng } 193763f531d1SSriharsha Basavapatna MUTEX_EXIT(&rcrp->lock); 1938da14cebeSEric Cheng 193948056c53SMichael Speer if (mp != NULL) { 194063f531d1SSriharsha Basavapatna mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp, 194163f531d1SSriharsha Basavapatna rcrp->rcr_gen_num); 1942da14cebeSEric Cheng } 1943da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 1944da14cebeSEric Cheng return (DDI_INTR_CLAIMED); 19456f45ec7bSml29623 } 19466f45ec7bSml29623 19476f45ec7bSml29623 /* 19486f45ec7bSml29623 * This routine is the main packet receive processing function. 19496f45ec7bSml29623 * It gets the packet type, error code, and buffer related 19506f45ec7bSml29623 * information from the receive completion entry. 19516f45ec7bSml29623 * How many completion entries to process is based on the number of packets 19526f45ec7bSml29623 * queued by the hardware, a hardware maintained tail pointer 19536f45ec7bSml29623 * and a configurable receive packet count. 19546f45ec7bSml29623 * 19556f45ec7bSml29623 * A chain of message blocks will be created as result of processing 19566f45ec7bSml29623 * the completion entries. This chain of message blocks will be returned and 19576f45ec7bSml29623 * a hardware control status register will be updated with the number of 19586f45ec7bSml29623 * packets were removed from the hardware queue. 19596f45ec7bSml29623 * 1960da14cebeSEric Cheng * The RCR ring lock is held when entering this function. 
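 *
 * The bytes_to_pickup argument is a byte budget: a positive value stops
 * completion processing once that many bytes have been chained, while -1
 * (as passed from the interrupt path) removes the byte limit so that only
 * the nxge_max_rx_pkts packet cap applies.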
19616f45ec7bSml29623 */ 1962678453a8Sspeer static mblk_t * 1963678453a8Sspeer nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 1964678453a8Sspeer int bytes_to_pickup) 19656f45ec7bSml29623 { 19666f45ec7bSml29623 npi_handle_t handle; 19676f45ec7bSml29623 uint8_t channel; 19686f45ec7bSml29623 uint32_t comp_rd_index; 19696f45ec7bSml29623 p_rcr_entry_t rcr_desc_rd_head_p; 19706f45ec7bSml29623 p_rcr_entry_t rcr_desc_rd_head_pp; 19716f45ec7bSml29623 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 19726f45ec7bSml29623 uint16_t qlen, nrcr_read, npkt_read; 19736f45ec7bSml29623 uint32_t qlen_hw; 19746f45ec7bSml29623 boolean_t multi; 19756f45ec7bSml29623 rcrcfig_b_t rcr_cfg_b; 1976678453a8Sspeer int totallen = 0; 19776f45ec7bSml29623 #if defined(_BIG_ENDIAN) 19786f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 19796f45ec7bSml29623 #endif 19806f45ec7bSml29623 1981da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 1982678453a8Sspeer "channel %d", rcr_p->rdc)); 19836f45ec7bSml29623 19846f45ec7bSml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 19856f45ec7bSml29623 return (NULL); 19866f45ec7bSml29623 } 19876f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 19886f45ec7bSml29623 channel = rcr_p->rdc; 19896f45ec7bSml29623 19906f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 19916f45ec7bSml29623 "==> nxge_rx_pkts: START: rcr channel %d " 19926f45ec7bSml29623 "head_p $%p head_pp $%p index %d ", 19936f45ec7bSml29623 channel, rcr_p->rcr_desc_rd_head_p, 19946f45ec7bSml29623 rcr_p->rcr_desc_rd_head_pp, 19956f45ec7bSml29623 rcr_p->comp_rd_index)); 19966f45ec7bSml29623 19976f45ec7bSml29623 19986f45ec7bSml29623 #if !defined(_BIG_ENDIAN) 19996f45ec7bSml29623 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 20006f45ec7bSml29623 #else 20016f45ec7bSml29623 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 20026f45ec7bSml29623 if (rs != NPI_SUCCESS) { 2003678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 20046f45ec7bSml29623 "channel %d, get qlen failed 0x%08x", 2005678453a8Sspeer channel, rs)); 20066f45ec7bSml29623 return (NULL); 20076f45ec7bSml29623 } 20086f45ec7bSml29623 #endif 20096f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 20106f45ec7bSml29623 "qlen %d", channel, qlen)); 20116f45ec7bSml29623 20126f45ec7bSml29623 20136f45ec7bSml29623 20146f45ec7bSml29623 if (!qlen) { 2015da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 20166f45ec7bSml29623 "==> nxge_rx_pkts:rcr channel %d " 20176f45ec7bSml29623 "qlen %d (no pkts)", channel, qlen)); 20186f45ec7bSml29623 20196f45ec7bSml29623 return (NULL); 20206f45ec7bSml29623 } 20216f45ec7bSml29623 20226f45ec7bSml29623 comp_rd_index = rcr_p->comp_rd_index; 20236f45ec7bSml29623 20246f45ec7bSml29623 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 20256f45ec7bSml29623 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 20266f45ec7bSml29623 nrcr_read = npkt_read = 0; 20276f45ec7bSml29623 20286f45ec7bSml29623 /* 20296f45ec7bSml29623 * Number of packets queued 20306f45ec7bSml29623 * (The jumbo or multi packet will be counted as only one 20316f45ec7bSml29623 * packets and it may take up more than one completion entry). 20326f45ec7bSml29623 */ 20336f45ec7bSml29623 qlen_hw = (qlen < nxge_max_rx_pkts) ? 
20346f45ec7bSml29623 qlen : nxge_max_rx_pkts; 20356f45ec7bSml29623 head_mp = NULL; 20366f45ec7bSml29623 tail_mp = &head_mp; 20376f45ec7bSml29623 nmp = mp_cont = NULL; 20386f45ec7bSml29623 multi = B_FALSE; 20396f45ec7bSml29623 20406f45ec7bSml29623 while (qlen_hw) { 20416f45ec7bSml29623 20426f45ec7bSml29623 #ifdef NXGE_DEBUG 20436f45ec7bSml29623 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 20446f45ec7bSml29623 #endif 20456f45ec7bSml29623 /* 20466f45ec7bSml29623 * Process one completion ring entry. 20476f45ec7bSml29623 */ 20486f45ec7bSml29623 nxge_receive_packet(nxgep, 20496f45ec7bSml29623 rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 20506f45ec7bSml29623 20516f45ec7bSml29623 /* 20526f45ec7bSml29623 * message chaining modes 20536f45ec7bSml29623 */ 20546f45ec7bSml29623 if (nmp) { 20556f45ec7bSml29623 nmp->b_next = NULL; 20566f45ec7bSml29623 if (!multi && !mp_cont) { /* frame fits a partition */ 20576f45ec7bSml29623 *tail_mp = nmp; 20586f45ec7bSml29623 tail_mp = &nmp->b_next; 2059678453a8Sspeer totallen += MBLKL(nmp); 20606f45ec7bSml29623 nmp = NULL; 20616f45ec7bSml29623 } else if (multi && !mp_cont) { /* first segment */ 20626f45ec7bSml29623 *tail_mp = nmp; 20636f45ec7bSml29623 tail_mp = &nmp->b_cont; 2064678453a8Sspeer totallen += MBLKL(nmp); 20656f45ec7bSml29623 } else if (multi && mp_cont) { /* mid of multi segs */ 20666f45ec7bSml29623 *tail_mp = mp_cont; 20676f45ec7bSml29623 tail_mp = &mp_cont->b_cont; 2068678453a8Sspeer totallen += MBLKL(mp_cont); 20696f45ec7bSml29623 } else if (!multi && mp_cont) { /* last segment */ 20706f45ec7bSml29623 *tail_mp = mp_cont; 20716f45ec7bSml29623 tail_mp = &nmp->b_next; 2072678453a8Sspeer totallen += MBLKL(mp_cont); 20736f45ec7bSml29623 nmp = NULL; 20746f45ec7bSml29623 } 20756f45ec7bSml29623 } 20766f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20776f45ec7bSml29623 "==> nxge_rx_pkts: loop: rcr channel %d " 20786f45ec7bSml29623 "before updating: multi %d " 20796f45ec7bSml29623 "nrcr_read %d " 20806f45ec7bSml29623 "npk read %d " 20816f45ec7bSml29623 "head_pp $%p index %d ", 20826f45ec7bSml29623 channel, 20836f45ec7bSml29623 multi, 20846f45ec7bSml29623 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 20856f45ec7bSml29623 comp_rd_index)); 20866f45ec7bSml29623 20876f45ec7bSml29623 if (!multi) { 20886f45ec7bSml29623 qlen_hw--; 20896f45ec7bSml29623 npkt_read++; 20906f45ec7bSml29623 } 20916f45ec7bSml29623 20926f45ec7bSml29623 /* 20936f45ec7bSml29623 * Update the next read entry. 
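 * The completion index wraps through comp_wrap_mask, and the descriptor
 * pointer wraps from rcr_desc_last_p back to rcr_desc_first_p.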
20946f45ec7bSml29623 */ 20956f45ec7bSml29623 comp_rd_index = NEXT_ENTRY(comp_rd_index, 20966f45ec7bSml29623 rcr_p->comp_wrap_mask); 20976f45ec7bSml29623 20986f45ec7bSml29623 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 20996f45ec7bSml29623 rcr_p->rcr_desc_first_p, 21006f45ec7bSml29623 rcr_p->rcr_desc_last_p); 21016f45ec7bSml29623 21026f45ec7bSml29623 nrcr_read++; 21036f45ec7bSml29623 21046f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21056f45ec7bSml29623 "<== nxge_rx_pkts: (SAM, process one packet) " 21066f45ec7bSml29623 "nrcr_read %d", 21076f45ec7bSml29623 nrcr_read)); 21086f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21096f45ec7bSml29623 "==> nxge_rx_pkts: loop: rcr channel %d " 21106f45ec7bSml29623 "multi %d " 21116f45ec7bSml29623 "nrcr_read %d " 21126f45ec7bSml29623 "npk read %d " 21136f45ec7bSml29623 "head_pp $%p index %d ", 21146f45ec7bSml29623 channel, 21156f45ec7bSml29623 multi, 21166f45ec7bSml29623 nrcr_read, npkt_read, rcr_desc_rd_head_pp, 21176f45ec7bSml29623 comp_rd_index)); 21186f45ec7bSml29623 2119678453a8Sspeer if ((bytes_to_pickup != -1) && 2120678453a8Sspeer (totallen >= bytes_to_pickup)) { 2121678453a8Sspeer break; 2122678453a8Sspeer } 21236f45ec7bSml29623 } 21246f45ec7bSml29623 21256f45ec7bSml29623 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 21266f45ec7bSml29623 rcr_p->comp_rd_index = comp_rd_index; 21276f45ec7bSml29623 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 21286f45ec7bSml29623 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 21296f45ec7bSml29623 (nxgep->intr_threshold != rcr_p->intr_threshold)) { 21307b26d9ffSSantwona Behera 21317b26d9ffSSantwona Behera rcr_p->intr_timeout = (nxgep->intr_timeout < 21327b26d9ffSSantwona Behera NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 21337b26d9ffSSantwona Behera nxgep->intr_timeout; 21347b26d9ffSSantwona Behera 21357b26d9ffSSantwona Behera rcr_p->intr_threshold = (nxgep->intr_threshold < 21367b26d9ffSSantwona Behera NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 21377b26d9ffSSantwona Behera nxgep->intr_threshold; 21387b26d9ffSSantwona Behera 21396f45ec7bSml29623 rcr_cfg_b.value = 0x0ULL; 21406f45ec7bSml29623 rcr_cfg_b.bits.ldw.entout = 1; 21416f45ec7bSml29623 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 21426f45ec7bSml29623 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 21437b26d9ffSSantwona Behera 21446f45ec7bSml29623 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 21456f45ec7bSml29623 channel, rcr_cfg_b.value); 21466f45ec7bSml29623 } 21476f45ec7bSml29623 21486f45ec7bSml29623 cs.bits.ldw.pktread = npkt_read; 21496f45ec7bSml29623 cs.bits.ldw.ptrread = nrcr_read; 21506f45ec7bSml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 21516f45ec7bSml29623 channel, cs.value); 21526f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21536f45ec7bSml29623 "==> nxge_rx_pkts: EXIT: rcr channel %d " 21546f45ec7bSml29623 "head_pp $%p index %016llx ", 21556f45ec7bSml29623 channel, 21566f45ec7bSml29623 rcr_p->rcr_desc_rd_head_pp, 21576f45ec7bSml29623 rcr_p->comp_rd_index)); 21586f45ec7bSml29623 /* 21596f45ec7bSml29623 * Update RCR buffer pointer read and number of packets 21606f45ec7bSml29623 * read. 
21616f45ec7bSml29623 */ 21626f45ec7bSml29623 2163da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 2164da14cebeSEric Cheng "channel %d", rcr_p->rdc)); 2165da14cebeSEric Cheng 21666f45ec7bSml29623 return (head_mp); 21676f45ec7bSml29623 } 21686f45ec7bSml29623 21696f45ec7bSml29623 void 21706f45ec7bSml29623 nxge_receive_packet(p_nxge_t nxgep, 21716f45ec7bSml29623 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 21726f45ec7bSml29623 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 21736f45ec7bSml29623 { 21746f45ec7bSml29623 p_mblk_t nmp = NULL; 21756f45ec7bSml29623 uint64_t multi; 21766f45ec7bSml29623 uint64_t dcf_err; 21776f45ec7bSml29623 uint8_t channel; 21786f45ec7bSml29623 21796f45ec7bSml29623 boolean_t first_entry = B_TRUE; 21806f45ec7bSml29623 boolean_t is_tcp_udp = B_FALSE; 21816f45ec7bSml29623 boolean_t buffer_free = B_FALSE; 21826f45ec7bSml29623 boolean_t error_send_up = B_FALSE; 21836f45ec7bSml29623 uint8_t error_type; 21846f45ec7bSml29623 uint16_t l2_len; 21856f45ec7bSml29623 uint16_t skip_len; 21866f45ec7bSml29623 uint8_t pktbufsz_type; 21876f45ec7bSml29623 uint64_t rcr_entry; 21886f45ec7bSml29623 uint64_t *pkt_buf_addr_pp; 21896f45ec7bSml29623 uint64_t *pkt_buf_addr_p; 21906f45ec7bSml29623 uint32_t buf_offset; 21916f45ec7bSml29623 uint32_t bsize; 21926f45ec7bSml29623 uint32_t error_disp_cnt; 21936f45ec7bSml29623 uint32_t msg_index; 21946f45ec7bSml29623 p_rx_rbr_ring_t rx_rbr_p; 21956f45ec7bSml29623 p_rx_msg_t *rx_msg_ring_p; 21966f45ec7bSml29623 p_rx_msg_t rx_msg_p; 21976f45ec7bSml29623 uint16_t sw_offset_bytes = 0, hdr_size = 0; 21986f45ec7bSml29623 nxge_status_t status = NXGE_OK; 21996f45ec7bSml29623 boolean_t is_valid = B_FALSE; 22006f45ec7bSml29623 p_nxge_rx_ring_stats_t rdc_stats; 22016f45ec7bSml29623 uint32_t bytes_read; 22026f45ec7bSml29623 uint64_t pkt_type; 22036f45ec7bSml29623 uint64_t frag; 22044202ea4bSsbehera boolean_t pkt_too_long_err = B_FALSE; 22056f45ec7bSml29623 #ifdef NXGE_DEBUG 22066f45ec7bSml29623 int dump_len; 22076f45ec7bSml29623 #endif 22086f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 22096f45ec7bSml29623 first_entry = (*mp == NULL) ? 
B_TRUE : B_FALSE; 22106f45ec7bSml29623 22116f45ec7bSml29623 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 22126f45ec7bSml29623 22136f45ec7bSml29623 multi = (rcr_entry & RCR_MULTI_MASK); 22146f45ec7bSml29623 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 22156f45ec7bSml29623 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 22166f45ec7bSml29623 22176f45ec7bSml29623 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 22186f45ec7bSml29623 frag = (rcr_entry & RCR_FRAG_MASK); 22196f45ec7bSml29623 22206f45ec7bSml29623 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 22216f45ec7bSml29623 22226f45ec7bSml29623 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 22236f45ec7bSml29623 RCR_PKTBUFSZ_SHIFT); 2224adfcba55Sjoycey #if defined(__i386) 2225adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 2226adfcba55Sjoycey RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 2227adfcba55Sjoycey #else 22286f45ec7bSml29623 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 22296f45ec7bSml29623 RCR_PKT_BUF_ADDR_SHIFT); 2230adfcba55Sjoycey #endif 22316f45ec7bSml29623 22326f45ec7bSml29623 channel = rcr_p->rdc; 22336f45ec7bSml29623 22346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22356f45ec7bSml29623 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 22366f45ec7bSml29623 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 22376f45ec7bSml29623 "error_type 0x%x pkt_type 0x%x " 22386f45ec7bSml29623 "pktbufsz_type %d ", 22396f45ec7bSml29623 rcr_desc_rd_head_p, 22406f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 22416f45ec7bSml29623 multi, 22426f45ec7bSml29623 error_type, 22436f45ec7bSml29623 pkt_type, 22446f45ec7bSml29623 pktbufsz_type)); 22456f45ec7bSml29623 22466f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22476f45ec7bSml29623 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 22486f45ec7bSml29623 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 22496f45ec7bSml29623 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 22506f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 22516f45ec7bSml29623 multi, 22526f45ec7bSml29623 error_type, 22536f45ec7bSml29623 pkt_type)); 22546f45ec7bSml29623 22556f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22566f45ec7bSml29623 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 22576f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 22586f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 22596f45ec7bSml29623 22606f45ec7bSml29623 /* get the stats ptr */ 22616f45ec7bSml29623 rdc_stats = rcr_p->rdc_stats; 22626f45ec7bSml29623 22636f45ec7bSml29623 if (!l2_len) { 22646f45ec7bSml29623 22656f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22666f45ec7bSml29623 "<== nxge_receive_packet: failed: l2 length is 0.")); 22676f45ec7bSml29623 return; 22686f45ec7bSml29623 } 22696f45ec7bSml29623 22704202ea4bSsbehera /* 2271da14cebeSEric Cheng * Software workaround for BMAC hardware limitation that allows 22724202ea4bSsbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 22734202ea4bSsbehera * instead of 0x2400 for jumbo. 22744202ea4bSsbehera */ 22754202ea4bSsbehera if (l2_len > nxgep->mac.maxframesize) { 22764202ea4bSsbehera pkt_too_long_err = B_TRUE; 22774202ea4bSsbehera } 22784202ea4bSsbehera 227956d930aeSspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. 
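 * Subtract ETHERFCSL so l2_len covers only the payload handed up the
 * stack; the FCS bytes themselves remain in the receive buffer.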
*/ 228056d930aeSspeer l2_len -= ETHERFCSL; 228156d930aeSspeer 22826f45ec7bSml29623 /* shift 6 bits to get the full io address */ 2283adfcba55Sjoycey #if defined(__i386) 2284adfcba55Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 2285adfcba55Sjoycey RCR_PKT_BUF_ADDR_SHIFT_FULL); 2286adfcba55Sjoycey #else 22876f45ec7bSml29623 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 22886f45ec7bSml29623 RCR_PKT_BUF_ADDR_SHIFT_FULL); 2289adfcba55Sjoycey #endif 22906f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22916f45ec7bSml29623 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 22926f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 22936f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 22946f45ec7bSml29623 22956f45ec7bSml29623 rx_rbr_p = rcr_p->rx_rbr_p; 22966f45ec7bSml29623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 22976f45ec7bSml29623 22986f45ec7bSml29623 if (first_entry) { 22996f45ec7bSml29623 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 23006f45ec7bSml29623 RXDMA_HDR_SIZE_DEFAULT); 23016f45ec7bSml29623 23026f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23036f45ec7bSml29623 "==> nxge_receive_packet: first entry 0x%016llx " 23046f45ec7bSml29623 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 23056f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 23066f45ec7bSml29623 hdr_size)); 23076f45ec7bSml29623 } 23086f45ec7bSml29623 23096f45ec7bSml29623 MUTEX_ENTER(&rx_rbr_p->lock); 23106f45ec7bSml29623 23116f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23126f45ec7bSml29623 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 23136f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 23146f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 23156f45ec7bSml29623 23166f45ec7bSml29623 /* 23176f45ec7bSml29623 * Packet buffer address in the completion entry points 23186f45ec7bSml29623 * to the starting buffer address (offset 0). 23196f45ec7bSml29623 * Use the starting buffer address to locate the corresponding 23206f45ec7bSml29623 * kernel address. 
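 * nxge_rxbuf_pp_to_vp() performs that lookup, returning the kernel
 * virtual address, the offset into the buffer block, and the index of
 * the rx_msg_t that owns the block.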
23216f45ec7bSml29623 */ 23226f45ec7bSml29623 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 23236f45ec7bSml29623 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 23246f45ec7bSml29623 &buf_offset, 23256f45ec7bSml29623 &msg_index); 23266f45ec7bSml29623 23276f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23286f45ec7bSml29623 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 23296f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 23306f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 23316f45ec7bSml29623 23326f45ec7bSml29623 if (status != NXGE_OK) { 23336f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->lock); 23346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23356f45ec7bSml29623 "<== nxge_receive_packet: found vaddr failed %d", 23366f45ec7bSml29623 status)); 23376f45ec7bSml29623 return; 23386f45ec7bSml29623 } 23396f45ec7bSml29623 23406f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23416f45ec7bSml29623 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 23426f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 23436f45ec7bSml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 23446f45ec7bSml29623 23456f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23466f45ec7bSml29623 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 23476f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 23486f45ec7bSml29623 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 23496f45ec7bSml29623 23506f45ec7bSml29623 rx_msg_p = rx_msg_ring_p[msg_index]; 23516f45ec7bSml29623 23526f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23536f45ec7bSml29623 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 23546f45ec7bSml29623 "full pkt_buf_addr_pp $%p l2_len %d", 23556f45ec7bSml29623 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 23566f45ec7bSml29623 23576f45ec7bSml29623 switch (pktbufsz_type) { 23586f45ec7bSml29623 case RCR_PKTBUFSZ_0: 23596f45ec7bSml29623 bsize = rx_rbr_p->pkt_buf_size0_bytes; 23606f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23616f45ec7bSml29623 "==> nxge_receive_packet: 0 buf %d", bsize)); 23626f45ec7bSml29623 break; 23636f45ec7bSml29623 case RCR_PKTBUFSZ_1: 23646f45ec7bSml29623 bsize = rx_rbr_p->pkt_buf_size1_bytes; 23656f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23666f45ec7bSml29623 "==> nxge_receive_packet: 1 buf %d", bsize)); 23676f45ec7bSml29623 break; 23686f45ec7bSml29623 case RCR_PKTBUFSZ_2: 23696f45ec7bSml29623 bsize = rx_rbr_p->pkt_buf_size2_bytes; 23706f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23716f45ec7bSml29623 "==> nxge_receive_packet: 2 buf %d", bsize)); 23726f45ec7bSml29623 break; 23736f45ec7bSml29623 case RCR_SINGLE_BLOCK: 23746f45ec7bSml29623 bsize = rx_msg_p->block_size; 23756f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23766f45ec7bSml29623 "==> nxge_receive_packet: single %d", bsize)); 23776f45ec7bSml29623 23786f45ec7bSml29623 break; 23796f45ec7bSml29623 default: 23806f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->lock); 23816f45ec7bSml29623 return; 23826f45ec7bSml29623 } 23836f45ec7bSml29623 23844df55fdeSJanie Lu switch (nxge_rdc_buf_offset) { 23854df55fdeSJanie Lu case SW_OFFSET_NO_OFFSET: 23864df55fdeSJanie Lu sw_offset_bytes = 0; 23874df55fdeSJanie Lu break; 23884df55fdeSJanie Lu case SW_OFFSET_64: 23894df55fdeSJanie Lu sw_offset_bytes = 64; 23904df55fdeSJanie Lu break; 23914df55fdeSJanie Lu case SW_OFFSET_128: 23924df55fdeSJanie Lu sw_offset_bytes = 128; 23934df55fdeSJanie Lu break; 23944df55fdeSJanie Lu case SW_OFFSET_192: 23954df55fdeSJanie Lu sw_offset_bytes = 192; 23964df55fdeSJanie Lu break; 23974df55fdeSJanie Lu case SW_OFFSET_256: 23984df55fdeSJanie Lu 
sw_offset_bytes = 256; 23994df55fdeSJanie Lu break; 24004df55fdeSJanie Lu case SW_OFFSET_320: 24014df55fdeSJanie Lu sw_offset_bytes = 320; 24024df55fdeSJanie Lu break; 24034df55fdeSJanie Lu case SW_OFFSET_384: 24044df55fdeSJanie Lu sw_offset_bytes = 384; 24054df55fdeSJanie Lu break; 24064df55fdeSJanie Lu case SW_OFFSET_448: 24074df55fdeSJanie Lu sw_offset_bytes = 448; 24084df55fdeSJanie Lu break; 24094df55fdeSJanie Lu default: 24104df55fdeSJanie Lu sw_offset_bytes = 0; 24114df55fdeSJanie Lu break; 24124df55fdeSJanie Lu } 24134df55fdeSJanie Lu 24146f45ec7bSml29623 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 24156f45ec7bSml29623 (buf_offset + sw_offset_bytes), 24166f45ec7bSml29623 (hdr_size + l2_len), 24176f45ec7bSml29623 DDI_DMA_SYNC_FORCPU); 24186f45ec7bSml29623 24196f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24206f45ec7bSml29623 "==> nxge_receive_packet: after first dump:usage count")); 24216f45ec7bSml29623 24226f45ec7bSml29623 if (rx_msg_p->cur_usage_cnt == 0) { 24236f45ec7bSml29623 if (rx_rbr_p->rbr_use_bcopy) { 24246f45ec7bSml29623 atomic_inc_32(&rx_rbr_p->rbr_consumed); 24256f45ec7bSml29623 if (rx_rbr_p->rbr_consumed < 24266f45ec7bSml29623 rx_rbr_p->rbr_threshold_hi) { 24276f45ec7bSml29623 if (rx_rbr_p->rbr_threshold_lo == 0 || 24286f45ec7bSml29623 ((rx_rbr_p->rbr_consumed >= 24296f45ec7bSml29623 rx_rbr_p->rbr_threshold_lo) && 24306f45ec7bSml29623 (rx_rbr_p->rbr_bufsize_type >= 24316f45ec7bSml29623 pktbufsz_type))) { 24326f45ec7bSml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 24336f45ec7bSml29623 } 24346f45ec7bSml29623 } else { 24356f45ec7bSml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 24366f45ec7bSml29623 } 24376f45ec7bSml29623 } 24386f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24396f45ec7bSml29623 "==> nxge_receive_packet: buf %d (new block) ", 24406f45ec7bSml29623 bsize)); 24416f45ec7bSml29623 24426f45ec7bSml29623 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 24436f45ec7bSml29623 rx_msg_p->pkt_buf_size = bsize; 24446f45ec7bSml29623 rx_msg_p->cur_usage_cnt = 1; 24456f45ec7bSml29623 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 24466f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24476f45ec7bSml29623 "==> nxge_receive_packet: buf %d " 24486f45ec7bSml29623 "(single block) ", 24496f45ec7bSml29623 bsize)); 24506f45ec7bSml29623 /* 24516f45ec7bSml29623 * Buffer can be reused once the free function 24526f45ec7bSml29623 * is called. 
24536f45ec7bSml29623 */ 24546f45ec7bSml29623 rx_msg_p->max_usage_cnt = 1; 24556f45ec7bSml29623 buffer_free = B_TRUE; 24566f45ec7bSml29623 } else { 24576f45ec7bSml29623 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 24586f45ec7bSml29623 if (rx_msg_p->max_usage_cnt == 1) { 24596f45ec7bSml29623 buffer_free = B_TRUE; 24606f45ec7bSml29623 } 24616f45ec7bSml29623 } 24626f45ec7bSml29623 } else { 24636f45ec7bSml29623 rx_msg_p->cur_usage_cnt++; 24646f45ec7bSml29623 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 24656f45ec7bSml29623 buffer_free = B_TRUE; 24666f45ec7bSml29623 } 24676f45ec7bSml29623 } 24686f45ec7bSml29623 24696f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24706f45ec7bSml29623 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 24716f45ec7bSml29623 msg_index, l2_len, 24726f45ec7bSml29623 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 24736f45ec7bSml29623 24744202ea4bSsbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 24756f45ec7bSml29623 rdc_stats->ierrors++; 24766f45ec7bSml29623 if (dcf_err) { 24776f45ec7bSml29623 rdc_stats->dcf_err++; 24786f45ec7bSml29623 #ifdef NXGE_DEBUG 24796f45ec7bSml29623 if (!rdc_stats->dcf_err) { 24806f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24816f45ec7bSml29623 "nxge_receive_packet: channel %d dcf_err rcr" 24826f45ec7bSml29623 " 0x%llx", channel, rcr_entry)); 24836f45ec7bSml29623 } 24846f45ec7bSml29623 #endif 24856f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 24866f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_DCF_ERR); 24874202ea4bSsbehera } else if (pkt_too_long_err) { 24884202ea4bSsbehera rdc_stats->pkt_too_long_err++; 24894202ea4bSsbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 24904202ea4bSsbehera " channel %d packet length [%d] > " 24914202ea4bSsbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 24924202ea4bSsbehera nxgep->mac.maxframesize)); 24936f45ec7bSml29623 } else { 24946f45ec7bSml29623 /* Update error stats */ 24956f45ec7bSml29623 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 24966f45ec7bSml29623 rdc_stats->errlog.compl_err_type = error_type; 24976f45ec7bSml29623 24986f45ec7bSml29623 switch (error_type) { 2499f6485eecSyc148097 /* 2500f6485eecSyc148097 * Do not send FMA ereport for RCR_L2_ERROR and 2501f6485eecSyc148097 * RCR_L4_CSUM_ERROR because most likely they indicate 2502f6485eecSyc148097 * back pressure rather than HW failures. 
2503f6485eecSyc148097 */
25046f45ec7bSml29623 case RCR_L2_ERROR:
25056f45ec7bSml29623 rdc_stats->l2_err++;
25066f45ec7bSml29623 if (rdc_stats->l2_err <
250753f3d8ecSyc148097 error_disp_cnt) {
250853f3d8ecSyc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
25096f45ec7bSml29623 " nxge_receive_packet:"
25106f45ec7bSml29623 " channel %d RCR L2_ERROR",
25116f45ec7bSml29623 channel));
251253f3d8ecSyc148097 }
25136f45ec7bSml29623 break;
25146f45ec7bSml29623 case RCR_L4_CSUM_ERROR:
25156f45ec7bSml29623 error_send_up = B_TRUE;
25166f45ec7bSml29623 rdc_stats->l4_cksum_err++;
25176f45ec7bSml29623 if (rdc_stats->l4_cksum_err <
251853f3d8ecSyc148097 error_disp_cnt) {
251953f3d8ecSyc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
25206f45ec7bSml29623 " nxge_receive_packet:"
25216f45ec7bSml29623 " channel %d"
252253f3d8ecSyc148097 " RCR L4_CSUM_ERROR", channel));
252353f3d8ecSyc148097 }
25246f45ec7bSml29623 break;
2525f6485eecSyc148097 /*
2526f6485eecSyc148097 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
2527f6485eecSyc148097 * RCR_ZCP_SOFT_ERROR because they reflect the same
2528f6485eecSyc148097 * FFLP and ZCP errors that have been reported by
2529f6485eecSyc148097 * nxge_fflp.c and nxge_zcp.c.
2530f6485eecSyc148097 */
25316f45ec7bSml29623 case RCR_FFLP_SOFT_ERROR:
25326f45ec7bSml29623 error_send_up = B_TRUE;
25336f45ec7bSml29623 rdc_stats->fflp_soft_err++;
25346f45ec7bSml29623 if (rdc_stats->fflp_soft_err <
253553f3d8ecSyc148097 error_disp_cnt) {
25366f45ec7bSml29623 NXGE_ERROR_MSG((nxgep,
25376f45ec7bSml29623 NXGE_ERR_CTL,
25386f45ec7bSml29623 " nxge_receive_packet:"
25396f45ec7bSml29623 " channel %d"
254053f3d8ecSyc148097 " RCR FFLP_SOFT_ERROR", channel));
254153f3d8ecSyc148097 }
25426f45ec7bSml29623 break;
25436f45ec7bSml29623 case RCR_ZCP_SOFT_ERROR:
25446f45ec7bSml29623 error_send_up = B_TRUE;
25456f45ec7bSml29623 rdc_stats->zcp_soft_err++;
25466f45ec7bSml29623 if (rdc_stats->zcp_soft_err <
25476f45ec7bSml29623 error_disp_cnt)
254853f3d8ecSyc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
254953f3d8ecSyc148097 " nxge_receive_packet: Channel %d"
255053f3d8ecSyc148097 " RCR ZCP_SOFT_ERROR", channel));
25516f45ec7bSml29623 break;
25526f45ec7bSml29623 default:
255353f3d8ecSyc148097 rdc_stats->rcr_unknown_err++;
255453f3d8ecSyc148097 if (rdc_stats->rcr_unknown_err
255553f3d8ecSyc148097 < error_disp_cnt) {
25566f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
255753f3d8ecSyc148097 " nxge_receive_packet: Channel %d"
255853f3d8ecSyc148097 " RCR entry 0x%llx error 0x%x",
255953f3d8ecSyc148097 channel, rcr_entry, error_type));
256053f3d8ecSyc148097 }
25616f45ec7bSml29623 break;
25626f45ec7bSml29623 }
25636f45ec7bSml29623 }
25646f45ec7bSml29623
25656f45ec7bSml29623 /*
25666f45ec7bSml29623 * Update and repost buffer block if max usage
25676f45ec7bSml29623 * count is reached.
25686f45ec7bSml29623 */ 25696f45ec7bSml29623 if (error_send_up == B_FALSE) { 2570958cea9eSml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 25716f45ec7bSml29623 if (buffer_free == B_TRUE) { 25726f45ec7bSml29623 rx_msg_p->free = B_TRUE; 25736f45ec7bSml29623 } 25746f45ec7bSml29623 25756f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->lock); 25766f45ec7bSml29623 nxge_freeb(rx_msg_p); 25776f45ec7bSml29623 return; 25786f45ec7bSml29623 } 25796f45ec7bSml29623 } 25806f45ec7bSml29623 25816f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25826f45ec7bSml29623 "==> nxge_receive_packet: DMA sync second ")); 25836f45ec7bSml29623 258453f3d8ecSyc148097 bytes_read = rcr_p->rcvd_pkt_bytes; 25856f45ec7bSml29623 skip_len = sw_offset_bytes + hdr_size; 25866f45ec7bSml29623 if (!rx_msg_p->rx_use_bcopy) { 2587958cea9eSml29623 /* 2588958cea9eSml29623 * For loaned up buffers, the driver reference count 2589958cea9eSml29623 * will be incremented first and then the free state. 2590958cea9eSml29623 */ 259153f3d8ecSyc148097 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 25926f45ec7bSml29623 if (first_entry) { 25936f45ec7bSml29623 nmp->b_rptr = &nmp->b_rptr[skip_len]; 259453f3d8ecSyc148097 if (l2_len < bsize - skip_len) { 25956f45ec7bSml29623 nmp->b_wptr = &nmp->b_rptr[l2_len]; 25966f45ec7bSml29623 } else { 259753f3d8ecSyc148097 nmp->b_wptr = &nmp->b_rptr[bsize 259853f3d8ecSyc148097 - skip_len]; 259953f3d8ecSyc148097 } 260053f3d8ecSyc148097 } else { 260153f3d8ecSyc148097 if (l2_len - bytes_read < bsize) { 26026f45ec7bSml29623 nmp->b_wptr = 26036f45ec7bSml29623 &nmp->b_rptr[l2_len - bytes_read]; 260453f3d8ecSyc148097 } else { 260553f3d8ecSyc148097 nmp->b_wptr = &nmp->b_rptr[bsize]; 26066f45ec7bSml29623 } 260753f3d8ecSyc148097 } 260853f3d8ecSyc148097 } 260953f3d8ecSyc148097 } else { 261053f3d8ecSyc148097 if (first_entry) { 261153f3d8ecSyc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 261253f3d8ecSyc148097 l2_len < bsize - skip_len ? 261353f3d8ecSyc148097 l2_len : bsize - skip_len); 261453f3d8ecSyc148097 } else { 261553f3d8ecSyc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 261653f3d8ecSyc148097 l2_len - bytes_read < bsize ? 261753f3d8ecSyc148097 l2_len - bytes_read : bsize); 261853f3d8ecSyc148097 } 261953f3d8ecSyc148097 } 262053f3d8ecSyc148097 if (nmp != NULL) { 2621f720bc57Syc148097 if (first_entry) { 2622f720bc57Syc148097 /* 2623f720bc57Syc148097 * Jumbo packets may be received with more than one 2624f720bc57Syc148097 * buffer, increment ipackets for the first entry only. 2625f720bc57Syc148097 */ 2626f720bc57Syc148097 rdc_stats->ipackets++; 2627f720bc57Syc148097 2628f720bc57Syc148097 /* Update ibytes for kstat. */ 2629f720bc57Syc148097 rdc_stats->ibytes += skip_len 2630f720bc57Syc148097 + l2_len < bsize ? l2_len : bsize; 2631f720bc57Syc148097 /* 2632f720bc57Syc148097 * Update the number of bytes read so far for the 2633f720bc57Syc148097 * current frame. 2634f720bc57Syc148097 */ 263553f3d8ecSyc148097 bytes_read = nmp->b_wptr - nmp->b_rptr; 2636f720bc57Syc148097 } else { 2637f720bc57Syc148097 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 
2638f720bc57Syc148097 l2_len - bytes_read : bsize;
26396f45ec7bSml29623 bytes_read += nmp->b_wptr - nmp->b_rptr;
2640f720bc57Syc148097 }
264153f3d8ecSyc148097 
26426f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
26436f45ec7bSml29623 "==> nxge_receive_packet after dupb: "
26446f45ec7bSml29623 "rbr consumed %d "
26456f45ec7bSml29623 "pktbufsz_type %d "
26466f45ec7bSml29623 "nmp $%p rptr $%p wptr $%p "
26476f45ec7bSml29623 "buf_offset %d bsize %d l2_len %d skip_len %d",
26486f45ec7bSml29623 rx_rbr_p->rbr_consumed,
26496f45ec7bSml29623 pktbufsz_type,
26506f45ec7bSml29623 nmp, nmp->b_rptr, nmp->b_wptr,
26516f45ec7bSml29623 buf_offset, bsize, l2_len, skip_len));
26526f45ec7bSml29623 } else {
26536f45ec7bSml29623 cmn_err(CE_WARN, "!nxge_receive_packet: "
26546f45ec7bSml29623 "update stats (error)");
26552e59129aSraghus atomic_inc_32(&rx_msg_p->ref_cnt);
26562e59129aSraghus if (buffer_free == B_TRUE) {
26572e59129aSraghus rx_msg_p->free = B_TRUE;
26582e59129aSraghus }
26592e59129aSraghus MUTEX_EXIT(&rx_rbr_p->lock);
26602e59129aSraghus nxge_freeb(rx_msg_p);
26612e59129aSraghus return;
26626f45ec7bSml29623 }
2663ee5416c9Syc148097 
26646f45ec7bSml29623 if (buffer_free == B_TRUE) {
26656f45ec7bSml29623 rx_msg_p->free = B_TRUE;
26666f45ec7bSml29623 }
2667f720bc57Syc148097 
26686f45ec7bSml29623 is_valid = (nmp != NULL);
266953f3d8ecSyc148097 
267053f3d8ecSyc148097 rcr_p->rcvd_pkt_bytes = bytes_read;
267153f3d8ecSyc148097 
26726f45ec7bSml29623 MUTEX_EXIT(&rx_rbr_p->lock);
26736f45ec7bSml29623 
26746f45ec7bSml29623 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
26756f45ec7bSml29623 atomic_inc_32(&rx_msg_p->ref_cnt);
26766f45ec7bSml29623 nxge_freeb(rx_msg_p);
26776f45ec7bSml29623 }
26786f45ec7bSml29623 
26796f45ec7bSml29623 if (is_valid) {
26806f45ec7bSml29623 nmp->b_cont = NULL;
26816f45ec7bSml29623 if (first_entry) {
26826f45ec7bSml29623 *mp = nmp;
26836f45ec7bSml29623 *mp_cont = NULL;
268453f3d8ecSyc148097 } else {
26856f45ec7bSml29623 *mp_cont = nmp;
26866f45ec7bSml29623 }
268753f3d8ecSyc148097 }
26886f45ec7bSml29623 
26896f45ec7bSml29623 /*
2690f720bc57Syc148097 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
2691f720bc57Syc148097 * If a packet is not fragmented and no error bit is set, then
2692f720bc57Syc148097 * L4 checksum is OK.
26936f45ec7bSml29623 */
2694f720bc57Syc148097 
26956f45ec7bSml29623 if (is_valid && !multi) {
2696678453a8Sspeer /*
2697b4d05839Sml29623 * If the checksum flag nxge_cksum_offload
2698b4d05839Sml29623 * is 1, TCP and UDP packets can be sent
2699678453a8Sspeer * up with good checksum. If the checksum flag
2700b4d05839Sml29623 * is set to 0, checksum reporting applies to
2701678453a8Sspeer * TCP packets only (workaround for a hardware bug).
2702b4d05839Sml29623 * If the checksum flag nxge_cksum_offload is
2703b4d05839Sml29623 * greater than 1, hardware checksum results are
2704b4d05839Sml29623 * not reported for either TCP or UDP packets.
2705678453a8Sspeer */
2706b4d05839Sml29623 if (nxge_cksum_offload == 1) {
27076f45ec7bSml29623 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
27086f45ec7bSml29623 pkt_type == RCR_PKT_IS_UDP) ?
27096f45ec7bSml29623 B_TRUE: B_FALSE);
2710b4d05839Sml29623 } else if (!nxge_cksum_offload) {
2711678453a8Sspeer /* TCP checksum only. */
2712678453a8Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
2713678453a8Sspeer B_TRUE: B_FALSE); 2714678453a8Sspeer } 27156f45ec7bSml29623 27166f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 27176f45ec7bSml29623 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 27186f45ec7bSml29623 is_valid, multi, is_tcp_udp, frag, error_type)); 27196f45ec7bSml29623 27206f45ec7bSml29623 if (is_tcp_udp && !frag && !error_type) { 27210dc2366fSVenugopal Iyer mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK); 27226f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 27236f45ec7bSml29623 "==> nxge_receive_packet: Full tcp/udp cksum " 27246f45ec7bSml29623 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 27256f45ec7bSml29623 "error %d", 27266f45ec7bSml29623 is_valid, multi, is_tcp_udp, frag, error_type)); 27276f45ec7bSml29623 } 27286f45ec7bSml29623 } 27296f45ec7bSml29623 27306f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 27316f45ec7bSml29623 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 27326f45ec7bSml29623 27336f45ec7bSml29623 *multi_p = (multi == RCR_MULTI_MASK); 27346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 27356f45ec7bSml29623 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 27366f45ec7bSml29623 *multi_p, nmp, *mp, *mp_cont)); 27376f45ec7bSml29623 } 27386f45ec7bSml29623 2739da14cebeSEric Cheng /* 2740da14cebeSEric Cheng * Enable polling for a ring. Interrupt for the ring is disabled when 2741da14cebeSEric Cheng * the nxge interrupt comes (see nxge_rx_intr). 2742da14cebeSEric Cheng */ 2743da14cebeSEric Cheng int 2744da14cebeSEric Cheng nxge_enable_poll(void *arg) 2745da14cebeSEric Cheng { 2746da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2747da14cebeSEric Cheng p_rx_rcr_ring_t ringp; 2748da14cebeSEric Cheng p_nxge_t nxgep; 2749da14cebeSEric Cheng p_nxge_ldg_t ldgp; 2750da14cebeSEric Cheng uint32_t channel; 2751da14cebeSEric Cheng 2752da14cebeSEric Cheng if (ring_handle == NULL) { 275363f531d1SSriharsha Basavapatna ASSERT(ring_handle != NULL); 2754da14cebeSEric Cheng return (0); 2755da14cebeSEric Cheng } 2756da14cebeSEric Cheng 2757da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2758da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2759da14cebeSEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 2760da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2761da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 2762da14cebeSEric Cheng ldgp = ringp->ldgp; 2763da14cebeSEric Cheng if (ldgp == NULL) { 2764da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2765da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 2766da14cebeSEric Cheng ringp->rdc)); 2767da14cebeSEric Cheng return (0); 2768da14cebeSEric Cheng } 2769da14cebeSEric Cheng 2770da14cebeSEric Cheng MUTEX_ENTER(&ringp->lock); 2771da14cebeSEric Cheng /* enable polling */ 2772da14cebeSEric Cheng if (ringp->poll_flag == 0) { 2773da14cebeSEric Cheng ringp->poll_flag = 1; 2774da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2775da14cebeSEric Cheng "==> nxge_enable_poll: rdc %d set poll flag to 1", 2776da14cebeSEric Cheng ringp->rdc)); 2777da14cebeSEric Cheng } 2778da14cebeSEric Cheng 2779da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2780da14cebeSEric Cheng return (0); 2781da14cebeSEric Cheng } 2782da14cebeSEric Cheng /* 2783da14cebeSEric Cheng * Disable polling for a ring and enable its interrupt. 
2784da14cebeSEric Cheng */
2785da14cebeSEric Cheng int
2786da14cebeSEric Cheng nxge_disable_poll(void *arg)
2787da14cebeSEric Cheng {
2788da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
2789da14cebeSEric Cheng p_rx_rcr_ring_t ringp;
2790da14cebeSEric Cheng p_nxge_t nxgep;
2791da14cebeSEric Cheng uint32_t channel;
2792da14cebeSEric Cheng 
2793da14cebeSEric Cheng if (ring_handle == NULL) {
279463f531d1SSriharsha Basavapatna ASSERT(ring_handle != NULL);
2795da14cebeSEric Cheng return (0);
2796da14cebeSEric Cheng }
2797da14cebeSEric Cheng 
2798da14cebeSEric Cheng nxgep = ring_handle->nxgep;
2799da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
2800da14cebeSEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
2801da14cebeSEric Cheng 
2802da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
2803da14cebeSEric Cheng "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc, ringp->poll_flag));
2804da14cebeSEric Cheng 
2805da14cebeSEric Cheng MUTEX_ENTER(&ringp->lock);
2806da14cebeSEric Cheng 
2807da14cebeSEric Cheng /* disable polling: enable interrupt */
2808da14cebeSEric Cheng if (ringp->poll_flag) {
2809da14cebeSEric Cheng npi_handle_t handle;
2810da14cebeSEric Cheng rx_dma_ctl_stat_t cs;
2811da14cebeSEric Cheng uint8_t channel;
2812da14cebeSEric Cheng p_nxge_ldg_t ldgp;
2813da14cebeSEric Cheng 
2814da14cebeSEric Cheng /*
2815da14cebeSEric Cheng * Get the control and status for this channel.
2816da14cebeSEric Cheng */
2817da14cebeSEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep);
2818da14cebeSEric Cheng channel = ringp->rdc;
2819da14cebeSEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
2820da14cebeSEric Cheng channel, &cs.value);
2821da14cebeSEric Cheng 
2822da14cebeSEric Cheng /*
2823da14cebeSEric Cheng * Enable mailbox update.
2824da14cebeSEric Cheng * Since packets were not read and the hardware uses
2825da14cebeSEric Cheng * bits pktread and ptrread to update the queue
2826da14cebeSEric Cheng * length, we need to set both bits to 0.
2827da14cebeSEric Cheng */
2828da14cebeSEric Cheng cs.bits.ldw.pktread = 0;
2829da14cebeSEric Cheng cs.bits.ldw.ptrread = 0;
2830da14cebeSEric Cheng cs.bits.hdw.mex = 1;
2831da14cebeSEric Cheng RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2832da14cebeSEric Cheng cs.value);
2833da14cebeSEric Cheng 
2834da14cebeSEric Cheng /*
2835da14cebeSEric Cheng * Rearm this logical group if this is a single device
2836da14cebeSEric Cheng * group.
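 * A group shared by more than one logical device is left alone here;
 * it is expected to be rearmed from the interrupt path instead.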
2837da14cebeSEric Cheng */ 2838da14cebeSEric Cheng ldgp = ringp->ldgp; 2839da14cebeSEric Cheng if (ldgp == NULL) { 2840da14cebeSEric Cheng ringp->poll_flag = 0; 2841da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2842da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2843da14cebeSEric Cheng "==> nxge_disable_poll: no ldgp rdc %d " 2844da14cebeSEric Cheng "(still set poll to 0", ringp->rdc)); 2845da14cebeSEric Cheng return (0); 2846da14cebeSEric Cheng } 2847da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2848da14cebeSEric Cheng "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 2849da14cebeSEric Cheng ringp->rdc, ldgp)); 2850da14cebeSEric Cheng if (ldgp->nldvs == 1) { 285163f531d1SSriharsha Basavapatna if (isLDOMguest(nxgep)) { 285263f531d1SSriharsha Basavapatna ldgp->arm = B_TRUE; 285363f531d1SSriharsha Basavapatna nxge_hio_ldgimgn(nxgep, ldgp); 285463f531d1SSriharsha Basavapatna } else { 2855da14cebeSEric Cheng ldgimgm_t mgm; 2856da14cebeSEric Cheng mgm.value = 0; 2857da14cebeSEric Cheng mgm.bits.ldw.arm = 1; 2858da14cebeSEric Cheng mgm.bits.ldw.timer = ldgp->ldg_timer; 2859da14cebeSEric Cheng NXGE_REG_WR64(handle, 286063f531d1SSriharsha Basavapatna LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 286163f531d1SSriharsha Basavapatna mgm.value); 286263f531d1SSriharsha Basavapatna } 2863da14cebeSEric Cheng } 2864da14cebeSEric Cheng ringp->poll_flag = 0; 2865da14cebeSEric Cheng } 2866da14cebeSEric Cheng 2867da14cebeSEric Cheng MUTEX_EXIT(&ringp->lock); 2868da14cebeSEric Cheng return (0); 2869da14cebeSEric Cheng } 2870da14cebeSEric Cheng 2871da14cebeSEric Cheng /* 2872da14cebeSEric Cheng * Poll 'bytes_to_pickup' bytes of message from the rx ring. 2873da14cebeSEric Cheng */ 2874da14cebeSEric Cheng mblk_t * 2875da14cebeSEric Cheng nxge_rx_poll(void *arg, int bytes_to_pickup) 2876da14cebeSEric Cheng { 2877da14cebeSEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 2878da14cebeSEric Cheng p_rx_rcr_ring_t rcr_p; 2879da14cebeSEric Cheng p_nxge_t nxgep; 2880da14cebeSEric Cheng npi_handle_t handle; 2881da14cebeSEric Cheng rx_dma_ctl_stat_t cs; 2882da14cebeSEric Cheng mblk_t *mblk; 2883da14cebeSEric Cheng p_nxge_ldv_t ldvp; 2884da14cebeSEric Cheng uint32_t channel; 2885da14cebeSEric Cheng 2886da14cebeSEric Cheng nxgep = ring_handle->nxgep; 2887da14cebeSEric Cheng 2888da14cebeSEric Cheng /* 2889da14cebeSEric Cheng * Get the control and status for this channel. 2890da14cebeSEric Cheng */ 2891da14cebeSEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 2892da14cebeSEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 2893da14cebeSEric Cheng rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 2894da14cebeSEric Cheng MUTEX_ENTER(&rcr_p->lock); 2895da14cebeSEric Cheng ASSERT(rcr_p->poll_flag == 1); 2896da14cebeSEric Cheng 2897da14cebeSEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 2898da14cebeSEric Cheng 2899da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2900da14cebeSEric Cheng "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 2901da14cebeSEric Cheng rcr_p->rdc, rcr_p->poll_flag)); 2902da14cebeSEric Cheng mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 2903da14cebeSEric Cheng 2904da14cebeSEric Cheng ldvp = rcr_p->ldvp; 2905da14cebeSEric Cheng /* error events. 
*/ 2906da14cebeSEric Cheng if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 2907da14cebeSEric Cheng (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 2908da14cebeSEric Cheng } 2909da14cebeSEric Cheng 2910da14cebeSEric Cheng MUTEX_EXIT(&rcr_p->lock); 2911da14cebeSEric Cheng 2912da14cebeSEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 2913da14cebeSEric Cheng "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 2914da14cebeSEric Cheng return (mblk); 2915da14cebeSEric Cheng } 2916da14cebeSEric Cheng 2917da14cebeSEric Cheng 29186f45ec7bSml29623 /*ARGSUSED*/ 29196f45ec7bSml29623 static nxge_status_t 2920678453a8Sspeer nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 29216f45ec7bSml29623 { 29226f45ec7bSml29623 p_nxge_rx_ring_stats_t rdc_stats; 29236f45ec7bSml29623 npi_handle_t handle; 29246f45ec7bSml29623 npi_status_t rs; 29256f45ec7bSml29623 boolean_t rxchan_fatal = B_FALSE; 29266f45ec7bSml29623 boolean_t rxport_fatal = B_FALSE; 29276f45ec7bSml29623 uint8_t portn; 29286f45ec7bSml29623 nxge_status_t status = NXGE_OK; 29296f45ec7bSml29623 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 29306f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 29316f45ec7bSml29623 29326f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 29336f45ec7bSml29623 portn = nxgep->mac.portnum; 2934678453a8Sspeer rdc_stats = &nxgep->statsp->rdc_stats[channel]; 29356f45ec7bSml29623 29366f45ec7bSml29623 if (cs.bits.hdw.rbr_tmout) { 29376f45ec7bSml29623 rdc_stats->rx_rbr_tmout++; 29386f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 29396f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 29406f45ec7bSml29623 rxchan_fatal = B_TRUE; 29416f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29426f45ec7bSml29623 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 29436f45ec7bSml29623 } 29446f45ec7bSml29623 if (cs.bits.hdw.rsp_cnt_err) { 29456f45ec7bSml29623 rdc_stats->rsp_cnt_err++; 29466f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 29476f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 29486f45ec7bSml29623 rxchan_fatal = B_TRUE; 29496f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29506f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 29516f45ec7bSml29623 "rsp_cnt_err", channel)); 29526f45ec7bSml29623 } 29536f45ec7bSml29623 if (cs.bits.hdw.byte_en_bus) { 29546f45ec7bSml29623 rdc_stats->byte_en_bus++; 29556f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 29566f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 29576f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29586f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 29596f45ec7bSml29623 "fatal error: byte_en_bus", channel)); 29606f45ec7bSml29623 rxchan_fatal = B_TRUE; 29616f45ec7bSml29623 } 29626f45ec7bSml29623 if (cs.bits.hdw.rsp_dat_err) { 29636f45ec7bSml29623 rdc_stats->rsp_dat_err++; 29646f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 29656f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 29666f45ec7bSml29623 rxchan_fatal = B_TRUE; 29676f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29686f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 29696f45ec7bSml29623 "fatal error: rsp_dat_err", channel)); 29706f45ec7bSml29623 } 29716f45ec7bSml29623 if (cs.bits.hdw.rcr_ack_err) { 29726f45ec7bSml29623 rdc_stats->rcr_ack_err++; 29736f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 29746f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 29756f45ec7bSml29623 rxchan_fatal = B_TRUE; 29766f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29776f45ec7bSml29623 "==> 
nxge_rx_err_evnts(channel %d): " 29786f45ec7bSml29623 "fatal error: rcr_ack_err", channel)); 29796f45ec7bSml29623 } 29806f45ec7bSml29623 if (cs.bits.hdw.dc_fifo_err) { 29816f45ec7bSml29623 rdc_stats->dc_fifo_err++; 29826f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 29836f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 29846f45ec7bSml29623 /* This is not a fatal error! */ 29856f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29866f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 29876f45ec7bSml29623 "dc_fifo_err", channel)); 29886f45ec7bSml29623 rxport_fatal = B_TRUE; 29896f45ec7bSml29623 } 29906f45ec7bSml29623 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 29916f45ec7bSml29623 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 29926f45ec7bSml29623 &rdc_stats->errlog.pre_par, 29936f45ec7bSml29623 &rdc_stats->errlog.sha_par)) 29946f45ec7bSml29623 != NPI_SUCCESS) { 29956f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29966f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 29976f45ec7bSml29623 "rcr_sha_par: get perr", channel)); 29986f45ec7bSml29623 return (NXGE_ERROR | rs); 29996f45ec7bSml29623 } 30006f45ec7bSml29623 if (cs.bits.hdw.rcr_sha_par) { 30016f45ec7bSml29623 rdc_stats->rcr_sha_par++; 30026f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30036f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 30046f45ec7bSml29623 rxchan_fatal = B_TRUE; 30056f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30066f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 30076f45ec7bSml29623 "fatal error: rcr_sha_par", channel)); 30086f45ec7bSml29623 } 30096f45ec7bSml29623 if (cs.bits.hdw.rbr_pre_par) { 30106f45ec7bSml29623 rdc_stats->rbr_pre_par++; 30116f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30126f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 30136f45ec7bSml29623 rxchan_fatal = B_TRUE; 30146f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30156f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 30166f45ec7bSml29623 "fatal error: rbr_pre_par", channel)); 30176f45ec7bSml29623 } 30186f45ec7bSml29623 } 301963e23a19Syc148097 /* 302063e23a19Syc148097 * The Following 4 status bits are for information, the system 302163e23a19Syc148097 * is running fine. There is no need to send FMA ereports or 302263e23a19Syc148097 * log messages. 
302363e23a19Syc148097 */ 30246f45ec7bSml29623 if (cs.bits.hdw.port_drop_pkt) { 30256f45ec7bSml29623 rdc_stats->port_drop_pkt++; 30266f45ec7bSml29623 } 30276f45ec7bSml29623 if (cs.bits.hdw.wred_drop) { 30286f45ec7bSml29623 rdc_stats->wred_drop++; 30296f45ec7bSml29623 } 30306f45ec7bSml29623 if (cs.bits.hdw.rbr_pre_empty) { 30316f45ec7bSml29623 rdc_stats->rbr_pre_empty++; 30326f45ec7bSml29623 } 30336f45ec7bSml29623 if (cs.bits.hdw.rcr_shadow_full) { 30346f45ec7bSml29623 rdc_stats->rcr_shadow_full++; 30356f45ec7bSml29623 } 30366f45ec7bSml29623 if (cs.bits.hdw.config_err) { 30376f45ec7bSml29623 rdc_stats->config_err++; 30386f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30396f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 30406f45ec7bSml29623 rxchan_fatal = B_TRUE; 30416f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30426f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 30436f45ec7bSml29623 "config error", channel)); 30446f45ec7bSml29623 } 30456f45ec7bSml29623 if (cs.bits.hdw.rcrincon) { 30466f45ec7bSml29623 rdc_stats->rcrincon++; 30476f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30486f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RCRINCON); 30496f45ec7bSml29623 rxchan_fatal = B_TRUE; 30506f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30516f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 30526f45ec7bSml29623 "fatal error: rcrincon error", channel)); 30536f45ec7bSml29623 } 30546f45ec7bSml29623 if (cs.bits.hdw.rcrfull) { 30556f45ec7bSml29623 rdc_stats->rcrfull++; 30566f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30576f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RCRFULL); 30586f45ec7bSml29623 rxchan_fatal = B_TRUE; 30596f45ec7bSml29623 if (rdc_stats->rcrfull < error_disp_cnt) 30606f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30616f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 30626f45ec7bSml29623 "fatal error: rcrfull error", channel)); 30636f45ec7bSml29623 } 30646f45ec7bSml29623 if (cs.bits.hdw.rbr_empty) { 306563e23a19Syc148097 /* 306663e23a19Syc148097 * This bit is for information, there is no need 306763e23a19Syc148097 * send FMA ereport or log a message. 
306863e23a19Syc148097 */ 30696f45ec7bSml29623 rdc_stats->rbr_empty++; 30706f45ec7bSml29623 } 30716f45ec7bSml29623 if (cs.bits.hdw.rbrfull) { 30726f45ec7bSml29623 rdc_stats->rbrfull++; 30736f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30746f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RBRFULL); 30756f45ec7bSml29623 rxchan_fatal = B_TRUE; 30766f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30776f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 30786f45ec7bSml29623 "fatal error: rbr_full error", channel)); 30796f45ec7bSml29623 } 30806f45ec7bSml29623 if (cs.bits.hdw.rbrlogpage) { 30816f45ec7bSml29623 rdc_stats->rbrlogpage++; 30826f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30836f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 30846f45ec7bSml29623 rxchan_fatal = B_TRUE; 30856f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30866f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 30876f45ec7bSml29623 "fatal error: rbr logical page error", channel)); 30886f45ec7bSml29623 } 30896f45ec7bSml29623 if (cs.bits.hdw.cfiglogpage) { 30906f45ec7bSml29623 rdc_stats->cfiglogpage++; 30916f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30926f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 30936f45ec7bSml29623 rxchan_fatal = B_TRUE; 30946f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30956f45ec7bSml29623 "==> nxge_rx_err_evnts(channel %d): " 30966f45ec7bSml29623 "fatal error: cfig logical page error", channel)); 30976f45ec7bSml29623 } 30986f45ec7bSml29623 30996f45ec7bSml29623 if (rxport_fatal) { 31006f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3101678453a8Sspeer " nxge_rx_err_evnts: fatal error on Port #%d\n", 31026f45ec7bSml29623 portn)); 3103678453a8Sspeer if (isLDOMguest(nxgep)) { 3104678453a8Sspeer status = NXGE_ERROR; 3105678453a8Sspeer } else { 31066f45ec7bSml29623 status = nxge_ipp_fatal_err_recover(nxgep); 31076f45ec7bSml29623 if (status == NXGE_OK) { 31086f45ec7bSml29623 FM_SERVICE_RESTORED(nxgep); 31096f45ec7bSml29623 } 31106f45ec7bSml29623 } 3111678453a8Sspeer } 31126f45ec7bSml29623 31136f45ec7bSml29623 if (rxchan_fatal) { 31146f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3115678453a8Sspeer " nxge_rx_err_evnts: fatal error on Channel #%d\n", 31166f45ec7bSml29623 channel)); 3117678453a8Sspeer if (isLDOMguest(nxgep)) { 3118678453a8Sspeer status = NXGE_ERROR; 3119678453a8Sspeer } else { 31206f45ec7bSml29623 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 31216f45ec7bSml29623 if (status == NXGE_OK) { 31226f45ec7bSml29623 FM_SERVICE_RESTORED(nxgep); 31236f45ec7bSml29623 } 31246f45ec7bSml29623 } 3125678453a8Sspeer } 31266f45ec7bSml29623 31276f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 31286f45ec7bSml29623 31296f45ec7bSml29623 return (status); 31306f45ec7bSml29623 } 31316f45ec7bSml29623 3132678453a8Sspeer /* 3133678453a8Sspeer * nxge_rdc_hvio_setup 3134678453a8Sspeer * 3135678453a8Sspeer * This code appears to setup some Hypervisor variables. 3136678453a8Sspeer * 3137678453a8Sspeer * Arguments: 3138678453a8Sspeer * nxgep 3139678453a8Sspeer * channel 3140678453a8Sspeer * 3141678453a8Sspeer * Notes: 3142678453a8Sspeer * What does NIU_LP_WORKAROUND mean? 
3143678453a8Sspeer * 3144678453a8Sspeer * NPI/NXGE function calls: 3145678453a8Sspeer * na 3146678453a8Sspeer * 3147678453a8Sspeer * Context: 3148678453a8Sspeer * Any domain 3149678453a8Sspeer */ 31506f45ec7bSml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 3151678453a8Sspeer static void 3152678453a8Sspeer nxge_rdc_hvio_setup( 3153678453a8Sspeer nxge_t *nxgep, int channel) 3154678453a8Sspeer { 3155678453a8Sspeer nxge_dma_common_t *dma_common; 3156678453a8Sspeer nxge_dma_common_t *dma_control; 3157678453a8Sspeer rx_rbr_ring_t *ring; 3158678453a8Sspeer 3159678453a8Sspeer ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3160678453a8Sspeer dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 3161678453a8Sspeer 3162678453a8Sspeer ring->hv_set = B_FALSE; 3163678453a8Sspeer 3164678453a8Sspeer ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 3165678453a8Sspeer dma_common->orig_ioaddr_pp; 3166678453a8Sspeer ring->hv_rx_buf_ioaddr_size = (uint64_t) 3167678453a8Sspeer dma_common->orig_alength; 3168678453a8Sspeer 3169678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3170678453a8Sspeer "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 3171678453a8Sspeer channel, ring->hv_rx_buf_base_ioaddr_pp, 3172678453a8Sspeer dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 3173678453a8Sspeer dma_common->orig_alength, dma_common->orig_alength)); 3174678453a8Sspeer 3175678453a8Sspeer dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 3176678453a8Sspeer 3177678453a8Sspeer ring->hv_rx_cntl_base_ioaddr_pp = 3178678453a8Sspeer (uint64_t)dma_control->orig_ioaddr_pp; 3179678453a8Sspeer ring->hv_rx_cntl_ioaddr_size = 3180678453a8Sspeer (uint64_t)dma_control->orig_alength; 3181678453a8Sspeer 3182678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 3183678453a8Sspeer "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 3184678453a8Sspeer channel, ring->hv_rx_cntl_base_ioaddr_pp, 3185678453a8Sspeer dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 3186678453a8Sspeer dma_control->orig_alength, dma_control->orig_alength)); 3187678453a8Sspeer } 31886f45ec7bSml29623 #endif 31896f45ec7bSml29623 3190678453a8Sspeer /* 3191678453a8Sspeer * nxge_map_rxdma 3192678453a8Sspeer * 3193678453a8Sspeer * Map an RDC into our kernel space. 3194678453a8Sspeer * 3195678453a8Sspeer * Arguments: 3196678453a8Sspeer * nxgep 3197678453a8Sspeer * channel The channel to map. 3198678453a8Sspeer * 3199678453a8Sspeer * Notes: 3200678453a8Sspeer * 1. Allocate & initialise a memory pool, if necessary. 3201678453a8Sspeer * 2. Allocate however many receive buffers are required. 3202678453a8Sspeer * 3. Setup buffers, descriptors, and mailbox. 
3203678453a8Sspeer *
3204678453a8Sspeer * NPI/NXGE function calls:
3205678453a8Sspeer * nxge_alloc_rx_mem_pool()
3206678453a8Sspeer * nxge_alloc_rxb()
3207678453a8Sspeer * nxge_map_rxdma_channel()
3208678453a8Sspeer *
3209678453a8Sspeer * Registers accessed:
3210678453a8Sspeer *
3211678453a8Sspeer * Context:
3212678453a8Sspeer * Any domain
3213678453a8Sspeer */
3214678453a8Sspeer static nxge_status_t
3215678453a8Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel)
3216678453a8Sspeer {
3217678453a8Sspeer nxge_dma_common_t **data;
3218678453a8Sspeer nxge_dma_common_t **control;
3219678453a8Sspeer rx_rbr_ring_t **rbr_ring;
3220678453a8Sspeer rx_rcr_ring_t **rcr_ring;
3221678453a8Sspeer rx_mbox_t **mailbox;
3222678453a8Sspeer uint32_t chunks;
3223678453a8Sspeer 
3224678453a8Sspeer nxge_status_t status;
3225678453a8Sspeer 
32266f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
32276f45ec7bSml29623 
3228678453a8Sspeer if (!nxgep->rx_buf_pool_p) {
3229678453a8Sspeer if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
32306f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32316f45ec7bSml29623 "<== nxge_map_rxdma: buf not allocated"));
32326f45ec7bSml29623 return (NXGE_ERROR);
32336f45ec7bSml29623 }
32346f45ec7bSml29623 }
32356f45ec7bSml29623 
3236678453a8Sspeer if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
3237678453a8Sspeer return (NXGE_ERROR);
32386f45ec7bSml29623 
32396f45ec7bSml29623 /*
3240678453a8Sspeer * Map descriptors from the buffer pools for each dma channel.
32416f45ec7bSml29623 */
3242678453a8Sspeer 
32436f45ec7bSml29623 /*
32446f45ec7bSml29623 * Set up and prepare buffer blocks, descriptors
32456f45ec7bSml29623 * and mailbox.
32466f45ec7bSml29623 */
3247678453a8Sspeer data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
3248678453a8Sspeer rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
3249678453a8Sspeer chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
3250678453a8Sspeer 
3251678453a8Sspeer control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
3252678453a8Sspeer rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
3253678453a8Sspeer 
3254678453a8Sspeer mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
3255678453a8Sspeer 
3256678453a8Sspeer status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
3257678453a8Sspeer chunks, control, rcr_ring, mailbox);
32586f45ec7bSml29623 if (status != NXGE_OK) {
3259678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3260678453a8Sspeer "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
3261678453a8Sspeer "returned 0x%x",
3262678453a8Sspeer channel, status));
3263678453a8Sspeer return (status);
32646f45ec7bSml29623 }
3265678453a8Sspeer nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
3266678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
3267678453a8Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
3268678453a8Sspeer &nxgep->statsp->rdc_stats[channel];
32696f45ec7bSml29623 
32706f45ec7bSml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
3271678453a8Sspeer if (!isLDOMguest(nxgep))
3272678453a8Sspeer nxge_rdc_hvio_setup(nxgep, channel);
3273678453a8Sspeer #endif
32746f45ec7bSml29623 
32756f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
3276678453a8Sspeer "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
32776f45ec7bSml29623 
32786f45ec7bSml29623 return (status);
32796f45ec7bSml29623 }
32806f45ec7bSml29623 
32816f45ec7bSml29623 static void
3282678453a8Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
32836f45ec7bSml29623 {
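/*
 * nxge_unmap_rxdma() is the inverse of nxge_map_rxdma() above: it unmaps
 * the descriptor rings and mailbox for one channel and then frees that
 * channel's receive buffers.  A minimal sketch of the intended
 * per-channel pairing (illustrative only, not the actual call sites):
 *
 *	if (nxge_map_rxdma(nxgep, channel) != NXGE_OK)
 *		return (NXGE_ERROR);
 *	...
 *	nxge_unmap_rxdma(nxgep, channel);
 */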
3284678453a8Sspeer rx_rbr_ring_t *rbr_ring; 3285678453a8Sspeer rx_rcr_ring_t *rcr_ring; 3286678453a8Sspeer rx_mbox_t *mailbox; 32876f45ec7bSml29623 3288678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 32896f45ec7bSml29623 3290678453a8Sspeer if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 3291678453a8Sspeer !nxgep->rx_mbox_areas_p) 32926f45ec7bSml29623 return; 32936f45ec7bSml29623 3294678453a8Sspeer rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 3295678453a8Sspeer rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 3296678453a8Sspeer mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 3297678453a8Sspeer 3298678453a8Sspeer if (!rbr_ring || !rcr_ring || !mailbox) 32996f45ec7bSml29623 return; 33006f45ec7bSml29623 3301678453a8Sspeer (void) nxge_unmap_rxdma_channel( 3302678453a8Sspeer nxgep, channel, rbr_ring, rcr_ring, mailbox); 33036f45ec7bSml29623 3304678453a8Sspeer nxge_free_rxb(nxgep, channel); 33056f45ec7bSml29623 3306678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 33076f45ec7bSml29623 } 33086f45ec7bSml29623 33096f45ec7bSml29623 nxge_status_t 33106f45ec7bSml29623 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 33116f45ec7bSml29623 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 33126f45ec7bSml29623 uint32_t num_chunks, 33136f45ec7bSml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 33146f45ec7bSml29623 p_rx_mbox_t *rx_mbox_p) 33156f45ec7bSml29623 { 33166f45ec7bSml29623 int status = NXGE_OK; 33176f45ec7bSml29623 33186f45ec7bSml29623 /* 33196f45ec7bSml29623 * Set up and prepare buffer blocks, descriptors 33206f45ec7bSml29623 * and mailbox. 33216f45ec7bSml29623 */ 33226f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 33236f45ec7bSml29623 "==> nxge_map_rxdma_channel (channel %d)", channel)); 33246f45ec7bSml29623 /* 33256f45ec7bSml29623 * Receive buffer blocks 33266f45ec7bSml29623 */ 33276f45ec7bSml29623 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 33286f45ec7bSml29623 dma_buf_p, rbr_p, num_chunks); 33296f45ec7bSml29623 if (status != NXGE_OK) { 33306f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33316f45ec7bSml29623 "==> nxge_map_rxdma_channel (channel %d): " 33326f45ec7bSml29623 "map buffer failed 0x%x", channel, status)); 33336f45ec7bSml29623 goto nxge_map_rxdma_channel_exit; 33346f45ec7bSml29623 } 33356f45ec7bSml29623 33366f45ec7bSml29623 /* 33376f45ec7bSml29623 * Receive block ring, completion ring and mailbox. 
33386f45ec7bSml29623 */ 33396f45ec7bSml29623 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 33406f45ec7bSml29623 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 33416f45ec7bSml29623 if (status != NXGE_OK) { 33426f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33436f45ec7bSml29623 "==> nxge_map_rxdma_channel (channel %d): " 33446f45ec7bSml29623 "map config failed 0x%x", channel, status)); 33456f45ec7bSml29623 goto nxge_map_rxdma_channel_fail2; 33466f45ec7bSml29623 } 33476f45ec7bSml29623 33486f45ec7bSml29623 goto nxge_map_rxdma_channel_exit; 33496f45ec7bSml29623 33506f45ec7bSml29623 nxge_map_rxdma_channel_fail3: 33516f45ec7bSml29623 /* Free rbr, rcr */ 33526f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33536f45ec7bSml29623 "==> nxge_map_rxdma_channel: free rbr/rcr " 33546f45ec7bSml29623 "(status 0x%x channel %d)", 33556f45ec7bSml29623 status, channel)); 33566f45ec7bSml29623 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 33576f45ec7bSml29623 *rcr_p, *rx_mbox_p); 33586f45ec7bSml29623 33596f45ec7bSml29623 nxge_map_rxdma_channel_fail2: 33606f45ec7bSml29623 /* Free buffer blocks */ 33616f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33626f45ec7bSml29623 "==> nxge_map_rxdma_channel: free rx buffers" 33636f45ec7bSml29623 "(nxgep 0x%x status 0x%x channel %d)", 33646f45ec7bSml29623 nxgep, status, channel)); 33656f45ec7bSml29623 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 33666f45ec7bSml29623 336756d930aeSspeer status = NXGE_ERROR; 336856d930aeSspeer 33696f45ec7bSml29623 nxge_map_rxdma_channel_exit: 33706f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 33716f45ec7bSml29623 "<== nxge_map_rxdma_channel: " 33726f45ec7bSml29623 "(nxgep 0x%x status 0x%x channel %d)", 33736f45ec7bSml29623 nxgep, status, channel)); 33746f45ec7bSml29623 33756f45ec7bSml29623 return (status); 33766f45ec7bSml29623 } 33776f45ec7bSml29623 33786f45ec7bSml29623 /*ARGSUSED*/ 33796f45ec7bSml29623 static void 33806f45ec7bSml29623 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 33816f45ec7bSml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 33826f45ec7bSml29623 { 33836f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 33846f45ec7bSml29623 "==> nxge_unmap_rxdma_channel (channel %d)", channel)); 33856f45ec7bSml29623 33866f45ec7bSml29623 /* 33876f45ec7bSml29623 * unmap receive block ring, completion ring and mailbox. 
33886f45ec7bSml29623 */ 33896f45ec7bSml29623 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 33906f45ec7bSml29623 rcr_p, rx_mbox_p); 33916f45ec7bSml29623 33926f45ec7bSml29623 /* unmap buffer blocks */ 33936f45ec7bSml29623 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 33946f45ec7bSml29623 33956f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 33966f45ec7bSml29623 } 33976f45ec7bSml29623 33986f45ec7bSml29623 /*ARGSUSED*/ 33996f45ec7bSml29623 static nxge_status_t 34006f45ec7bSml29623 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 34016f45ec7bSml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 34026f45ec7bSml29623 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 34036f45ec7bSml29623 { 34046f45ec7bSml29623 p_rx_rbr_ring_t rbrp; 34056f45ec7bSml29623 p_rx_rcr_ring_t rcrp; 34066f45ec7bSml29623 p_rx_mbox_t mboxp; 34076f45ec7bSml29623 p_nxge_dma_common_t cntl_dmap; 34086f45ec7bSml29623 p_nxge_dma_common_t dmap; 34096f45ec7bSml29623 p_rx_msg_t *rx_msg_ring; 34106f45ec7bSml29623 p_rx_msg_t rx_msg_p; 34116f45ec7bSml29623 p_rbr_cfig_a_t rcfga_p; 34126f45ec7bSml29623 p_rbr_cfig_b_t rcfgb_p; 34136f45ec7bSml29623 p_rcrcfig_a_t cfga_p; 34146f45ec7bSml29623 p_rcrcfig_b_t cfgb_p; 34156f45ec7bSml29623 p_rxdma_cfig1_t cfig1_p; 34166f45ec7bSml29623 p_rxdma_cfig2_t cfig2_p; 34176f45ec7bSml29623 p_rbr_kick_t kick_p; 34186f45ec7bSml29623 uint32_t dmaaddrp; 34196f45ec7bSml29623 uint32_t *rbr_vaddrp; 34206f45ec7bSml29623 uint32_t bkaddr; 34216f45ec7bSml29623 nxge_status_t status = NXGE_OK; 34226f45ec7bSml29623 int i; 34236f45ec7bSml29623 uint32_t nxge_port_rcr_size; 34246f45ec7bSml29623 34256f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34266f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring")); 34276f45ec7bSml29623 34286f45ec7bSml29623 cntl_dmap = *dma_cntl_p; 34296f45ec7bSml29623 34306f45ec7bSml29623 /* Map in the receive block ring */ 34316f45ec7bSml29623 rbrp = *rbr_p; 34326f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 34336f45ec7bSml29623 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 34346f45ec7bSml29623 /* 34356f45ec7bSml29623 * Zero out buffer block ring descriptors. 34366f45ec7bSml29623 */ 34376f45ec7bSml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 34386f45ec7bSml29623 34396f45ec7bSml29623 rcfga_p = &(rbrp->rbr_cfga); 34406f45ec7bSml29623 rcfgb_p = &(rbrp->rbr_cfgb); 34416f45ec7bSml29623 kick_p = &(rbrp->rbr_kick); 34426f45ec7bSml29623 rcfga_p->value = 0; 34436f45ec7bSml29623 rcfgb_p->value = 0; 34446f45ec7bSml29623 kick_p->value = 0; 34456f45ec7bSml29623 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 34466f45ec7bSml29623 rcfga_p->value = (rbrp->rbr_addr & 34476f45ec7bSml29623 (RBR_CFIG_A_STDADDR_MASK | 34486f45ec7bSml29623 RBR_CFIG_A_STDADDR_BASE_MASK)); 34496f45ec7bSml29623 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 34506f45ec7bSml29623 34516f45ec7bSml29623 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 34526f45ec7bSml29623 rcfgb_p->bits.ldw.vld0 = 1; 34536f45ec7bSml29623 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 34546f45ec7bSml29623 rcfgb_p->bits.ldw.vld1 = 1; 34556f45ec7bSml29623 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 34566f45ec7bSml29623 rcfgb_p->bits.ldw.vld2 = 1; 34576f45ec7bSml29623 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 34586f45ec7bSml29623 34596f45ec7bSml29623 /* 34606f45ec7bSml29623 * For each buffer block, enter receive block address to the ring. 
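 * Each 32-bit RBR entry holds the block's DMA address shifted right
 * by RBR_BKADDR_SHIFT, as computed for bkaddr below.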
34616f45ec7bSml29623 */ 34626f45ec7bSml29623 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 34636f45ec7bSml29623 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 34646f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34656f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 34666f45ec7bSml29623 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 34676f45ec7bSml29623 34686f45ec7bSml29623 rx_msg_ring = rbrp->rx_msg_ring; 34696f45ec7bSml29623 for (i = 0; i < rbrp->tnblocks; i++) { 34706f45ec7bSml29623 rx_msg_p = rx_msg_ring[i]; 34716f45ec7bSml29623 rx_msg_p->nxgep = nxgep; 34726f45ec7bSml29623 rx_msg_p->rx_rbr_p = rbrp; 34736f45ec7bSml29623 bkaddr = (uint32_t) 34746f45ec7bSml29623 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 34756f45ec7bSml29623 >> RBR_BKADDR_SHIFT)); 34766f45ec7bSml29623 rx_msg_p->free = B_FALSE; 34776f45ec7bSml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 34786f45ec7bSml29623 34796f45ec7bSml29623 *rbr_vaddrp++ = bkaddr; 34806f45ec7bSml29623 } 34816f45ec7bSml29623 34826f45ec7bSml29623 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 34836f45ec7bSml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 34846f45ec7bSml29623 34856f45ec7bSml29623 rbrp->rbr_rd_index = 0; 34866f45ec7bSml29623 34876f45ec7bSml29623 rbrp->rbr_consumed = 0; 34886f45ec7bSml29623 rbrp->rbr_use_bcopy = B_TRUE; 34896f45ec7bSml29623 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 34906f45ec7bSml29623 /* 34916f45ec7bSml29623 * Do bcopy on packets greater than bcopy size once 34926f45ec7bSml29623 * the lo threshold is reached. 34936f45ec7bSml29623 * This lo threshold should be less than the hi threshold. 34946f45ec7bSml29623 * 34956f45ec7bSml29623 * Do bcopy on every packet once the hi threshold is reached. 34966f45ec7bSml29623 */ 34976f45ec7bSml29623 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 34986f45ec7bSml29623 /* default it to use hi */ 34996f45ec7bSml29623 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 35006f45ec7bSml29623 } 35016f45ec7bSml29623 35026f45ec7bSml29623 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 35036f45ec7bSml29623 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 35046f45ec7bSml29623 } 35056f45ec7bSml29623 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 35066f45ec7bSml29623 35076f45ec7bSml29623 switch (nxge_rx_threshold_hi) { 35086f45ec7bSml29623 default: 35096f45ec7bSml29623 case NXGE_RX_COPY_NONE: 35106f45ec7bSml29623 /* Do not do bcopy at all */ 35116f45ec7bSml29623 rbrp->rbr_use_bcopy = B_FALSE; 35126f45ec7bSml29623 rbrp->rbr_threshold_hi = rbrp->rbb_max; 35136f45ec7bSml29623 break; 35146f45ec7bSml29623 35156f45ec7bSml29623 case NXGE_RX_COPY_1: 35166f45ec7bSml29623 case NXGE_RX_COPY_2: 35176f45ec7bSml29623 case NXGE_RX_COPY_3: 35186f45ec7bSml29623 case NXGE_RX_COPY_4: 35196f45ec7bSml29623 case NXGE_RX_COPY_5: 35206f45ec7bSml29623 case NXGE_RX_COPY_6: 35216f45ec7bSml29623 case NXGE_RX_COPY_7: 35226f45ec7bSml29623 rbrp->rbr_threshold_hi = 35236f45ec7bSml29623 rbrp->rbb_max * 35246f45ec7bSml29623 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 35256f45ec7bSml29623 break; 35266f45ec7bSml29623 35276f45ec7bSml29623 case NXGE_RX_COPY_ALL: 35286f45ec7bSml29623 rbrp->rbr_threshold_hi = 0; 35296f45ec7bSml29623 break; 35306f45ec7bSml29623 } 35316f45ec7bSml29623 35326f45ec7bSml29623 switch (nxge_rx_threshold_lo) { 35336f45ec7bSml29623 default: 35346f45ec7bSml29623 case NXGE_RX_COPY_NONE: 35356f45ec7bSml29623 /* Do not do bcopy at all */ 35366f45ec7bSml29623 if (rbrp->rbr_use_bcopy) { 35376f45ec7bSml29623 rbrp->rbr_use_bcopy = B_FALSE; 35386f45ec7bSml29623 } 35396f45ec7bSml29623 rbrp->rbr_threshold_lo = rbrp->rbb_max; 
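/*
 * For the NXGE_RX_COPY_1 .. NXGE_RX_COPY_7 cases below, the threshold
 * is scaled as rbb_max * level / NXGE_RX_BCOPY_SCALE.  A worked
 * example (assuming a scale of 8): with rbb_max == 4096 and a level
 * of 3, the threshold becomes 4096 * 3 / 8 == 1536 buffers.
 */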
35406f45ec7bSml29623 break; 35416f45ec7bSml29623 35426f45ec7bSml29623 case NXGE_RX_COPY_1: 35436f45ec7bSml29623 case NXGE_RX_COPY_2: 35446f45ec7bSml29623 case NXGE_RX_COPY_3: 35456f45ec7bSml29623 case NXGE_RX_COPY_4: 35466f45ec7bSml29623 case NXGE_RX_COPY_5: 35476f45ec7bSml29623 case NXGE_RX_COPY_6: 35486f45ec7bSml29623 case NXGE_RX_COPY_7: 35496f45ec7bSml29623 rbrp->rbr_threshold_lo = 35506f45ec7bSml29623 rbrp->rbb_max * 35516f45ec7bSml29623 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 35526f45ec7bSml29623 break; 35536f45ec7bSml29623 35546f45ec7bSml29623 case NXGE_RX_COPY_ALL: 35556f45ec7bSml29623 rbrp->rbr_threshold_lo = 0; 35566f45ec7bSml29623 break; 35576f45ec7bSml29623 } 35586f45ec7bSml29623 35596f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 35606f45ec7bSml29623 "nxge_map_rxdma_channel_cfg_ring: channel %d " 35616f45ec7bSml29623 "rbb_max %d " 35626f45ec7bSml29623 "rbrp->rbr_bufsize_type %d " 35636f45ec7bSml29623 "rbb_threshold_hi %d " 35646f45ec7bSml29623 "rbb_threshold_lo %d", 35656f45ec7bSml29623 dma_channel, 35666f45ec7bSml29623 rbrp->rbb_max, 35676f45ec7bSml29623 rbrp->rbr_bufsize_type, 35686f45ec7bSml29623 rbrp->rbr_threshold_hi, 35696f45ec7bSml29623 rbrp->rbr_threshold_lo)); 35706f45ec7bSml29623 35716f45ec7bSml29623 rbrp->page_valid.value = 0; 35726f45ec7bSml29623 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 35736f45ec7bSml29623 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 35746f45ec7bSml29623 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 35756f45ec7bSml29623 rbrp->page_hdl.value = 0; 35766f45ec7bSml29623 35776f45ec7bSml29623 rbrp->page_valid.bits.ldw.page0 = 1; 35786f45ec7bSml29623 rbrp->page_valid.bits.ldw.page1 = 1; 35796f45ec7bSml29623 35806f45ec7bSml29623 /* Map in the receive completion ring */ 35816f45ec7bSml29623 rcrp = (p_rx_rcr_ring_t) 35826f45ec7bSml29623 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 35836f45ec7bSml29623 rcrp->rdc = dma_channel; 35846f45ec7bSml29623 35856f45ec7bSml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 35866f45ec7bSml29623 rcrp->comp_size = nxge_port_rcr_size; 35876f45ec7bSml29623 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 35886f45ec7bSml29623 35896f45ec7bSml29623 rcrp->max_receive_pkts = nxge_max_rx_pkts; 35906f45ec7bSml29623 35916f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 35926f45ec7bSml29623 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 35936f45ec7bSml29623 sizeof (rcr_entry_t)); 35946f45ec7bSml29623 rcrp->comp_rd_index = 0; 35956f45ec7bSml29623 rcrp->comp_wt_index = 0; 35966f45ec7bSml29623 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 35976f45ec7bSml29623 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 3598adfcba55Sjoycey #if defined(__i386) 359952ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 3600adfcba55Sjoycey (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3601adfcba55Sjoycey #else 360252ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 36036f45ec7bSml29623 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 3604adfcba55Sjoycey #endif 36056f45ec7bSml29623 36066f45ec7bSml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 36076f45ec7bSml29623 (nxge_port_rcr_size - 1); 36086f45ec7bSml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 36096f45ec7bSml29623 (nxge_port_rcr_size - 1); 36106f45ec7bSml29623 36116f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36126f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 36136f45ec7bSml29623 "channel %d " 36146f45ec7bSml29623 "rbr_vaddrp $%p " 
36156f45ec7bSml29623 "rcr_desc_rd_head_p $%p " 36166f45ec7bSml29623 "rcr_desc_rd_head_pp $%p " 36176f45ec7bSml29623 "rcr_desc_rd_last_p $%p " 36186f45ec7bSml29623 "rcr_desc_rd_last_pp $%p ", 36196f45ec7bSml29623 dma_channel, 36206f45ec7bSml29623 rbr_vaddrp, 36216f45ec7bSml29623 rcrp->rcr_desc_rd_head_p, 36226f45ec7bSml29623 rcrp->rcr_desc_rd_head_pp, 36236f45ec7bSml29623 rcrp->rcr_desc_last_p, 36246f45ec7bSml29623 rcrp->rcr_desc_last_pp)); 36256f45ec7bSml29623 36266f45ec7bSml29623 /* 36276f45ec7bSml29623 * Zero out buffer block ring descriptors. 36286f45ec7bSml29623 */ 36296f45ec7bSml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 36307b26d9ffSSantwona Behera 36317b26d9ffSSantwona Behera rcrp->intr_timeout = (nxgep->intr_timeout < 36327b26d9ffSSantwona Behera NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 36337b26d9ffSSantwona Behera nxgep->intr_timeout; 36347b26d9ffSSantwona Behera 36357b26d9ffSSantwona Behera rcrp->intr_threshold = (nxgep->intr_threshold < 36367b26d9ffSSantwona Behera NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 36377b26d9ffSSantwona Behera nxgep->intr_threshold; 36387b26d9ffSSantwona Behera 36396f45ec7bSml29623 rcrp->full_hdr_flag = B_FALSE; 36404df55fdeSJanie Lu 36414df55fdeSJanie Lu rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset; 36424df55fdeSJanie Lu 36436f45ec7bSml29623 36446f45ec7bSml29623 cfga_p = &(rcrp->rcr_cfga); 36456f45ec7bSml29623 cfgb_p = &(rcrp->rcr_cfgb); 36466f45ec7bSml29623 cfga_p->value = 0; 36476f45ec7bSml29623 cfgb_p->value = 0; 36486f45ec7bSml29623 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 36496f45ec7bSml29623 cfga_p->value = (rcrp->rcr_addr & 36506f45ec7bSml29623 (RCRCFIG_A_STADDR_MASK | 36516f45ec7bSml29623 RCRCFIG_A_STADDR_BASE_MASK)); 36526f45ec7bSml29623 36536f45ec7bSml29623 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 36546f45ec7bSml29623 RCRCFIG_A_LEN_SHIF); 36556f45ec7bSml29623 36566f45ec7bSml29623 /* 36576f45ec7bSml29623 * Timeout should be set based on the system clock divider. 36587b26d9ffSSantwona Behera * A timeout value of 1 assumes that the 36596f45ec7bSml29623 * granularity (1000) is 3 microseconds running at 300MHz. 
36606f45ec7bSml29623 */ 36616f45ec7bSml29623 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 36626f45ec7bSml29623 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 36636f45ec7bSml29623 cfgb_p->bits.ldw.entout = 1; 36646f45ec7bSml29623 36656f45ec7bSml29623 /* Map in the mailbox */ 36666f45ec7bSml29623 mboxp = (p_rx_mbox_t) 36676f45ec7bSml29623 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 36686f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 36696f45ec7bSml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 36706f45ec7bSml29623 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 36716f45ec7bSml29623 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 36726f45ec7bSml29623 cfig1_p->value = cfig2_p->value = 0; 36736f45ec7bSml29623 36746f45ec7bSml29623 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 36756f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36766f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 36776f45ec7bSml29623 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 36786f45ec7bSml29623 dma_channel, cfig1_p->value, cfig2_p->value, 36796f45ec7bSml29623 mboxp->mbox_addr)); 36806f45ec7bSml29623 36816f45ec7bSml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 36826f45ec7bSml29623 & 0xfff); 36836f45ec7bSml29623 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 36846f45ec7bSml29623 36856f45ec7bSml29623 36866f45ec7bSml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 36876f45ec7bSml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 36886f45ec7bSml29623 RXDMA_CFIG2_MBADDR_L_MASK); 36896f45ec7bSml29623 36906f45ec7bSml29623 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 36916f45ec7bSml29623 36926f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36936f45ec7bSml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 36946f45ec7bSml29623 "channel %d damaddrp $%p " 36956f45ec7bSml29623 "cfg1 0x%016llx cfig2 0x%016llx", 36966f45ec7bSml29623 dma_channel, dmaaddrp, 36976f45ec7bSml29623 cfig1_p->value, cfig2_p->value)); 36986f45ec7bSml29623 36996f45ec7bSml29623 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 37004df55fdeSJanie Lu if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) { 37014df55fdeSJanie Lu switch (rcrp->sw_priv_hdr_len) { 37024df55fdeSJanie Lu case SW_OFFSET_NO_OFFSET: 37034df55fdeSJanie Lu case SW_OFFSET_64: 37044df55fdeSJanie Lu case SW_OFFSET_128: 37054df55fdeSJanie Lu case SW_OFFSET_192: 37064df55fdeSJanie Lu cfig2_p->bits.ldw.offset = 37074df55fdeSJanie Lu rcrp->sw_priv_hdr_len; 37084df55fdeSJanie Lu cfig2_p->bits.ldw.offset256 = 0; 37094df55fdeSJanie Lu break; 37104df55fdeSJanie Lu case SW_OFFSET_256: 37114df55fdeSJanie Lu case SW_OFFSET_320: 37124df55fdeSJanie Lu case SW_OFFSET_384: 37134df55fdeSJanie Lu case SW_OFFSET_448: 37144df55fdeSJanie Lu cfig2_p->bits.ldw.offset = 37154df55fdeSJanie Lu rcrp->sw_priv_hdr_len & 0x3; 37164df55fdeSJanie Lu cfig2_p->bits.ldw.offset256 = 1; 37174df55fdeSJanie Lu break; 37184df55fdeSJanie Lu default: 37194df55fdeSJanie Lu cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET; 37204df55fdeSJanie Lu cfig2_p->bits.ldw.offset256 = 0; 37214df55fdeSJanie Lu } 37224df55fdeSJanie Lu } else { 37236f45ec7bSml29623 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 37244df55fdeSJanie Lu } 37256f45ec7bSml29623 37266f45ec7bSml29623 rbrp->rx_rcr_p = rcrp; 37276f45ec7bSml29623 rcrp->rx_rbr_p = rbrp; 37286f45ec7bSml29623 *rcr_p = rcrp; 37296f45ec7bSml29623 *rx_mbox_p = mboxp; 37306f45ec7bSml29623 37316f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37326f45ec7bSml29623 "<== 
nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 37336f45ec7bSml29623 37346f45ec7bSml29623 return (status); 37356f45ec7bSml29623 } 37366f45ec7bSml29623 37376f45ec7bSml29623 /*ARGSUSED*/ 37386f45ec7bSml29623 static void 37396f45ec7bSml29623 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 37406f45ec7bSml29623 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 37416f45ec7bSml29623 { 37426f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37436f45ec7bSml29623 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 37446f45ec7bSml29623 rcr_p->rdc)); 37456f45ec7bSml29623 37466f45ec7bSml29623 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 37476f45ec7bSml29623 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 37486f45ec7bSml29623 37496f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37506f45ec7bSml29623 "<== nxge_unmap_rxdma_channel_cfg_ring")); 37516f45ec7bSml29623 } 37526f45ec7bSml29623 37536f45ec7bSml29623 static nxge_status_t 37546f45ec7bSml29623 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 37556f45ec7bSml29623 p_nxge_dma_common_t *dma_buf_p, 37566f45ec7bSml29623 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 37576f45ec7bSml29623 { 37586f45ec7bSml29623 p_rx_rbr_ring_t rbrp; 37596f45ec7bSml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 37606f45ec7bSml29623 p_rx_msg_t *rx_msg_ring; 37616f45ec7bSml29623 p_rx_msg_t rx_msg_p; 37626f45ec7bSml29623 p_mblk_t mblk_p; 37636f45ec7bSml29623 37646f45ec7bSml29623 rxring_info_t *ring_info; 37656f45ec7bSml29623 nxge_status_t status = NXGE_OK; 37666f45ec7bSml29623 int i, j, index; 37676f45ec7bSml29623 uint32_t size, bsize, nblocks, nmsgs; 37686f45ec7bSml29623 37696f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37706f45ec7bSml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 37716f45ec7bSml29623 channel)); 37726f45ec7bSml29623 37736f45ec7bSml29623 dma_bufp = tmp_bufp = *dma_buf_p; 37746f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37756f45ec7bSml29623 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 37766f45ec7bSml29623 "chunks bufp 0x%016llx", 37776f45ec7bSml29623 channel, num_chunks, dma_bufp)); 37786f45ec7bSml29623 37796f45ec7bSml29623 nmsgs = 0; 37806f45ec7bSml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 37816f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37826f45ec7bSml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 37836f45ec7bSml29623 "bufp 0x%016llx nblocks %d nmsgs %d", 37846f45ec7bSml29623 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 37856f45ec7bSml29623 nmsgs += tmp_bufp->nblocks; 37866f45ec7bSml29623 } 37876f45ec7bSml29623 if (!nmsgs) { 378856d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 37896f45ec7bSml29623 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 37906f45ec7bSml29623 "no msg blocks", 37916f45ec7bSml29623 channel)); 37926f45ec7bSml29623 status = NXGE_ERROR; 37936f45ec7bSml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 37946f45ec7bSml29623 } 37956f45ec7bSml29623 3796007969e0Stm144005 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 37976f45ec7bSml29623 37986f45ec7bSml29623 size = nmsgs * sizeof (p_rx_msg_t); 37996f45ec7bSml29623 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 38006f45ec7bSml29623 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 38016f45ec7bSml29623 KM_SLEEP); 38026f45ec7bSml29623 38036f45ec7bSml29623 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 38046f45ec7bSml29623 (void *)nxgep->interrupt_cookie); 38056f45ec7bSml29623 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 38066f45ec7bSml29623 (void *)nxgep->interrupt_cookie); 38076f45ec7bSml29623 
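/*
 * Both ring locks are created with the driver's interrupt cookie so
 * that they can be taken from the receive interrupt path as well as
 * from base level, e.g. (sketch):
 *
 *	MUTEX_ENTER(&rbrp->post_lock);
 *	... post a free block back to the RBR ...
 *	MUTEX_EXIT(&rbrp->post_lock);
 */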
rbrp->rdc = channel; 38086f45ec7bSml29623 rbrp->num_blocks = num_chunks; 38096f45ec7bSml29623 rbrp->tnblocks = nmsgs; 38106f45ec7bSml29623 rbrp->rbb_max = nmsgs; 38116f45ec7bSml29623 rbrp->rbr_max_size = nmsgs; 38126f45ec7bSml29623 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 38136f45ec7bSml29623 38146f45ec7bSml29623 /* 38156f45ec7bSml29623 * Buffer sizes suggested by NIU architect. 38166f45ec7bSml29623 * 256, 512 and 2K. 38176f45ec7bSml29623 */ 38186f45ec7bSml29623 38196f45ec7bSml29623 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 38206f45ec7bSml29623 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 38216f45ec7bSml29623 rbrp->npi_pkt_buf_size0 = SIZE_256B; 38226f45ec7bSml29623 38236f45ec7bSml29623 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 38246f45ec7bSml29623 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 38256f45ec7bSml29623 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 38266f45ec7bSml29623 38276f45ec7bSml29623 rbrp->block_size = nxgep->rx_default_block_size; 38286f45ec7bSml29623 382948056c53SMichael Speer if (!nxgep->mac.is_jumbo) { 38306f45ec7bSml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 38316f45ec7bSml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 38326f45ec7bSml29623 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 38336f45ec7bSml29623 } else { 38346f45ec7bSml29623 if (rbrp->block_size >= 0x2000) { 38356f45ec7bSml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 38366f45ec7bSml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 38376f45ec7bSml29623 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 38386f45ec7bSml29623 } else { 38396f45ec7bSml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 38406f45ec7bSml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 38416f45ec7bSml29623 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 38426f45ec7bSml29623 } 38436f45ec7bSml29623 } 38446f45ec7bSml29623 38456f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38466f45ec7bSml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 38476f45ec7bSml29623 "actual rbr max %d rbb_max %d nmsgs %d " 38486f45ec7bSml29623 "rbrp->block_size %d default_block_size %d " 38496f45ec7bSml29623 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 38506f45ec7bSml29623 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 38516f45ec7bSml29623 rbrp->block_size, nxgep->rx_default_block_size, 38526f45ec7bSml29623 nxge_rbr_size, nxge_rbr_spare_size)); 38536f45ec7bSml29623 38546f45ec7bSml29623 /* Map in buffers from the buffer pool. 
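 * Each DMA chunk contributes dma_bufp->nblocks blocks of bsize bytes;
 * ring_info->buffer[] records each chunk's base addresses and starting
 * block index for later address-to-index lookups.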
*/ 38556f45ec7bSml29623 index = 0; 38566f45ec7bSml29623 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 38576f45ec7bSml29623 bsize = dma_bufp->block_size; 38586f45ec7bSml29623 nblocks = dma_bufp->nblocks; 3859adfcba55Sjoycey #if defined(__i386) 3860adfcba55Sjoycey ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 3861adfcba55Sjoycey #else 38626f45ec7bSml29623 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 3863adfcba55Sjoycey #endif 38646f45ec7bSml29623 ring_info->buffer[i].buf_index = i; 38656f45ec7bSml29623 ring_info->buffer[i].buf_size = dma_bufp->alength; 38666f45ec7bSml29623 ring_info->buffer[i].start_index = index; 3867adfcba55Sjoycey #if defined(__i386) 3868adfcba55Sjoycey ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 3869adfcba55Sjoycey #else 38706f45ec7bSml29623 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 3871adfcba55Sjoycey #endif 38726f45ec7bSml29623 38736f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38746f45ec7bSml29623 " nxge_map_rxdma_channel_buf_ring: map channel %d " 38756f45ec7bSml29623 "chunk %d" 38766f45ec7bSml29623 " nblocks %d chunk_size %x block_size 0x%x " 38776f45ec7bSml29623 "dma_bufp $%p", channel, i, 38786f45ec7bSml29623 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 38796f45ec7bSml29623 dma_bufp)); 38806f45ec7bSml29623 38816f45ec7bSml29623 for (j = 0; j < nblocks; j++) { 38826f45ec7bSml29623 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 38836f45ec7bSml29623 dma_bufp)) == NULL) { 388456d930aeSspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 388556d930aeSspeer "allocb failed (index %d i %d j %d)", 388656d930aeSspeer index, i, j)); 388756d930aeSspeer goto nxge_map_rxdma_channel_buf_ring_fail1; 38886f45ec7bSml29623 } 38896f45ec7bSml29623 rx_msg_ring[index] = rx_msg_p; 38906f45ec7bSml29623 rx_msg_p->block_index = index; 38916f45ec7bSml29623 rx_msg_p->shifted_addr = (uint32_t) 38926f45ec7bSml29623 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 38936f45ec7bSml29623 RBR_BKADDR_SHIFT)); 38946f45ec7bSml29623 38956f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 389656d930aeSspeer "index %d j %d rx_msg_p $%p mblk %p", 389756d930aeSspeer index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 38986f45ec7bSml29623 38996f45ec7bSml29623 mblk_p = rx_msg_p->rx_mblk_p; 39006f45ec7bSml29623 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 3901007969e0Stm144005 3902007969e0Stm144005 rbrp->rbr_ref_cnt++; 39036f45ec7bSml29623 index++; 39046f45ec7bSml29623 rx_msg_p->buf_dma.dma_channel = channel; 39056f45ec7bSml29623 } 3906678453a8Sspeer 3907678453a8Sspeer rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 3908678453a8Sspeer if (dma_bufp->contig_alloc_type) { 3909678453a8Sspeer rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 3910678453a8Sspeer } 3911678453a8Sspeer 3912678453a8Sspeer if (dma_bufp->kmem_alloc_type) { 3913678453a8Sspeer rbrp->rbr_alloc_type = KMEM_ALLOC; 3914678453a8Sspeer } 3915678453a8Sspeer 3916678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 3917678453a8Sspeer " nxge_map_rxdma_channel_buf_ring: map channel %d " 3918678453a8Sspeer "chunk %d" 3919678453a8Sspeer " nblocks %d chunk_size %x block_size 0x%x " 3920678453a8Sspeer "dma_bufp $%p", 3921678453a8Sspeer channel, i, 3922678453a8Sspeer dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 3923678453a8Sspeer dma_bufp)); 39246f45ec7bSml29623 } 39256f45ec7bSml29623 if (i < rbrp->num_blocks) { 39266f45ec7bSml29623 goto nxge_map_rxdma_channel_buf_ring_fail1; 39276f45ec7bSml29623 } 39286f45ec7bSml29623 39296f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39306f45ec7bSml29623 
"nxge_map_rxdma_channel_buf_ring: done buf init " 39316f45ec7bSml29623 "channel %d msg block entries %d", 39326f45ec7bSml29623 channel, index)); 39336f45ec7bSml29623 ring_info->block_size_mask = bsize - 1; 39346f45ec7bSml29623 rbrp->rx_msg_ring = rx_msg_ring; 39356f45ec7bSml29623 rbrp->dma_bufp = dma_buf_p; 39366f45ec7bSml29623 rbrp->ring_info = ring_info; 39376f45ec7bSml29623 39386f45ec7bSml29623 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 39396f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39406f45ec7bSml29623 " nxge_map_rxdma_channel_buf_ring: " 39416f45ec7bSml29623 "channel %d done buf info init", channel)); 39426f45ec7bSml29623 3943007969e0Stm144005 /* 3944007969e0Stm144005 * Finally, permit nxge_freeb() to call nxge_post_page(). 3945007969e0Stm144005 */ 3946007969e0Stm144005 rbrp->rbr_state = RBR_POSTING; 3947007969e0Stm144005 39486f45ec7bSml29623 *rbr_p = rbrp; 39496f45ec7bSml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 39506f45ec7bSml29623 39516f45ec7bSml29623 nxge_map_rxdma_channel_buf_ring_fail1: 39526f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39536f45ec7bSml29623 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 39546f45ec7bSml29623 channel, status)); 39556f45ec7bSml29623 39566f45ec7bSml29623 index--; 39576f45ec7bSml29623 for (; index >= 0; index--) { 39586f45ec7bSml29623 rx_msg_p = rx_msg_ring[index]; 39596f45ec7bSml29623 if (rx_msg_p != NULL) { 39606f45ec7bSml29623 freeb(rx_msg_p->rx_mblk_p); 39616f45ec7bSml29623 rx_msg_ring[index] = NULL; 39626f45ec7bSml29623 } 39636f45ec7bSml29623 } 39646f45ec7bSml29623 nxge_map_rxdma_channel_buf_ring_fail: 39656f45ec7bSml29623 MUTEX_DESTROY(&rbrp->post_lock); 39666f45ec7bSml29623 MUTEX_DESTROY(&rbrp->lock); 39676f45ec7bSml29623 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 39686f45ec7bSml29623 KMEM_FREE(rx_msg_ring, size); 39696f45ec7bSml29623 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 39706f45ec7bSml29623 397156d930aeSspeer status = NXGE_ERROR; 397256d930aeSspeer 39736f45ec7bSml29623 nxge_map_rxdma_channel_buf_ring_exit: 39746f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39756f45ec7bSml29623 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 39766f45ec7bSml29623 39776f45ec7bSml29623 return (status); 39786f45ec7bSml29623 } 39796f45ec7bSml29623 39806f45ec7bSml29623 /*ARGSUSED*/ 39816f45ec7bSml29623 static void 39826f45ec7bSml29623 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 39836f45ec7bSml29623 p_rx_rbr_ring_t rbr_p) 39846f45ec7bSml29623 { 39856f45ec7bSml29623 p_rx_msg_t *rx_msg_ring; 39866f45ec7bSml29623 p_rx_msg_t rx_msg_p; 39876f45ec7bSml29623 rxring_info_t *ring_info; 39886f45ec7bSml29623 int i; 39896f45ec7bSml29623 uint32_t size; 39906f45ec7bSml29623 #ifdef NXGE_DEBUG 39916f45ec7bSml29623 int num_chunks; 39926f45ec7bSml29623 #endif 39936f45ec7bSml29623 39946f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39956f45ec7bSml29623 "==> nxge_unmap_rxdma_channel_buf_ring")); 39966f45ec7bSml29623 if (rbr_p == NULL) { 39976f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 39986f45ec7bSml29623 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 39996f45ec7bSml29623 return; 40006f45ec7bSml29623 } 40016f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40026f45ec7bSml29623 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 40036f45ec7bSml29623 rbr_p->rdc)); 40046f45ec7bSml29623 40056f45ec7bSml29623 rx_msg_ring = rbr_p->rx_msg_ring; 40066f45ec7bSml29623 ring_info = rbr_p->ring_info; 40076f45ec7bSml29623 40086f45ec7bSml29623 if (rx_msg_ring == NULL || ring_info == NULL) { 40096f45ec7bSml29623 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40106f45ec7bSml29623 "<== nxge_unmap_rxdma_channel_buf_ring: " 40116f45ec7bSml29623 "rx_msg_ring $%p ring_info $%p", 40126f45ec7bSml29623 rx_msg_p, ring_info)); 40136f45ec7bSml29623 return; 40146f45ec7bSml29623 } 40156f45ec7bSml29623 40166f45ec7bSml29623 #ifdef NXGE_DEBUG 40176f45ec7bSml29623 num_chunks = rbr_p->num_blocks; 40186f45ec7bSml29623 #endif 40196f45ec7bSml29623 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 40206f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40216f45ec7bSml29623 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 40226f45ec7bSml29623 "tnblocks %d (max %d) size ptrs %d ", 40236f45ec7bSml29623 rbr_p->rdc, num_chunks, 40246f45ec7bSml29623 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 40256f45ec7bSml29623 40266f45ec7bSml29623 for (i = 0; i < rbr_p->tnblocks; i++) { 40276f45ec7bSml29623 rx_msg_p = rx_msg_ring[i]; 40286f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40296f45ec7bSml29623 " nxge_unmap_rxdma_channel_buf_ring: " 40306f45ec7bSml29623 "rx_msg_p $%p", 40316f45ec7bSml29623 rx_msg_p)); 40326f45ec7bSml29623 if (rx_msg_p != NULL) { 40336f45ec7bSml29623 freeb(rx_msg_p->rx_mblk_p); 40346f45ec7bSml29623 rx_msg_ring[i] = NULL; 40356f45ec7bSml29623 } 40366f45ec7bSml29623 } 40376f45ec7bSml29623 4038007969e0Stm144005 /* 4039007969e0Stm144005 * We no longer may use the mutex <post_lock>. By setting 4040007969e0Stm144005 * <rbr_state> to anything but POSTING, we prevent 4041007969e0Stm144005 * nxge_post_page() from accessing a dead mutex. 4042007969e0Stm144005 */ 4043007969e0Stm144005 rbr_p->rbr_state = RBR_UNMAPPING; 40446f45ec7bSml29623 MUTEX_DESTROY(&rbr_p->post_lock); 4045007969e0Stm144005 40466f45ec7bSml29623 MUTEX_DESTROY(&rbr_p->lock); 4047007969e0Stm144005 4048007969e0Stm144005 if (rbr_p->rbr_ref_cnt == 0) { 4049678453a8Sspeer /* 4050678453a8Sspeer * This is the normal state of affairs. 4051678453a8Sspeer * Need to free the following buffers: 4052678453a8Sspeer * - data buffers 4053678453a8Sspeer * - rx_msg ring 4054678453a8Sspeer * - ring_info 4055678453a8Sspeer * - rbr ring 4056678453a8Sspeer */ 4057678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 4058678453a8Sspeer "unmap_rxdma_buf_ring: No outstanding - freeing ")); 4059678453a8Sspeer nxge_rxdma_databuf_free(rbr_p); 4060678453a8Sspeer KMEM_FREE(ring_info, sizeof (rxring_info_t)); 4061678453a8Sspeer KMEM_FREE(rx_msg_ring, size); 4062007969e0Stm144005 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 4063007969e0Stm144005 } else { 4064007969e0Stm144005 /* 4065007969e0Stm144005 * Some of our buffers are still being used. 4066007969e0Stm144005 * Therefore, tell nxge_freeb() this ring is 4067007969e0Stm144005 * unmapped, so it may free <rbr_p> for us. 4068007969e0Stm144005 */ 4069007969e0Stm144005 rbr_p->rbr_state = RBR_UNMAPPED; 4070007969e0Stm144005 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4071007969e0Stm144005 "unmap_rxdma_buf_ring: %d %s outstanding.", 4072007969e0Stm144005 rbr_p->rbr_ref_cnt, 4073007969e0Stm144005 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 4074007969e0Stm144005 } 40756f45ec7bSml29623 40766f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40776f45ec7bSml29623 "<== nxge_unmap_rxdma_channel_buf_ring")); 40786f45ec7bSml29623 } 40796f45ec7bSml29623 4080678453a8Sspeer /* 4081678453a8Sspeer * nxge_rxdma_hw_start_common 4082678453a8Sspeer * 4083678453a8Sspeer * Arguments: 4084678453a8Sspeer * nxgep 4085678453a8Sspeer * 4086678453a8Sspeer * Notes: 4087678453a8Sspeer * 4088678453a8Sspeer * NPI/NXGE function calls: 4089678453a8Sspeer * nxge_init_fzc_rx_common(); 4090678453a8Sspeer * nxge_init_fzc_rxdma_port(); 4091678453a8Sspeer * 4092678453a8Sspeer * Registers accessed: 4093678453a8Sspeer * 4094678453a8Sspeer * Context: 4095678453a8Sspeer * Service domain 4096678453a8Sspeer */ 40976f45ec7bSml29623 static nxge_status_t 40986f45ec7bSml29623 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 40996f45ec7bSml29623 { 41006f45ec7bSml29623 nxge_status_t status = NXGE_OK; 41016f45ec7bSml29623 41026f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 41036f45ec7bSml29623 41046f45ec7bSml29623 /* 41056f45ec7bSml29623 * Load the sharable parameters by writing to the 41066f45ec7bSml29623 * function zero control registers. These FZC registers 41076f45ec7bSml29623 * should be initialized only once for the entire chip. 41086f45ec7bSml29623 */ 41096f45ec7bSml29623 (void) nxge_init_fzc_rx_common(nxgep); 41106f45ec7bSml29623 41116f45ec7bSml29623 /* 41126f45ec7bSml29623 * Initialize the RXDMA port specific FZC control configurations. 41136f45ec7bSml29623 * These FZC registers are pertaining to each port. 41146f45ec7bSml29623 */ 41156f45ec7bSml29623 (void) nxge_init_fzc_rxdma_port(nxgep); 41166f45ec7bSml29623 41176f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 41186f45ec7bSml29623 41196f45ec7bSml29623 return (status); 41206f45ec7bSml29623 } 41216f45ec7bSml29623 41226f45ec7bSml29623 static nxge_status_t 4123678453a8Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 41246f45ec7bSml29623 { 41256f45ec7bSml29623 int i, ndmas; 41266f45ec7bSml29623 p_rx_rbr_rings_t rx_rbr_rings; 41276f45ec7bSml29623 p_rx_rbr_ring_t *rbr_rings; 41286f45ec7bSml29623 p_rx_rcr_rings_t rx_rcr_rings; 41296f45ec7bSml29623 p_rx_rcr_ring_t *rcr_rings; 41306f45ec7bSml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 41316f45ec7bSml29623 p_rx_mbox_t *rx_mbox_p; 41326f45ec7bSml29623 nxge_status_t status = NXGE_OK; 41336f45ec7bSml29623 41346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 41356f45ec7bSml29623 41366f45ec7bSml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 41376f45ec7bSml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 41386f45ec7bSml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 41396f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 41406f45ec7bSml29623 "<== nxge_rxdma_hw_start: NULL ring pointers")); 41416f45ec7bSml29623 return (NXGE_ERROR); 41426f45ec7bSml29623 } 41436f45ec7bSml29623 ndmas = rx_rbr_rings->ndmas; 41446f45ec7bSml29623 if (ndmas == 0) { 41456f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 41466f45ec7bSml29623 "<== nxge_rxdma_hw_start: no dma channel allocated")); 41476f45ec7bSml29623 return (NXGE_ERROR); 41486f45ec7bSml29623 } 41496f45ec7bSml29623 41506f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41516f45ec7bSml29623 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 41526f45ec7bSml29623 41536f45ec7bSml29623 rbr_rings = rx_rbr_rings->rbr_rings; 41546f45ec7bSml29623 rcr_rings = rx_rcr_rings->rcr_rings; 41556f45ec7bSml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 
41566f45ec7bSml29623 if (rx_mbox_areas_p) { 41576f45ec7bSml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 41586f45ec7bSml29623 } 41596f45ec7bSml29623 4160678453a8Sspeer i = channel; 41616f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41626f45ec7bSml29623 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 41636f45ec7bSml29623 ndmas, channel)); 41646f45ec7bSml29623 status = nxge_rxdma_start_channel(nxgep, channel, 41656f45ec7bSml29623 (p_rx_rbr_ring_t)rbr_rings[i], 41666f45ec7bSml29623 (p_rx_rcr_ring_t)rcr_rings[i], 41676f45ec7bSml29623 (p_rx_mbox_t)rx_mbox_p[i]); 41686f45ec7bSml29623 if (status != NXGE_OK) { 4169678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4170678453a8Sspeer "==> nxge_rxdma_hw_start: disable " 4171678453a8Sspeer "(status 0x%x channel %d)", status, channel)); 4172678453a8Sspeer return (status); 41736f45ec7bSml29623 } 41746f45ec7bSml29623 41756f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 41766f45ec7bSml29623 "rx_rbr_rings 0x%016llx rings 0x%016llx", 41776f45ec7bSml29623 rx_rbr_rings, rx_rcr_rings)); 41786f45ec7bSml29623 41796f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41806f45ec7bSml29623 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 41816f45ec7bSml29623 41826f45ec7bSml29623 return (status); 41836f45ec7bSml29623 } 41846f45ec7bSml29623 41856f45ec7bSml29623 static void 4186678453a8Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 41876f45ec7bSml29623 { 41886f45ec7bSml29623 p_rx_rbr_rings_t rx_rbr_rings; 41896f45ec7bSml29623 p_rx_rcr_rings_t rx_rcr_rings; 41906f45ec7bSml29623 41916f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 41926f45ec7bSml29623 41936f45ec7bSml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 41946f45ec7bSml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 41956f45ec7bSml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 41966f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 41976f45ec7bSml29623 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 41986f45ec7bSml29623 return; 41996f45ec7bSml29623 } 42006f45ec7bSml29623 42016f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4202678453a8Sspeer "==> nxge_rxdma_hw_stop(channel %d)", 4203678453a8Sspeer channel)); 42046f45ec7bSml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 42056f45ec7bSml29623 42066f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 42076f45ec7bSml29623 "rx_rbr_rings 0x%016llx rings 0x%016llx", 42086f45ec7bSml29623 rx_rbr_rings, rx_rcr_rings)); 42096f45ec7bSml29623 42106f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 42116f45ec7bSml29623 } 42126f45ec7bSml29623 42136f45ec7bSml29623 42146f45ec7bSml29623 static nxge_status_t 42156f45ec7bSml29623 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 42166f45ec7bSml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 42176f45ec7bSml29623 42186f45ec7bSml29623 { 42196f45ec7bSml29623 npi_handle_t handle; 42206f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 42216f45ec7bSml29623 rx_dma_ctl_stat_t cs; 42226f45ec7bSml29623 rx_dma_ent_msk_t ent_mask; 42236f45ec7bSml29623 nxge_status_t status = NXGE_OK; 42246f45ec7bSml29623 42256f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 42266f45ec7bSml29623 42276f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 42286f45ec7bSml29623 42296f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 42306f45ec7bSml29623 "npi handle addr $%p acc $%p", 42316f45ec7bSml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 
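	/*
	 * Illustrative sketch (not part of the driver): the channel's event
	 * mask is programmed twice below through npi_rxdma_event_mask().
	 * A set bit in rx_dma_ent_msk_t masks that event for this channel
	 * (this is how RX_DMA_ENT_MSK_ALL is used elsewhere in this file to
	 * silence a channel), and OP_SET replaces the whole mask.  The
	 * sketch shows the idiom in isolation.
	 */
#if 0	/* illustrative sketch only; never compiled */
	{
		rx_dma_ent_msk_t example_mask;

		example_mask.value = 0;		/* every event enabled ... */
		example_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
						/* ... except RBR empty */
		(void) npi_rxdma_event_mask(handle, OP_SET, channel,
		    &example_mask);
	}
#endif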
42326f45ec7bSml29623 4233678453a8Sspeer /* Reset RXDMA channel, but not if you're a guest. */ 4234678453a8Sspeer if (!isLDOMguest(nxgep)) { 42356f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 42366f45ec7bSml29623 if (rs != NPI_SUCCESS) { 42376f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4238678453a8Sspeer "==> nxge_init_fzc_rdc: " 4239678453a8Sspeer "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 4240678453a8Sspeer channel, rs)); 42416f45ec7bSml29623 return (NXGE_ERROR | rs); 42426f45ec7bSml29623 } 42436f45ec7bSml29623 42446f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42456f45ec7bSml29623 "==> nxge_rxdma_start_channel: reset done: channel %d", 42466f45ec7bSml29623 channel)); 4247678453a8Sspeer } 4248678453a8Sspeer 4249678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4250678453a8Sspeer if (isLDOMguest(nxgep)) 4251678453a8Sspeer (void) nxge_rdc_lp_conf(nxgep, channel); 4252678453a8Sspeer #endif 42536f45ec7bSml29623 42546f45ec7bSml29623 /* 42556f45ec7bSml29623 * Initialize the RXDMA channel specific FZC control 42566f45ec7bSml29623 * configurations. These FZC registers are pertaining 42576f45ec7bSml29623 * to each RX channel (logical pages). 42586f45ec7bSml29623 */ 4259678453a8Sspeer if (!isLDOMguest(nxgep)) { 4260678453a8Sspeer status = nxge_init_fzc_rxdma_channel(nxgep, channel); 42616f45ec7bSml29623 if (status != NXGE_OK) { 42626f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42636f45ec7bSml29623 "==> nxge_rxdma_start_channel: " 42646f45ec7bSml29623 "init fzc rxdma failed (0x%08x channel %d)", 42656f45ec7bSml29623 status, channel)); 42666f45ec7bSml29623 return (status); 42676f45ec7bSml29623 } 42686f45ec7bSml29623 42696f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42706f45ec7bSml29623 "==> nxge_rxdma_start_channel: fzc done")); 4271678453a8Sspeer } 42726f45ec7bSml29623 42736f45ec7bSml29623 /* Set up the interrupt event masks. 
*/ 42746f45ec7bSml29623 ent_mask.value = 0; 42756f45ec7bSml29623 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 42766f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 42776f45ec7bSml29623 &ent_mask); 42786f45ec7bSml29623 if (rs != NPI_SUCCESS) { 42796f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42806f45ec7bSml29623 "==> nxge_rxdma_start_channel: " 4281678453a8Sspeer "init rxdma event masks failed " 4282678453a8Sspeer "(0x%08x channel %d)", 42836f45ec7bSml29623 rs, channel)); 42846f45ec7bSml29623 return (NXGE_ERROR | rs); 42856f45ec7bSml29623 } 42866f45ec7bSml29623 4287678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4288678453a8Sspeer "==> nxge_rxdma_start_channel: " 42896f45ec7bSml29623 "event done: channel %d (mask 0x%016llx)", 42906f45ec7bSml29623 channel, ent_mask.value)); 42916f45ec7bSml29623 42926f45ec7bSml29623 /* Initialize the receive DMA control and status register */ 42936f45ec7bSml29623 cs.value = 0; 42946f45ec7bSml29623 cs.bits.hdw.mex = 1; 42956f45ec7bSml29623 cs.bits.hdw.rcrthres = 1; 42966f45ec7bSml29623 cs.bits.hdw.rcrto = 1; 42976f45ec7bSml29623 cs.bits.hdw.rbr_empty = 1; 42986f45ec7bSml29623 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 42996f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43006f45ec7bSml29623 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 43016f45ec7bSml29623 if (status != NXGE_OK) { 43026f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43036f45ec7bSml29623 "==> nxge_rxdma_start_channel: " 43046f45ec7bSml29623 "init rxdma control register failed (0x%08x channel %d)", 43056f45ec7bSml29623 status, channel)); 43066f45ec7bSml29623 return (status); 43076f45ec7bSml29623 } 43086f45ec7bSml29623 43096f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43106f45ec7bSml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 43116f45ec7bSml29623 43126f45ec7bSml29623 /* 43136f45ec7bSml29623 * Load RXDMA descriptors, buffers, mailbox, 43146f45ec7bSml29623 * initialise the receive DMA channels and 43156f45ec7bSml29623 * enable each DMA channel. 43166f45ec7bSml29623 */ 43176f45ec7bSml29623 status = nxge_enable_rxdma_channel(nxgep, 43186f45ec7bSml29623 channel, rbr_p, rcr_p, mbox_p); 43196f45ec7bSml29623 43206f45ec7bSml29623 if (status != NXGE_OK) { 43216f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43226f45ec7bSml29623 " nxge_rxdma_start_channel: " 4323678453a8Sspeer " enable rxdma failed (0x%08x channel %d)", 43246f45ec7bSml29623 status, channel)); 43256f45ec7bSml29623 return (status); 43266f45ec7bSml29623 } 43276f45ec7bSml29623 4328678453a8Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 4329678453a8Sspeer "==> nxge_rxdma_start_channel: enabled channel %d", channel)); 4330678453a8Sspeer 4331678453a8Sspeer if (isLDOMguest(nxgep)) { 4332678453a8Sspeer /* Add interrupt handler for this channel.
*/ 4333ef523517SMichael Speer status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel); 4334ef523517SMichael Speer if (status != NXGE_OK) { 4335678453a8Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4336678453a8Sspeer " nxge_rxdma_start_channel: " 4337678453a8Sspeer " nxge_hio_intr_add failed (0x%08x channel %d)", 4338678453a8Sspeer status, channel)); 4339ef523517SMichael Speer return (status); 4340678453a8Sspeer } 4341678453a8Sspeer } 4342678453a8Sspeer 43436f45ec7bSml29623 ent_mask.value = 0; 43446f45ec7bSml29623 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 43456f45ec7bSml29623 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 43466f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 43476f45ec7bSml29623 &ent_mask); 43486f45ec7bSml29623 if (rs != NPI_SUCCESS) { 43496f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43506f45ec7bSml29623 "==> nxge_rxdma_start_channel: " 43516f45ec7bSml29623 "init rxdma event masks failed (0x%08x channel %d)", 43526f45ec7bSml29623 status, channel)); 43536f45ec7bSml29623 return (NXGE_ERROR | rs); 43546f45ec7bSml29623 } 43556f45ec7bSml29623 43566f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43576f45ec7bSml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 43586f45ec7bSml29623 43596f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 43606f45ec7bSml29623 43616f45ec7bSml29623 return (NXGE_OK); 43626f45ec7bSml29623 } 43636f45ec7bSml29623 43646f45ec7bSml29623 static nxge_status_t 43656f45ec7bSml29623 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 43666f45ec7bSml29623 { 43676f45ec7bSml29623 npi_handle_t handle; 43686f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 43696f45ec7bSml29623 rx_dma_ctl_stat_t cs; 43706f45ec7bSml29623 rx_dma_ent_msk_t ent_mask; 43716f45ec7bSml29623 nxge_status_t status = NXGE_OK; 43726f45ec7bSml29623 43736f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 43746f45ec7bSml29623 43756f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 43766f45ec7bSml29623 43776f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 43786f45ec7bSml29623 "npi handle addr $%p acc $%p", 43796f45ec7bSml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 43806f45ec7bSml29623 4381330cd344SMichael Speer if (!isLDOMguest(nxgep)) { 4382330cd344SMichael Speer /* 4383330cd344SMichael Speer * Stop RxMAC = A.9.2.6 4384330cd344SMichael Speer */ 4385330cd344SMichael Speer if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 4386330cd344SMichael Speer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4387330cd344SMichael Speer "nxge_rxdma_stop_channel: " 4388330cd344SMichael Speer "Failed to disable RxMAC")); 4389330cd344SMichael Speer } 4390330cd344SMichael Speer 4391330cd344SMichael Speer /* 4392330cd344SMichael Speer * Drain IPP Port = A.9.3.6 4393330cd344SMichael Speer */ 4394330cd344SMichael Speer (void) nxge_ipp_drain(nxgep); 4395330cd344SMichael Speer } 4396330cd344SMichael Speer 43976f45ec7bSml29623 /* Reset RXDMA channel */ 43986f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 43996f45ec7bSml29623 if (rs != NPI_SUCCESS) { 44006f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44016f45ec7bSml29623 " nxge_rxdma_stop_channel: " 44026f45ec7bSml29623 " reset rxdma failed (0x%08x channel %d)", 44036f45ec7bSml29623 rs, channel)); 44046f45ec7bSml29623 return (NXGE_ERROR | rs); 44056f45ec7bSml29623 } 44066f45ec7bSml29623 44076f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 44086f45ec7bSml29623 "==> nxge_rxdma_stop_channel: reset done")); 
44096f45ec7bSml29623 44106f45ec7bSml29623 /* Set up the interrupt event masks. */ 44116f45ec7bSml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 44126f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 44136f45ec7bSml29623 &ent_mask); 44146f45ec7bSml29623 if (rs != NPI_SUCCESS) { 44156f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44166f45ec7bSml29623 "==> nxge_rxdma_stop_channel: " 44176f45ec7bSml29623 "set rxdma event masks failed (0x%08x channel %d)", 44186f45ec7bSml29623 rs, channel)); 44196f45ec7bSml29623 return (NXGE_ERROR | rs); 44206f45ec7bSml29623 } 44216f45ec7bSml29623 44226f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 44236f45ec7bSml29623 "==> nxge_rxdma_stop_channel: event done")); 44246f45ec7bSml29623 4425330cd344SMichael Speer /* 4426330cd344SMichael Speer * Initialize the receive DMA control and status register 4427330cd344SMichael Speer */ 44286f45ec7bSml29623 cs.value = 0; 4429330cd344SMichael Speer status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 44306f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 44316f45ec7bSml29623 " to default (all 0s) 0x%08x", cs.value)); 44326f45ec7bSml29623 if (status != NXGE_OK) { 44336f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44346f45ec7bSml29623 " nxge_rxdma_stop_channel: init rxdma" 44356f45ec7bSml29623 " control register failed (0x%08x channel %d", 44366f45ec7bSml29623 status, channel)); 44376f45ec7bSml29623 return (status); 44386f45ec7bSml29623 } 44396f45ec7bSml29623 44406f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 44416f45ec7bSml29623 "==> nxge_rxdma_stop_channel: control done")); 44426f45ec7bSml29623 4443330cd344SMichael Speer /* 4444330cd344SMichael Speer * Make sure channel is disabled. 4445330cd344SMichael Speer */ 44466f45ec7bSml29623 status = nxge_disable_rxdma_channel(nxgep, channel); 4447da14cebeSEric Cheng 44486f45ec7bSml29623 if (status != NXGE_OK) { 44496f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44506f45ec7bSml29623 " nxge_rxdma_stop_channel: " 44516f45ec7bSml29623 " init enable rxdma failed (0x%08x channel %d)", 44526f45ec7bSml29623 status, channel)); 44536f45ec7bSml29623 return (status); 44546f45ec7bSml29623 } 44556f45ec7bSml29623 4456330cd344SMichael Speer if (!isLDOMguest(nxgep)) { 4457330cd344SMichael Speer /* 4458330cd344SMichael Speer * Enable RxMAC = A.9.2.10 4459330cd344SMichael Speer */ 4460330cd344SMichael Speer if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 4461330cd344SMichael Speer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4462330cd344SMichael Speer "nxge_rxdma_stop_channel: Rx MAC still disabled")); 4463330cd344SMichael Speer } 4464330cd344SMichael Speer } 4465330cd344SMichael Speer 44666f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, 44676f45ec7bSml29623 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 44686f45ec7bSml29623 44696f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 44706f45ec7bSml29623 44716f45ec7bSml29623 return (NXGE_OK); 44726f45ec7bSml29623 } 44736f45ec7bSml29623 44746f45ec7bSml29623 nxge_status_t 44756f45ec7bSml29623 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 44766f45ec7bSml29623 { 44776f45ec7bSml29623 npi_handle_t handle; 44786f45ec7bSml29623 p_nxge_rdc_sys_stats_t statsp; 44796f45ec7bSml29623 rx_ctl_dat_fifo_stat_t stat; 44806f45ec7bSml29623 uint32_t zcp_err_status; 44816f45ec7bSml29623 uint32_t ipp_err_status; 44826f45ec7bSml29623 nxge_status_t status = NXGE_OK; 44836f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 44846f45ec7bSml29623 boolean_t my_err = B_FALSE; 
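	/*
	 * Illustrative sketch (not part of the driver): the switch further
	 * down selects the per-port ZCP/IPP EOP error bit with
	 * FIFO_EOP_PORT0..FIFO_EOP_PORT3.  Assuming FIFO_EOP_PORTn is simply
	 * (1 << n) -- which is how nxge_rxdma_handle_port_errors() tests the
	 * same status words -- the decode collapses to the form below.  Note
	 * the switch also rejects an out-of-range port number, which this
	 * compact form does not.
	 */
#if 0	/* illustrative sketch only; never compiled */
	if ((stat.bits.ldw.zcp_eop_err | stat.bits.ldw.ipp_eop_err) &
	    (1 << nxgep->mac.portnum)) {
		my_err = B_TRUE;
		zcp_err_status = stat.bits.ldw.zcp_eop_err;
		ipp_err_status = stat.bits.ldw.ipp_eop_err;
	}
#endif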
44856f45ec7bSml29623 44866f45ec7bSml29623 handle = nxgep->npi_handle; 44876f45ec7bSml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 44886f45ec7bSml29623 44896f45ec7bSml29623 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 44906f45ec7bSml29623 44916f45ec7bSml29623 if (rs != NPI_SUCCESS) 44926f45ec7bSml29623 return (NXGE_ERROR | rs); 44936f45ec7bSml29623 44946f45ec7bSml29623 if (stat.bits.ldw.id_mismatch) { 44956f45ec7bSml29623 statsp->id_mismatch++; 44966f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 44976f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 44986f45ec7bSml29623 /* Global fatal error encountered */ 44996f45ec7bSml29623 } 45006f45ec7bSml29623 45016f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 45026f45ec7bSml29623 switch (nxgep->mac.portnum) { 45036f45ec7bSml29623 case 0: 45046f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 45056f45ec7bSml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 45066f45ec7bSml29623 my_err = B_TRUE; 45076f45ec7bSml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45086f45ec7bSml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45096f45ec7bSml29623 } 45106f45ec7bSml29623 break; 45116f45ec7bSml29623 case 1: 45126f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 45136f45ec7bSml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 45146f45ec7bSml29623 my_err = B_TRUE; 45156f45ec7bSml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45166f45ec7bSml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45176f45ec7bSml29623 } 45186f45ec7bSml29623 break; 45196f45ec7bSml29623 case 2: 45206f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 45216f45ec7bSml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 45226f45ec7bSml29623 my_err = B_TRUE; 45236f45ec7bSml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45246f45ec7bSml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45256f45ec7bSml29623 } 45266f45ec7bSml29623 break; 45276f45ec7bSml29623 case 3: 45286f45ec7bSml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 45296f45ec7bSml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 45306f45ec7bSml29623 my_err = B_TRUE; 45316f45ec7bSml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45326f45ec7bSml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45336f45ec7bSml29623 } 45346f45ec7bSml29623 break; 45356f45ec7bSml29623 default: 45366f45ec7bSml29623 return (NXGE_ERROR); 45376f45ec7bSml29623 } 45386f45ec7bSml29623 } 45396f45ec7bSml29623 45406f45ec7bSml29623 if (my_err) { 45416f45ec7bSml29623 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 45426f45ec7bSml29623 zcp_err_status); 45436f45ec7bSml29623 if (status != NXGE_OK) 45446f45ec7bSml29623 return (status); 45456f45ec7bSml29623 } 45466f45ec7bSml29623 45476f45ec7bSml29623 return (NXGE_OK); 45486f45ec7bSml29623 } 45496f45ec7bSml29623 45506f45ec7bSml29623 static nxge_status_t 45516f45ec7bSml29623 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 45526f45ec7bSml29623 uint32_t zcp_status) 45536f45ec7bSml29623 { 45546f45ec7bSml29623 boolean_t rxport_fatal = B_FALSE; 45556f45ec7bSml29623 p_nxge_rdc_sys_stats_t statsp; 45566f45ec7bSml29623 nxge_status_t status = NXGE_OK; 45576f45ec7bSml29623 uint8_t portn; 45586f45ec7bSml29623 45596f45ec7bSml29623 portn = nxgep->mac.portnum; 45606f45ec7bSml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 45616f45ec7bSml29623 45626f45ec7bSml29623 if (ipp_status & (0x1 << portn)) { 
45636f45ec7bSml29623 statsp->ipp_eop_err++; 45646f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 45656f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 45666f45ec7bSml29623 rxport_fatal = B_TRUE; 45676f45ec7bSml29623 } 45686f45ec7bSml29623 45696f45ec7bSml29623 if (zcp_status & (0x1 << portn)) { 45706f45ec7bSml29623 statsp->zcp_eop_err++; 45716f45ec7bSml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 45726f45ec7bSml29623 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 45736f45ec7bSml29623 rxport_fatal = B_TRUE; 45746f45ec7bSml29623 } 45756f45ec7bSml29623 45766f45ec7bSml29623 if (rxport_fatal) { 45776f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45786f45ec7bSml29623 " nxge_rxdma_handle_port_error: " 45796f45ec7bSml29623 " fatal error on Port #%d\n", 45806f45ec7bSml29623 portn)); 45816f45ec7bSml29623 status = nxge_rx_port_fatal_err_recover(nxgep); 45826f45ec7bSml29623 if (status == NXGE_OK) { 45836f45ec7bSml29623 FM_SERVICE_RESTORED(nxgep); 45846f45ec7bSml29623 } 45856f45ec7bSml29623 } 45866f45ec7bSml29623 45876f45ec7bSml29623 return (status); 45886f45ec7bSml29623 } 45896f45ec7bSml29623 45906f45ec7bSml29623 static nxge_status_t 45916f45ec7bSml29623 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 45926f45ec7bSml29623 { 45936f45ec7bSml29623 npi_handle_t handle; 45946f45ec7bSml29623 npi_status_t rs = NPI_SUCCESS; 45956f45ec7bSml29623 nxge_status_t status = NXGE_OK; 45966f45ec7bSml29623 p_rx_rbr_ring_t rbrp; 45976f45ec7bSml29623 p_rx_rcr_ring_t rcrp; 45986f45ec7bSml29623 p_rx_mbox_t mboxp; 45996f45ec7bSml29623 rx_dma_ent_msk_t ent_mask; 46006f45ec7bSml29623 p_nxge_dma_common_t dmap; 46016f45ec7bSml29623 uint32_t ref_cnt; 46026f45ec7bSml29623 p_rx_msg_t rx_msg_p; 46036f45ec7bSml29623 int i; 46046f45ec7bSml29623 uint32_t nxge_port_rcr_size; 46056f45ec7bSml29623 46066f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 46076f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46086f45ec7bSml29623 "Recovering from RxDMAChannel#%d error...", channel)); 46096f45ec7bSml29623 46106f45ec7bSml29623 /* 46116f45ec7bSml29623 * Stop the dma channel waits for the stop done. 46126f45ec7bSml29623 * If the stop done bit is not set, then create 46136f45ec7bSml29623 * an error. 
46146f45ec7bSml29623 */ 46156f45ec7bSml29623 46166f45ec7bSml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 46176f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 46186f45ec7bSml29623 46193587e8e2SMichael Speer rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel]; 46203587e8e2SMichael Speer rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel]; 46216f45ec7bSml29623 46226f45ec7bSml29623 MUTEX_ENTER(&rbrp->lock); 46236f45ec7bSml29623 MUTEX_ENTER(&rbrp->post_lock); 46246f45ec7bSml29623 46256f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 46266f45ec7bSml29623 46276f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 46286f45ec7bSml29623 if (rs != NPI_SUCCESS) { 46296f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46306f45ec7bSml29623 "nxge_disable_rxdma_channel:failed")); 46316f45ec7bSml29623 goto fail; 46326f45ec7bSml29623 } 46336f45ec7bSml29623 46346f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 46356f45ec7bSml29623 46366f45ec7bSml29623 /* Disable interrupt */ 46376f45ec7bSml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 46386f45ec7bSml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 46396f45ec7bSml29623 if (rs != NPI_SUCCESS) { 46406f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46416f45ec7bSml29623 "nxge_rxdma_stop_channel: " 46426f45ec7bSml29623 "set rxdma event masks failed (channel %d)", 46436f45ec7bSml29623 channel)); 46446f45ec7bSml29623 } 46456f45ec7bSml29623 46466f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 46476f45ec7bSml29623 46486f45ec7bSml29623 /* Reset RXDMA channel */ 46496f45ec7bSml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 46506f45ec7bSml29623 if (rs != NPI_SUCCESS) { 46516f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46526f45ec7bSml29623 "nxge_rxdma_fatal_err_recover: " 46536f45ec7bSml29623 " reset rxdma failed (channel %d)", channel)); 46546f45ec7bSml29623 goto fail; 46556f45ec7bSml29623 } 46566f45ec7bSml29623 46576f45ec7bSml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 46586f45ec7bSml29623 46593587e8e2SMichael Speer mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 46606f45ec7bSml29623 46616f45ec7bSml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 46626f45ec7bSml29623 rbrp->rbr_rd_index = 0; 46636f45ec7bSml29623 46646f45ec7bSml29623 rcrp->comp_rd_index = 0; 46656f45ec7bSml29623 rcrp->comp_wt_index = 0; 46666f45ec7bSml29623 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 46676f45ec7bSml29623 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 4668adfcba55Sjoycey #if defined(__i386) 466952ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 4670adfcba55Sjoycey (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4671adfcba55Sjoycey #else 467252ccf843Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 46736f45ec7bSml29623 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 4674adfcba55Sjoycey #endif 46756f45ec7bSml29623 46766f45ec7bSml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 46776f45ec7bSml29623 (nxge_port_rcr_size - 1); 46786f45ec7bSml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 46796f45ec7bSml29623 (nxge_port_rcr_size - 1); 46806f45ec7bSml29623 46816f45ec7bSml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 46826f45ec7bSml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 46836f45ec7bSml29623 46846f45ec7bSml29623 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 46856f45ec7bSml29623 
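	/*
	 * Illustrative sketch (not part of the driver): the loop below
	 * decides, per buffer, whether it can be re-armed.  Written out as
	 * the three cases it handles:
	 */
#if 0	/* illustrative sketch only; never compiled */
	if (rx_msg_p->ref_cnt == 1) {
		/* only the ring references it: nothing to do */
	} else if (rx_msg_p->cur_usage_cnt != rx_msg_p->max_usage_cnt) {
		/* still loaned out upstream: report it, leave it alone */
	} else {
		/* fully consumed: safe to mark free and reset its counters */
	}
#endif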
46866f45ec7bSml29623 for (i = 0; i < rbrp->rbr_max_size; i++) { 46876f45ec7bSml29623 rx_msg_p = rbrp->rx_msg_ring[i]; 46886f45ec7bSml29623 ref_cnt = rx_msg_p->ref_cnt; 46896f45ec7bSml29623 if (ref_cnt != 1) { 46906f45ec7bSml29623 if (rx_msg_p->cur_usage_cnt != 46916f45ec7bSml29623 rx_msg_p->max_usage_cnt) { 46926f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46936f45ec7bSml29623 "buf[%d]: cur_usage_cnt = %d " 46946f45ec7bSml29623 "max_usage_cnt = %d\n", i, 46956f45ec7bSml29623 rx_msg_p->cur_usage_cnt, 46966f45ec7bSml29623 rx_msg_p->max_usage_cnt)); 46976f45ec7bSml29623 } else { 46986f45ec7bSml29623 /* Buffer can be re-posted */ 46996f45ec7bSml29623 rx_msg_p->free = B_TRUE; 47006f45ec7bSml29623 rx_msg_p->cur_usage_cnt = 0; 47016f45ec7bSml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 47026f45ec7bSml29623 rx_msg_p->pkt_buf_size = 0; 47036f45ec7bSml29623 } 47046f45ec7bSml29623 } 47056f45ec7bSml29623 } 47066f45ec7bSml29623 47076f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 47086f45ec7bSml29623 47096f45ec7bSml29623 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 47106f45ec7bSml29623 if (status != NXGE_OK) { 47116f45ec7bSml29623 goto fail; 47126f45ec7bSml29623 } 47136f45ec7bSml29623 47146f45ec7bSml29623 MUTEX_EXIT(&rbrp->post_lock); 47156f45ec7bSml29623 MUTEX_EXIT(&rbrp->lock); 47166f45ec7bSml29623 47176f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47186f45ec7bSml29623 "Recovery Successful, RxDMAChannel#%d Restored", 47196f45ec7bSml29623 channel)); 47206f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 47216f45ec7bSml29623 return (NXGE_OK); 4722ef523517SMichael Speer 47236f45ec7bSml29623 fail: 47246f45ec7bSml29623 MUTEX_EXIT(&rbrp->post_lock); 47256f45ec7bSml29623 MUTEX_EXIT(&rbrp->lock); 47266f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 47276f45ec7bSml29623 return (NXGE_ERROR | rs); 47286f45ec7bSml29623 } 47296f45ec7bSml29623 47306f45ec7bSml29623 nxge_status_t 47316f45ec7bSml29623 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 47326f45ec7bSml29623 { 4733678453a8Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 47346f45ec7bSml29623 nxge_status_t status = NXGE_OK; 4735ef523517SMichael Speer p_rx_rcr_ring_t rcrp; 4736678453a8Sspeer int rdc; 47376f45ec7bSml29623 47386f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 47396f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47406f45ec7bSml29623 "Recovering from RxPort error...")); 4741678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 47426f45ec7bSml29623 47436f45ec7bSml29623 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 47446f45ec7bSml29623 goto fail; 47456f45ec7bSml29623 47466f45ec7bSml29623 NXGE_DELAY(1000); 47476f45ec7bSml29623 4748678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels...")); 47496f45ec7bSml29623 4750678453a8Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 4751678453a8Sspeer if ((1 << rdc) & set->owned.map) { 4752ef523517SMichael Speer rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc]; 4753ef523517SMichael Speer if (rcrp != NULL) { 4754ef523517SMichael Speer MUTEX_ENTER(&rcrp->lock); 4755ef523517SMichael Speer if (nxge_rxdma_fatal_err_recover(nxgep, 4756ef523517SMichael Speer rdc) != NXGE_OK) { 47576f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 4758ef523517SMichael Speer "Could not recover " 4759ef523517SMichael Speer "channel %d", rdc)); 4760ef523517SMichael Speer } 4761ef523517SMichael Speer MUTEX_EXIT(&rcrp->lock); 
4762678453a8Sspeer } 47636f45ec7bSml29623 } 47646f45ec7bSml29623 } 47656f45ec7bSml29623 4766678453a8Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 47676f45ec7bSml29623 47686f45ec7bSml29623 /* Reset IPP */ 47696f45ec7bSml29623 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 47706f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47716f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 47726f45ec7bSml29623 "Failed to reset IPP")); 47736f45ec7bSml29623 goto fail; 47746f45ec7bSml29623 } 47756f45ec7bSml29623 47766f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 47776f45ec7bSml29623 47786f45ec7bSml29623 /* Reset RxMAC */ 47796f45ec7bSml29623 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 47806f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47816f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 47826f45ec7bSml29623 "Failed to reset RxMAC")); 47836f45ec7bSml29623 goto fail; 47846f45ec7bSml29623 } 47856f45ec7bSml29623 47866f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 47876f45ec7bSml29623 47886f45ec7bSml29623 /* Re-Initialize IPP */ 47896f45ec7bSml29623 if (nxge_ipp_init(nxgep) != NXGE_OK) { 47906f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 47916f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 47926f45ec7bSml29623 "Failed to init IPP")); 47936f45ec7bSml29623 goto fail; 47946f45ec7bSml29623 } 47956f45ec7bSml29623 47966f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 47976f45ec7bSml29623 47986f45ec7bSml29623 /* Re-Initialize RxMAC */ 47996f45ec7bSml29623 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 48006f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48016f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 48026f45ec7bSml29623 "Failed to reset RxMAC")); 48036f45ec7bSml29623 goto fail; 48046f45ec7bSml29623 } 48056f45ec7bSml29623 48066f45ec7bSml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 48076f45ec7bSml29623 48086f45ec7bSml29623 /* Re-enable RxMAC */ 48096f45ec7bSml29623 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 48106f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48116f45ec7bSml29623 "nxge_rx_port_fatal_err_recover: " 48126f45ec7bSml29623 "Failed to enable RxMAC")); 48136f45ec7bSml29623 goto fail; 48146f45ec7bSml29623 } 48156f45ec7bSml29623 48166f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48176f45ec7bSml29623 "Recovery Successful, RxPort Restored")); 48186f45ec7bSml29623 48196f45ec7bSml29623 return (NXGE_OK); 48206f45ec7bSml29623 fail: 48216f45ec7bSml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 48226f45ec7bSml29623 return (status); 48236f45ec7bSml29623 } 48246f45ec7bSml29623 48256f45ec7bSml29623 void 48266f45ec7bSml29623 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 48276f45ec7bSml29623 { 48286f45ec7bSml29623 rx_dma_ctl_stat_t cs; 48296f45ec7bSml29623 rx_ctl_dat_fifo_stat_t cdfs; 48306f45ec7bSml29623 48316f45ec7bSml29623 switch (err_id) { 48326f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 48336f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 48346f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 48356f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 48366f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 48376f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 48386f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 48396f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 48406f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RCRINCON: 48416f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RCRFULL: 
48426f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RBRFULL: 48436f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 48446f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 48456f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 48466f45ec7bSml29623 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 48476f45ec7bSml29623 chan, &cs.value); 48486f45ec7bSml29623 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 48496f45ec7bSml29623 cs.bits.hdw.rcr_ack_err = 1; 48506f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 48516f45ec7bSml29623 cs.bits.hdw.dc_fifo_err = 1; 48526f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 48536f45ec7bSml29623 cs.bits.hdw.rcr_sha_par = 1; 48546f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 48556f45ec7bSml29623 cs.bits.hdw.rbr_pre_par = 1; 48566f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 48576f45ec7bSml29623 cs.bits.hdw.rbr_tmout = 1; 48586f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 48596f45ec7bSml29623 cs.bits.hdw.rsp_cnt_err = 1; 48606f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 48616f45ec7bSml29623 cs.bits.hdw.byte_en_bus = 1; 48626f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 48636f45ec7bSml29623 cs.bits.hdw.rsp_dat_err = 1; 48646f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 48656f45ec7bSml29623 cs.bits.hdw.config_err = 1; 48666f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 48676f45ec7bSml29623 cs.bits.hdw.rcrincon = 1; 48686f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 48696f45ec7bSml29623 cs.bits.hdw.rcrfull = 1; 48706f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 48716f45ec7bSml29623 cs.bits.hdw.rbrfull = 1; 48726f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 48736f45ec7bSml29623 cs.bits.hdw.rbrlogpage = 1; 48746f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 48756f45ec7bSml29623 cs.bits.hdw.cfiglogpage = 1; 4876adfcba55Sjoycey #if defined(__i386) 4877adfcba55Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 4878adfcba55Sjoycey cs.value); 4879adfcba55Sjoycey #else 48806f45ec7bSml29623 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 48816f45ec7bSml29623 cs.value); 4882adfcba55Sjoycey #endif 48836f45ec7bSml29623 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 48846f45ec7bSml29623 chan, cs.value); 48856f45ec7bSml29623 break; 48866f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 48876f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 48886f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 48896f45ec7bSml29623 cdfs.value = 0; 48906f45ec7bSml29623 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 48916f45ec7bSml29623 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 48926f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 48936f45ec7bSml29623 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 48946f45ec7bSml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 48956f45ec7bSml29623 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 4896adfcba55Sjoycey #if defined(__i386) 4897adfcba55Sjoycey cmn_err(CE_NOTE, 4898adfcba55Sjoycey "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 4899adfcba55Sjoycey cdfs.value); 4900adfcba55Sjoycey #else 49016f45ec7bSml29623 cmn_err(CE_NOTE, 49026f45ec7bSml29623 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 49036f45ec7bSml29623 cdfs.value); 
4904adfcba55Sjoycey #endif 4905678453a8Sspeer NXGE_REG_WR64(nxgep->npi_handle, 4906678453a8Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 49076f45ec7bSml29623 break; 49086f45ec7bSml29623 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 49096f45ec7bSml29623 break; 491053f3d8ecSyc148097 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 49116f45ec7bSml29623 break; 49126f45ec7bSml29623 } 49136f45ec7bSml29623 } 4914678453a8Sspeer 4915678453a8Sspeer static void 4916678453a8Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 4917678453a8Sspeer { 4918678453a8Sspeer rxring_info_t *ring_info; 4919678453a8Sspeer int index; 4920678453a8Sspeer uint32_t chunk_size; 4921678453a8Sspeer uint64_t kaddr; 4922678453a8Sspeer uint_t num_blocks; 4923678453a8Sspeer 4924678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 4925678453a8Sspeer 4926678453a8Sspeer if (rbr_p == NULL) { 4927678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4928678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 4929678453a8Sspeer return; 4930678453a8Sspeer } 4931678453a8Sspeer 4932678453a8Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 4933e759c33aSMichael Speer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4934e759c33aSMichael Speer "<== nxge_rxdma_databuf_free: DDI")); 4935678453a8Sspeer return; 4936678453a8Sspeer } 4937678453a8Sspeer 4938678453a8Sspeer ring_info = rbr_p->ring_info; 4939678453a8Sspeer if (ring_info == NULL) { 4940678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4941678453a8Sspeer "==> nxge_rxdma_databuf_free: NULL ring info")); 4942678453a8Sspeer return; 4943678453a8Sspeer } 4944678453a8Sspeer num_blocks = rbr_p->num_blocks; 4945678453a8Sspeer for (index = 0; index < num_blocks; index++) { 4946678453a8Sspeer kaddr = ring_info->buffer[index].kaddr; 4947678453a8Sspeer chunk_size = ring_info->buffer[index].buf_size; 4948678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4949678453a8Sspeer "==> nxge_rxdma_databuf_free: free chunk %d " 4950678453a8Sspeer "kaddrp $%p chunk size %d", 4951678453a8Sspeer index, kaddr, chunk_size)); 4952678453a8Sspeer if (kaddr == NULL) continue; 4953678453a8Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 4954678453a8Sspeer ring_info->buffer[index].kaddr = NULL; 4955678453a8Sspeer } 4956678453a8Sspeer 4957678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 4958678453a8Sspeer } 4959678453a8Sspeer 4960678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4961678453a8Sspeer extern void contig_mem_free(void *, size_t); 4962678453a8Sspeer #endif 4963678453a8Sspeer 4964678453a8Sspeer void 4965678453a8Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 4966678453a8Sspeer { 4967678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 4968678453a8Sspeer 4969678453a8Sspeer if (kaddr == NULL || !buf_size) { 4970678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4971678453a8Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d", 4972678453a8Sspeer kaddr, buf_size)); 4973678453a8Sspeer return; 4974678453a8Sspeer } 4975678453a8Sspeer 4976678453a8Sspeer switch (alloc_type) { 4977678453a8Sspeer case KMEM_ALLOC: 4978678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4979678453a8Sspeer "==> nxge_free_buf: freeing kmem $%p size %d", 4980678453a8Sspeer kaddr, buf_size)); 4981678453a8Sspeer #if defined(__i386) 4982678453a8Sspeer KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 4983678453a8Sspeer #else 4984678453a8Sspeer KMEM_FREE((void *)kaddr, buf_size); 4985678453a8Sspeer #endif 4986678453a8Sspeer break; 
4987678453a8Sspeer 4988678453a8Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 4989678453a8Sspeer case CONTIG_MEM_ALLOC: 4990678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 4991678453a8Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 4992678453a8Sspeer kaddr, buf_size)); 4993678453a8Sspeer contig_mem_free((void *)kaddr, buf_size); 4994678453a8Sspeer break; 4995678453a8Sspeer #endif 4996678453a8Sspeer 4997678453a8Sspeer default: 4998678453a8Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 4999678453a8Sspeer "<== nxge_free_buf: unsupported alloc type %d", 5000678453a8Sspeer alloc_type)); 5001678453a8Sspeer return; 5002678453a8Sspeer } 5003678453a8Sspeer 5004678453a8Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 5005678453a8Sspeer } 5006
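/*
 * Illustrative sketch (not part of the driver): nxge_rxdma_inject_err()
 * above writes a single error bit into the RXDMA debug status registers so
 * the normal error paths can be exercised.  The sketch shows how a
 * hypothetical test hook might drive it; "nxge_rxdma_test_inject" is an
 * invented name, not a driver symbol.
 */
#if 0	/* illustrative sketch only; never compiled */
static void
nxge_rxdma_test_inject(p_nxge_t nxgep)
{
	/* Fake an RCR-full condition on RDC 0 ... */
	nxge_rxdma_inject_err(nxgep, NXGE_FM_EREPORT_RDMC_RCRFULL, 0);

	/* ... and a port-level IPP EOP error (the channel is ignored here). */
	nxge_rxdma_inject_err(nxgep, NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR, 0);
}
#endif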