/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_txdma.h: transmit DMA (TDC) definitions for the nxge driver.
 *
 * Declares the software state kept per TX DMA channel (descriptor ring,
 * per-buffer message bookkeeping, mailbox, statistics) plus the function
 * prototypes of the TX DMA setup/teardown, reclaim, and error-handling
 * paths implemented in nxge_txdma.c.
 */

#ifndef _SYS_NXGE_NXGE_TXDMA_H
#define _SYS_NXGE_NXGE_TXDMA_H

#pragma ident "%Z%%M% %I% %E% SMI"

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/nxge/nxge_txdma_hw.h>
#include <npi_txdma.h>
#include <sys/nxge/nxge_serialize.h>

/* Bitmap of TX DMA channels owned by this port (from the PT config). */
#define TXDMA_PORT_BITMAP(nxgep)	(nxgep->pt_config.tx_dma_map)

/* Default number of pending descriptors before a reclaim is attempted. */
#define TXDMA_RECLAIM_PENDING_DEFAULT	64
/*
 * Divisor applied to the ring size to derive the "nearly full" mark
 * (presumably ring_size / 2^TX_FULL_MARK — TODO confirm against users).
 */
#define TX_FULL_MARK			3

/*
 * Transmit load balancing definitions.
 */
#define NXGE_TX_LB_TCPUDP	0	/* default policy */
#define NXGE_TX_LB_HASH		1	/* from the hint data */
#define NXGE_TX_LB_DEST_MAC	2	/* Dest. MAC */

/*
 * Descriptor ring empty:
 *	(1) head index is equal to tail index.
 *	(2) wrapped around bits are the same.
 * Descriptor ring full:
 *	(1) head index is equal to tail index.
 *	(2) wrapped around bits are different.
 *
 */
#define TXDMA_RING_EMPTY(head, head_wrap, tail, tail_wrap)	\
	((head == tail && head_wrap == tail_wrap) ?		\
	B_TRUE : B_FALSE)

#define TXDMA_RING_FULL(head, head_wrap, tail, tail_wrap)	\
	((head == tail && head_wrap != tail_wrap) ?		\
	B_TRUE : B_FALSE)

/*
 * Advance a ring index by 'entries' positions, wrapping modulo the
 * (power-of-two) ring size via wrap_mask.
 */
#define TXDMA_DESC_NEXT_INDEX(index, entries, wrap_mask)	\
	((index + entries) & wrap_mask)

/* Default deficit round-robin weight programmed for a TX channel. */
#define TXDMA_DRR_WEIGHT_DEFAULT	0x001f

/*
 * TX dispatch mode: serialize through the nxge_serialize layer or
 * start transmits directly.
 */
typedef enum {
	NXGE_USE_SERIAL	= 0,
	NXGE_USE_START,
} nxge_tx_mode_t;

/*
 * Per-descriptor transmit message: tracks how one mblk was mapped
 * (pre-mapped bcopy buffer, DDI DMA bind, or fast DVMA) so it can be
 * unmapped/freed at reclaim time.
 */
typedef struct _tx_msg_t {
	nxge_os_block_mv_t 	flags;		/* DMA, BCOPY, DVMA (?) */
	nxge_os_dma_common_t	buf_dma;	/* premapped buffer blocks */
	nxge_os_dma_handle_t	buf_dma_handle; /* premapped buffer handle */
	nxge_os_dma_handle_t 	dma_handle;	/* DMA handle for normal send */
	nxge_os_dma_handle_t 	dvma_handle;	/* Fast DVMA  handle */

	p_mblk_t 		tx_message;	/* original mblk (freed on reclaim) */
	uint32_t 		tx_msg_size;	/* total message length in bytes */
	size_t 			bytes_used;	/* bytes consumed in the bcopy buffer */
	int 			head;
	int  			tail;
} tx_msg_t, *p_tx_msg_t;

/*
 * TX Statistics.
 */
typedef struct _nxge_tx_ring_stats_t {
	uint64_t	opackets;	/* packets transmitted */
	uint64_t	obytes;		/* bytes transmitted */
	uint64_t	oerrors;	/* transmit errors */

	uint32_t	tx_inits;	/* ring (re)initializations */
	uint32_t	tx_no_buf;

	/* Hardware-reported channel error counters (see txdma_ring_errlog_t). */
	uint32_t	mbox_err;	/* mailbox errors */
	uint32_t	pkt_size_err;	/* packet size errors */
	uint32_t	tx_ring_oflow;	/* descriptor ring overflow */
	uint32_t	pre_buf_par_err; /* prefetch buffer parity error */
	uint32_t	nack_pref;	/* NACK on prefetch */
	uint32_t	nack_pkt_rd;	/* NACK on packet read */
	uint32_t	conf_part_err;	/* configuration partition error */
	uint32_t	pkt_part_err;	/* packet partition error */

	/* Software path counters. */
	uint32_t	tx_starts;	/* transmit attempts */
	uint32_t	tx_nocanput;	/* upstream canput failures */
	uint32_t	tx_msgdup_fail;	/* dupmsg failures */
	uint32_t	tx_allocb_fail;	/* allocb failures */
	uint32_t	tx_no_desc;	/* out of TX descriptors */
	uint32_t	tx_dma_bind_fail; /* DDI DMA bind failures */
	uint32_t	tx_uflo;	/* transmit underflow */

	uint32_t	tx_hdr_pkts;	/* sent via bcopy into header buffer */
	uint32_t	tx_ddi_pkts;	/* sent via DDI DMA binding */
	uint32_t	tx_dvma_pkts;	/* sent via fast DVMA */

	uint32_t	tx_max_pend;	/* high-water mark of pending descriptors */
	uint32_t	tx_jumbo_pkts;	/* jumbo frames transmitted */

	txdma_ring_errlog_t errlog;	/* last hardware error log snapshot */
} nxge_tx_ring_stats_t, *p_nxge_tx_ring_stats_t;

/* Software state of a TX ring (used for offline/quiesce handling). */
typedef enum {
	TX_RING_STATE_IDLE,
	TX_RING_STATE_BUSY,
	TX_RING_STATE_OFFLINE
} nxge_tx_state_t;

/*
 * Per-channel TX DMA ring: descriptor memory, shadow copies of the
 * channel's hardware registers, ring index bookkeeping, serialization
 * state and statistics.
 */
typedef struct _tx_ring_t {
	nxge_os_dma_common_t	tdc_desc;	/* descriptor ring DMA memory */
	struct _nxge_t		*nxgep;		/* back pointer to instance */
	p_tx_msg_t 		tx_msg_ring;	/* per-descriptor msg array */
	uint32_t 		tnblocks;

	/* Shadow copies of the TDC hardware registers for this channel. */
	tx_rng_cfig_t		tx_ring_cfig;
	tx_ring_hdl_t		tx_ring_hdl;
	tx_ring_kick_t		tx_ring_kick;
	tx_cs_t			tx_cs;
	tx_dma_ent_msk_t	tx_evmask;
	txdma_mbh_t		tx_mbox_mbh;
	txdma_mbl_t		tx_mbox_mbl;
	log_page_vld_t		page_valid;
	log_page_mask_t		page_mask_1;
	log_page_mask_t		page_mask_2;
	log_page_value_t	page_value_1;
	log_page_value_t	page_value_2;
	log_page_relo_t		page_reloc_1;
	log_page_relo_t		page_reloc_2;
	log_page_hdl_t		page_hdl;
	txc_dma_max_burst_t	max_burst;
	boolean_t		cfg_set;	/* channel configuration written */
	nxge_tx_state_t		tx_ring_state;

	nxge_os_mutex_t		lock;		/* protects ring state below */
	uint16_t 		index;
	uint16_t		tdc;		/* TX DMA channel number */
	struct nxge_tdc_cfg	*tdc_p;
	uint_t 			tx_ring_size;	/* number of descriptors */
	uint32_t 		num_chunks;

	/*
	 * Ring index bookkeeping. The wrap booleans carry the extra
	 * "wrapped" bit used by TXDMA_RING_EMPTY/TXDMA_RING_FULL to
	 * distinguish full from empty when the indices are equal.
	 */
	uint_t 			tx_wrap_mask;
	uint_t 			rd_index;
	uint_t 			wr_index;
	boolean_t		wr_index_wrap;
	uint_t 			head_index;
	boolean_t		head_wrap;
	tx_ring_hdl_t		ring_head;
	tx_ring_kick_t		ring_kick_tail;
	txdma_mailbox_t		tx_mbox;

	uint_t 			descs_pending;	/* descriptors awaiting reclaim */
	boolean_t 		queueing;

	nxge_os_mutex_t		sq_lock;	/* protects serializer queue */
	nxge_serialize_t	*serial;	/* TX serializer handle */
	p_mblk_t		head;		/* queued mblk chain head */
	p_mblk_t		tail;		/* queued mblk chain tail */

	uint16_t		ldg_group_id;	/* logical device group */
	p_nxge_tx_ring_stats_t tdc_stats;

	nxge_os_mutex_t 	dvma_lock;	/* protects DVMA ring state */
	uint_t 			dvma_wr_index;
	uint_t 			dvma_rd_index;
	uint_t 			dvma_pending;
	uint_t 			dvma_available;
	uint_t 			dvma_wrap_mask;

	nxge_os_dma_handle_t	*dvma_ring;	/* ring of fast DVMA handles */

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/* Hypervisor logical-page workaround state (NIU on sun4v only). */
	uint64_t		hv_tx_buf_base_ioaddr_pp;
	uint64_t		hv_tx_buf_ioaddr_size;
	uint64_t		hv_tx_cntl_base_ioaddr_pp;
	uint64_t		hv_tx_cntl_ioaddr_size;
	boolean_t		hv_set;
#endif
} tx_ring_t, *p_tx_ring_t;


/* Transmit Mailbox */
typedef struct _tx_mbox_t {
	nxge_os_mutex_t 	lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;		/* back pointer to instance */
	uint16_t		tdc;		/* TX DMA channel number */
	nxge_os_dma_common_t	tx_mbox;	/* mailbox DMA memory */
	txdma_mbl_t		tx_mbox_l;	/* mailbox address low register */
	txdma_mbh_t		tx_mbox_h;	/* mailbox address high register */
} tx_mbox_t, *p_tx_mbox_t;

/* Collection of all TX rings owned by this instance. */
typedef struct _tx_rings_t {
	p_tx_ring_t 		*rings;		/* array of ndmas ring pointers */
	boolean_t		txdesc_allocated;
	uint32_t		ndmas;		/* number of TX DMA channels */
	nxge_os_dma_common_t	tdc_dma;
	nxge_os_dma_common_t	tdc_mbox;
} tx_rings_t, *p_tx_rings_t;


#if defined(_KERNEL) || (defined(COSIM) && !defined(IODIAG))

/* TX buffer rings (kernel / cosim builds only). */
typedef struct _tx_buf_rings_t {
	struct _tx_buf_ring_t 	*txbuf_rings;
	boolean_t		txbuf_allocated;
} tx_buf_rings_t, *p_tx_buf_rings_t;

#endif

/* TX mailbox areas, one per channel. */
typedef struct _tx_mbox_areas_t {
	p_tx_mbox_t 		*txmbox_areas_p;
	boolean_t		txmbox_allocated;
} tx_mbox_areas_t, *p_tx_mbox_areas_t;

/* Per-channel logical page parameters. */
typedef struct _tx_param_t {
	nxge_logical_page_t tx_logical_pages[NXGE_MAX_LOGICAL_PAGES];
} tx_param_t, *p_tx_param_t;

typedef struct _tx_params {
	struct _tx_param_t 	*tx_param_p;
} tx_params_t, *p_tx_params_t;

/*
 * Global register definitions per chip and they are initialized
 * using the function zero control registers.
 */
typedef struct _txdma_globals {
	boolean_t		mode32;	/* 32-bit addressing mode */
} txdma_globals_t, *p_txdma_globals;


#if defined(SOLARIS) && (defined(_KERNEL) || \
	(defined(COSIM) && !defined(IODIAG)))

/*
 * Transmit prototypes.
 */

/* Channel lifecycle: init/uninit all channels or one channel. */
nxge_status_t nxge_init_txdma_channels(p_nxge_t);
void nxge_uninit_txdma_channels(p_nxge_t);

nxge_status_t nxge_init_txdma_channel(p_nxge_t, int);
void nxge_uninit_txdma_channel(p_nxge_t, int);

void nxge_setup_dma_common(p_nxge_dma_common_t, p_nxge_dma_common_t,
	uint32_t, uint32_t);

/* Channel register programming: reset, event mask, control/status, enable. */
nxge_status_t nxge_reset_txdma_channel(p_nxge_t, uint16_t,
	uint64_t);
nxge_status_t nxge_init_txdma_channel_event_mask(p_nxge_t,
	uint16_t, p_tx_dma_ent_msk_t);
nxge_status_t nxge_init_txdma_channel_cntl_stat(p_nxge_t,
	uint16_t, uint64_t);
nxge_status_t nxge_enable_txdma_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);

/* Send path helpers: header reservation, mblk counting, reclaim. */
p_mblk_t nxge_tx_pkt_header_reserve(p_mblk_t, uint8_t *);
int nxge_tx_pkt_nmblocks(p_mblk_t, int *);
boolean_t nxge_txdma_reclaim(p_nxge_t, p_tx_ring_t, int);

void nxge_fill_tx_hdr(p_mblk_t, boolean_t, boolean_t,
	int, uint8_t, p_tx_pkt_hdr_all_t);

/* Start/stop/fixup of the TX DMA hardware state. */
nxge_status_t nxge_txdma_hw_mode(p_nxge_t, boolean_t);
void nxge_hw_start_tx(p_nxge_t);
void nxge_txdma_stop(p_nxge_t);
void nxge_txdma_stop_start(p_nxge_t);
void nxge_fixup_txdma_rings(p_nxge_t);
void nxge_txdma_hw_kick(p_nxge_t);
void nxge_txdma_fix_channel(p_nxge_t, uint16_t);
void nxge_txdma_fixup_channel(p_nxge_t, p_tx_ring_t,
	uint16_t);
void nxge_txdma_hw_kick_channel(p_nxge_t, p_tx_ring_t,
	uint16_t);

/* Debug register dumps. */
void nxge_txdma_regs_dump(p_nxge_t, int);
void nxge_txdma_regs_dump_channels(p_nxge_t);

/* Hang detection, recovery and error injection. */
void nxge_check_tx_hang(p_nxge_t);
void nxge_fixup_hung_txdma_rings(p_nxge_t);

void nxge_reclaim_rings(p_nxge_t);
int nxge_txdma_channel_hung(p_nxge_t,
	p_tx_ring_t tx_ring_p, uint16_t);
int nxge_txdma_hung(p_nxge_t);
int nxge_txdma_stop_inj_err(p_nxge_t, int);
void nxge_txdma_inject_err(p_nxge_t, uint32_t, uint8_t);

/* TX buffer memory pool allocation. */
extern nxge_status_t nxge_alloc_tx_mem_pool(p_nxge_t);
extern nxge_status_t nxge_alloc_txb(p_nxge_t nxgep, int channel);
extern void nxge_free_txb(p_nxge_t nxgep, int channel);

#endif

#ifdef __cplusplus
}
#endif

#endif /* _SYS_NXGE_NXGE_TXDMA_H */