/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_NXGE_NXGE_RXDMA_H
#define	_SYS_NXGE_NXGE_RXDMA_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/nxge/nxge_rxdma_hw.h>
#include <npi_rxdma.h>

#define	RXDMA_CK_DIV_DEFAULT		7500	/* 25 usec */
/*
 * Per the hardware RDC designer: 8 cache lines, from Atlas bringup.
 */
#define	RXDMA_RED_LESS_BYTES		(8 * 64)	/* 8 cache lines */
#define	RXDMA_RED_LESS_ENTRIES		(RXDMA_RED_LESS_BYTES / 8)
#define	RXDMA_RED_WINDOW_DEFAULT	0
#define	RXDMA_RED_THRES_DEFAULT		0

#define	RXDMA_RCR_PTHRES_DEFAULT	0x20
#define	RXDMA_RCR_TO_DEFAULT		0x8

/*
 * Hardware workaround: post (kick) receive buffers in batches
 * of 16 (this was 8 before).
 */
#define	NXGE_RXDMA_POST_BATCH		16

#define	RXBUF_START_ADDR(a, index, bsize)	((a) + ((index) * (bsize)))
#define	RXBUF_OFFSET_FROM_START(a, start)	((start) - (a))
#define	RXBUF_64B_ALIGNED		64

#define	NXGE_RXBUF_EXTRA		34

/*
 * Receive buffer thresholds and buffer types.
 */
#define	NXGE_RX_BCOPY_SCALE	8	/* use 1/8 as the lowest granularity */
typedef enum {
	NXGE_RX_COPY_ALL = 0,	/* do bcopy on every packet */
	NXGE_RX_COPY_1,		/* bcopy when 1/8 of posted buffers are used */
	NXGE_RX_COPY_2,		/* bcopy when 2/8 of posted buffers are used */
	NXGE_RX_COPY_3,		/* bcopy when 3/8 of posted buffers are used */
	NXGE_RX_COPY_4,		/* bcopy when 4/8 of posted buffers are used */
	NXGE_RX_COPY_5,		/* bcopy when 5/8 of posted buffers are used */
	NXGE_RX_COPY_6,		/* bcopy when 6/8 of posted buffers are used */
	NXGE_RX_COPY_7,		/* bcopy when 7/8 of posted buffers are used */
	NXGE_RX_COPY_NONE	/* don't do bcopy at all */
} nxge_rxbuf_threshold_t;

typedef enum {
	NXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0, /* bcopy buffer size 0 (small) */
	NXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1, /* bcopy buffer size 1 (medium) */
	NXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	 /* bcopy buffer size 2 (large) */
} nxge_rxbuf_type_t;
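
/*
 * Illustrative sketch (not a driver API): one way an RX path could turn
 * the 1/8-granularity thresholds above into a per-packet bcopy decision.
 * The helper and its <consumed>/<posted> arguments are hypothetical; in
 * the driver the corresponding counts live in the RBR ring structure
 * (rbr_consumed, rbb_max).
 *
 *	static boolean_t
 *	rx_should_bcopy(uint_t consumed, uint_t posted,
 *	    nxge_rxbuf_threshold_t t)
 *	{
 *		if (t == NXGE_RX_COPY_ALL)
 *			return (B_TRUE);
 *		if (t == NXGE_RX_COPY_NONE)
 *			return (B_FALSE);
 *		// Copy once consumption crosses t/8 of the posted buffers.
 *		return (consumed * NXGE_RX_BCOPY_SCALE >= (uint_t)t * posted);
 *	}
 */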

typedef struct _rdc_errlog {
	rdmc_par_err_log_t	pre_par;
	rdmc_par_err_log_t	sha_par;
	uint8_t			compl_err_type;
} rdc_errlog_t;

/*
 * Receive Statistics.
 */
typedef struct _nxge_rx_ring_stats_t {
	uint64_t	ipackets;
	uint64_t	ibytes;
	uint32_t	ierrors;
	uint32_t	multircv;
	uint32_t	brdcstrcv;
	uint32_t	norcvbuf;

	uint32_t	rx_inits;
	uint32_t	rx_jumbo_pkts;
	uint32_t	rx_multi_pkts;
	uint32_t	rx_mtu_pkts;
	uint32_t	rx_no_buf;

	/*
	 * Receive buffer management statistics.
	 */
	uint32_t	rx_new_pages;
	uint32_t	rx_new_mtu_pgs;
	uint32_t	rx_new_nxt_pgs;
	uint32_t	rx_reused_pgs;
	uint32_t	rx_mtu_drops;
	uint32_t	rx_nxt_drops;

	/*
	 * Error event stats.
	 */
	uint32_t	rx_rbr_tmout;
	uint32_t	pkt_too_long_err;
	uint32_t	l2_err;
	uint32_t	l4_cksum_err;
	uint32_t	fflp_soft_err;
	uint32_t	zcp_soft_err;
	uint32_t	rcr_unknown_err;
	uint32_t	dcf_err;
	uint32_t	rbr_tmout;
	uint32_t	rsp_cnt_err;
	uint32_t	byte_en_err;
	uint32_t	byte_en_bus;
	uint32_t	rsp_dat_err;
	uint32_t	rcr_ack_err;
	uint32_t	dc_fifo_err;
	uint32_t	rcr_sha_par;
	uint32_t	rbr_pre_par;
	uint32_t	port_drop_pkt;
	uint32_t	wred_drop;
	uint32_t	rbr_pre_empty;
	uint32_t	rcr_shadow_full;
	uint32_t	config_err;
	uint32_t	rcrincon;
	uint32_t	rcrfull;
	uint32_t	rbr_empty;
	uint32_t	rbrfull;
	uint32_t	rbrlogpage;
	uint32_t	cfiglogpage;
	uint32_t	rcrto;
	uint32_t	rcrthres;
	uint32_t	mex;
	rdc_errlog_t	errlog;
} nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;

typedef struct _nxge_rdc_sys_stats {
	uint32_t	pre_par;
	uint32_t	sha_par;
	uint32_t	id_mismatch;
	uint32_t	ipp_eop_err;
	uint32_t	zcp_eop_err;
} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;

/*
 * Software reserved buffer offset.
 */
typedef struct _nxge_rxbuf_off_hdr_t {
	uint32_t	index;
} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;

/*
 * Definitions for each receive buffer block.
 */
typedef struct _nxge_rbb_t {
	nxge_os_dma_common_t	dma_buf_info;
	uint8_t			rbr_page_num;
	uint32_t		block_size;
	uint16_t		dma_channel;
	uint32_t		bytes_received;
	uint32_t		ref_cnt;
	uint_t			pkt_buf_size;
	uint_t			max_pkt_bufs;
	uint32_t		cur_usage_cnt;
} nxge_rbb_t, *p_nxge_rbb_t;

typedef struct _rx_tx_param_t {
	nxge_logical_page_t	logical_pages[NXGE_MAX_LOGICAL_PAGES];
} rx_tx_param_t, *p_rx_tx_param_t;

typedef struct _rx_tx_params {
	struct _tx_param_t	*tx_param_p;
} rx_tx_params_t, *p_rx_tx_params_t;

typedef struct _rx_msg_t {
	nxge_os_dma_common_t	buf_dma;
	nxge_os_mutex_t		lock;
	struct _nxge_t		*nxgep;
	struct _rx_rbr_ring_t	*rx_rbr_p;
	boolean_t		spare_in_use;
	boolean_t		free;
	uint32_t		ref_cnt;
#ifdef RXBUFF_USE_SEPARATE_UP_CNTR
	uint32_t		pass_up_cnt;
	boolean_t		release;
#endif
	nxge_os_frtn_t		freeb;
	size_t			bytes_arrived;
	size_t			bytes_expected;
	size_t			block_size;
	uint32_t		block_index;
	uint32_t		pkt_buf_size;
	uint32_t		pkt_buf_size_code;
	uint32_t		max_pkt_bufs;
	uint32_t		cur_usage_cnt;
	uint32_t		max_usage_cnt;
	uchar_t			*buffer;
	uint32_t		pri;
	uint32_t		shifted_addr;
	boolean_t		use_buf_pool;
	p_mblk_t		rx_mblk_p;
	boolean_t		rx_use_bcopy;
} rx_msg_t, *p_rx_msg_t;

typedef struct _rx_dma_handle_t {
	nxge_os_dma_handle_t	dma_handle;	/* DMA handle */
	nxge_os_acc_handle_t	acc_handle;	/* DMA memory handle */
	npi_handle_t		npi_handle;
} rx_dma_handle_t, *p_rx_dma_handle_t;

#define	RXCOMP_HIST_ELEMENTS	100000

typedef struct _nxge_rxcomphist_t {
	uint_t			comp_cnt;
	uint64_t		rx_comp_entry;
} nxge_rxcomphist_t, *p_nxge_rxcomphist_t;
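
/*
 * Illustrative sketch (hypothetical fragment, not the driver's code):
 * the standard DDI pattern implied by the freeb/ref_cnt fields above
 * for loaning an rx_msg_t buffer up the stack without a copy.
 * desballoc(9F) wraps the raw DMA buffer in an mblk whose free routine
 * fires when the stack releases the message; the reference count keeps
 * the buffer alive while it is loaned out.  rx_msg_free and the
 * <offset>/<pkt_len> values are assumptions for illustration.
 *
 *	rx_msg_p->freeb.free_func = (void (*)())rx_msg_free;
 *	rx_msg_p->freeb.free_arg = (caddr_t)rx_msg_p;
 *	mp = desballoc(rx_msg_p->buffer + offset, pkt_len, 0,
 *	    &rx_msg_p->freeb);
 *	if (mp != NULL)
 *		atomic_inc_32(&rx_msg_p->ref_cnt);
 */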

/* Receive Completion Ring */
typedef struct _rx_rcr_ring_t {
	nxge_os_dma_common_t	rcr_desc;
	uint8_t			rcr_page_num;
	uint8_t			rcr_buf_page_num;

	struct _nxge_t		*nxgep;

	p_nxge_rx_ring_stats_t	rdc_stats;

	int			poll_flag;	/* 1 if polling mode */

	rcrcfig_a_t		rcr_cfga;
	rcrcfig_b_t		rcr_cfgb;
	boolean_t		cfg_set;

	nxge_os_mutex_t		lock;
	uint16_t		index;
	uint16_t		rdc;
	uint16_t		rdc_grp_id;
	uint16_t		ldg_group_id;
	boolean_t		full_hdr_flag;	 /* 1: 18-byte header */
	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
	uint32_t		comp_size;	 /* # of RCR entries */
	uint64_t		rcr_addr;
	uint_t			comp_wrap_mask;
	uint_t			comp_rd_index;
	uint_t			comp_wt_index;

	p_rcr_entry_t		rcr_desc_first_p;
	p_rcr_entry_t		rcr_desc_first_pp;
	p_rcr_entry_t		rcr_desc_last_p;
	p_rcr_entry_t		rcr_desc_last_pp;

	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
	p_rcr_entry_t		rcr_desc_rd_head_pp;

	p_rcr_entry_t		rcr_desc_wt_tail_p;	/* hardware write */
	p_rcr_entry_t		rcr_desc_wt_tail_pp;

	uint64_t		rcr_tail_pp;
	uint64_t		rcr_head_pp;
	struct _rx_rbr_ring_t	*rx_rbr_p;
	uint32_t		intr_timeout;
	uint32_t		intr_threshold;
	uint64_t		max_receive_pkts;
	p_mblk_t		rx_first_mp;
	mac_resource_handle_t	rcr_mac_handle;
	uint32_t		rcvd_pkt_bytes;	/* bytes received for one packet */
} rx_rcr_ring_t, *p_rx_rcr_ring_t;
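
/*
 * Illustrative sketch (hypothetical fragment): how the completion-ring
 * cursor fields above are typically advanced while draining the RCR.
 * comp_rd_index walks the ring modulo its size via comp_wrap_mask, and
 * the software read head wraps from the last descriptor back to the
 * first; the real logic lives in the RX packet path, not in this header.
 *
 *	rcrp->comp_rd_index =
 *	    (rcrp->comp_rd_index + 1) & rcrp->comp_wrap_mask;
 *	if (rcrp->rcr_desc_rd_head_p == rcrp->rcr_desc_last_p)
 *		rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p;
 *	else
 *		rcrp->rcr_desc_rd_head_p++;
 */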

/* Buffer index information */
typedef struct _rxbuf_index_info_t {
	uint32_t	buf_index;
	uint32_t	start_index;
	uint32_t	buf_size;
	uint64_t	dvma_addr;
	uint64_t	kaddr;
} rxbuf_index_info_t, *p_rxbuf_index_info_t;

/* Per-ring information: buffer hints and the buffer index table */
typedef struct _rxring_info_t {
	uint32_t		hint[3];
	uint32_t		block_size_mask;
	uint16_t		max_iterations;
	rxbuf_index_info_t	buffer[NXGE_DMA_BLOCK];
} rxring_info_t, *p_rxring_info_t;

typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;

/* Receive Buffer Block Ring */
typedef struct _rx_rbr_ring_t {
	nxge_os_dma_common_t	rbr_desc;
	p_rx_msg_t		*rx_msg_ring;
	p_nxge_dma_common_t	*dma_bufp;
	rbr_cfig_a_t		rbr_cfga;
	rbr_cfig_b_t		rbr_cfgb;
	rbr_kick_t		rbr_kick;
	log_page_vld_t		page_valid;
	log_page_mask_t		page_mask_1;
	log_page_mask_t		page_mask_2;
	log_page_value_t	page_value_1;
	log_page_value_t	page_value_2;
	log_page_relo_t		page_reloc_1;
	log_page_relo_t		page_reloc_2;
	log_page_hdl_t		page_hdl;

	boolean_t		cfg_set;

	nxge_os_mutex_t		lock;
	nxge_os_mutex_t		post_lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;
	uint16_t		rdc;
	uint16_t		rdc_grp_id;
	uint_t			rbr_max_size;
	uint64_t		rbr_addr;
	uint_t			rbr_wrap_mask;
	uint_t			rbb_max;
	uint_t			rbb_added;
	uint_t			block_size;
	uint_t			num_blocks;
	uint_t			tnblocks;
	uint_t			pkt_buf_size0;
	uint_t			pkt_buf_size0_bytes;
	uint_t			npi_pkt_buf_size0;
	uint_t			pkt_buf_size1;
	uint_t			pkt_buf_size1_bytes;
	uint_t			npi_pkt_buf_size1;
	uint_t			pkt_buf_size2;
	uint_t			pkt_buf_size2_bytes;
	uint_t			npi_pkt_buf_size2;

	uint64_t		rbr_head_pp;
	uint64_t		rbr_tail_pp;
	uint32_t		*rbr_desc_vp;

	p_rx_rcr_ring_t		rx_rcr_p;

	rx_dma_ent_msk_t	rx_dma_ent_mask;

	rbr_hdh_t		rbr_head;
	rbr_hdl_t		rbr_tail;
	uint_t			rbr_wr_index;
	uint_t			rbr_rd_index;
	uint_t			rbr_hw_head_index;
	uint64_t		rbr_hw_head_ptr;

	/* may not be needed */
	p_nxge_rbb_t		rbb_p;

	rxring_info_t		*ring_info;
#ifdef RX_USE_RECLAIM_POST
	uint32_t		hw_freed;
	uint32_t		sw_freed;
	uint32_t		msg_rd_index;
	uint32_t		msg_cnt;
#endif
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	uint64_t		hv_rx_buf_base_ioaddr_pp;
	uint64_t		hv_rx_buf_ioaddr_size;
	uint64_t		hv_rx_cntl_base_ioaddr_pp;
	uint64_t		hv_rx_cntl_ioaddr_size;
	boolean_t		hv_set;
#endif
	uint_t			rbr_consumed;
	uint_t			rbr_threshold_hi;
	uint_t			rbr_threshold_lo;
	nxge_rxbuf_type_t	rbr_bufsize_type;
	boolean_t		rbr_use_bcopy;

	/*
	 * <rbr_ref_cnt> counts the receive buffers that have been
	 * loaned up to the kernel.  We will not free this ring until
	 * the reference count reaches zero.
	 */
	uint32_t		rbr_ref_cnt;
	rbr_state_t		rbr_state;	/* POSTING, etc. */
	/*
	 * Receive buffer allocation types:
	 *	ddi_dma_mem_alloc(), contig_mem_alloc(), kmem_alloc()
	 */
	buf_alloc_type_t	rbr_alloc_type;
} rx_rbr_ring_t, *p_rx_rbr_ring_t;
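
/*
 * Illustrative sketch (names hypothetical): the deferred-teardown rule
 * implied by <rbr_ref_cnt> and rbr_state_t above.  If buffers are still
 * loaned out when the ring is unmapped, the last free routine to run,
 * rather than the unmap path, performs the final ring free.
 *
 *	mutex_enter(&rbrp->lock);
 *	if (--rbrp->rbr_ref_cnt == 0 && rbrp->rbr_state == RBR_UNMAPPED) {
 *		mutex_exit(&rbrp->lock);
 *		rbr_ring_destroy(rbrp);	  (hypothetical final-free helper)
 *	} else {
 *		mutex_exit(&rbrp->lock);
 *	}
 */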

/* Receive Mailbox */
typedef struct _rx_mbox_t {
	nxge_os_dma_common_t	rx_mbox;
	rxdma_cfig1_t		rx_cfg1;
	rxdma_cfig2_t		rx_cfg2;
	uint64_t		mbox_addr;
	boolean_t		cfg_set;

	nxge_os_mutex_t		lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;
	uint16_t		rdc;
} rx_mbox_t, *p_rx_mbox_t;

typedef struct _rx_rbr_rings_t {
	p_rx_rbr_ring_t		*rbr_rings;
	uint32_t		ndmas;
	boolean_t		rxbuf_allocated;
} rx_rbr_rings_t, *p_rx_rbr_rings_t;

typedef struct _rx_rcr_rings_t {
	p_rx_rcr_ring_t		*rcr_rings;
	uint32_t		ndmas;
	boolean_t		cntl_buf_allocated;
} rx_rcr_rings_t, *p_rx_rcr_rings_t;

typedef struct _rx_mbox_areas_t {
	p_rx_mbox_t		*rxmbox_areas;
	uint32_t		ndmas;
	boolean_t		mbox_allocated;
} rx_mbox_areas_t, *p_rx_mbox_areas_t;

/*
 * Per-chip global register settings, initialized through the
 * function-zero control registers.
 */
typedef struct _rxdma_globals {
	boolean_t		mode32;
	uint16_t		rxdma_ck_div_cnt;
	uint16_t		rxdma_red_ran_init;
	uint32_t		rxdma_eing_timeout;
} rxdma_globals_t, *p_rxdma_globals;

/*
 * Receive DMA prototypes.
 */
nxge_status_t nxge_init_rxdma_channels(p_nxge_t);
void nxge_uninit_rxdma_channels(p_nxge_t);

nxge_status_t nxge_init_rxdma_channel(p_nxge_t, int);
void nxge_uninit_rxdma_channel(p_nxge_t, int);

nxge_status_t nxge_init_rxdma_channel_rcrflush(p_nxge_t, uint8_t);
nxge_status_t nxge_reset_rxdma_channel(p_nxge_t, uint16_t);
nxge_status_t nxge_init_rxdma_channel_cntl_stat(p_nxge_t,
	uint16_t, p_rx_dma_ctl_stat_t);
nxge_status_t nxge_enable_rxdma_channel(p_nxge_t,
	uint16_t, p_rx_rbr_ring_t, p_rx_rcr_ring_t,
	p_rx_mbox_t);
nxge_status_t nxge_init_rxdma_channel_event_mask(p_nxge_t,
	uint16_t, p_rx_dma_ent_msk_t);

nxge_status_t nxge_rxdma_hw_mode(p_nxge_t, boolean_t);
void nxge_hw_start_rx(p_nxge_t);
void nxge_fixup_rxdma_rings(p_nxge_t);
nxge_status_t nxge_dump_rxdma_channel(p_nxge_t, uint8_t);

void nxge_rxdma_fix_channel(p_nxge_t, uint16_t);
void nxge_rxdma_fixup_channel(p_nxge_t, uint16_t, int);
int nxge_rxdma_get_ring_index(p_nxge_t, uint16_t);

void nxge_rxdma_regs_dump_channels(p_nxge_t);
nxge_status_t nxge_rxdma_handle_sys_errors(p_nxge_t);
void nxge_rxdma_inject_err(p_nxge_t, uint32_t, uint8_t);

extern nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
extern nxge_status_t nxge_alloc_rxb(p_nxge_t nxgep, int channel);
extern void nxge_free_rxb(p_nxge_t nxgep, int channel);

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_NXGE_NXGE_RXDMA_H */