/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_HXGE_HXGE_RXDMA_H
#define	_SYS_HXGE_HXGE_RXDMA_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <hxge_rdc_hw.h>
#include <hpi_rxdma.h>

#define	RXDMA_CK_DIV_DEFAULT		7500	/* 25 usec */
#define	RXDMA_RCR_PTHRES_DEFAULT	0x20
#define	RXDMA_RCR_TO_DEFAULT		0x8
#define	RXDMA_HDR_SIZE_DEFAULT		2
#define	RXDMA_HDR_SIZE_FULL		6	/* entire header of 6B */

/*
 * Receive Completion Ring (RCR)
 */
#define	RCR_PKT_BUF_ADDR_SHIFT		0	/* bit 37:0 */
#define	RCR_PKT_BUF_ADDR_SHIFT_FULL	6	/* full buffer address */
#define	RCR_PKT_BUF_ADDR_MASK		0x0000003FFFFFFFFFULL
#define	RCR_PKTBUFSZ_SHIFT		38	/* bit 39:38 */
#define	RCR_PKTBUFSZ_MASK		0x000000C000000000ULL
#define	RCR_L2_LEN_SHIFT		40	/* bit 53:40 */
#define	RCR_L2_LEN_MASK			0x003fff0000000000ULL
#define	RCR_ERROR_SHIFT			54	/* bit 57:54 */
#define	RCR_ERROR_MASK			0x03C0000000000000ULL
#define	RCR_PKT_TYPE_SHIFT		61	/* bit 62:61 */
#define	RCR_PKT_TYPE_MASK		0x6000000000000000ULL
#define	RCR_MULTI_SHIFT			63	/* bit 63 */
#define	RCR_MULTI_MASK			0x8000000000000000ULL

#define	RCR_PKTBUFSZ_0			0x00
#define	RCR_PKTBUFSZ_1			0x01
#define	RCR_PKTBUFSZ_2			0x02
#define	RCR_SINGLE_BLOCK		0x03

#define	RCR_NO_ERROR			0x0
#define	RCR_CTRL_FIFO_DED		0x1
#define	RCR_DATA_FIFO_DED		0x2
#define	RCR_ERROR_RESERVE		0x4

#define	RCR_PKT_IS_TCP			0x2000000000000000ULL
#define	RCR_PKT_IS_UDP			0x4000000000000000ULL
#define	RCR_PKT_IS_SCTP			0x6000000000000000ULL
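
/*
 * Illustrative sketch added for clarity (editor's example, not part of the
 * original driver sources): extracting fields from a raw 64-bit RCR entry
 * value using the shift/mask definitions above.  The helper names are
 * hypothetical; the entry value itself would be read from a completion
 * ring descriptor.
 */
static inline uint64_t
hxge_rcr_pkt_buf_addr(uint64_t entry)
{
	/* Bits 37:0 carry the (block-aligned) packet buffer address */
	return ((entry & RCR_PKT_BUF_ADDR_MASK) >> RCR_PKT_BUF_ADDR_SHIFT);
}

static inline uint32_t
hxge_rcr_l2_len(uint64_t entry)
{
	/* Bits 53:40 carry the received L2 frame length */
	return ((uint32_t)((entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT));
}

static inline boolean_t
hxge_rcr_is_multi(uint64_t entry)
{
	/* Bit 63 is set when the packet spans multiple buffers */
	return ((entry & RCR_MULTI_MASK) ? B_TRUE : B_FALSE);
}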

#define	RDC_INT_MASK_RBRFULL_SHIFT		34
#define	RDC_INT_MASK_RBRFULL_MASK		0x0000000400000000ULL
#define	RDC_INT_MASK_RBREMPTY_SHIFT		35
#define	RDC_INT_MASK_RBREMPTY_MASK		0x0000000800000000ULL
#define	RDC_INT_MASK_RCRFULL_SHIFT		36
#define	RDC_INT_MASK_RCRFULL_MASK		0x0000001000000000ULL
#define	RDC_INT_MASK_RCRSH_FULL_SHIFT		39
#define	RDC_INT_MASK_RCRSH_FULL_MASK		0x0000008000000000ULL
#define	RDC_INT_MASK_RBR_PRE_EMPTY_SHIFT	40
#define	RDC_INT_MASK_RBR_PRE_EMPTY_MASK		0x0000010000000000ULL
#define	RDC_INT_MASK_RBR_PRE_PAR_SHIFT		43
#define	RDC_INT_MASK_RBR_PRE_PAR_MASK		0x0000080000000000ULL
#define	RDC_INT_MASK_RCR_SHA_PAR_SHIFT		44
#define	RDC_INT_MASK_RCR_SHA_PAR_MASK		0x0000100000000000ULL
#define	RDC_INT_MASK_RCRTO_SHIFT		45
#define	RDC_INT_MASK_RCRTO_MASK			0x0000200000000000ULL
#define	RDC_INT_MASK_THRES_SHIFT		46
#define	RDC_INT_MASK_THRES_MASK			0x0000400000000000ULL
#define	RDC_INT_MASK_PEU_ERR_SHIFT		52
#define	RDC_INT_MASK_PEU_ERR_MASK		0x0010000000000000ULL
#define	RDC_INT_MASK_RBR_CPL_SHIFT		53
#define	RDC_INT_MASK_RBR_CPL_MASK		0x0020000000000000ULL
#define	RDC_INT_MASK_ALL	(RDC_INT_MASK_RBRFULL_MASK |		\
				RDC_INT_MASK_RBREMPTY_MASK |		\
				RDC_INT_MASK_RCRFULL_MASK |		\
				RDC_INT_MASK_RCRSH_FULL_MASK |		\
				RDC_INT_MASK_RBR_PRE_EMPTY_MASK |	\
				RDC_INT_MASK_RBR_PRE_PAR_MASK |		\
				RDC_INT_MASK_RCR_SHA_PAR_MASK |		\
				RDC_INT_MASK_RCRTO_MASK |		\
				RDC_INT_MASK_THRES_MASK |		\
				RDC_INT_MASK_PEU_ERR_MASK |		\
				RDC_INT_MASK_RBR_CPL_MASK)

#define	RDC_STAT_PKTREAD_SHIFT			0	/* WO, bit 15:0 */
#define	RDC_STAT_PKTREAD_MASK			0x000000000000ffffULL
#define	RDC_STAT_PTRREAD_SHIFT			16	/* WO, bit 31:16 */
#define	RDC_STAT_PTRREAD_MASK			0x00000000FFFF0000ULL

#define	RDC_STAT_RBRFULL_SHIFT			34	/* RO, bit 34 */
#define	RDC_STAT_RBRFULL			0x0000000400000000ULL
#define	RDC_STAT_RBRFULL_MASK			0x0000000400000000ULL
#define	RDC_STAT_RBREMPTY_SHIFT			35	/* RW1C, bit 35 */
#define	RDC_STAT_RBREMPTY			0x0000000800000000ULL
#define	RDC_STAT_RBREMPTY_MASK			0x0000000800000000ULL
#define	RDC_STAT_RCR_FULL_SHIFT			36	/* RW1C, bit 36 */
#define	RDC_STAT_RCR_FULL			0x0000001000000000ULL
#define	RDC_STAT_RCR_FULL_MASK			0x0000001000000000ULL

#define	RDC_STAT_RCR_SHDW_FULL_SHIFT		39	/* RW1C, bit 39 */
#define	RDC_STAT_RCR_SHDW_FULL			0x0000008000000000ULL
#define	RDC_STAT_RCR_SHDW_FULL_MASK		0x0000008000000000ULL
#define	RDC_STAT_RBR_PRE_EMPTY_SHIFT		40	/* RO, bit 40 */
#define	RDC_STAT_RBR_PRE_EMPTY			0x0000010000000000ULL
#define	RDC_STAT_RBR_PRE_EMPTY_MASK		0x0000010000000000ULL

#define	RDC_STAT_RBR_PRE_PAR_SHIFT		43	/* RO, bit 43 */
#define	RDC_STAT_RBR_PRE_PAR			0x0000080000000000ULL
#define	RDC_STAT_RBR_PRE_PAR_MASK		0x0000080000000000ULL
#define	RDC_STAT_RCR_SHA_PAR_SHIFT		44	/* RO, bit 44 */
#define	RDC_STAT_RCR_SHA_PAR			0x0000100000000000ULL
#define	RDC_STAT_RCR_SHA_PAR_MASK		0x0000100000000000ULL

#define	RDC_STAT_RCR_TO_SHIFT			45	/* RW1C, bit 45 */
#define	RDC_STAT_RCR_TO				0x0000200000000000ULL
#define	RDC_STAT_RCR_TO_MASK			0x0000200000000000ULL
#define	RDC_STAT_RCR_THRES_SHIFT		46	/* RO, bit 46 */
#define	RDC_STAT_RCR_THRES			0x0000400000000000ULL
#define	RDC_STAT_RCR_THRES_MASK			0x0000400000000000ULL
#define	RDC_STAT_RCR_MEX_SHIFT			47	/* RW, bit 47 */
#define	RDC_STAT_RCR_MEX			0x0000800000000000ULL
#define	RDC_STAT_RCR_MEX_MASK			0x0000800000000000ULL

#define	RDC_STAT_PEU_ERR_SHIFT			52	/* RO, bit 52 */
#define	RDC_STAT_PEU_ERR			0x0010000000000000ULL
#define	RDC_STAT_PEU_ERR_MASK			0x0010000000000000ULL

#define	RDC_STAT_RBR_CPL_SHIFT			53	/* RO, bit 53 */
#define	RDC_STAT_RBR_CPL			0x0020000000000000ULL
#define	RDC_STAT_RBR_CPL_MASK			0x0020000000000000ULL

#define	RDC_STAT_ERROR			RDC_INT_MASK_ALL

/* the following are write 1 to clear bits */
#define	RDC_STAT_WR1C		(RDC_STAT_RBREMPTY |		\
				RDC_STAT_RCR_SHDW_FULL |	\
				RDC_STAT_RBR_PRE_EMPTY |	\
				RDC_STAT_RBR_PRE_PAR |		\
				RDC_STAT_RCR_SHA_PAR |		\
				RDC_STAT_RCR_TO |		\
				RDC_STAT_RCR_THRES |		\
				RDC_STAT_RBR_CPL |		\
				RDC_STAT_PEU_ERR)
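
/*
 * Illustrative sketch (editor's example, not part of the original driver
 * sources): testing a 64-bit RDC control/status value for error conditions
 * and building the write-1-to-clear acknowledgement from the definitions
 * above.  The helper names are hypothetical; "cs" stands for the register
 * contents, however they were read.
 */
static inline int
hxge_rdc_stat_has_error(uint64_t cs)
{
	/* Any bit covered by RDC_STAT_ERROR indicates an error condition */
	return ((cs & RDC_STAT_ERROR) != 0);
}

static inline uint64_t
hxge_rdc_stat_ack_bits(uint64_t cs)
{
	/* Only the RW1C bits may be written back to clear them */
	return (cs & RDC_STAT_WR1C);
}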

typedef union _rcr_entry_t {
	uint64_t value;
	struct {
#if defined(_BIG_ENDIAN)
		uint32_t multi:1;
		uint32_t pkt_type:2;
		uint32_t reserved:3;
		uint32_t error:4;
		uint32_t l2_len:14;
		uint32_t pktbufsz:2;
		uint32_t pkt_buf_addr:6;
		uint32_t pkt_buf_addr_l:32;
#else
		uint32_t pkt_buf_addr_l:32;
		uint32_t pkt_buf_addr:6;
		uint32_t pktbufsz:2;
		uint32_t l2_len:14;
		uint32_t error:4;
		uint32_t reserved:3;
		uint32_t pkt_type:2;
		uint32_t multi:1;
#endif
	} bits;
} rcr_entry_t, *p_rcr_entry_t;

#define	RX_DMA_MAILBOX_BYTE_LENGTH	64

typedef struct _rxdma_mailbox_t {
	rdc_stat_t		rxdma_ctl_stat;		/* 8 bytes */
	rdc_rbr_qlen_t		rbr_stat;		/* 8 bytes */
	rdc_rbr_head_t		rbr_hdh;		/* 8 bytes */
	uint64_t		resv_1;
	rdc_rcr_tail_t		rcrstat_c;		/* 8 bytes */
	uint64_t		resv_2;
	rdc_rcr_qlen_t		rcrstat_a;		/* 8 bytes */
	uint64_t		resv_3;
} rxdma_mailbox_t, *p_rxdma_mailbox_t;

/*
 * Hardware workaround: kick (post) receive buffers in batches of 16
 * (was 8 before).
 */
#define	HXGE_RXDMA_POST_BATCH		16

#define	RXBUF_START_ADDR(a, index, bsize)	((a & (index * bsize)))
#define	RXBUF_OFFSET_FROM_START(a, start)	(start - a)
#define	RXBUF_64B_ALIGNED		64

#define	HXGE_RXBUF_EXTRA		34

/*
 * Receive buffer thresholds and buffer types
 */
#define	HXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */

typedef enum {
	HXGE_RX_COPY_ALL = 0,	/* do bcopy on every packet */
	HXGE_RX_COPY_1,		/* bcopy on 1/8 of buffers posted */
	HXGE_RX_COPY_2,		/* bcopy on 2/8 of buffers posted */
	HXGE_RX_COPY_3,		/* bcopy on 3/8 of buffers posted */
	HXGE_RX_COPY_4,		/* bcopy on 4/8 of buffers posted */
	HXGE_RX_COPY_5,		/* bcopy on 5/8 of buffers posted */
	HXGE_RX_COPY_6,		/* bcopy on 6/8 of buffers posted */
	HXGE_RX_COPY_7,		/* bcopy on 7/8 of buffers posted */
	HXGE_RX_COPY_NONE	/* don't do bcopy at all */
} hxge_rxbuf_threshold_t;

typedef enum {
	HXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0, /* bcopy buffer size 0 (small) */
	HXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1, /* bcopy buffer size 1 (medium) */
	HXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	 /* bcopy buffer size 2 (large) */
} hxge_rxbuf_type_t;
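
/*
 * Illustrative sketch (editor's example, not part of the original driver
 * sources): expressing receive buffer consumption in the 1/8 granularity
 * used by the HXGE_RX_COPY_* levels above.  The helper name and the
 * "consumed"/"total" parameters are hypothetical; the ring structure below
 * keeps rbr_consumed and the rbr_threshold_hi/lo values used for the
 * driver's actual bcopy decision.
 */
static inline hxge_rxbuf_threshold_t
hxge_rxbuf_consumed_level(uint_t consumed, uint_t total)
{
	uint_t level;

	if (total == 0)
		return (HXGE_RX_COPY_NONE);

	/* Scale consumption to 1/HXGE_RX_BCOPY_SCALE (1/8) units */
	level = (consumed * HXGE_RX_BCOPY_SCALE) / total;
	if (level >= HXGE_RX_COPY_NONE)
		level = HXGE_RX_COPY_NONE;

	return ((hxge_rxbuf_threshold_t)level);
}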

typedef struct _rdc_errlog {
	rdc_pref_par_log_t	pre_par;
	rdc_pref_par_log_t	sha_par;
	uint8_t			compl_err_type;
} rdc_errlog_t;

/*
 * Receive Statistics.
 */
typedef struct _hxge_rx_ring_stats_t {
	uint64_t	ipackets;
	uint64_t	ibytes;
	uint32_t	ierrors;
	uint32_t	jumbo_pkts;

	/*
	 * Error event stats.
	 */
	uint32_t	rcr_unknown_err;
	uint32_t	ctrl_fifo_ecc_err;
	uint32_t	data_fifo_ecc_err;
	uint32_t	rbr_tmout;		/* rbr_cpl_to */
	uint32_t	peu_resp_err;		/* peu_resp_err */
	uint32_t	rcr_sha_par;		/* rcr_shadow_par_err */
	uint32_t	rbr_pre_par;		/* rbr_prefetch_par_err */
	uint32_t	rbr_pre_empty;		/* rbr_pre_empty */
	uint32_t	rcr_shadow_full;	/* rcr_shadow_full */
	uint32_t	rcrfull;		/* rcr_full */
	uint32_t	rbr_empty;		/* rbr_empty */
	uint32_t	rbrfull;		/* rbr_full */
	/*
	 * RCR invalids: invalid RCR entries may be encountered while
	 * processing the completion ring; this counter accounts for them.
	 */
	uint32_t	rcr_invalids;		/* rcr invalids */
	uint32_t	rcr_to;			/* rcr_to */
	uint32_t	rcr_thres;		/* rcr_thres */
	rdc_errlog_t	errlog;
} hxge_rx_ring_stats_t, *p_hxge_rx_ring_stats_t;

typedef struct _hxge_rdc_sys_stats {
	uint32_t	ctrl_fifo_sec;
	uint32_t	ctrl_fifo_ded;
	uint32_t	data_fifo_sec;
	uint32_t	data_fifo_ded;
} hxge_rdc_sys_stats_t, *p_hxge_rdc_sys_stats_t;

typedef struct _rx_msg_t {
	hxge_os_dma_common_t	buf_dma;
	hxge_os_mutex_t		lock;
	struct _hxge_t		*hxgep;
	struct _rx_rbr_ring_t	*rx_rbr_p;
	boolean_t		free;
	uint32_t		ref_cnt;
	hxge_os_frtn_t		freeb;
	size_t			block_size;
	uint32_t		block_index;
	uint32_t		pkt_buf_size;
	uint32_t		pkt_buf_size_code;
	uint32_t		cur_usage_cnt;
	uint32_t		max_usage_cnt;
	uchar_t			*buffer;
	uint32_t		pri;
	uint32_t		shifted_addr;
	boolean_t		use_buf_pool;
	p_mblk_t		rx_mblk_p;
	boolean_t		rx_use_bcopy;
} rx_msg_t, *p_rx_msg_t;

/* Receive Completion Ring */
typedef struct _rx_rcr_ring_t {
	hxge_os_dma_common_t	rcr_desc;
	struct _hxge_t		*hxgep;

	p_hxge_rx_ring_stats_t	rdc_stats;	/* pointer to real kstats */

	rdc_rcr_cfg_a_t		rcr_cfga;
	rdc_rcr_cfg_b_t		rcr_cfgb;

	hxge_os_mutex_t		lock;
	uint16_t		index;
	uint16_t		rdc;
	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
	uint32_t		comp_size;	 /* # of RCR entries */
	uint64_t		rcr_addr;
	uint_t			comp_wrap_mask;
	uint_t			comp_rd_index;
	uint_t			comp_wt_index;

	p_rcr_entry_t		rcr_desc_first_p;
	p_rcr_entry_t		rcr_desc_first_pp;
	p_rcr_entry_t		rcr_desc_last_p;
	p_rcr_entry_t		rcr_desc_last_pp;

	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
	p_rcr_entry_t		rcr_desc_rd_head_pp;
	uint64_t		rcr_tail_begin;

	struct _rx_rbr_ring_t	*rx_rbr_p;
	uint32_t		intr_timeout;
	uint32_t		intr_threshold;
	uint64_t		max_receive_pkts;
	mac_resource_handle_t	rcr_mac_handle;
	uint32_t		rcvd_pkt_bytes;	/* received bytes of a packet */
} rx_rcr_ring_t, *p_rx_rcr_ring_t;

/* Buffer index information */
typedef struct _rxbuf_index_info_t {
	uint32_t	buf_index;
	uint32_t	start_index;
	uint32_t	buf_size;
	uint64_t	dvma_addr;
	uint64_t	kaddr;
} rxbuf_index_info_t, *p_rxbuf_index_info_t;

/* Per-ring buffer index information */
typedef struct _rxring_info_t {
	uint32_t	hint[3];
	uint32_t	block_size_mask;
	uint16_t	max_iterations;
	rxbuf_index_info_t buffer[HXGE_DMA_BLOCK];
} rxring_info_t, *p_rxring_info_t;
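
/*
 * Illustrative sketch (editor's example, not part of the original driver
 * sources): advancing the software read state of a completion ring using
 * the index/wrap-mask and descriptor-pointer fields of rx_rcr_ring_t above.
 * The helper name is hypothetical and it assumes rcr_desc_last_p points at
 * the final entry of the ring; the driver's real receive-processing logic
 * lives in the corresponding .c file.
 */
static inline void
hxge_rcr_advance_read(p_rx_rcr_ring_t rcr_p)
{
	/* Wrap the software read index within the ring */
	rcr_p->comp_rd_index = (rcr_p->comp_rd_index + 1) &
	    rcr_p->comp_wrap_mask;

	/* Wrap the descriptor pointer back to the first entry at the end */
	if (++rcr_p->rcr_desc_rd_head_p > rcr_p->rcr_desc_last_p)
		rcr_p->rcr_desc_rd_head_p = rcr_p->rcr_desc_first_p;
}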

typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;

/* Receive Buffer Block Ring */
typedef struct _rx_rbr_ring_t {
	hxge_os_dma_common_t	rbr_desc;
	p_rx_msg_t		*rx_msg_ring;
	p_hxge_dma_common_t	*dma_bufp;
	rdc_rbr_cfg_a_t		rbr_cfga;
	rdc_rbr_cfg_b_t		rbr_cfgb;
	rdc_rbr_kick_t		rbr_kick;
	rdc_page_handle_t	page_hdl;

	hxge_os_mutex_t		lock;
	hxge_os_mutex_t		post_lock;
	uint16_t		index;
	struct _hxge_t		*hxgep;
	uint16_t		rdc;
	uint_t			rbr_max_size;
	uint64_t		rbr_addr;
	uint_t			rbr_wrap_mask;
	uint_t			rbb_max;
	uint_t			block_size;
	uint_t			num_blocks;
	uint_t			tnblocks;
	uint_t			pkt_buf_size0;
	uint_t			pkt_buf_size0_bytes;
	uint_t			hpi_pkt_buf_size0;
	uint_t			pkt_buf_size1;
	uint_t			pkt_buf_size1_bytes;
	uint_t			hpi_pkt_buf_size1;
	uint_t			pkt_buf_size2;
	uint_t			pkt_buf_size2_bytes;
	uint_t			hpi_pkt_buf_size2;

	uint64_t		rbr_head_pp;
	uint64_t		rbr_tail_pp;
	uint32_t		*rbr_desc_vp;

	p_rx_rcr_ring_t		rx_rcr_p;

	rdc_rbr_head_t		rbr_head;
	uint_t			rbr_wr_index;
	uint_t			rbr_rd_index;
	uint_t			rbr_hw_head_index;
	uint64_t		rbr_hw_head_ptr;

	rxring_info_t		*ring_info;
	uint_t			rbr_consumed;
	uint_t			rbr_threshold_hi;
	uint_t			rbr_threshold_lo;
	hxge_rxbuf_type_t	rbr_bufsize_type;
	boolean_t		rbr_use_bcopy;

	/*
	 * rbr_ref_cnt counts the receive buffers that have been loaned to
	 * the kernel.  The ring is not freed until this count reaches zero.
	 */
	uint32_t		rbr_ref_cnt;
	rbr_state_t		rbr_state;	/* POSTING, UNMAPPING, UNMAPPED */

	int			pages_to_post;
	int			pages_to_post_threshold;
	int			pages_to_skip;
} rx_rbr_ring_t, *p_rx_rbr_ring_t;

/* Receive Mailbox */
typedef struct _rx_mbox_t {
	hxge_os_dma_common_t	rx_mbox;
	rdc_rx_cfg1_t		rx_cfg1;
	rdc_rx_cfg2_t		rx_cfg2;
	uint64_t		mbox_addr;
	boolean_t		cfg_set;

	hxge_os_mutex_t		lock;
	uint16_t		index;
	struct _hxge_t		*hxgep;
	uint16_t		rdc;
} rx_mbox_t, *p_rx_mbox_t;

typedef struct _rx_rbr_rings_t {
	p_rx_rbr_ring_t		*rbr_rings;
	uint32_t		ndmas;
	boolean_t		rxbuf_allocated;
} rx_rbr_rings_t, *p_rx_rbr_rings_t;

typedef struct _rx_rcr_rings_t {
	p_rx_rcr_ring_t		*rcr_rings;
	uint32_t		ndmas;
	boolean_t		cntl_buf_allocated;
} rx_rcr_rings_t, *p_rx_rcr_rings_t;

typedef struct _rx_mbox_areas_t {
	p_rx_mbox_t		*rxmbox_areas;
	uint32_t		ndmas;
	boolean_t		mbox_allocated;
} rx_mbox_areas_t, *p_rx_mbox_areas_t;

/*
 * Receive DMA Prototypes.
 */
hxge_status_t hxge_init_rxdma_channels(p_hxge_t hxgep);
void hxge_uninit_rxdma_channels(p_hxge_t hxgep);
hxge_status_t hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep,
	uint16_t channel, rdc_stat_t *cs_p);
hxge_status_t hxge_enable_rxdma_channel(p_hxge_t hxgep,
	uint16_t channel, p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p,
	p_rx_mbox_t mbox_p);
hxge_status_t hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable);
int hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel);
hxge_status_t hxge_rxdma_handle_sys_errors(p_hxge_t hxgep);

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_HXGE_HXGE_RXDMA_H */