/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_HXGE_HXGE_RXDMA_H
#define	_SYS_HXGE_HXGE_RXDMA_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <hxge_rdc_hw.h>
#include <hpi_rxdma.h>

#define	RXDMA_CK_DIV_DEFAULT		25000	/* 84 usec */
#define	RXDMA_RCR_PTHRES_DEFAULT	0x1
#define	RXDMA_RCR_TO_DEFAULT		0x1
#define	RXDMA_HDR_SIZE_DEFAULT		2
#define	RXDMA_HDR_SIZE_FULL		6	/* entire header of 6B */

/*
 * Receive Completion Ring (RCR)
 */
#define	RCR_PKT_BUF_ADDR_SHIFT		0	/* bit 37:0 */
#define	RCR_PKT_BUF_ADDR_SHIFT_FULL	6	/* full buffer address */
#define	RCR_PKT_BUF_ADDR_MASK		0x0000003FFFFFFFFFULL
#define	RCR_PKTBUFSZ_SHIFT		38	/* bit 39:38 */
#define	RCR_PKTBUFSZ_MASK		0x000000C000000000ULL
#define	RCR_L2_LEN_SHIFT		40	/* bit 53:40 */
#define	RCR_L2_LEN_MASK			0x003fff0000000000ULL
#define	RCR_ERROR_SHIFT			54	/* bit 57:54 */
#define	RCR_ERROR_MASK			0x03C0000000000000ULL
#define	RCR_PKT_TYPE_SHIFT		61	/* bit 62:61 */
#define	RCR_PKT_TYPE_MASK		0x6000000000000000ULL
#define	RCR_MULTI_SHIFT			63	/* bit 63 */
#define	RCR_MULTI_MASK			0x8000000000000000ULL

#define	RCR_PKTBUFSZ_0			0x00
#define	RCR_PKTBUFSZ_1			0x01
#define	RCR_PKTBUFSZ_2			0x02
#define	RCR_SINGLE_BLOCK		0x03

#define	RCR_NO_ERROR			0x0
#define	RCR_CTRL_FIFO_DED		0x1
#define	RCR_DATA_FIFO_DED		0x2
#define	RCR_ERROR_RESERVE		0x4

#define	RCR_PKT_IS_TCP			0x2000000000000000ULL
#define	RCR_PKT_IS_UDP			0x4000000000000000ULL
#define	RCR_PKT_IS_SCTP			0x6000000000000000ULL
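/*
 * Illustrative sketch, not taken from the driver sources: decoding a raw
 * 64-bit RCR completion entry "v" with the shift/mask macros above.  The
 * entry carries a 38-bit, 64-byte-aligned buffer address, so the full DMA
 * address appears to be recovered by shifting left by
 * RCR_PKT_BUF_ADDR_SHIFT_FULL (the low 6 bits are implied zero):
 *
 *	uint64_t buf_addr = (v & RCR_PKT_BUF_ADDR_MASK) <<
 *	    RCR_PKT_BUF_ADDR_SHIFT_FULL;
 *	uint32_t bufsz_code = (v & RCR_PKTBUFSZ_MASK) >> RCR_PKTBUFSZ_SHIFT;
 *	uint32_t l2_len = (v & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT;
 *	boolean_t is_multi = ((v & RCR_MULTI_MASK) != 0);
 */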
#define	RDC_INT_MASK_RBRFULL_SHIFT		34
#define	RDC_INT_MASK_RBRFULL_MASK		0x0000000400000000ULL
#define	RDC_INT_MASK_RBREMPTY_SHIFT		35
#define	RDC_INT_MASK_RBREMPTY_MASK		0x0000000800000000ULL
#define	RDC_INT_MASK_RCRFULL_SHIFT		36
#define	RDC_INT_MASK_RCRFULL_MASK		0x0000001000000000ULL
#define	RDC_INT_MASK_RCRSH_FULL_SHIFT		39
#define	RDC_INT_MASK_RCRSH_FULL_MASK		0x0000008000000000ULL
#define	RDC_INT_MASK_RBR_PRE_EMPTY_SHIFT	40
#define	RDC_INT_MASK_RBR_PRE_EMPTY_MASK		0x0000010000000000ULL
#define	RDC_INT_MASK_RBR_PRE_PAR_SHIFT		43
#define	RDC_INT_MASK_RBR_PRE_PAR_MASK		0x0000080000000000ULL
#define	RDC_INT_MASK_RCR_SHA_PAR_SHIFT		44
#define	RDC_INT_MASK_RCR_SHA_PAR_MASK		0x0000100000000000ULL
#define	RDC_INT_MASK_RCRTO_SHIFT		45
#define	RDC_INT_MASK_RCRTO_MASK			0x0000200000000000ULL
#define	RDC_INT_MASK_THRES_SHIFT		46
#define	RDC_INT_MASK_THRES_MASK			0x0000400000000000ULL
#define	RDC_INT_MASK_PEU_ERR_SHIFT		52
#define	RDC_INT_MASK_PEU_ERR_MASK		0x0010000000000000ULL
#define	RDC_INT_MASK_RBR_CPL_SHIFT		53
#define	RDC_INT_MASK_RBR_CPL_MASK		0x0020000000000000ULL
#define	RDC_INT_MASK_ALL	(RDC_INT_MASK_RBRFULL_MASK |	\
				RDC_INT_MASK_RBREMPTY_MASK |	\
				RDC_INT_MASK_RCRFULL_MASK |	\
				RDC_INT_MASK_RCRSH_FULL_MASK |	\
				RDC_INT_MASK_RBR_PRE_EMPTY_MASK | \
				RDC_INT_MASK_RBR_PRE_PAR_MASK |	\
				RDC_INT_MASK_RCR_SHA_PAR_MASK |	\
				RDC_INT_MASK_RCRTO_MASK |	\
				RDC_INT_MASK_THRES_MASK |	\
				RDC_INT_MASK_PEU_ERR_MASK |	\
				RDC_INT_MASK_RBR_CPL_MASK)

#define	RDC_STAT_PKTREAD_SHIFT			0	/* WO, bit 15:0 */
#define	RDC_STAT_PKTREAD_MASK			0x000000000000ffffULL
#define	RDC_STAT_PTRREAD_SHIFT			16	/* WO, bit 31:16 */
#define	RDC_STAT_PTRREAD_MASK			0x00000000FFFF0000ULL

#define	RDC_STAT_RBRFULL_SHIFT			34	/* RO, bit 34 */
#define	RDC_STAT_RBRFULL			0x0000000400000000ULL
#define	RDC_STAT_RBRFULL_MASK			0x0000000400000000ULL
#define	RDC_STAT_RBREMPTY_SHIFT			35	/* RW1C, bit 35 */
#define	RDC_STAT_RBREMPTY			0x0000000800000000ULL
#define	RDC_STAT_RBREMPTY_MASK			0x0000000800000000ULL
#define	RDC_STAT_RCR_FULL_SHIFT			36	/* RW1C, bit 36 */
#define	RDC_STAT_RCR_FULL			0x0000001000000000ULL
#define	RDC_STAT_RCR_FULL_MASK			0x0000001000000000ULL

#define	RDC_STAT_RCR_SHDW_FULL_SHIFT		39	/* RW1C, bit 39 */
#define	RDC_STAT_RCR_SHDW_FULL			0x0000008000000000ULL
#define	RDC_STAT_RCR_SHDW_FULL_MASK		0x0000008000000000ULL
#define	RDC_STAT_RBR_PRE_EMPTY_SHIFT		40	/* RO, bit 40 */
#define	RDC_STAT_RBR_PRE_EMPTY			0x0000010000000000ULL
#define	RDC_STAT_RBR_PRE_EMPTY_MASK		0x0000010000000000ULL

#define	RDC_STAT_RBR_PRE_PAR_SHIFT		43	/* RO, bit 43 */
#define	RDC_STAT_RBR_PRE_PAR			0x0000080000000000ULL
#define	RDC_STAT_RBR_PRE_PAR_MASK		0x0000080000000000ULL
#define	RDC_STAT_RCR_SHA_PAR_SHIFT		44	/* RO, bit 44 */
#define	RDC_STAT_RCR_SHA_PAR			0x0000100000000000ULL
#define	RDC_STAT_RCR_SHA_PAR_MASK		0x0000100000000000ULL

#define	RDC_STAT_RCR_TO_SHIFT			45	/* RW1C, bit 45 */
#define	RDC_STAT_RCR_TO				0x0000200000000000ULL
#define	RDC_STAT_RCR_TO_MASK			0x0000200000000000ULL
#define	RDC_STAT_RCR_THRES_SHIFT		46	/* RO, bit 46 */
#define	RDC_STAT_RCR_THRES			0x0000400000000000ULL
#define	RDC_STAT_RCR_THRES_MASK			0x0000400000000000ULL
#define	RDC_STAT_RCR_MEX_SHIFT			47	/* RW, bit 47 */
#define	RDC_STAT_RCR_MEX			0x0000800000000000ULL
#define	RDC_STAT_RCR_MEX_MASK			0x0000800000000000ULL

#define	RDC_STAT_PEU_ERR_SHIFT			52	/* RO, bit 52 */
#define	RDC_STAT_PEU_ERR			0x0010000000000000ULL
#define	RDC_STAT_PEU_ERR_MASK			0x0010000000000000ULL

#define	RDC_STAT_RBR_CPL_SHIFT			53	/* RO, bit 53 */
#define	RDC_STAT_RBR_CPL			0x0020000000000000ULL
#define	RDC_STAT_RBR_CPL_MASK			0x0020000000000000ULL

#define	RDC_STAT_ERROR			RDC_INT_MASK_ALL

/* The following are write-1-to-clear bits. */
#define	RDC_STAT_WR1C		(RDC_STAT_RBREMPTY |		\
				RDC_STAT_RCR_SHDW_FULL |	\
				RDC_STAT_RBR_PRE_EMPTY |	\
				RDC_STAT_RBR_PRE_PAR |		\
				RDC_STAT_RCR_SHA_PAR |		\
				RDC_STAT_RBR_CPL |		\
				RDC_STAT_PEU_ERR)
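/*
 * Illustrative sketch, not taken from the driver sources: the RW1C bits in
 * the RDC status register are acknowledged by writing 1 back to the set
 * positions, so a handler would typically mask the value it read with
 * RDC_STAT_WR1C before writing it back (for example through the hpi_rxdma
 * interface), leaving the other bits untouched:
 *
 *	rdc_stat_t cs;
 *	cs.value = <value read from RDC_STAT>;
 *	cs.value &= RDC_STAT_WR1C;
 *	<write cs.value back to RDC_STAT to clear the latched events>;
 */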
typedef union _rcr_entry_t {
	uint64_t value;
	struct {
#if defined(_BIG_ENDIAN)
		uint32_t multi:1;
		uint32_t pkt_type:2;
		uint32_t reserved:3;
		uint32_t error:4;
		uint32_t l2_len:14;
		uint32_t pktbufsz:2;
		uint32_t pkt_buf_addr:6;
		uint32_t pkt_buf_addr_l:32;
#else
		uint32_t pkt_buf_addr_l:32;
		uint32_t pkt_buf_addr:6;
		uint32_t pktbufsz:2;
		uint32_t l2_len:14;
		uint32_t error:4;
		uint32_t reserved:3;
		uint32_t pkt_type:2;
		uint32_t multi:1;
#endif
	} bits;
} rcr_entry_t, *p_rcr_entry_t;

#define	RX_DMA_MAILBOX_BYTE_LENGTH	64

typedef struct _rxdma_mailbox_t {
	rdc_stat_t		rxdma_ctl_stat;		/* 8 bytes */
	rdc_rbr_qlen_t		rbr_stat;		/* 8 bytes */
	rdc_rbr_head_t		rbr_hdh;		/* 8 bytes */
	uint64_t		resv_1;
	rdc_rcr_tail_t		rcrstat_c;		/* 8 bytes */
	uint64_t		resv_2;
	rdc_rcr_qlen_t		rcrstat_a;		/* 8 bytes */
	uint64_t		resv_3;
} rxdma_mailbox_t, *p_rxdma_mailbox_t;

/*
 * Hardware workarounds: kick 16 (was 8 before).
 */
#define	HXGE_RXDMA_POST_BATCH		16

#define	RXBUF_START_ADDR(a, index, bsize)	((a) & ((index) * (bsize)))
#define	RXBUF_OFFSET_FROM_START(a, start)	((start) - (a))
#define	RXBUF_64B_ALIGNED		64

#define	HXGE_RXBUF_EXTRA		34

/*
 * Receive buffer thresholds and buffer types
 */
#define	HXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */

typedef enum {
	HXGE_RX_COPY_ALL = 0,	/* do bcopy on every packet */
	HXGE_RX_COPY_1,		/* bcopy on 1/8 of buffers posted */
	HXGE_RX_COPY_2,		/* bcopy on 2/8 of buffers posted */
	HXGE_RX_COPY_3,		/* bcopy on 3/8 of buffers posted */
	HXGE_RX_COPY_4,		/* bcopy on 4/8 of buffers posted */
	HXGE_RX_COPY_5,		/* bcopy on 5/8 of buffers posted */
	HXGE_RX_COPY_6,		/* bcopy on 6/8 of buffers posted */
	HXGE_RX_COPY_7,		/* bcopy on 7/8 of buffers posted */
	HXGE_RX_COPY_NONE	/* don't do bcopy at all */
} hxge_rxbuf_threshold_t;

typedef enum {
	HXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0,  /* bcopy buffer size 0 (small) */
	HXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1,  /* bcopy buffer size 1 (medium) */
	HXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	  /* bcopy buffer size 2 (large) */
} hxge_rxbuf_type_t;
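/*
 * Illustrative sketch, not the driver's actual policy: HXGE_RX_BCOPY_SCALE
 * divides the posted buffers into eighths, so a threshold level (one of the
 * HXGE_RX_COPY_* values above) can be turned into a buffer count and
 * compared against how many buffers are currently outstanding.  The names
 * "level" and "rbr_p" are used here only for illustration:
 *
 *	uint_t threshold = (rbr_p->rbb_max * level) / HXGE_RX_BCOPY_SCALE;
 *	boolean_t use_bcopy = (rbr_p->rbr_consumed > threshold);
 */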
typedef struct _rdc_errlog {
	rdc_pref_par_log_t	pre_par;
	rdc_pref_par_log_t	sha_par;
	uint8_t			compl_err_type;
} rdc_errlog_t;

/*
 * Receive Statistics.
 */
typedef struct _hxge_rx_ring_stats_t {
	uint64_t	ipackets;
	uint64_t	ibytes;
	uint32_t	ierrors;
	uint32_t	jumbo_pkts;

	/*
	 * Error event stats.
	 */
	uint32_t	rcr_unknown_err;
	uint32_t	ctrl_fifo_ecc_err;
	uint32_t	data_fifo_ecc_err;
	uint32_t	rbr_tmout;		/* rbr_cpl_to */
	uint32_t	peu_resp_err;		/* peu_resp_err */
	uint32_t	rcr_sha_par;		/* rcr_shadow_par_err */
	uint32_t	rbr_pre_par;		/* rbr_prefetch_par_err */
	uint32_t	rbr_pre_empty;		/* rbr_pre_empty */
	uint32_t	rcr_shadow_full;	/* rcr_shadow_full */
	uint32_t	rcrfull;		/* rcr_full */
	uint32_t	rbr_empty;		/* rbr_empty */
	uint32_t	rbr_empty_fail;		/* rbr_empty_fail */
	uint32_t	rbr_empty_restore;	/* rbr_empty_restore */
	uint32_t	rbrfull;		/* rbr_full */
	/*
	 * RCR invalids: when processing RCR entries, the driver can run
	 * into invalid entries.  This counter provides a means to account
	 * for them.
	 */
	uint32_t	rcr_invalids;		/* rcr invalids */
	uint32_t	rcr_to;			/* rcr_to */
	uint32_t	rcr_thres;		/* rcr_thres */
	/* Packets dropped in order to prevent rbr_empty condition */
	uint32_t	pkt_drop;
	rdc_errlog_t	errlog;
} hxge_rx_ring_stats_t, *p_hxge_rx_ring_stats_t;

typedef struct _hxge_rdc_sys_stats {
	uint32_t	ctrl_fifo_sec;
	uint32_t	ctrl_fifo_ded;
	uint32_t	data_fifo_sec;
	uint32_t	data_fifo_ded;
} hxge_rdc_sys_stats_t, *p_hxge_rdc_sys_stats_t;

typedef struct _rx_msg_t {
	hxge_os_dma_common_t	buf_dma;
	hxge_os_mutex_t		lock;
	struct _hxge_t		*hxgep;
	struct _rx_rbr_ring_t	*rx_rbr_p;
	boolean_t		free;
	uint32_t		ref_cnt;
	hxge_os_frtn_t		freeb;
	size_t			block_size;
	uint32_t		block_index;
	uint32_t		pkt_buf_size;
	uint32_t		pkt_buf_size_code;
	uint32_t		cur_usage_cnt;
	uint32_t		max_usage_cnt;
	uchar_t			*buffer;
	uint32_t		pri;
	uint32_t		shifted_addr;
	boolean_t		use_buf_pool;
	p_mblk_t		rx_mblk_p;
	boolean_t		rx_use_bcopy;
} rx_msg_t, *p_rx_msg_t;

/* Receive Completion Ring */
typedef struct _rx_rcr_ring_t {
	hxge_os_dma_common_t	rcr_desc;
	struct _hxge_t		*hxgep;
	mac_ring_handle_t	rcr_mac_handle;
	uint64_t		rcr_gen_num;
	boolean_t		poll_flag;
	p_hxge_ldv_t		ldvp;
	p_hxge_ldg_t		ldgp;

	p_hxge_rx_ring_stats_t	rdc_stats;	/* pointer to real kstats */

	rdc_rcr_cfg_a_t		rcr_cfga;
	rdc_rcr_cfg_b_t		rcr_cfgb;

	hxge_os_mutex_t		lock;
	uint16_t		index;
	uint16_t		rdc;
	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
	uint32_t		comp_size;	 /* # of RCR entries */
	uint64_t		rcr_addr;
	uint_t			comp_wrap_mask;
	uint_t			comp_rd_index;
	uint_t			comp_wt_index;

	p_rcr_entry_t		rcr_desc_first_p;
	p_rcr_entry_t		rcr_desc_first_pp;
	p_rcr_entry_t		rcr_desc_last_p;
	p_rcr_entry_t		rcr_desc_last_pp;

	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
	p_rcr_entry_t		rcr_desc_rd_head_pp;
	uint64_t		rcr_tail_begin;

	struct _rx_rbr_ring_t	*rx_rbr_p;
	uint32_t		intr_timeout;
	uint32_t		intr_threshold;
	uint32_t		rcvd_pkt_bytes;	/* Received bytes of a packet */
} rx_rcr_ring_t, *p_rx_rcr_ring_t;
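/*
 * Illustrative sketch, not taken from the driver sources: the completion
 * ring is consumed as a circular buffer, so advancing the software read
 * state roughly amounts to:
 *
 *	rcr_p->comp_rd_index = (rcr_p->comp_rd_index + 1) &
 *	    rcr_p->comp_wrap_mask;
 *	rcr_p->rcr_desc_rd_head_p++;
 *	if (rcr_p->rcr_desc_rd_head_p > rcr_p->rcr_desc_last_p)
 *		rcr_p->rcr_desc_rd_head_p = rcr_p->rcr_desc_first_p;
 */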
/* Buffer index information */
typedef struct _rxbuf_index_info_t {
	uint32_t	buf_index;
	uint32_t	start_index;
	uint32_t	buf_size;
	uint64_t	dvma_addr;
	uint64_t	kaddr;
} rxbuf_index_info_t, *p_rxbuf_index_info_t;

/* Buffer index information */
typedef struct _rxring_info_t {
	uint32_t		hint[3];
	uint32_t		block_size_mask;
	uint16_t		max_iterations;
	rxbuf_index_info_t	buffer[HXGE_DMA_BLOCK];
} rxring_info_t, *p_rxring_info_t;

typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;

/* Receive Buffer Block Ring */
typedef struct _rx_rbr_ring_t {
	hxge_os_dma_common_t	rbr_desc;
	p_rx_msg_t		*rx_msg_ring;
	p_hxge_dma_common_t	*dma_bufp;
	rdc_rbr_cfg_a_t		rbr_cfga;
	rdc_rbr_cfg_b_t		rbr_cfgb;
	rdc_rbr_kick_t		rbr_kick;
	rdc_page_handle_t	page_hdl;

	hxge_os_mutex_t		lock;
	hxge_os_mutex_t		post_lock;
	boolean_t		rbr_is_empty;
	uint32_t		rbr_used;
	uint16_t		index;
	struct _hxge_t		*hxgep;
	uint16_t		rdc;
	uint_t			rbr_max_size;
	uint64_t		rbr_addr;
	uint_t			rbr_wrap_mask;
	uint_t			rbb_max;
	uint_t			block_size;
	uint_t			num_blocks;
	uint_t			tnblocks;
	uint_t			pkt_buf_size0;
	uint_t			pkt_buf_size0_bytes;
	uint_t			hpi_pkt_buf_size0;
	uint_t			pkt_buf_size1;
	uint_t			pkt_buf_size1_bytes;
	uint_t			hpi_pkt_buf_size1;
	uint_t			pkt_buf_size2;
	uint_t			pkt_buf_size2_bytes;
	uint_t			hpi_pkt_buf_size2;

	uint64_t		rbr_head_pp;
	uint64_t		rbr_tail_pp;
	uint32_t		*rbr_desc_vp;

	p_rx_rcr_ring_t		rx_rcr_p;

	rdc_rbr_head_t		rbr_head;
	uint_t			rbr_wr_index;
	uint_t			rbr_rd_index;
	uint_t			rbr_hw_head_index;
	uint64_t		rbr_hw_head_ptr;

	rxring_info_t		*ring_info;
	uint_t			rbr_consumed;
	uint_t			rbr_threshold_hi;
	uint_t			rbr_threshold_lo;
	hxge_rxbuf_type_t	rbr_bufsize_type;
	boolean_t		rbr_use_bcopy;

	/*
	 * <rbr_ref_cnt> is a count of those receive buffers which
	 * have been loaned to the kernel.  We will not free this
	 * ring until the reference count reaches zero (0).
	 */
	uint32_t		rbr_ref_cnt;
	rbr_state_t		rbr_state;	/* POSTING, etc */
} rx_rbr_ring_t, *p_rx_rbr_ring_t;

/* Receive Mailbox */
typedef struct _rx_mbox_t {
	hxge_os_dma_common_t	rx_mbox;
	rdc_rx_cfg1_t		rx_cfg1;
	rdc_rx_cfg2_t		rx_cfg2;
	uint64_t		mbox_addr;
	boolean_t		cfg_set;

	hxge_os_mutex_t		lock;
	uint16_t		index;
	struct _hxge_t		*hxgep;
	uint16_t		rdc;
} rx_mbox_t, *p_rx_mbox_t;

typedef struct _rx_rbr_rings_t {
	p_rx_rbr_ring_t		*rbr_rings;
	uint32_t		ndmas;
	boolean_t		rxbuf_allocated;
} rx_rbr_rings_t, *p_rx_rbr_rings_t;

typedef struct _rx_rcr_rings_t {
	p_rx_rcr_ring_t		*rcr_rings;
	uint32_t		ndmas;
	boolean_t		cntl_buf_allocated;
} rx_rcr_rings_t, *p_rx_rcr_rings_t;

typedef struct _rx_mbox_areas_t {
	p_rx_mbox_t		*rxmbox_areas;
	uint32_t		ndmas;
	boolean_t		mbox_allocated;
} rx_mbox_areas_t, *p_rx_mbox_areas_t;

/*
 * Receive DMA Prototypes.
 */
hxge_status_t hxge_init_rxdma_channels(p_hxge_t hxgep);
void hxge_uninit_rxdma_channels(p_hxge_t hxgep);
hxge_status_t hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep,
	uint16_t channel, rdc_stat_t *cs_p);
hxge_status_t hxge_enable_rxdma_channel(p_hxge_t hxgep,
	uint16_t channel, p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p,
	p_rx_mbox_t mbox_p, int n_init_kick);
hxge_status_t hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable);
int hxge_rxdma_get_ring_index(p_hxge_t hxgep, uint16_t channel);
hxge_status_t hxge_rxdma_handle_sys_errors(p_hxge_t hxgep);

extern int hxge_enable_poll(void *arg);
extern int hxge_disable_poll(void *arg);
extern mblk_t *hxge_rx_poll(void *arg, int bytes_to_read);

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_HXGE_HXGE_RXDMA_H */