/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#ifndef	_SYS_IB_IBTL_IBTL_TYPES_H
#define	_SYS_IB_IBTL_IBTL_TYPES_H

/*
 * ibtl_types.h
 *
 * All common IBTL defined types. These are common data types
 * that are shared by the IBTI and IBCI interfaces; this file is only
 * included by ibti.h and ibci.h.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ib/ib_types.h>
#include <sys/ib/ibtl/ibtl_status.h>
#include <sys/socket.h>
#include <sys/byteorder.h>


#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Endian Macros
 *    h2b - host endian to big endian protocol
 *    b2h - big endian protocol to host endian
 *    h2l - host endian to little endian protocol
 *    l2h - little endian protocol to host endian
 */
#if defined(_LITTLE_ENDIAN)
#define	h2b16(x)	(htons(x))
#define	h2b32(x)	(htonl(x))
#define	h2b64(x)	(htonll(x))
#define	b2h16(x)	(ntohs(x))
#define	b2h32(x)	(ntohl(x))
#define	b2h64(x)	(ntohll(x))

#define	h2l16(x)	(x)
#define	h2l32(x)	(x)
#define	h2l64(x)	(x)
#define	l2h16(x)	(x)
#define	l2h32(x)	(x)
#define	l2h64(x)	(x)

#elif defined(_BIG_ENDIAN)
#define	h2b16(x)	(x)
#define	h2b32(x)	(x)
#define	h2b64(x)	(x)
#define	b2h16(x)	(x)
#define	b2h32(x)	(x)
#define	b2h64(x)	(x)

#define	h2l16(x)	(ddi_swap16(x))
#define	h2l32(x)	(ddi_swap32(x))
#define	h2l64(x)	(ddi_swap64(x))
#define	l2h16(x)	(ddi_swap16(x))
#define	l2h32(x)	(ddi_swap32(x))
#define	l2h64(x)	(ddi_swap64(x))

#else
#error	"what endian is this machine?"
#endif
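
/*
 * Illustrative sketch (not part of the original header): converting a
 * host-order value to the big-endian ("network") byte order used on the
 * wire and back again with the macros above.  The variable names are
 * hypothetical, and the fragment is written as it would appear inside a
 * caller's function; on a big-endian host these macros are no-ops.
 */
#if 0
	uint32_t	host_qkey = 0x1234ABCD;
	uint32_t	wire_qkey;

	wire_qkey = h2b32(host_qkey);		/* host to big endian */
	ASSERT(b2h32(wire_qkey) == host_qkey);	/* round trip is identity */
#endif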

/*
 * Define Internal IBTL handles
 */
typedef	struct	ibtl_clnt_s	*ibt_clnt_hdl_t;	/* ibt_attach() */
typedef	struct	ibtl_hca_s	*ibt_hca_hdl_t;		/* ibt_open_hca() */
typedef	struct	ibtl_channel_s	*ibt_channel_hdl_t;	/* alloc_rc|ud_channel() */
typedef	struct	ibtl_srq_s	*ibt_srq_hdl_t;		/* ibt_alloc_srq() */
typedef	struct	ibtl_cq_s	*ibt_cq_hdl_t;		/* ibt_alloc_cq() */
typedef	struct	ibcm_svc_info_s	*ibt_srv_hdl_t;		/* ibt_register_service() */
typedef	struct	ibcm_svc_bind_s	*ibt_sbind_hdl_t;	/* ibt_bind_service() */

typedef	struct	ibc_fmr_pool_s	*ibt_fmr_pool_hdl_t;	/* ibt_create_fmr_pool() */
typedef	struct	ibc_ma_s	*ibt_ma_hdl_t;		/* ibt_map_mem_area() */
typedef	struct	ibc_pd_s	*ibt_pd_hdl_t;		/* ibt_alloc_pd() */
typedef	struct	ibc_sched_s	*ibt_sched_hdl_t;	/* ibt_alloc_cq_sched() */
typedef	struct	ibc_mr_s	*ibt_mr_hdl_t;		/* ibt_register_mr() */
typedef	struct	ibc_mw_s	*ibt_mw_hdl_t;		/* ibt_alloc_mw() */
typedef	struct	ibt_ud_dest_s	*ibt_ud_dest_hdl_t;	/* UD dest handle */
typedef	struct	ibc_ah_s	*ibt_ah_hdl_t;		/* ibt_alloc_ah() */
typedef	struct	ibtl_eec_s	*ibt_eec_hdl_t;
typedef	struct	ibt_rd_dest_s	*ibt_rd_dest_hdl_t;	/* Reserved for */
							/* Future use */
typedef	struct	ibc_mem_alloc_s	*ibt_mem_alloc_hdl_t;	/* ibt_alloc_io_mem() */
typedef	struct	ibc_mi_s	*ibt_mi_hdl_t;		/* ibt_map_mem_iov() */

/*
 * Some General Types.
 */
typedef	uint32_t	ibt_lkey_t;		/* L_Key */
typedef	uint32_t	ibt_rkey_t;		/* R_Key */
typedef	uint64_t	ibt_wrid_t;		/* Client assigned WR ID */
typedef	uint32_t	ibt_immed_t;		/* WR Immediate Data */
typedef	uint64_t	ibt_atom_arg_t;		/* WR Atomic Operation arg */
typedef	uint_t		ibt_cq_handler_id_t;	/* Event handler ID */

/*
 * IBT selector type, used when looking up/requesting either an
 * MTU, Pkt lifetime, or Static rate.
 * The interpretation of IBT_BEST depends on the attribute being selected.
 */
typedef enum ibt_selector_e {
	IBT_GT		= 0,	/* Greater than */
	IBT_LT		= 1,	/* Less than */
	IBT_EQU		= 2,	/* Equal to */
	IBT_BEST	= 3	/* Best */
} ibt_selector_t;


/*
 * Static rate definitions.
 */
typedef enum ibt_srate_e {
	IBT_SRATE_NOT_SPECIFIED	= 0,
	IBT_SRATE_2	= 2,	/* 1X SDR, i.e. 2.5 Gbps */
	IBT_SRATE_10	= 3,	/* 4X SDR or 1X QDR, i.e. 10 Gbps */
	IBT_SRATE_30	= 4,	/* 12X SDR, i.e. 30 Gbps */

	IBT_SRATE_5	= 5,	/* 1X DDR, i.e. 5 Gbps */
	IBT_SRATE_20	= 6,	/* 4X DDR or 8X SDR, i.e. 20 Gbps */
	IBT_SRATE_40	= 7,	/* 8X DDR or 4X QDR, i.e. 40 Gbps */

	IBT_SRATE_60	= 8,	/* 12X DDR, i.e. 60 Gbps */
	IBT_SRATE_80	= 9,	/* 8X QDR, i.e. 80 Gbps */
	IBT_SRATE_120	= 10	/* 12X QDR, i.e. 120 Gbps */
} ibt_srate_t;

/* Retain the old definitions to be compatible with older bits. */
#define	IBT_SRATE_1X	IBT_SRATE_2
#define	IBT_SRATE_4X	IBT_SRATE_10
#define	IBT_SRATE_12X	IBT_SRATE_30

/*
 * Static rate request type.
 */
typedef struct ibt_srate_req_s {
	ibt_srate_t	r_srate;	/* Requested srate */
	ibt_selector_t	r_selector;	/* Qualifier for r_srate */
} ibt_srate_req_t;

/*
 * Packet Life Time Request Type.
 */
typedef struct ibt_pkt_lt_req_s {
	clock_t		p_pkt_lt;	/* Requested Packet Life Time */
	ibt_selector_t	p_selector;	/* Qualifier for p_pkt_lt */
} ibt_pkt_lt_req_t;
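
/*
 * Illustrative sketch (not part of the original header): requesting a path
 * whose static rate is greater than 10 Gbps.  It assumes a path-lookup
 * interface that accepts an ibt_srate_req_t; the variable name is
 * hypothetical and the fragment is compiled out.
 */
#if 0
	ibt_srate_req_t	rate_req;

	rate_req.r_srate = IBT_SRATE_10;	/* 4X SDR / 1X QDR */
	rate_req.r_selector = IBT_GT;		/* greater than this rate */
#endif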

/*
 * Queue size struct.
 */
typedef struct ibt_queue_sizes_s {
	uint_t	qs_sq;		/* SendQ size. */
	uint_t	qs_rq;		/* RecvQ size. */
} ibt_queue_sizes_t;

/*
 * Channel sizes struct, used by functions that allocate/query RC or UD
 * channels.
 */
typedef struct ibt_chan_sizes_s {
	uint_t	cs_sq;		/* SendQ size. */
	uint_t	cs_rq;		/* ReceiveQ size. */
	uint_t	cs_sq_sgl;	/* Max SGL elements in a SQ WR. */
	uint_t	cs_rq_sgl;	/* Max SGL elements in a RQ Wr. */
	uint_t	cs_inline;	/* max inline payload size */
} ibt_chan_sizes_t;

/*
 * Shared Queue size struct.
 */
typedef struct ibt_srq_sizes_s {
	uint_t	srq_wr_sz;
	uint_t	srq_sgl_sz;
} ibt_srq_sizes_t;

/*
 * SRQ Modify Flags
 */
typedef enum ibt_srq_modify_flags_e {
	IBT_SRQ_SET_NOTHING	= 0,
	IBT_SRQ_SET_SIZE	= (1 << 1),
	IBT_SRQ_SET_LIMIT	= (1 << 2)
} ibt_srq_modify_flags_t;


/*
 * Execution flags, indicates if the function should block or not.
 * Note: in some cases, e.g., a NULL rc_cm_handler, IBT_NONBLOCKING
 * will not have an effect, and the thread will block.
 * IBT_NOCALLBACKS is valid for ibt_close_rc_channel only.
 */
typedef enum ibt_execution_mode_e {
	IBT_BLOCKING	= 0,	/* Block */
	IBT_NONBLOCKING	= 1,	/* Return as soon as possible */
	IBT_NOCALLBACKS	= 2	/* cm_handler is not invoked after */
				/* ibt_close_rc_channel returns */
} ibt_execution_mode_t;

/*
 * Memory window alloc flags
 */
typedef enum ibt_mw_flags_e {
	IBT_MW_SLEEP		= 0,		/* Can block */
	IBT_MW_NOSLEEP		= (1 << 0),	/* Can't block */
	IBT_MW_USER_MAP		= (1 << 1),
	IBT_MW_DEFER_ALLOC	= (1 << 2),
	IBT_MW_TYPE_1		= (1 << 3),
	IBT_MW_TYPE_2		= (1 << 4)
} ibt_mw_flags_t;

/*
 * PD alloc flags
 */
typedef enum ibt_pd_flags_e {
	IBT_PD_NO_FLAGS		= 0,
	IBT_PD_USER_MAP		= (1 << 0),
	IBT_PD_DEFER_ALLOC	= (1 << 1)
} ibt_pd_flags_t;

/*
 * UD Dest alloc flags
 */
typedef enum ibt_ud_dest_flags_e {
	IBT_UD_DEST_NO_FLAGS	= 0,
	IBT_UD_DEST_USER_MAP	= (1 << 0),
	IBT_UD_DEST_DEFER_ALLOC	= (1 << 1)
} ibt_ud_dest_flags_t;

/*
 * SRQ alloc flags
 */
typedef enum ibt_srq_flags_e {
	IBT_SRQ_NO_FLAGS	= 0,
	IBT_SRQ_USER_MAP	= (1 << 0),
	IBT_SRQ_DEFER_ALLOC	= (1 << 1)
} ibt_srq_flags_t;

/*
 * ibt_alloc_lkey() alloc flags
 */
typedef enum ibt_lkey_flags_e {
	IBT_KEY_SLEEP		= 0,
	IBT_KEY_NOSLEEP		= (1 << 0),
	IBT_KEY_REMOTE		= (1 << 1)
} ibt_lkey_flags_t;

/*
 * RNR NAK retry counts.
 */
typedef enum ibt_rnr_retry_cnt_e {
	IBT_RNR_NO_RETRY	= 0x0,	/* Don't retry, fail on first timeout */
	IBT_RNR_RETRY_1		= 0x1,	/* Retry once */
	IBT_RNR_RETRY_2		= 0x2,	/* Retry twice */
	IBT_RNR_RETRY_3		= 0x3,	/* Retry three times */
	IBT_RNR_RETRY_4		= 0x4,	/* Retry four times */
	IBT_RNR_RETRY_5		= 0x5,	/* Retry five times */
	IBT_RNR_RETRY_6		= 0x6,	/* Retry six times */
	IBT_RNR_INFINITE_RETRY	= 0x7	/* Retry forever */
} ibt_rnr_retry_cnt_t;

/*
 * Valid values for RNR NAK timer fields, part of a channel's context.
 */
typedef enum ibt_rnr_nak_time_e {
	IBT_RNR_NAK_655ms	= 0x0,
	IBT_RNR_NAK_10us	= 0x1,
	IBT_RNR_NAK_20us	= 0x2,
	IBT_RNR_NAK_30us	= 0x3,
	IBT_RNR_NAK_40us	= 0x4,
	IBT_RNR_NAK_60us	= 0x5,
	IBT_RNR_NAK_80us	= 0x6,
	IBT_RNR_NAK_120us	= 0x7,
	IBT_RNR_NAK_160us	= 0x8,
	IBT_RNR_NAK_240us	= 0x9,
	IBT_RNR_NAK_320us	= 0xA,
	IBT_RNR_NAK_480us	= 0xB,
	IBT_RNR_NAK_640us	= 0xC,
	IBT_RNR_NAK_960us	= 0xD,
	IBT_RNR_NAK_1280us	= 0xE,
	IBT_RNR_NAK_1920us	= 0xF,
	IBT_RNR_NAK_2560us	= 0x10,
	IBT_RNR_NAK_3840us	= 0x11,
	IBT_RNR_NAK_5120us	= 0x12,
	IBT_RNR_NAK_7680us	= 0x13,
	IBT_RNR_NAK_10ms	= 0x14,
	IBT_RNR_NAK_15ms	= 0x15,
	IBT_RNR_NAK_20ms	= 0x16,
	IBT_RNR_NAK_31ms	= 0x17,
	IBT_RNR_NAK_41ms	= 0x18,
	IBT_RNR_NAK_61ms	= 0x19,
	IBT_RNR_NAK_82ms	= 0x1A,
	IBT_RNR_NAK_123ms	= 0x1B,
	IBT_RNR_NAK_164ms	= 0x1C,
	IBT_RNR_NAK_246ms	= 0x1D,
	IBT_RNR_NAK_328ms	= 0x1E,
	IBT_RNR_NAK_492ms	= 0x1F
} ibt_rnr_nak_time_t;

/*
 * The definition of HCA capabilities etc as a bitfield.
 */
typedef enum ibt_hca_flags_e {
	IBT_HCA_NO_FLAGS	= 0,

	IBT_HCA_RD		= 1 << 0,
	IBT_HCA_UD_MULTICAST	= 1 << 1,
	IBT_HCA_RAW_MULTICAST	= 1 << 2,

	IBT_HCA_ATOMICS_HCA	= 1 << 3,
	IBT_HCA_ATOMICS_GLOBAL	= 1 << 4,

	IBT_HCA_RESIZE_CHAN	= 1 << 5,	/* Is resize supported? */
	IBT_HCA_AUTO_PATH_MIG	= 1 << 6,	/* Is APM supported? */
	IBT_HCA_SQD_SQD_PORT	= 1 << 7,	/* Can change physical port */
						/* on transit from SQD to SQD */
	IBT_HCA_PKEY_CNTR	= 1 << 8,
	IBT_HCA_QKEY_CNTR	= 1 << 9,
	IBT_HCA_AH_PORT_CHECK	= 1 << 10,	/* HCA checks AH port match */
						/* in UD WRs */
	IBT_HCA_PORT_UP		= 1 << 11,	/* PortActive event supported */
	IBT_HCA_INIT_TYPE	= 1 << 12,	/* InitType supported */
	IBT_HCA_SI_GUID		= 1 << 13,	/* System Image GUID */
						/* supported */
	IBT_HCA_SHUTDOWN_PORT	= 1 << 14,	/* ShutdownPort supported */
	IBT_HCA_RNR_NAK		= 1 << 15,	/* RNR-NAK supported for RC */
	IBT_HCA_CURRENT_QP_STATE = 1 << 16,	/* Does modify_qp support */
						/* checking of current state? */
	IBT_HCA_SRQ		= 1 << 17,	/* Shared Receive Queue (RC) */
	IBT_HCA_RC_SRQ		= IBT_HCA_SRQ,
	IBT_HCA_RESIZE_SRQ	= 1 << 18,	/* Is resize SRQ supported? */
	IBT_HCA_UD_SRQ		= 1 << 19,	/* UD with SRQ */

	IBT_HCA_MULT_PAGE_SZ_MR	= 1 << 20,	/* Support of multiple page */
						/* sizes per memory region? */
	IBT_HCA_BLOCK_LIST	= 1 << 21,	/* Block list physical buffer */
						/* lists supported? */
	IBT_HCA_ZERO_BASED_VA	= 1 << 22,	/* Zero Based Virtual */
						/* Addresses supported? */
	IBT_HCA_LOCAL_INVAL_FENCE = 1 << 23,	/* Local invalidate fencing? */
	IBT_HCA_BASE_QUEUE_MGT	= 1 << 24,	/* Base Queue Mgt supported? */
	IBT_HCA_CKSUM_FULL	= 1 << 25,	/* Checksum offload supported */
	IBT_HCA_MEM_WIN_TYPE_2B	= 1 << 26,	/* Type 2B memory windows */
	IBT_HCA_PHYS_BUF_BLOCK	= 1 << 27,	/* Block mode phys buf lists */
	IBT_HCA_FMR		= 1 << 28,	/* FMR Support */
	IBT_HCA_WQE_SIZE_INFO	= 1 << 29,	/* detailed WQE size info */
	IBT_HCA_SQD_STATE	= 1 << 30	/* SQD QP state */
} ibt_hca_flags_t;

typedef enum ibt_hca_flags2_e {
	IBT_HCA2_NO_FLAGS	= 0,

	IBT_HCA2_UC		= 1 << 1,	/* Unreliable Connected */
	IBT_HCA2_UC_SRQ		= 1 << 2,	/* UC with SRQ */
	IBT_HCA2_RES_LKEY	= 1 << 3,	/* Reserved L_Key */
	IBT_HCA2_PORT_CHANGE	= 1 << 4,	/* Port Change event */
	IBT_HCA2_IP_CLASS	= 1 << 5,	/* IP Classification flags */
	IBT_HCA2_RSS_TPL_ALG	= 1 << 6,	/* RSS: Toeplitz algorithm */
	IBT_HCA2_RSS_XOR_ALG	= 1 << 7,	/* RSS: XOR algorithm */
	IBT_HCA2_XRC		= 1 << 8,	/* Extended RC (XRC) */
	IBT_HCA2_XRC_SRQ_RESIZE	= 1 << 9,	/* resize XRC SRQ */
	IBT_HCA2_MEM_MGT_EXT	= 1 << 10,	/* FMR-WR, send-inv, local-inv */
	IBT_HCA2_DMA_MR		= 1 << 11,	/* DMA MR */
	IBT_HCA2_FC		= 1 << 12	/* FCoIB or FCoE offload */
} ibt_hca_flags2_t;

/*
 * The definition of HCA page size capabilities as a bitfield
 */
typedef enum ibt_page_sizes_e {
	IBT_PAGE_4K		= 0x1 << 2,
	IBT_PAGE_8K		= 0x1 << 3,
	IBT_PAGE_16K		= 0x1 << 4,
	IBT_PAGE_32K		= 0x1 << 5,
	IBT_PAGE_64K		= 0x1 << 6,
	IBT_PAGE_128K		= 0x1 << 7,
	IBT_PAGE_256K		= 0x1 << 8,
	IBT_PAGE_512K		= 0x1 << 9,
	IBT_PAGE_1M		= 0x1 << 10,
	IBT_PAGE_2M		= 0x1 << 11,
	IBT_PAGE_4M		= 0x1 << 12,
	IBT_PAGE_8M		= 0x1 << 13,
	IBT_PAGE_16M		= 0x1 << 14,
	IBT_PAGE_32M		= 0x1 << 15,
	IBT_PAGE_64M		= 0x1 << 16,
	IBT_PAGE_128M		= 0x1 << 17,
	IBT_PAGE_256M		= 0x1 << 18,
	IBT_PAGE_512M		= 0x1 << 19,
	IBT_PAGE_1G		= 0x1 << 20,
	IBT_PAGE_2G		= 0x1 << 21,
	IBT_PAGE_4G		= 0x1 << 22,
	IBT_PAGE_8G		= 0x1 << 23,
	IBT_PAGE_16G		= 0x1 << 24
} ibt_page_sizes_t;

/*
 * Memory Window Type.
 */
typedef enum ibt_mem_win_type_e {
	IBT_MEM_WIN_TYPE_NOT_DEFINED	= 0,
	IBT_MEM_WIN_TYPE_1		= (1 << 0),
	IBT_MEM_WIN_TYPE_2		= (1 << 1)
} ibt_mem_win_type_t;

/*
 * HCA attributes.
 * Contains all HCA static attributes.
 */
typedef struct ibt_hca_attr_s {
	ibt_hca_flags_t	hca_flags;		/* HCA capabilities etc */
	ibt_hca_flags2_t hca_flags2;

	/* device/version inconsistency w/ NodeInfo and IOControllerProfile */
	uint32_t	hca_vendor_id:24;	/* 24 bit Vendor ID */
	uint16_t	hca_device_id;
	uint32_t	hca_version_id;

	uint_t		hca_max_chans;		/* Max Chans supported */
	uint_t		hca_max_chan_sz;	/* Max outstanding WRs on any */
						/* channel */

	uint_t		hca_max_sgl;		/* Max SGL entries per WR */

	uint_t		hca_max_cq;		/* Max num of CQs supported */
	uint_t		hca_max_cq_sz;		/* Max capacity of each CQ */

	ibt_page_sizes_t hca_page_sz;		/* Bit mask of page sizes */

	uint_t		hca_max_memr;		/* Max num of HCA mem regions */
	ib_memlen_t	hca_max_memr_len;	/* Largest block, in bytes of */
						/* mem that can be registered */
	uint_t		hca_max_mem_win;	/* Max Memory windows in HCA */

	uint_t		hca_max_rsc;		/* Max Responder Resources of */
						/* this HCA for RDMAR/Atomics */
						/* with this HCA as target. */
	uint8_t		hca_max_rdma_in_chan;	/* Max RDMAR/Atomics in per */
						/* chan this HCA as target. */
	uint8_t		hca_max_rdma_out_chan;	/* Max RDMA Reads/Atomics out */
						/* per channel by this HCA */
	uint_t		hca_max_ipv6_chan;	/* Max IPV6 channels in HCA */
	uint_t		hca_max_ether_chan;	/* Max Ether channels in HCA */

	uint_t		hca_max_mcg_chans;	/* Max number of channels */
						/* that can join multicast */
						/* groups */
	uint_t		hca_max_mcg;		/* Max multicast groups */
	uint_t		hca_max_chan_per_mcg;	/* Max number of channels per */
						/* Multicast group in HCA */

	uint16_t	hca_max_partitions;	/* Max partitions in HCA */
	uint8_t		hca_nports;		/* Number of physical ports */
	ib_guid_t	hca_node_guid;		/* Node GUID */

	ib_time_t	hca_local_ack_delay;

	uint_t		hca_max_port_sgid_tbl_sz;
	uint16_t	hca_max_port_pkey_tbl_sz;
	uint_t		hca_max_pd;		/* Max# of Protection Domains */
	ib_guid_t	hca_si_guid;		/* Optional System Image GUID */
	uint_t		hca_hca_max_ci_priv_sz;
	uint_t		hca_chan_max_ci_priv_sz;
	uint_t		hca_cq_max_ci_priv_sz;
	uint_t		hca_pd_max_ci_priv_sz;
	uint_t		hca_mr_max_ci_priv_sz;
	uint_t		hca_mw_max_ci_priv_sz;
	uint_t		hca_ud_dest_max_ci_priv_sz;
	uint_t		hca_cq_sched_max_ci_priv_sz;
	uint_t		hca_max_ud_dest;
	uint_t		hca_opaque2;
	uint_t		hca_opaque3;
	uint_t		hca_opaque4;
	uint8_t		hca_opaque5;
	uint8_t		hca_opaque6;
	uint8_t		hca_rss_max_log2_table;	/* max RSS log2 table size */
	uint_t		hca_opaque7;
	uint_t		hca_opaque8;
	uint_t		hca_max_srqs;		/* Max SRQs supported */
	uint_t		hca_max_srqs_sz;	/* Max outstanding WRs on any */
						/* SRQ */
	uint_t		hca_max_srq_sgl;	/* Max SGL entries per SRQ WR */
	uint_t		hca_max_phys_buf_list_sz;
	size_t		hca_block_sz_lo;	/* Range of block sizes */
	size_t		hca_block_sz_hi;	/* supported by the HCA */
	uint_t		hca_max_cq_handlers;
	ibt_lkey_t	hca_reserved_lkey;	/* Reserved L_Key value */
	uint_t		hca_max_fmrs;		/* Max FMR Supported */
	uint_t		hca_opaque9;

	uint_t		hca_max_lso_size;
	uint_t		hca_max_lso_hdr_size;
	uint_t		hca_max_inline_size;

	uint_t		hca_max_cq_mod_count;	/* CQ notify moderation */
	uint_t		hca_max_cq_mod_usec;

	uint32_t	hca_fw_major_version;	/* firmware version */
	uint16_t	hca_fw_minor_version;
	uint16_t	hca_fw_micro_version;

	uint_t		hca_max_xrc_domains;	/* XRC items */
	uint_t		hca_max_xrc_srqs;
	uint_t		hca_max_xrc_srq_size;
	uint_t		hca_max_xrc_srq_sgl;

	/* detailed WQE size info */
	uint_t		hca_ud_send_inline_sz;	/* inline size in bytes */
	uint_t		hca_conn_send_inline_sz;
	uint_t		hca_conn_rdmaw_inline_overhead;
	uint_t		hca_recv_sgl_sz;	/* detailed SGL sizes */
	uint_t		hca_ud_send_sgl_sz;
	uint_t		hca_conn_send_sgl_sz;
	uint_t		hca_conn_rdma_read_sgl_sz;
	uint_t		hca_conn_rdma_write_sgl_sz;
	uint_t		hca_conn_rdma_sgl_overhead;

	/* FC Support */
	uint8_t		hca_rfci_max_log2_qp;	/* max log2 RFCI QPs */
	uint8_t		hca_fexch_max_log2_qp;	/* max log2 FEXCH QPs */
	uint8_t		hca_fexch_max_log2_mem;	/* max log2 mem per FEXCH */

	dev_info_t	*hca_dip;		/* HCA dev_info */
} ibt_hca_attr_t;
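
/*
 * Illustrative sketch (not part of the original header): testing a few of
 * the capability bits above.  It assumes "hattr" has already been filled
 * in by the HCA query interface; the variable names are hypothetical and
 * the fragment is compiled out.
 */
#if 0
	ibt_hca_attr_t	hattr;		/* filled in by an HCA query */
	boolean_t	have_srq, have_2m_pages, have_res_lkey;

	have_srq = (hattr.hca_flags & IBT_HCA_SRQ) != 0;
	have_2m_pages = (hattr.hca_page_sz & IBT_PAGE_2M) != 0;

	/* if set, hattr.hca_reserved_lkey may be used in SGL entries */
	have_res_lkey = (hattr.hca_flags2 & IBT_HCA2_RES_LKEY) != 0;
#endif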

/*
 * HCA Port link states.
 */
typedef enum ibt_port_state_e {
	IBT_PORT_DOWN	= 1,
	IBT_PORT_INIT,
	IBT_PORT_ARM,
	IBT_PORT_ACTIVE
} ibt_port_state_t;

/*
 * HCA Port capabilities as a bitfield.
 */
typedef enum ibt_port_caps_e {
	IBT_PORT_CAP_NO_FLAGS		= 0,
	IBT_PORT_CAP_SM			= 1 << 0,	/* SM port */
	IBT_PORT_CAP_SM_DISABLED	= 1 << 1,
	IBT_PORT_CAP_SNMP_TUNNEL	= 1 << 2,	/* SNMP Tunneling */
	IBT_PORT_CAP_DM			= 1 << 3,	/* DM supported */
	IBT_PORT_CAP_VENDOR		= 1 << 4,	/* Vendor Class */
	IBT_PORT_CAP_CLNT_REREG		= 1 << 5	/* Client Rereg */
} ibt_port_caps_t;


/* LinkWidth fields from PortInfo */
typedef uint8_t ib_link_width_t;

/*
 * When reading LinkWidthSupported and LinkWidthEnabled, these
 * values will be OR-ed together. See IB spec 14.2.5.6 for allowed
 * combinations. For LinkWidthActive, only one will be returned.
 */
#define	IBT_LINK_WIDTH_1X	(1)
#define	IBT_LINK_WIDTH_4X	(2)
#define	IBT_LINK_WIDTH_8X	(4)
#define	IBT_LINK_WIDTH_12X	(8)

/* LinkSpeed fields from PortInfo */
typedef uint8_t ib_link_speed_t;

/*
 * When reading LinkSpeedSupported and LinkSpeedEnabled, these
 * values will be OR-ed together. See IB spec 14.2.5.6 for allowed
 * combinations. For LinkSpeedActive, only one will be returned.
 */
#define	IBT_LINK_SPEED_SDR	(1)
#define	IBT_LINK_SPEED_DDR	(2)
#define	IBT_LINK_SPEED_QDR	(4)

/* PortPhysicalState field from PortInfo */
typedef uint8_t ib_port_phys_state_t;

#define	IBT_PORT_PHYS_STATE_SLEEP	(1)
#define	IBT_PORT_PHYS_STATE_POLLING	(2)
#define	IBT_PORT_PHYS_STATE_DISABLED	(3)
#define	IBT_PORT_PHYS_STATE_TRAINING	(4)
#define	IBT_PORT_PHYS_STATE_UP		(5)
#define	IBT_PORT_PHYS_STATE_RECOVERY	(6)
#define	IBT_PORT_PHYS_STATE_TEST	(7)

/*
 * HCA port attributes structure definition. The number of ports per HCA
 * can be found from the "ibt_hca_attr_t" structure.
 *
 * p_pkey_tbl is a pointer to an array of ib_pkey_t, members are
 * accessed as:
 *		hca_portinfo->p_pkey_tbl[i]
 *
 * Where 0 <= i < hca_portinfo->p_pkey_tbl_sz
 *
 * Similarly p_sgid_tbl is a pointer to an array of ib_gid_t.
 *
 * The Query Port function - ibt_query_hca_ports() allocates the memory
 * required for the ibt_hca_portinfo_t struct as well as the memory
 * required for the SGID and P_Key tables. The memory is freed by calling
 * ibt_free_portinfo().
 */
typedef struct ibt_hca_portinfo_s {
	ib_lid_t		p_opaque1;	/* Base LID of port */
	ib_qkey_cntr_t		p_qkey_violations; /* Bad Q_Key cnt */
	ib_pkey_cntr_t		p_pkey_violations; /* Optional bad P_Key cnt */
	uint8_t			p_sm_sl:4;	/* SM Service level */
	ib_port_phys_state_t	p_phys_state;
	ib_lid_t		p_sm_lid;	/* SM LID */
	ibt_port_state_t	p_linkstate;	/* Port state */
	uint8_t			p_port_num;
	ib_link_width_t		p_width_supported;
	ib_link_width_t		p_width_enabled;
	ib_link_width_t		p_width_active;
	ib_mtu_t		p_mtu;		/* Max transfer unit - pkt */
	uint8_t			p_lmc:3;	/* Local mask control */
	ib_link_speed_t		p_speed_supported;
	ib_link_speed_t		p_speed_enabled;
	ib_link_speed_t		p_speed_active;
	ib_gid_t		*p_sgid_tbl;	/* SGID Table */
	uint_t			p_sgid_tbl_sz;	/* Size of SGID table */
	uint16_t		p_pkey_tbl_sz;	/* Size of P_Key table */
	uint16_t		p_def_pkey_ix;	/* default pkey index for TI */
	ib_pkey_t		*p_pkey_tbl;	/* P_Key table */
	uint8_t			p_max_vl;	/* Max num of virtual lanes */
	uint8_t			p_init_type_reply; /* Optional InitTypeReply */
	ib_time_t		p_subnet_timeout; /* Max Subnet Timeout */
	ibt_port_caps_t		p_capabilities;	/* Port Capabilities */
	uint32_t		p_msg_sz;	/* Max message size */
} ibt_hca_portinfo_t;
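
/*
 * Illustrative sketch (not part of the original header): walking the P_Key
 * table of a queried port, as described in the comment above.  "portinfo"
 * is assumed to point at an ibt_hca_portinfo_t returned by the port query
 * interface; the loop variables are hypothetical and the fragment is
 * compiled out.
 */
#if 0
	ibt_hca_portinfo_t	*portinfo;	/* from a port query */
	uint16_t		i;

	for (i = 0; i < portinfo->p_pkey_tbl_sz; i++) {
		ib_pkey_t pkey = portinfo->p_pkey_tbl[i];

		if (pkey == 0)
			continue;	/* skip unassigned entries */
		/* pkey is a valid partition key for this port */
	}
#endif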

/*
 * Modify HCA port attributes flags, specifies which HCA port
 * attributes to modify.
 */
typedef enum ibt_port_modify_flags_e {
	IBT_PORT_NO_FLAGS	= 0,

	IBT_PORT_RESET_QKEY	= 1 << 0,	/* Reset Q_Key violation */
						/* counter */
	IBT_PORT_RESET_SM	= 1 << 1,	/* SM */
	IBT_PORT_SET_SM		= 1 << 2,
	IBT_PORT_RESET_SNMP	= 1 << 3,	/* SNMP Tunneling */
	IBT_PORT_SET_SNMP	= 1 << 4,
	IBT_PORT_RESET_DEVMGT	= 1 << 5,	/* Device Management */
	IBT_PORT_SET_DEVMGT	= 1 << 6,
	IBT_PORT_RESET_VENDOR	= 1 << 7,	/* Vendor Class */
	IBT_PORT_SET_VENDOR	= 1 << 8,
	IBT_PORT_SHUTDOWN	= 1 << 9,	/* Shut down the port */
	IBT_PORT_SET_INIT_TYPE	= 1 << 10	/* InitTypeReply value */
} ibt_port_modify_flags_t;

/*
 * Modify HCA port InitType bit definitions, applicable only if
 * IBT_PORT_SET_INIT_TYPE modify flag (ibt_port_modify_flags_t) is set.
 */
#define	IBT_PINIT_NO_LOAD		0x1
#define	IBT_PINIT_PRESERVE_CONTENT	0x2
#define	IBT_PINIT_PRESERVE_PRESENCE	0x4
#define	IBT_PINIT_NO_RESUSCITATE	0x8


/*
 * Address vector definition.
 */
typedef struct ibt_adds_vect_s {
	ib_gid_t	av_dgid;	/* IPV6 dest GID in GRH */
	ib_gid_t	av_sgid;	/* SGID */
	ibt_srate_t	av_srate;	/* Max static rate */
	uint8_t		av_srvl:4;	/* Service level in LRH */
	uint_t		av_flow:20;	/* 20 bit Flow Label */
	uint8_t		av_tclass;	/* Traffic Class */
	uint8_t		av_hop;		/* Hop Limit */
	uint8_t		av_port_num;	/* Port number for UD */
	boolean_t	av_opaque1;
	ib_lid_t	av_opaque2;
	ib_path_bits_t	av_opaque3;
	uint32_t	av_opaque4;
} ibt_adds_vect_t;

typedef struct ibt_cep_path_s {
	ibt_adds_vect_t	cep_adds_vect;		/* Address Vector */
	uint16_t	cep_pkey_ix;		/* P_Key Index */
	uint8_t		cep_hca_port_num;	/* Port number for connected */
						/* channels. A value of 0 */
						/* indicates an invalid path */
	ib_time_t	cep_cm_opaque1;
} ibt_cep_path_t;

/*
 * Define Receive Side Scaling types for IP over IB.
 */
typedef enum ibt_rss_flags_e {
	IBT_RSS_ALG_TPL		= (1 << 0),	/* RSS: Toeplitz hash */
	IBT_RSS_ALG_XOR		= (1 << 1),	/* RSS: XOR hash */
	IBT_RSS_HASH_IPV4	= (1 << 2),	/* RSS: hash IPv4 headers */
	IBT_RSS_HASH_IPV6	= (1 << 3),	/* RSS: hash IPv6 headers */
	IBT_RSS_HASH_TCP_IPV4	= (1 << 4),	/* RSS: hash TCP/IPv4 hdrs */
	IBT_RSS_HASH_TCP_IPV6	= (1 << 5)	/* RSS: hash TCP/IPv6 hdrs */
} ibt_rss_flags_t;

typedef struct ibt_rss_attr_s {
	ibt_rss_flags_t	rss_flags;		/* RSS: flags */
	uint_t		rss_log2_table;		/* RSS: log2 table size */
	ib_qpn_t	rss_base_qpn;		/* RSS: base QPN */
	ib_qpn_t	rss_def_qpn;		/* RSS: default QPN */
	uint8_t		rss_toe_key[40];	/* RSS: Toeplitz hash key */
} ibt_rss_attr_t;

/*
 * Channel Migration State.
 */
typedef enum ibt_cep_cmstate_e {
	IBT_STATE_NOT_SUPPORTED	= 0,
	IBT_STATE_MIGRATED	= 1,
	IBT_STATE_REARMED	= 2,
	IBT_STATE_ARMED		= 3
} ibt_cep_cmstate_t;

/*
 * Transport service type
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_tran_srv_t;

#define	IBT_RC_SRV		0
#define	IBT_UC_SRV		1
#define	IBT_RD_SRV		2
#define	IBT_UD_SRV		3
#define	IBT_RAWIP_SRV		4
#define	IBT_RAWETHER_SRV	5
#define	IBT_RFCI_SRV		6
#define	IBT_FCMD_SRV		7
#define	IBT_FEXCH_SRV		8

/*
 * Channel (QP/EEC) state definitions.
 */
typedef enum ibt_cep_state_e {
	IBT_STATE_RESET	= 0,		/* Reset */
	IBT_STATE_INIT,			/* Initialized */
	IBT_STATE_RTR,			/* Ready to Receive */
	IBT_STATE_RTS,			/* Ready to Send */
	IBT_STATE_SQD,			/* Send Queue Drained */
	IBT_STATE_SQE,			/* Send Queue Error */
	IBT_STATE_ERROR,		/* Error */
	IBT_STATE_SQDRAIN,		/* Send Queue Draining */
	IBT_STATE_NUM			/* Number of states */
} ibt_cep_state_t;


/*
 * Channel Attribute flags.
 */
typedef enum ibt_attr_flags_e {
	IBT_ALL_SIGNALED	= 0,	/* All sends signaled */
	IBT_WR_SIGNALED		= 1,	/* Signaled on a WR basis */
	IBT_FAST_REG_RES_LKEY	= (1 << 1),
	IBT_USES_LSO		= (1 << 2)
} ibt_attr_flags_t;

/*
 * Channel End Point (CEP) Control Flags.
 */
typedef enum ibt_cep_flags_e {
	IBT_CEP_NO_FLAGS	= 0,		/* Enable Nothing */
	IBT_CEP_RDMA_RD		= (1 << 0),	/* Enable incoming RDMA RD's */
						/* RC & RD only */
	IBT_CEP_RDMA_WR		= (1 << 1),	/* Enable incoming RDMA WR's */
						/* RC & RD only */
	IBT_CEP_ATOMIC		= (1 << 2)	/* Enable incoming Atomics, */
						/* RC & RD only */
} ibt_cep_flags_t;

/*
 * Channel Modify Flags
 */
typedef enum ibt_cep_modify_flags_e {
	IBT_CEP_SET_NOTHING		= 0,
	IBT_CEP_SET_SQ_SIZE		= (1 << 1),
	IBT_CEP_SET_RQ_SIZE		= (1 << 2),

	IBT_CEP_SET_RDMA_R		= (1 << 3),
	IBT_CEP_SET_RDMA_W		= (1 << 4),
	IBT_CEP_SET_ATOMIC		= (1 << 5),

	IBT_CEP_SET_ALT_PATH		= (1 << 6),	/* Alternate Path */

	IBT_CEP_SET_ADDS_VECT		= (1 << 7),
	IBT_CEP_SET_PORT		= (1 << 8),
	IBT_CEP_SET_OPAQUE5		= (1 << 9),
	IBT_CEP_SET_RETRY		= (1 << 10),
	IBT_CEP_SET_RNR_NAK_RETRY	= (1 << 11),
	IBT_CEP_SET_MIN_RNR_NAK		= (1 << 12),

	IBT_CEP_SET_QKEY		= (1 << 13),
	IBT_CEP_SET_RDMARA_OUT		= (1 << 14),
	IBT_CEP_SET_RDMARA_IN		= (1 << 15),

	IBT_CEP_SET_OPAQUE1		= (1 << 16),
	IBT_CEP_SET_OPAQUE2		= (1 << 17),
	IBT_CEP_SET_OPAQUE3		= (1 << 18),
	IBT_CEP_SET_OPAQUE4		= (1 << 19),
	IBT_CEP_SET_SQD_EVENT		= (1 << 20),
	IBT_CEP_SET_OPAQUE6		= (1 << 21),
	IBT_CEP_SET_OPAQUE7		= (1 << 22),
	IBT_CEP_SET_OPAQUE8		= (1 << 23),
	IBT_CEP_SET_RSS			= (1 << 24),
	IBT_CEP_SET_FEXCH_RANGE		= (1 << 25)
} ibt_cep_modify_flags_t;

/*
 * CQ notify types.
 */
typedef enum ibt_cq_notify_flags_e {
	IBT_NEXT_COMPLETION	= 1,
	IBT_NEXT_SOLICITED	= 2
} ibt_cq_notify_flags_t;

/*
 * CQ types shared across TI and CI.
 */
typedef enum ibt_cq_flags_e {
	IBT_CQ_NO_FLAGS			= 0,
	IBT_CQ_HANDLER_IN_THREAD	= 1 << 0,	/* A thread calls the */
							/* CQ handler */
	IBT_CQ_USER_MAP			= 1 << 1,
	IBT_CQ_DEFER_ALLOC		= 1 << 2,
	IBT_CQ_HID			= 1 << 3
} ibt_cq_flags_t;

typedef enum ibt_cq_sched_flags_e {
	IBT_CQS_NO_FLAGS	= 0,
	IBT_CQS_WARM_CACHE	= 1 << 0,	/* run on same CPU */
	IBT_CQS_EXACT_SCHED_GROUP = 1 << 1,
	IBT_CQS_SCHED_GROUP	= 1 << 2,
	IBT_CQS_USER_MAP	= 1 << 3,
	IBT_CQS_DEFER_ALLOC	= 1 << 4
} ibt_cq_sched_flags_t;

/*
 * Attributes when creating a Completion Queue Scheduling Handle.
 */
typedef struct ibt_cq_sched_attr_s {
	ibt_cq_sched_flags_t	cqs_flags;
	char			*cqs_pool_name;
} ibt_cq_sched_attr_t;

typedef	void *ibt_intr_handle_t;

typedef struct ibt_cq_handler_attr_s {
	dev_info_t		*cha_dip;
	ibt_intr_handle_t	cha_ih;
} ibt_cq_handler_attr_t;

/*
 * Attributes when creating a Completion Queue.
 *
 * Note:
 *	The IBT_CQ_HANDLER_IN_THREAD cq_flags bit should be ignored by the CI.
 */
typedef struct ibt_cq_attr_s {
	uint_t			cq_size;
	ibt_sched_hdl_t		cq_sched;	/* 0 = no hint, */
						/* other = cq_sched value */
	ibt_cq_flags_t		cq_flags;
	ibt_cq_handler_id_t	cq_hid;
} ibt_cq_attr_t;

/*
 * Memory Management
 */

/* Memory management flags */
typedef enum ibt_mr_flags_e {
	IBT_MR_SLEEP			= 0,
	IBT_MR_NOSLEEP			= (1 << 1),
	IBT_MR_NONCOHERENT		= (1 << 2),
	IBT_MR_PHYS_IOVA		= (1 << 3),	/* ibt_(re)register_buf */

	/* Access control flags */
	IBT_MR_ENABLE_WINDOW_BIND	= (1 << 4),
	IBT_MR_ENABLE_LOCAL_WRITE	= (1 << 5),
	IBT_MR_ENABLE_REMOTE_READ	= (1 << 6),
	IBT_MR_ENABLE_REMOTE_WRITE	= (1 << 7),
	IBT_MR_ENABLE_REMOTE_ATOMIC	= (1 << 8),

	/* Reregister flags */
	IBT_MR_CHANGE_TRANSLATION	= (1 << 9),
	IBT_MR_CHANGE_ACCESS		= (1 << 10),
	IBT_MR_CHANGE_PD		= (1 << 11),

	/* Additional registration flags */
	IBT_MR_ZBVA			= (1 << 12),

	/* Additional physical registration flags */
	IBT_MR_CONSUMER_KEY		= (1 << 13),	/* Consumer owns key */
							/* portion of keys */
	IBT_MR_DISABLE_RO		= (1 << 14),
	IBT_MR_USER_BUF			= (1 << 15)	/* ibt_(re)register_buf */
} ibt_mr_flags_t;


/* Memory Region attribute flags */
typedef enum ibt_mr_attr_flags_e {
	/* Access control flags */
	IBT_MR_WINDOW_BIND		= (1 << 0),
	IBT_MR_LOCAL_WRITE		= (1 << 1),
	IBT_MR_REMOTE_READ		= (1 << 2),
	IBT_MR_REMOTE_WRITE		= (1 << 3),
	IBT_MR_REMOTE_ATOMIC		= (1 << 4),
	IBT_MR_ZERO_BASED_VA		= (1 << 5),
	IBT_MR_CONSUMER_OWNED_KEY	= (1 << 6),
	IBT_MR_SHARED			= (1 << 7),
	IBT_MR_FMR			= (1 << 8),
	IBT_MR_RO_DISABLED		= (1 << 9)
} ibt_mr_attr_flags_t;

/* Memory region physical descriptor. */
typedef struct ibt_phys_buf_s {
	union {
		uint64_t	_p_ll;		/* 64 bit DMA address */
		uint32_t	_p_la[2];	/* 2 x 32 bit address */
	} _phys_buf;
	size_t	p_size;
} ibt_phys_buf_t;

/* version of above for uniform buffer size */
typedef struct ib_phys_addr_t {
	union {
		uint64_t	_p_ll;		/* 64 bit DMA address */
		uint32_t	_p_la[2];	/* 2 x 32 bit address */
	} _phys_buf;
} ibt_phys_addr_t;

#define	p_laddr		_phys_buf._p_ll
#ifdef	_LONG_LONG_HTOL
#define	p_notused	_phys_buf._p_la[0]
#define	p_addr		_phys_buf._p_la[1]
#else
#define	p_addr		_phys_buf._p_la[0]
#define	p_notused	_phys_buf._p_la[1]
#endif


/* Memory region descriptor. */
typedef struct ibt_mr_desc_s {
	ib_vaddr_t	md_vaddr;	/* IB virtual adds of memory */
	ibt_lkey_t	md_lkey;
	ibt_rkey_t	md_rkey;
	boolean_t	md_sync_required;
} ibt_mr_desc_t;

/* Physical Memory region descriptor. */
typedef struct ibt_pmr_desc_s {
	ib_vaddr_t	pmd_iova;	/* Returned I/O Virtual Address */
	ibt_lkey_t	pmd_lkey;
	ibt_rkey_t	pmd_rkey;
	uint_t		pmd_phys_buf_list_sz;	/* Allocated Phys buf sz */
	boolean_t	pmd_sync_required;
} ibt_pmr_desc_t;

/* Memory region protection bounds. */
typedef struct ibt_mr_prot_bounds_s {
	ib_vaddr_t	pb_addr;	/* Beginning address */
	size_t		pb_len;		/* Length of protected region */
} ibt_mr_prot_bounds_t;

/* Memory Region (Re)Register attributes */
typedef struct ibt_mr_attr_s {
	ib_vaddr_t	mr_vaddr;	/* Virtual address to register */
	ib_memlen_t	mr_len;		/* Length of region to register */
	struct as	*mr_as;		/* A pointer to an address space */
					/* structure. This parameter should */
					/* be set to NULL, which implies */
					/* kernel address space. */
	ibt_mr_flags_t	mr_flags;
} ibt_mr_attr_t;
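
/*
 * Illustrative sketch (not part of the original header): describing a
 * kernel buffer for memory registration.  "buf" and "len" are hypothetical;
 * the filled-in structure would be handed to the memory registration
 * interface together with a PD handle.  Compiled out.
 */
#if 0
	ibt_mr_attr_t	mr_attr;

	mr_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)buf;
	mr_attr.mr_len = (ib_memlen_t)len;
	mr_attr.mr_as = NULL;			/* kernel address space */
	mr_attr.mr_flags = IBT_MR_SLEEP |
	    IBT_MR_ENABLE_LOCAL_WRITE | IBT_MR_ENABLE_REMOTE_READ;
#endif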

/* Physical Memory Region (Re)Register */
typedef struct ibt_pmr_attr_s {
	ib_vaddr_t	pmr_iova;	/* I/O virtual address requested by */
					/* client for the first byte of the */
					/* region */
	ib_memlen_t	pmr_len;	/* Length of region to register */
	ib_memlen_t	pmr_offset;	/* Offset of the region's starting */
					/* IOVA within the 1st physical */
					/* buffer */
	ibt_ma_hdl_t	pmr_ma;		/* Memory handle used to obtain the */
					/* pmr_buf_list */
	ibt_phys_addr_t	*pmr_addr_list;	/* List of physical buffers accessed */
					/* as an array */
	size_t		pmr_buf_sz;
	uint_t		pmr_num_buf;	/* Num of entries in the pmr_buf_list */
	ibt_lkey_t	pmr_lkey;	/* Reregister only */
	ibt_rkey_t	pmr_rkey;	/* Reregister only */
	ibt_mr_flags_t	pmr_flags;
	uint8_t		pmr_key;	/* Key to use on new Lkey & Rkey */
} ibt_pmr_attr_t;

/* DMA Memory Region */
typedef struct ibt_dmr_attr_s {
	uint64_t	dmr_paddr;	/* starting physical addr */
	ib_memlen_t	dmr_len;	/* length in bytes */
	ibt_mr_flags_t	dmr_flags;	/* no sleep, memory permissions */
} ibt_dmr_attr_t;

/* addr/length pair */
typedef struct ibt_iov_s {
	caddr_t	iov_addr;	/* Beginning address */
	size_t	iov_len;	/* Length */
} ibt_iov_t;

/* Map memory IOV */
typedef enum ibt_iov_flags_e {
	IBT_IOV_SLEEP		= 0,
	IBT_IOV_NOSLEEP		= (1 << 0),
	IBT_IOV_BUF		= (1 << 1),
	IBT_IOV_RECV		= (1 << 2),
	IBT_IOV_USER_BUF	= (1 << 3),
	IBT_IOV_ALT_LKEY	= (1 << 4)
} ibt_iov_flags_t;

typedef struct ibt_iov_attr_s {
	struct as		*iov_as;
	ibt_iov_t		*iov;
	struct buf		*iov_buf;
	uint32_t		iov_list_len;
	uint32_t		iov_wr_nds;
	ib_msglen_t		iov_lso_hdr_sz;
	ibt_lkey_t		iov_alt_lkey;
	ibt_iov_flags_t		iov_flags;
} ibt_iov_attr_t;

/*
 * Memory Region (Re)Register attributes - used by ibt_register_shared_mr(),
 * ibt_register_buf() and ibt_reregister_buf().
 */
typedef struct ibt_smr_attr_s {
	ib_vaddr_t		mr_vaddr;
	ibt_mr_flags_t		mr_flags;
	uint8_t			mr_key;		/* Only for physical */
						/* ibt_(Re)register_buf() */
	ibt_lkey_t		mr_lkey;	/* Only for physical */
	ibt_rkey_t		mr_rkey;	/* ibt_Reregister_buf() */
} ibt_smr_attr_t;

/*
 * key states.
 */
typedef enum ibt_key_state_e {
	IBT_KEY_INVALID	= 0,
	IBT_KEY_FREE,
	IBT_KEY_VALID
} ibt_key_state_t;

/* Memory region query attributes */
typedef struct ibt_mr_query_attr_s {
	ibt_lkey_t		mr_lkey;
	ibt_rkey_t		mr_rkey;
	ibt_mr_prot_bounds_t	mr_lbounds;	/* Actual local CI protection */
						/* bounds */
	ibt_mr_prot_bounds_t	mr_rbounds;	/* Actual remote CI */
						/* protection bounds */
	ibt_mr_attr_flags_t	mr_attr_flags;	/* Access rights etc. */
	ibt_pd_hdl_t		mr_pd;		/* Protection domain */
	boolean_t		mr_sync_required;
	ibt_key_state_t		mr_lkey_state;
	uint_t			mr_phys_buf_list_sz;
} ibt_mr_query_attr_t;

/* Memory window query attributes */
typedef struct ibt_mw_query_attr_s {
	ibt_pd_hdl_t		mw_pd;
	ibt_mem_win_type_t	mw_type;
	ibt_rkey_t		mw_rkey;
	ibt_key_state_t		mw_state;
} ibt_mw_query_attr_t;


/* Memory Region Sync Flags. */
#define	IBT_SYNC_READ	0x1	/* Make memory changes visible to incoming */
				/* RDMA reads */

#define	IBT_SYNC_WRITE	0x2	/* Make the effects of an incoming RDMA write */
				/* visible to the consumer */

/* Memory region sync args */
typedef struct ibt_mr_sync_s {
	ibt_mr_hdl_t	ms_handle;
	ib_vaddr_t	ms_vaddr;
	ib_memlen_t	ms_len;
	uint32_t	ms_flags;	/* IBT_SYNC_READ or IBT_SYNC_WRITE */
} ibt_mr_sync_t;
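
/*
 * Illustrative sketch (not part of the original header): describing the
 * range that must be made consistent after an incoming RDMA write has
 * completed.  "mr_hdl", "buf" and "len" are hypothetical; an array of
 * these entries would be passed to the memory-sync interface.
 */
#if 0
	ibt_mr_sync_t	ms;

	ms.ms_handle = mr_hdl;
	ms.ms_vaddr = (ib_vaddr_t)(uintptr_t)buf;
	ms.ms_len = (ib_memlen_t)len;
	ms.ms_flags = IBT_SYNC_WRITE;	/* make the write visible locally */
#endif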

/*
 * Flags for Virtual Address to HCA Physical Address translation.
 */
typedef enum ibt_va_flags_e {
	IBT_VA_SLEEP		= 0,
	IBT_VA_NOSLEEP		= (1 << 0),
	IBT_VA_NONCOHERENT	= (1 << 1),
	IBT_VA_FMR		= (1 << 2),
	IBT_VA_BLOCK_MODE	= (1 << 3),
	IBT_VA_BUF		= (1 << 4),
	IBT_VA_REG_FN		= (1 << 5),
	IBT_VA_USER_BUF		= (1 << 6)
} ibt_va_flags_t;


/* Address Translation parameters */
typedef struct ibt_va_attr_s {
	ib_vaddr_t	va_vaddr;	/* Virtual address to register */
	ib_memlen_t	va_len;		/* Length of region to register */
	struct as	*va_as;		/* A pointer to an address space */
					/* structure. */
	size_t		va_phys_buf_min;	/* block mode only */
	size_t		va_phys_buf_max;	/* block mode only */
	ibt_va_flags_t	va_flags;
	struct buf	*va_buf;
} ibt_va_attr_t;


/*
 * Fast Memory Registration (FMR) support.
 */

/* FMR flush function handler. */
typedef void (*ibt_fmr_flush_handler_t)(ibt_fmr_pool_hdl_t fmr_pool,
    void *fmr_func_arg);

/* FMR Pool create attributes. */
typedef struct ibt_fmr_pool_attr_s {
	uint_t			fmr_max_pages_per_fmr;
	uint_t			fmr_pool_size;
	uint_t			fmr_dirty_watermark;
	size_t			fmr_page_sz;
	boolean_t		fmr_cache;
	ibt_mr_flags_t		fmr_flags;
	ibt_fmr_flush_handler_t	fmr_func_hdlr;
	void			*fmr_func_arg;
} ibt_fmr_pool_attr_t;

/*
 * Define types for Fibre Channel over IB (fcoib)
 */
typedef enum ibt_fexch_query_flags_e {
	IBT_FEXCH_NO_FLAGS	= 0,
	IBT_FEXCH_HEART_BEAT_OK	= (1 << 0)	/* FEXCH only */
} ibt_fexch_query_flags_t;

typedef struct ibt_fexch_query_attr_s {
	ibt_pmr_desc_t	fq_uni_mem_desc; /* FEXCH: uni-directional MR attrs */
	ibt_pmr_desc_t	fq_bi_mem_desc;	/* FEXCH: bi-directional MR attrs */
	ibt_fexch_query_flags_t fq_flags;
} ibt_fexch_query_attr_t;

typedef struct ibt_fc_attr_s {
	uint32_t	fc_src_id;	/* S_ID assigned to the RFCI QP */
	/* FCMD, FEXCH: matching RFCI QP = RFCI base + idx */
	ib_qpn_t	fc_rfci_qpn;
	uint16_t	fc_exch_base_off;	/* FCMD: FEXCH usable base */
	uint8_t		fc_exch_log2_sz;	/* FCMD: FEXCH log2 size */
	uint8_t		fc_hca_port;	/* RFCI, FEXCH: HCA port number */
} ibt_fc_attr_t;


/*
 * WORK REQUEST AND WORK REQUEST COMPLETION DEFINITIONS.
 */

/*
 * Work Request and Work Request Completion types - These types are used
 * to indicate the type of work requests posted to a work queue
 * or the type of completion received. Immediate Data is indicated via
 * ibt_wr_flags_t or ibt_wc_flags_t.
 *
 * IBT_WRC_RECV and IBT_WRC_RECV_RDMAWI are only used as opcodes in the
 * work completions.
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_wrc_opcode_t;

#define	IBT_WRC_SEND		1	/* Send */
#define	IBT_WRC_RDMAR		2	/* RDMA Read */
#define	IBT_WRC_RDMAW		3	/* RDMA Write */
#define	IBT_WRC_CSWAP		4	/* Compare & Swap Atomic */
#define	IBT_WRC_FADD		5	/* Fetch & Add Atomic */
#define	IBT_WRC_BIND		6	/* Bind Memory Window */
#define	IBT_WRC_RECV		7	/* Receive */
#define	IBT_WRC_RECV_RDMAWI	8	/* Received RDMA Write w/ Immediate */
#define	IBT_WRC_FAST_REG_PMR	9	/* Fast Register Physical mem region */
#define	IBT_WRC_LOCAL_INVALIDATE 10
#define	IBT_WRC_SEND_LSO	11
#define	IBT_WRC_INIT_SEND_FCMD	12	/* Init & Send for FCMD initiator */
#define	IBT_WRC_INIT_FEXCH	13	/* Init for FEXCH target */


/*
 * Work Request Completion flags - These flags indicate what type
 * of data is present in the Work Request Completion structure
 */
typedef	uint8_t ibt_wc_flags_t;

#define	IBT_WC_NO_FLAGS			0
#define	IBT_WC_GRH_PRESENT		(1 << 0)
#define	IBT_WC_IMMED_DATA_PRESENT	(1 << 1)
#define	IBT_WC_RKEY_INVALIDATED		(1 << 2)
#define	IBT_WC_CKSUM_OK			(1 << 3)
#define	IBT_WC_FEXCH_FMT		(1 << 4)
#define	IBT_WC_DIF_ERROR		(1 << 5)

/* IPoIB flags for wc_detail field */
#define	IBT_WC_DETAIL_ALL_FLAGS_MASK	(0x0FC00000)
#define	IBT_WC_DETAIL_IPV4		(1 << 22)
#define	IBT_WC_DETAIL_IPV4_FRAG		(1 << 23)
#define	IBT_WC_DETAIL_IPV6		(1 << 24)
#define	IBT_WC_DETAIL_IPV4_OPT		(1 << 25)
#define	IBT_WC_DETAIL_TCP		(1 << 26)
#define	IBT_WC_DETAIL_UDP		(1 << 27)

#define	IBT_WC_DETAIL_RSS_MATCH_MASK	(0x003F0000)
#define	IBT_WC_DETAIL_RSS_TCP_IPV6	(1 << 18)
#define	IBT_WC_DETAIL_RSS_IPV6		(1 << 19)
#define	IBT_WC_DETAIL_RSS_TCP_IPV4	(1 << 20)
#define	IBT_WC_DETAIL_RSS_IPV4		(1 << 21)

/* FEXCH flags for wc_detail field */
#define	IBT_WC_DETAIL_FC_MATCH_MASK	(0xE000000)
#define	IBT_WC_DETAIL_FEXCH_INIT_XFER	(1 << 25)
#define	IBT_WC_DETAIL_FEXCH_LAST	(1 << 26)
#define	IBT_WC_DETAIL_RFCI_CRC_OK	(1 << 27)

/*
 * Work Request Completion - This structure encapsulates the information
 * necessary to define a work request completion.
 */
typedef struct ibt_wc_s {
	ibt_wrid_t	wc_id;		/* Work Request Id */
	uint64_t	wc_fma_ena;	/* fault management err data */
	ib_msglen_t	wc_bytes_xfer;	/* Number of Bytes */
					/* Transferred */
	ibt_wc_flags_t	wc_flags;	/* WR Completion Flags */
	ibt_wrc_opcode_t wc_type;	/* Operation Type */
	uint16_t	wc_cksum;	/* payload checksum */
	ibt_immed_t	wc_immed_data;	/* Immediate Data */
	uint32_t	wc_res_hash;	/* RD: Freed Res, RSS: hash */
	ibt_wc_status_t	wc_status;	/* Completion Status */
	uint8_t		wc_sl:4;	/* Remote SL */
	uint16_t	wc_ethertype;	/* Ethertype Field - RE */
	ib_lid_t	wc_opaque1;
	uint16_t	wc_opaque2;
	ib_qpn_t	wc_qpn;		/* Source QPN Datagram only */
	uint32_t	wc_detail;	/* RD: EECN, UD: IPoIB flags */
	ib_qpn_t	wc_local_qpn;
	ibt_rkey_t	wc_rkey;
	ib_path_bits_t	wc_opaque4;
} ibt_wc_t;

/* FC format alternative field names */
#define	wc_fexch_seq_cnt	wc_cksum
#define	wc_fexch_tx_bytes_xfer	wc_immed_data
#define	wc_fexch_rx_bytes_xfer	wc_res_hash
#define	wc_fexch_seq_id		wc_opaque2
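
/*
 * Illustrative sketch (not part of the original header): inspecting one
 * work completion pulled off a CQ.  "wc" is assumed to have been filled in
 * by the CQ poll interface; the success status value is assumed to be the
 * IBT_WC_SUCCESS code from ibtl_status.h.  Compiled out.
 */
#if 0
	ibt_wc_t	wc;

	if (wc.wc_status != IBT_WC_SUCCESS) {
		/* wc.wc_id identifies the failed work request */
	} else if (wc.wc_type == IBT_WRC_RECV &&
	    (wc.wc_flags & IBT_WC_IMMED_DATA_PRESENT)) {
		/* wc.wc_immed_data is valid for this receive */
	}
#endif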

/*
 * WR Flags. Common for both RC and UD
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_wr_flags_t;

#define	IBT_WR_NO_FLAGS		0
#define	IBT_WR_SEND_IMMED	(1 << 0)	/* Immediate Data Indicator */
#define	IBT_WR_SEND_SIGNAL	(1 << 1)	/* Signaled, if set */
#define	IBT_WR_SEND_FENCE	(1 << 2)	/* Fence Indicator */
#define	IBT_WR_SEND_SOLICIT	(1 << 3)	/* Solicited Event Indicator */
#define	IBT_WR_SEND_REMOTE_INVAL	(1 << 4) /* Remote Invalidate */
#define	IBT_WR_SEND_CKSUM	(1 << 5)	/* Checksum offload Indicator */
#define	IBT_WR_SEND_FC_CRC	IBT_WR_SEND_CKSUM	/* RFCI: FC CRC */
#define	IBT_WR_SEND_INLINE	(1 << 6)	/* INLINE required (no lkey) */

/*
 * Access control flags for Bind Memory Window operation,
 * applicable for RC/UC/RD only.
 *
 * If IBT_WR_BIND_WRITE or IBT_WR_BIND_ATOMIC is desired then
 * it is required that Memory Region should have Local Write Access.
 */
typedef enum ibt_bind_flags_e {
	IBT_WR_BIND_READ	= (1 << 0),	/* enable remote read */
	IBT_WR_BIND_WRITE	= (1 << 1),	/* enable remote write */
	IBT_WR_BIND_ATOMIC	= (1 << 2),	/* enable remote atomics */
	IBT_WR_BIND_ZBVA	= (1 << 3)	/* Zero Based Virtual Address */
} ibt_bind_flags_t;

/*
 * Data Segment for scatter-gather list
 *
 * SGL consists of an array of data segments and the length of the SGL.
 */
typedef struct ibt_wr_ds_s {
	ib_vaddr_t	ds_va;		/* Virtual Address */
	ibt_lkey_t	ds_key;		/* L_Key */
	ib_msglen_t	ds_len;		/* Length of DS */
} ibt_wr_ds_t;

/*
 * Bind Memory Window WR
 *
 * WR ID from ibt_send_wr_t applies here too, SWG_0038 errata.
 */
typedef struct ibt_wr_bind_s {
	ibt_bind_flags_t	bind_flags;
	ibt_rkey_t		bind_rkey;	/* Mem Window's R_key */
	ibt_lkey_t		bind_lkey;	/* Mem Region's L_Key */
	ibt_rkey_t		bind_rkey_out;	/* OUT: new R_Key */
	ibt_mr_hdl_t		bind_ibt_mr_hdl; /* Mem Region handle */
	ibt_mw_hdl_t		bind_ibt_mw_hdl; /* Mem Window handle */
	ib_vaddr_t		bind_va;	/* Virtual Address */
	ib_memlen_t		bind_len;	/* Length of Window */
} ibt_wr_bind_t;

/*
 * Atomic WR
 *
 * Operation type (compare & swap or fetch & add) in ibt_wrc_opcode_t.
 *
 * A copy of the original contents of the remote memory will be stored
 * in the local data segment described by wr_sgl within ibt_send_wr_t,
 * and wr_nds should be set to 1.
 *
 * Atomic operation operands:
 *   Compare & Swap Operation:
 *	atom_arg1 - Compare Operand
 *	atom_arg2 - Swap Operand
 *
 *   Fetch & Add Operation:
 *	atom_arg1 - Add Operand
 *	atom_arg2 - ignored
 */
typedef struct ibt_wr_atomic_s {
	ib_vaddr_t	atom_raddr;	/* Remote address. */
	ibt_atom_arg_t	atom_arg1;	/* operand #1 */
	ibt_atom_arg_t	atom_arg2;	/* operand #2 */
	ibt_rkey_t	atom_rkey;	/* R_Key. */
} ibt_wr_atomic_t;
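
/*
 * Illustrative sketch (not part of the original header): filling in a
 * compare-and-swap atomic operation as described above.  The remote
 * address, R_Key and operand values are hypothetical; the single 8-byte
 * local data segment receives the original remote contents.
 */
#if 0
	ibt_wr_atomic_t	atom;

	atom.atom_raddr = remote_va;	/* 8-byte aligned remote address */
	atom.atom_rkey = remote_rkey;
	atom.atom_arg1 = expected_val;	/* compare operand */
	atom.atom_arg2 = new_val;	/* swap operand */
	/*
	 * The enclosing ibt_send_wr_t would use wr_opcode = IBT_WRC_CSWAP,
	 * wr.rc.rcwr.atomic = &atom, and a single 8-byte wr_sgl entry.
	 */
#endif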

/*
 * RDMA WR
 * Immediate Data indicator in ibt_wr_flags_t.
 */
typedef struct ibt_wr_rdma_s {
	ib_vaddr_t	rdma_raddr;	/* Remote address. */
	ibt_rkey_t	rdma_rkey;	/* R_Key. */
	ibt_immed_t	rdma_immed;	/* Immediate Data */
} ibt_wr_rdma_t;

/*
 * Fast Register Physical Memory Region Work Request.
 */
typedef struct ibt_wr_reg_pmr_s {
	ib_vaddr_t	pmr_iova;	/* I/O virtual address requested by */
					/* client for the first byte of the */
					/* region */
	ib_memlen_t	pmr_len;	/* Length of region to register */
	ib_memlen_t	pmr_offset;	/* Offset of the region's starting */
					/* IOVA within the 1st physical */
					/* buffer */
	ibt_mr_hdl_t	pmr_mr_hdl;
	ibt_phys_addr_t	*pmr_addr_list;	/* List of physical buffers accessed */
					/* as an array */
	size_t		pmr_buf_sz;	/* size of uniform size PBEs */
	uint_t		pmr_num_buf;	/* #entries in the pmr_addr_list */
	ibt_lkey_t	pmr_lkey;	/* new lkey upon return */
	ibt_rkey_t	pmr_rkey;	/* new rkey upon return */
	ibt_mr_flags_t	pmr_flags;
	uint8_t		pmr_key;	/* Key to use on new Lkey & Rkey */
} ibt_wr_reg_pmr_t;

/* phys reg function or WR */
typedef union ibt_reg_req_u {
	ibt_pmr_attr_t		fn_arg;
	ibt_wr_reg_pmr_t	wr;
} ibt_reg_req_t;

/*
 * Local Invalidate.
 */
typedef struct ibt_wr_li_s {
	ibt_mr_hdl_t	li_mr_hdl;	/* Null for MW invalidates */
	ibt_mw_hdl_t	li_mw_hdl;	/* Null for MR invalidates */
	ibt_lkey_t	li_lkey;	/* Ignore for MW invalidates */
	ibt_rkey_t	li_rkey;
} ibt_wr_li_t;

/*
 * Reserved For Future Use.
 * Raw IPv6 Send WR
 */
typedef struct ibt_wr_ripv6_s {
	ib_lid_t	rip_dlid;	/* DLID */
	ib_path_bits_t	rip_slid_bits;	/* SLID path bits, SWG_0033 errata */
	uint8_t		rip_sl:4;	/* SL */
	ibt_srate_t	rip_rate;	/* Max Static Rate, SWG_0007 errata */
} ibt_wr_ripv6_t;

/*
 * Reserved For Future Use.
 * Raw Ethertype Send WR
 */
typedef struct ibt_wr_reth_s {
	ib_ethertype_t	reth_type;	/* Ethertype */
	ib_lid_t	reth_dlid;	/* DLID */
	ib_path_bits_t	reth_slid_bits;	/* SLID path bits, SWG_0033 errata */
	uint8_t		reth_sl:4;	/* SL */
	ibt_srate_t	reth_rate;	/* Max Static Rate, SWG_0007 errata */
} ibt_wr_reth_t;

/*
 * Reserved For Future Use.
 * RD Send WR, Operation type in ibt_wrc_opcode_t.
 */
typedef struct ibt_wr_rd_s {
	ibt_rd_dest_hdl_t	rdwr_dest_hdl;
	union {
		ibt_immed_t	send_immed;	/* IBT_WRC_SEND */
		ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAR */
						/* IBT_WRC_RDMAW */
		ibt_wr_li_t	*li;		/* IBT_WRC_LOCAL_INVALIDATE */
		ibt_wr_atomic_t	*atomic;	/* IBT_WRC_FADD */
						/* IBT_WRC_CSWAP */
		ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
		ibt_wr_reg_pmr_t *reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} rdwr;
} ibt_wr_rd_t;

/*
 * Reserved For Future Use.
 * UC Send WR, Operation type in ibt_wrc_opcode_t, the only valid
 * ones are:
 *		IBT_WRC_SEND
 *		IBT_WRC_RDMAW
 *		IBT_WRC_BIND
 */
typedef struct ibt_wr_uc_s {
	union {
		ibt_immed_t	send_immed;	/* IBT_WRC_SEND */
		ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAW */
		ibt_wr_li_t	*li;		/* IBT_WRC_LOCAL_INVALIDATE */
		ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
		ibt_wr_reg_pmr_t *reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} ucwr;
} ibt_wr_uc_t;

/*
 * RC Send WR, Operation type in ibt_wrc_opcode_t.
 */
typedef struct ibt_wr_rc_s {
	union {
		ibt_immed_t	send_immed;	/* IBT_WRC_SEND w/ immediate */
		ibt_rkey_t	send_inval;	/* IBT_WRC_SEND w/ invalidate */
		ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAR */
						/* IBT_WRC_RDMAW */
		ibt_wr_li_t	*li;		/* IBT_WRC_LOCAL_INVALIDATE */
		ibt_wr_atomic_t	*atomic;	/* IBT_WRC_CSWAP */
						/* IBT_WRC_FADD */
		ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
		ibt_wr_reg_pmr_t *reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} rcwr;
} ibt_wr_rc_t;

/*
 * UD Send WR, the only valid Operation is IBT_WRC_SEND.
 */
typedef struct ibt_wr_ud_s {
	ibt_immed_t		udwr_immed;
	ibt_ud_dest_hdl_t	udwr_dest;
} ibt_wr_ud_t;

/* LSO variant */
typedef struct ibt_wr_lso_s {
	ibt_ud_dest_hdl_t	lso_ud_dest;
	uint8_t			*lso_hdr;
	ib_msglen_t		lso_hdr_sz;
	ib_msglen_t		lso_mss;
} ibt_wr_lso_t;

/* FC WR definitions */
typedef enum ibt_fctl_flags_e {			/* F_CTL flags */
	IBT_FCTL_NO_FLAGS	= 0,
	IBT_FCTL_SIT		= (1 << 16),	/* seq initiative transfer */
	IBT_FCTL_PRIO		= (1 << 17),	/* InitAndSend WR: priority */
	IBT_FCTL_LAST_SEQ	= (1 << 20),
	/* InitAndSend WR: Exchange Originator, set = initiator, off = tgt */
	IBT_FCTL_ORIG_INIT	= (1 << 23)
} ibt_fctl_flags_t;
#define	IBT_FCTL_SET_ABORT_FIELD(VAL)	(((VAL) & 0x3) << 4) /* InitAndSend WR */
#define	IBT_FCTL_GET_ABORT_FIELD(FCTL)	(((FCTL) & 0x30) >> 4)

/* FC information category value, low 4 bits of routing control */
#define	IBT_FC_INFO_SOL_DATA	1	/* solicited data */
#define	IBT_FC_INFO_DATA_DESC	5	/* data descriptor */
#define	IBT_FC_INFO_UNSOL_CMD	6	/* unsolicited command */
#define	IBT_FC_INFO_CMD_STAT	7	/* command status */

typedef struct ibt_fc_ctl_s {
	ibt_ud_dest_hdl_t	fc_dest;
	ibt_fctl_flags_t	fc_frame_ctrl;
	uint32_t		fc_parameter;
	uint8_t			fc_seq_id;
	/* FC R_CTL containing information category */
	uint8_t			fc_routing_ctrl;
} ibt_fc_ctl_t;

/* RFCI version of send */
typedef struct ibt_wr_rfci_send_s {
	ibt_ud_dest_hdl_t	rfci_dest;
	uint8_t			rfci_eof;	/* RFCI: when FC CRC set */
} ibt_wr_rfci_send_t;

typedef uint8_t ibt_init_send_op_t;
#define	IBT_IS_OP_TARGET	0x0	/* target mode or no IO initiator op */
#define	IBT_IS_OP_NO_IO		IBT_IS_OP_TARGET
#define	IBT_IS_OP_IO_READ	0x1	/* IO read */
#define	IBT_IS_OP_IO_WRITE	0x2	/* IO write */
#define	IBT_IS_OP_BIDIR		0x3	/* bidirectional command */

/* Init and Send for FCMD initiator and also Init for FEXCH target */
typedef struct ibt_wr_init_send_s {
	ibt_fc_ctl_t	is_ctl;
	uint32_t	is_dest_id;	/* FC hdr: D_ID, low 24 bits */
	uint16_t	is_fc_mtu;	/* packet MTU (4B), low 10 bits */
	uint16_t	is_rem_exch;	/* target: remote exchange */
	uint16_t	is_exch_qp_idx;	/* FEXCH index for ULP */
	uint8_t		is_cs_priority;	/* FC hdr: CS_CTL/Priority */
	uint8_t		is_tx_seq_id;	/* initiator: FCP_DATA seq_id */
	ibt_init_send_op_t is_op;
} ibt_wr_init_send_t;

typedef union ibt_wr_fc_u {
	ibt_wr_rfci_send_t	rfci_send;	/* RFCI send */
	ibt_wr_init_send_t	*fc_is;		/* FCMD, FEXCH */
	ibt_wr_reg_pmr_t	*reg_pmr;	/* FCMD */
} ibt_wr_fc_t;


/*
 * Send Work Request (WR) attributes structure.
 *
 * Operation type in ibt_wrc_opcode_t.
 * Immediate Data indicator in ibt_wr_flags_t.
 *
 * RFCI initiator QP: send (FCP_CONF)
 * FCMD initiator QP: init & send (FCP_CMND), FRWR
 * FEXCH target QP: init, FRWR, RDMA-R (FCP_XFER_RDY), RDMA-W (FCP_DATA),
 *	Send (FCP_RSP)
 */
typedef struct ibt_send_wr_s {
	ibt_wrid_t		wr_id;		/* WR ID */
	ibt_wr_flags_t		wr_flags;	/* Work Request Flags. */
	ibt_tran_srv_t		wr_trans;	/* Transport Type. */
	ibt_wrc_opcode_t	wr_opcode;	/* Operation Type. */
	uint8_t			wr_rsvd;	/* maybe later */
	uint32_t		wr_nds;		/* Number of data segments */
						/* pointed to by wr_sgl */
	ibt_wr_ds_t		*wr_sgl;	/* SGL */
	union {
		ibt_wr_ud_t	ud;
		ibt_wr_rc_t	rc;
		ibt_wr_rd_t	rd;	/* Reserved For Future Use */
		ibt_wr_uc_t	uc;	/* Reserved For Future Use */
		ibt_wr_reth_t	reth;	/* Reserved For Future Use */
		ibt_wr_ripv6_t	ripv6;	/* Reserved For Future Use */
		ibt_wr_lso_t	ud_lso;
		ibt_wr_fc_t	fc;	/* RFCI, FCMD, FEXCH */
	} wr;				/* operation specific */
} ibt_send_wr_t;
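
/*
 * Illustrative sketch (not part of the original header): an RDMA write
 * over an RC channel described with a single-entry scatter-gather list.
 * The local/remote addresses, keys and "my_request" token are hypothetical;
 * the completed structure would be handed to the send-posting interface
 * for the channel.  Compiled out.
 */
#if 0
	ibt_wr_ds_t	sgl;
	ibt_send_wr_t	wr;

	sgl.ds_va = (ib_vaddr_t)(uintptr_t)local_buf;
	sgl.ds_key = local_lkey;
	sgl.ds_len = (ib_msglen_t)len;

	wr.wr_id = (ibt_wrid_t)(uintptr_t)my_request;
	wr.wr_flags = IBT_WR_SEND_SIGNAL;	/* ask for a completion */
	wr.wr_trans = IBT_RC_SRV;
	wr.wr_opcode = IBT_WRC_RDMAW;
	wr.wr_nds = 1;
	wr.wr_sgl = &sgl;
	wr.wr.rc.rcwr.rdma.rdma_raddr = remote_va;
	wr.wr.rc.rcwr.rdma.rdma_rkey = remote_rkey;
#endif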

/*
 * Receive Work Request (WR) attributes structure.
 *
 * also used by these FC QP types:
 *	RFCI initiator QP
 *	FEXCH initiator QP (FCP_RSP)
 *	RFCI target QP (FCP_CMND)
 */
typedef struct ibt_recv_wr_s {
	ibt_wrid_t		wr_id;		/* WR ID */
	uint32_t		wr_nds;		/* number of data segments */
						/* pointed to by wr_sgl */
	ibt_wr_ds_t		*wr_sgl;	/* SGL */
} ibt_recv_wr_t;

typedef union ibt_all_wr_u {
	ibt_send_wr_t	send;
	ibt_recv_wr_t	recv;
} ibt_all_wr_t;


/*
 * Asynchronous Events and Errors.
 *
 * The following codes are not used in calls to ibc_async_handler, but
 * are used by IBTL to inform IBT clients of a significant event.
 *
 *  IBT_HCA_ATTACH_EVENT	- New HCA available.
 *  IBT_HCA_DETACH_EVENT	- HCA is requesting not to be used.
 *
 * ERRORs on a channel indicate that the channel has entered error state.
 * EVENTs on a channel indicate that the channel has not changed state.
 *
 */
typedef enum ibt_async_code_e {
	IBT_EVENT_PATH_MIGRATED		= 0x000001,
	IBT_EVENT_SQD			= 0x000002,
	IBT_EVENT_COM_EST		= 0x000004,
	IBT_ERROR_CATASTROPHIC_CHAN	= 0x000008,
	IBT_ERROR_INVALID_REQUEST_CHAN	= 0x000010,
	IBT_ERROR_ACCESS_VIOLATION_CHAN	= 0x000020,
	IBT_ERROR_PATH_MIGRATE_REQ	= 0x000040,

	IBT_ERROR_CQ			= 0x000080,

	IBT_EVENT_PORT_UP		= 0x000100,
	IBT_ERROR_PORT_DOWN		= 0x000200,
	IBT_ERROR_LOCAL_CATASTROPHIC	= 0x000400,

	IBT_HCA_ATTACH_EVENT		= 0x000800,
	IBT_HCA_DETACH_EVENT		= 0x001000,
	IBT_ASYNC_OPAQUE1		= 0x002000,
	IBT_ASYNC_OPAQUE2		= 0x004000,
	IBT_ASYNC_OPAQUE3		= 0x008000,
	IBT_ASYNC_OPAQUE4		= 0x010000,
	IBT_EVENT_LIMIT_REACHED_SRQ	= 0x020000,
	IBT_EVENT_EMPTY_CHAN		= 0x040000,
	IBT_ERROR_CATASTROPHIC_SRQ	= 0x080000,

	IBT_PORT_CHANGE_EVENT		= 0x100000,
	IBT_CLNT_REREG_EVENT		= 0x200000,
	IBT_FEXCH_ERROR			= 0x400000
} ibt_async_code_t;

#define	IBT_PORT_EVENTS (IBT_EVENT_PORT_UP|IBT_PORT_CHANGE_EVENT|\
    IBT_ERROR_PORT_DOWN|IBT_CLNT_REREG_EVENT)
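
/*
 * Illustrative sketch (not part of the original header): a small helper a
 * client's async handler might use to separate port-related notifications
 * from other codes.  The helper name is hypothetical.
 */
#if 0
static boolean_t
my_async_is_port_event(ibt_async_code_t code)
{
	/* IBT_PORT_EVENTS collects the four port related codes above */
	return ((code & IBT_PORT_EVENTS) != 0 ? B_TRUE : B_FALSE);
}
#endif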

typedef enum ibt_port_change_e {
	IBT_PORT_CHANGE_SGID		= 0x000001,	/* SGID table */
	IBT_PORT_CHANGE_PKEY		= 0x000002,	/* P_Key table */
	IBT_PORT_CHANGE_SM_LID		= 0x000004,	/* Master SM LID */
	IBT_PORT_CHANGE_SM_SL		= 0x000008,	/* Master SM SL */
	IBT_PORT_CHANGE_SUB_TIMEOUT	= 0x000010,	/* Subnet Timeout */
	IBT_PORT_CHANGE_SM_FLAG		= 0x000020,	/* IsSMDisabled bit */
	IBT_PORT_CHANGE_REREG		= 0x000040	/* IsClientReregSupport */
} ibt_port_change_t;

typedef uint8_t ibt_fc_syndrome_t;
#define	IBT_FC_BAD_IU		0x0
#define	IBT_FC_BROKEN_SEQ	0x1

/*
 * ibt_ci_data_in() and ibt_ci_data_out() flags.
 */
typedef enum ibt_ci_data_flags_e {
	IBT_CI_NO_FLAGS		= 0,
	IBT_CI_COMPLETE_ALLOC	= (1 << 0)
} ibt_ci_data_flags_t;

/*
 * Used by ibt_ci_data_in() and ibt_ci_data_out(); identifies the type of
 * handle that mapping data is being obtained for.
 */
typedef enum ibt_object_type_e {
	IBT_HDL_HCA	= 1,
	IBT_HDL_CHANNEL,
	IBT_HDL_CQ,
	IBT_HDL_PD,
	IBT_HDL_MR,
	IBT_HDL_MW,
	IBT_HDL_UD_DEST,
	IBT_HDL_SCHED,
	IBT_HDL_OPAQUE1,
	IBT_HDL_OPAQUE2,
	IBT_HDL_SRQ
} ibt_object_type_t;

/*
 * Standard information for ibt_ci_data_in() for memory regions.
 *
 * IBT_MR_DATA_IN_IF_VERSION is the value used in the mr_rev member.
 * mr_func is the callback handler. mr_arg1 and mr_arg2 are its arguments.
 */
#define	IBT_MR_DATA_IN_IF_VERSION	1
typedef struct ibt_mr_data_in_s {
	uint_t	mr_rev;
	void	(*mr_func)(void *, void *);
	void	*mr_arg1;
	void	*mr_arg2;
} ibt_mr_data_in_t;

/*
 * Memory error handler data structures; code, and payload data.
 */
typedef enum ibt_mem_code_s {
	IBT_MEM_AREA	= 0x1,
	IBT_MEM_REGION	= 0x2
} ibt_mem_code_t;

typedef struct ibt_mem_data_s {
	uint64_t	ev_fma_ena;	/* FMA Error data */
	ibt_mr_hdl_t	ev_mr_hdl;	/* MR handle */
	ibt_ma_hdl_t	ev_ma_hdl;	/* MA handle */
} ibt_mem_data_t;

/*
 * Special case failure type.
 */
typedef enum ibt_failure_type_e {
	IBT_FAILURE_STANDARD	= 0,
	IBT_FAILURE_CI,
	IBT_FAILURE_IBMF,
	IBT_FAILURE_IBTL,
	IBT_FAILURE_IBCM,
	IBT_FAILURE_IBDM,
	IBT_FAILURE_IBSM
} ibt_failure_type_t;

/*
 * RDMA IP CM service Annex definitions
 */
typedef struct ibt_ip_addr_s {
	sa_family_t family;		/* AF_INET or AF_INET6 */
	union {
		in_addr_t	ip4addr;
		in6_addr_t	ip6addr;
	} un;
	uint32_t	ip6_scope_id;	/* Applicable only for AF_INET6 */
} ibt_ip_addr_t;

#ifdef __cplusplus
}
#endif

#endif /* _SYS_IB_IBTL_IBTL_TYPES_H */