/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>

#include <asm/atomic.h>
#include <asm/scatterlist.h>
#include <asm/uaccess.h>

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum ib_node_type {
	IB_NODE_CA	= 1,
	IB_NODE_SWITCH,
	IB_NODE_ROUTER
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}
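
/*
 * Illustrative sketch (not part of this header): the inverse mapping of
 * ib_mtu_enum_to_int(), picking the largest IB MTU enum value that fits
 * in a given byte count.  The helper name is made up for illustration.
 */
static inline enum ib_mtu example_int_to_ib_mtu(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	if (mtu >= 2048)
		return IB_MTU_2048;
	if (mtu >= 1024)
		return IB_MTU_1024;
	if (mtu >= 512)
		return IB_MTU_512;
	return IB_MTU_256;
}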

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void		(*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
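
/*
 * Illustrative sketch (not part of this header): the two conversions
 * above are inverses over the defined rates, which lets a consumer
 * compute bandwidth in units of 100 Mb/s without floating point.
 * This assumes ib_rate_to_mult() returns a negative value for rates
 * it does not recognize.
 */
static inline int example_rate_to_100mbps(enum ib_rate rate)
{
	int mult = ib_rate_to_mult(rate);

	return mult > 0 ? mult * 25 : -1;	/* 2.5 Gb/s == 25 * 100 Mb/s */
}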

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH	= 1,
	IB_WC_WITH_IMM	= (1<<1)
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	__be32			imm_data;
	u32			qp_num;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
};
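
/*
 * Illustrative sketch: because IB_WC_RECV is a single high bit, a
 * consumer can classify any completion with one mask test instead of
 * enumerating the receive opcodes.
 */
static inline int example_wc_is_recv(struct ib_wc *wc)
{
	return !!(wc->opcode & IB_WC_RECV);
}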

enum ib_cq_notify {
	IB_CQ_SOLICITED,
	IB_CQ_NEXT_COMP
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u8			port_num; /* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};
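
/*
 * Illustrative sketch: filling a gather/scatter entry.  For kernel
 * consumers, addr is typically a DMA (bus) address obtained from the
 * DMA mapping API, not a kernel virtual address, and lkey comes from
 * the memory region covering the buffer.
 */
static inline void example_fill_sge(struct ib_sge *sge,
				    u64 dma_addr, u32 len, u32 lkey)
{
	sge->addr   = dma_addr;
	sge->length = len;
	sge->lkey   = lkey;
}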

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	__be32			imm_data;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
	} wr;
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	u32			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_umem {
	unsigned long		user_base;
	unsigned long		virt_base;
	size_t			length;
	int			offset;
	int			page_size;
	int			writable;
	struct list_head	chunk_list;
};

struct ib_umem_chunk {
	struct list_head	list;
	int			nents;
	int			nmap;
	struct scatterlist	page_list[0];
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t	     inlen;
	size_t	     outlen;
};

#define IB_UMEM_MAX_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

struct ib_umem_object {
	struct ib_uobject	uobject;
	struct ib_umem		umem;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	atomic_t		usecnt; /* count number of work queues */
};
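
/*
 * Illustrative sketch: the shape of a consumer completion handler
 * matching the ib_comp_handler typedef above.  cq_context is the
 * consumer-private pointer supplied when the CQ was created; a real
 * handler would poll the CQ and rearm notification (see ib_poll_cq()
 * and ib_req_notify_cq() below).
 */
static inline void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
	/* poll cq and rearm notification here */
}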

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	atomic_t		usecnt;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_device {
	struct device	       *dma_device;

	char			name[IB_DEVICE_NAME_MAX];

	struct list_head	event_handler_list;
	spinlock_t		event_handler_lock;

	struct list_head	core_list;
	struct list_head	client_data_list;
	spinlock_t		client_data_lock;

	struct ib_cache		cache;

	u32			flags;

	int                (*query_device)(struct ib_device *device,
					   struct ib_device_attr *device_attr);
	int                (*query_port)(struct ib_device *device,
					 u8 port_num,
					 struct ib_port_attr *port_attr);
	int                (*query_gid)(struct ib_device *device,
					u8 port_num, int index,
					union ib_gid *gid);
	int                (*query_pkey)(struct ib_device *device,
					 u8 port_num, u16 index, u16 *pkey);
	int                (*modify_device)(struct ib_device *device,
					    int device_modify_mask,
					    struct ib_device_modify *device_modify);
	int                (*modify_port)(struct ib_device *device,
					  u8 port_num, int port_modify_mask,
					  struct ib_port_modify *port_modify);
	struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
					      struct ib_udata *udata);
	int                (*dealloc_ucontext)(struct ib_ucontext *context);
	int                (*mmap)(struct ib_ucontext *context,
				   struct vm_area_struct *vma);
	struct ib_pd *     (*alloc_pd)(struct ib_device *device,
				       struct ib_ucontext *context,
				       struct ib_udata *udata);
	int                (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *     (*create_ah)(struct ib_pd *pd,
					struct ib_ah_attr *ah_attr);
	int                (*modify_ah)(struct ib_ah *ah,
					struct ib_ah_attr *ah_attr);
	int                (*query_ah)(struct ib_ah *ah,
				       struct ib_ah_attr *ah_attr);
	int                (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *    (*create_srq)(struct ib_pd *pd,
					 struct ib_srq_init_attr *srq_init_attr,
					 struct ib_udata *udata);
	int                (*modify_srq)(struct ib_srq *srq,
					 struct ib_srq_attr *srq_attr,
					 enum ib_srq_attr_mask srq_attr_mask);
	int                (*query_srq)(struct ib_srq *srq,
					struct ib_srq_attr *srq_attr);
	int                (*destroy_srq)(struct ib_srq *srq);
	int                (*post_srq_recv)(struct ib_srq *srq,
					    struct ib_recv_wr *recv_wr,
					    struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *     (*create_qp)(struct ib_pd *pd,
					struct ib_qp_init_attr *qp_init_attr,
					struct ib_udata *udata);
	int                (*modify_qp)(struct ib_qp *qp,
					struct ib_qp_attr *qp_attr,
					int qp_attr_mask);
	int                (*query_qp)(struct ib_qp *qp,
				       struct ib_qp_attr *qp_attr,
				       int qp_attr_mask,
				       struct ib_qp_init_attr *qp_init_attr);
	int                (*destroy_qp)(struct ib_qp *qp);
	int                (*post_send)(struct ib_qp *qp,
					struct ib_send_wr *send_wr,
					struct ib_send_wr **bad_send_wr);
	int                (*post_recv)(struct ib_qp *qp,
					struct ib_recv_wr *recv_wr,
					struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *     (*create_cq)(struct ib_device *device, int cqe,
					struct ib_ucontext *context,
					struct ib_udata *udata);
	int                (*destroy_cq)(struct ib_cq *cq);
	int                (*resize_cq)(struct ib_cq *cq, int cqe,
					struct ib_udata *udata);
	int                (*poll_cq)(struct ib_cq *cq, int num_entries,
				      struct ib_wc *wc);
	int                (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                (*req_notify_cq)(struct ib_cq *cq,
					    enum ib_cq_notify cq_notify);
	int                (*req_ncomp_notif)(struct ib_cq *cq,
					      int wc_cnt);
	struct ib_mr *     (*get_dma_mr)(struct ib_pd *pd,
					 int mr_access_flags);
	struct ib_mr *     (*reg_phys_mr)(struct ib_pd *pd,
					  struct ib_phys_buf *phys_buf_array,
					  int num_phys_buf,
					  int mr_access_flags,
					  u64 *iova_start);
	struct ib_mr *     (*reg_user_mr)(struct ib_pd *pd,
					  struct ib_umem *region,
					  int mr_access_flags,
					  struct ib_udata *udata);
	int                (*query_mr)(struct ib_mr *mr,
				       struct ib_mr_attr *mr_attr);
	int                (*dereg_mr)(struct ib_mr *mr);
	int                (*rereg_phys_mr)(struct ib_mr *mr,
					    int mr_rereg_mask,
					    struct ib_pd *pd,
					    struct ib_phys_buf *phys_buf_array,
					    int num_phys_buf,
					    int mr_access_flags,
					    u64 *iova_start);
	struct ib_mw *     (*alloc_mw)(struct ib_pd *pd);
	int                (*bind_mw)(struct ib_qp *qp,
				      struct ib_mw *mw,
				      struct ib_mw_bind *mw_bind);
	int                (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *    (*alloc_fmr)(struct ib_pd *pd,
					int mr_access_flags,
					struct ib_fmr_attr *fmr_attr);
	int                (*map_phys_fmr)(struct ib_fmr *fmr,
					   u64 *page_list, int list_len,
					   u64 iova);
	int                (*unmap_fmr)(struct list_head *fmr_list);
	int                (*dealloc_fmr)(struct ib_fmr *fmr);
	int                (*attach_mcast)(struct ib_qp *qp,
					   union ib_gid *gid,
					   u16 lid);
	int                (*detach_mcast)(struct ib_qp *qp,
					   union ib_gid *gid,
					   u16 lid);
	int                (*process_mad)(struct ib_device *device,
					  int process_mad_flags,
					  u8 port_num,
					  struct ib_wc *in_wc,
					  struct ib_grh *in_grh,
					  struct ib_mad *in_mad,
					  struct ib_mad *out_mad);

	struct module	       *owner;
	struct class_device	class_dev;
	struct kobject		ports_parent;
	struct list_head	port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	u64			uverbs_cmd_mask;
	int			uverbs_abi_ver;

	char			node_desc[64];
	__be64			node_guid;
	u8			node_type;
	u8			phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);
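
/*
 * Illustrative sketch (hypothetical consumer, not part of this header):
 * an upper-layer module typically hooks device discovery by registering
 * an ib_client; add() is called for every existing and future device,
 * and per-device state is stashed with ib_set_client_data():
 *
 *	static struct ib_client example_client;
 *
 *	static void example_add_one(struct ib_device *device)
 *	{
 *		struct example_state *st = kzalloc(sizeof *st, GFP_KERNEL);
 *		ib_set_client_data(device, &example_client, st);
 *	}
 *
 *	static struct ib_client example_client = {
 *		.name   = "example",
 *		.add    = example_add_one,
 *		.remove = example_remove_one
 *	};
 *
 *	ib_register_client(&example_client);
 *
 * remove() retrieves the same pointer via ib_get_client_data() and
 * frees it.  All example_* names are placeholders.
 */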

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
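
/*
 * Illustrative sketch (not part of the verbs API): building an
 * ib_ah_attr for a plain LID-routed destination (no GRH) and creating
 * an AH from it.  Treating static_rate == 0 as "use the port's own
 * rate" follows common verbs usage; all values here are placeholders.
 */
static inline struct ib_ah *example_create_simple_ah(struct ib_pd *pd,
						     u16 dlid, u8 sl,
						     u8 port_num)
{
	struct ib_ah_attr attr = {
		.dlid	       = dlid,
		.sl	       = sl,
		.src_path_bits = 0,
		.static_rate   = 0,
		.ah_flags      = 0,	/* IB_AH_GRH not set: LID routing only */
		.port_num      = port_num
	};

	return ib_create_ah(pd, &attr);
}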

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);
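
/*
 * Illustrative sketch: filling ib_srq_init_attr for ib_create_srq().
 * The queue depth and SGE count are placeholders; on success the core
 * updates the attributes with what was actually allocated (at least as
 * large as requested, per the comment above).
 */
static inline struct ib_srq *example_create_srq(struct ib_pd *pd,
			void (*handler)(struct ib_event *, void *),
			void *context)
{
	struct ib_srq_init_attr init = {
		.event_handler = handler,
		.srq_context   = context,
		.attr	       = {
			.max_wr	 = 128,	/* placeholder depth */
			.max_sge = 1	/* placeholder scatter list size */
		}
	};

	return ib_create_srq(pd, &init);
}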

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
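
/*
 * Illustrative sketch: posting a single signaled RDMA write through
 * ib_post_send().  dma_addr/lkey describe already-registered local
 * memory and raddr/rkey the remote buffer; all are placeholders the
 * caller must obtain from memory registration.
 */
static inline int example_post_rdma_write(struct ib_qp *qp,
					  u64 dma_addr, u32 len, u32 lkey,
					  u64 raddr, u32 rkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey
	};
	struct ib_send_wr wr = {
		.wr_id	    = 0,	/* consumer cookie, returned in ib_wc */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_RDMA_WRITE,
		.send_flags = IB_SEND_SIGNALED,
		.wr	    = {
			.rdma = {
				.remote_addr = raddr,
				.rkey	     = rkey
			}
		}
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}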

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq:the CQ being polled
 * @num_entries:maximum number of completions to return
 * @wc:array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
 *   occur on the next solicited event.  If set to %IB_CQ_NEXT_COMP,
 *   notification will occur on the next completion.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify cq_notify)
{
	return cq->device->req_notify_cq(cq, cq_notify);
}
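
/*
 * Illustrative sketch: the usual poll/rearm pattern for a completion
 * handler.  Rearming with ib_req_notify_cq() and then polling once more
 * closes the race with completions that arrive after the final empty
 * poll but before the rearm takes effect.
 */
static inline void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		; /* consume wc.wr_id / check wc.status here */

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(cq, 1, &wc) > 0)
		; /* handle completions that raced with the rearm */
}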

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs the functions of deregistering the
 *   memory region followed by registering a physical memory region.
 *   Where possible, resources are reused instead of deallocated and
 *   reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);
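
/*
 * Illustrative sketch: registering a single physically contiguous
 * buffer for local and remote write.  buf_addr and size are
 * placeholders; *iova_start is the requested I/O virtual address and
 * is updated by the driver on return.
 */
static inline struct ib_mr *example_reg_one_buf(struct ib_pd *pd,
						u64 buf_addr, u64 size,
						u64 *iova_start)
{
	struct ib_phys_buf buf = {
		.addr = buf_addr,
		.size = size
	};

	return ib_reg_phys_mr(pd, &buf, 1,
			      IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE,
			      iova_start);
}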

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */