/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>

#include <asm/atomic.h>
#include <asm/scatterlist.h>
#include <asm/uaccess.h>

union ib_gid {
        u8      raw[16];
        struct {
                __be64  subnet_prefix;
                __be64  interface_id;
        } global;
};

enum ib_node_type {
        IB_NODE_CA      = 1,
        IB_NODE_SWITCH,
        IB_NODE_ROUTER
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR         = 1,
        IB_DEVICE_BAD_PKEY_CNTR         = (1<<1),
        IB_DEVICE_BAD_QKEY_CNTR         = (1<<2),
        IB_DEVICE_RAW_MULTI             = (1<<3),
        IB_DEVICE_AUTO_PATH_MIG         = (1<<4),
        IB_DEVICE_CHANGE_PHY_PORT       = (1<<5),
        IB_DEVICE_UD_AV_PORT_ENFORCE    = (1<<6),
        IB_DEVICE_CURR_QP_STATE_MOD     = (1<<7),
        IB_DEVICE_SHUTDOWN_PORT         = (1<<8),
        IB_DEVICE_INIT_TYPE             = (1<<9),
        IB_DEVICE_PORT_ACTIVE_EVENT     = (1<<10),
        IB_DEVICE_SYS_IMAGE_GUID        = (1<<11),
        IB_DEVICE_RC_RNR_NAK_GEN        = (1<<12),
        IB_DEVICE_SRQ_RESIZE            = (1<<13),
        IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

struct ib_device_attr {
        u64                     fw_ver;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
        u32                     vendor_id;
        u32                     vendor_part_id;
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
        int                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
        int                     max_cqe;
        int                     max_mr;
        int                     max_pd;
        int                     max_qp_rd_atom;
        int                     max_ee_rd_atom;
        int                     max_res_rd_atom;
        int                     max_qp_init_rd_atom;
        int                     max_ee_init_rd_atom;
        enum ib_atomic_cap      atomic_cap;
        int                     max_ee;
        int                     max_rdd;
        int                     max_mw;
        int                     max_raw_ipv6_qp;
        int                     max_raw_ethy_qp;
        int                     max_mcast_grp;
        int                     max_mcast_qp_attach;
        int                     max_total_mcast_qp_attach;
        int                     max_ah;
        int                     max_fmr;
        int                     max_map_per_fmr;
        int                     max_srq;
        int                     max_srq_wr;
        int                     max_srq_sge;
        u16                     max_pkeys;
        u8                      local_ca_ack_delay;
};

enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return  256;
        case IB_MTU_512:  return  512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}
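/*
 * Example (illustrative sketch only, not part of the API): a consumer
 * might query a port and convert the reported MTU enum to bytes with
 * ib_query_port() and ib_mtu_enum_to_int(), both provided by this
 * header.  The identifiers "device" and the port number 1 are
 * placeholders.
 *
 *      struct ib_port_attr attr;
 *      int mtu_bytes;
 *
 *      if (!ib_query_port(device, 1, &attr)) {
 *              mtu_bytes = ib_mtu_enum_to_int(attr.active_mtu);
 *              if (mtu_bytes > 0)
 *                      printk(KERN_INFO "port 1 active MTU: %d bytes\n",
 *                             mtu_bytes);
 *      }
 */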
enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
        IB_PORT_INIT            = 2,
        IB_PORT_ARMED           = 3,
        IB_PORT_ACTIVE          = 4,
        IB_PORT_ACTIVE_DEFER    = 5
};

enum ib_port_cap_flags {
        IB_PORT_SM                              = 1 <<  1,
        IB_PORT_NOTICE_SUP                      = 1 <<  2,
        IB_PORT_TRAP_SUP                        = 1 <<  3,
        IB_PORT_OPT_IPD_SUP                     = 1 <<  4,
        IB_PORT_AUTO_MIGR_SUP                   = 1 <<  5,
        IB_PORT_SL_MAP_SUP                      = 1 <<  6,
        IB_PORT_MKEY_NVRAM                      = 1 <<  7,
        IB_PORT_PKEY_NVRAM                      = 1 <<  8,
        IB_PORT_LED_INFO_SUP                    = 1 <<  9,
        IB_PORT_SM_DISABLED                     = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP              = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP       = 1 << 12,
        IB_PORT_CM_SUP                          = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP                 = 1 << 17,
        IB_PORT_REINIT_SUP                      = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP                 = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP                = 1 << 20,
        IB_PORT_DR_NOTICE_SUP                   = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP             = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP                   = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP                = 1 << 24,
        IB_PORT_CLIENT_REG_SUP                  = 1 << 25
};

enum ib_port_width {
        IB_WIDTH_1X     = 1,
        IB_WIDTH_4X     = 2,
        IB_WIDTH_8X     = 4,
        IB_WIDTH_12X    = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return  1;
        case IB_WIDTH_4X:  return  4;
        case IB_WIDTH_8X:  return  8;
        case IB_WIDTH_12X: return 12;
        default:           return -1;
        }
}

struct ib_port_attr {
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
        int                     gid_tbl_len;
        u32                     port_cap_flags;
        u32                     max_msg_sz;
        u32                     bad_pkey_cntr;
        u32                     qkey_viol_cntr;
        u16                     pkey_tbl_len;
        u16                     lid;
        u16                     sm_lid;
        u8                      lmc;
        u8                      max_vl_num;
        u8                      sm_sl;
        u8                      subnet_timeout;
        u8                      init_type_reply;
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC      = 1 << 1
};

struct ib_device_modify {
        u64     sys_image_guid;
        char    node_desc[64];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN        = 1,
        IB_PORT_INIT_TYPE       = (1<<2),
        IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
        u32     set_port_cap_mask;
        u32     clr_port_cap_mask;
        u8      init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED
};

struct ib_event {
        struct ib_device        *device;
        union {
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
};

struct ib_event_handler {
        struct ib_device *device;
        void            (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
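/*
 * Example (illustrative sketch only): registering for asynchronous
 * events with ib_register_event_handler(), declared later in this
 * header.  my_event_handler and my_handler are placeholder names.
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              if (event->event == IB_EVENT_PORT_ACTIVE)
 *                      printk(KERN_INFO "port %d became active\n",
 *                             event->element.port_num);
 *      }
 *
 *      static struct ib_event_handler my_handler;
 *
 *      INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *      ib_register_event_handler(&my_handler);
 */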
struct ib_global_route {
        union ib_gid    dgid;
        u32             flow_label;
        u8              sgid_index;
        u8              hop_limit;
        u8              traffic_class;
};

struct ib_grh {
        __be32          version_tclass_flow;
        __be16          paylen;
        u8              next_hdr;
        u8              hop_limit;
        union ib_gid    sgid;
        union ib_gid    dgid;
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       __constant_htons(0xFFFF)

enum ib_ah_flags {
        IB_AH_GRH       = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS   = 5,
        IB_RATE_10_GBPS  = 3,
        IB_RATE_20_GBPS  = 6,
        IB_RATE_30_GBPS  = 4,
        IB_RATE_40_GBPS  = 7,
        IB_RATE_60_GBPS  = 8,
        IB_RATE_80_GBPS  = 9,
        IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
        struct ib_global_route  grh;
        u16                     dlid;
        u8                      sl;
        u8                      src_path_bits;
        u8                      static_rate;
        u8                      ah_flags;
        u8                      port_num;
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_BIND_MW,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
        IB_WC_RECV              = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
        IB_WC_GRH       = 1,
        IB_WC_WITH_IMM  = (1<<1)
};

struct ib_wc {
        u64                     wr_id;
        enum ib_wc_status       status;
        enum ib_wc_opcode       opcode;
        u32                     vendor_err;
        u32                     byte_len;
        __be32                  imm_data;
        u32                     qp_num;
        u32                     src_qp;
        int                     wc_flags;
        u16                     pkey_index;
        u16                     slid;
        u8                      sl;
        u8                      dlid_path_bits;
        u8                      port_num;       /* valid only for DR SMPs on switches */
};
enum ib_cq_notify {
        IB_CQ_SOLICITED,
        IB_CQ_NEXT_COMP
};

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR   = 1 << 0,
        IB_SRQ_LIMIT    = 1 << 1,
};

struct ib_srq_attr {
        u32     max_wr;
        u32     max_sge;
        u32     srq_limit;
};

struct ib_srq_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        struct ib_srq_attr      attr;
};

struct ib_qp_cap {
        u32     max_send_wr;
        u32     max_recv_wr;
        u32     max_send_sge;
        u32     max_recv_sge;
        u32     max_inline_data;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_qp_cap        cap;
        enum ib_sig_type        sq_sig_type;
        enum ib_qp_type         qp_type;
        u8                      port_num;       /* special QP types only */
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 =  0,
        IB_RNR_TIMER_000_01 =  1,
        IB_RNR_TIMER_000_02 =  2,
        IB_RNR_TIMER_000_03 =  3,
        IB_RNR_TIMER_000_04 =  4,
        IB_RNR_TIMER_000_06 =  5,
        IB_RNR_TIMER_000_08 =  6,
        IB_RNR_TIMER_000_12 =  7,
        IB_RNR_TIMER_000_16 =  8,
        IB_RNR_TIMER_000_24 =  9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE                     = 1,
        IB_QP_CUR_STATE                 = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY       = (1<<2),
        IB_QP_ACCESS_FLAGS              = (1<<3),
        IB_QP_PKEY_INDEX                = (1<<4),
        IB_QP_PORT                      = (1<<5),
        IB_QP_QKEY                      = (1<<6),
        IB_QP_AV                        = (1<<7),
        IB_QP_PATH_MTU                  = (1<<8),
        IB_QP_TIMEOUT                   = (1<<9),
        IB_QP_RETRY_CNT                 = (1<<10),
        IB_QP_RNR_RETRY                 = (1<<11),
        IB_QP_RQ_PSN                    = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC          = (1<<13),
        IB_QP_ALT_PATH                  = (1<<14),
        IB_QP_MIN_RNR_TIMER             = (1<<15),
        IB_QP_SQ_PSN                    = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC        = (1<<17),
        IB_QP_PATH_MIG_STATE            = (1<<18),
        IB_QP_CAP                       = (1<<19),
        IB_QP_DEST_QPN                  = (1<<20)
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

struct ib_qp_attr {
        enum ib_qp_state        qp_state;
        enum ib_qp_state        cur_qp_state;
        enum ib_mtu             path_mtu;
        enum ib_mig_state       path_mig_state;
        u32                     qkey;
        u32                     rq_psn;
        u32                     sq_psn;
        u32                     dest_qp_num;
        int                     qp_access_flags;
        struct ib_qp_cap        cap;
        struct ib_ah_attr       ah_attr;
        struct ib_ah_attr       alt_ah_attr;
        u16                     pkey_index;
        u16                     alt_pkey_index;
        u8                      en_sqd_async_notify;
        u8                      sq_draining;
        u8                      max_rd_atomic;
        u8                      max_dest_rd_atomic;
        u8                      min_rnr_timer;
        u8                      port_num;
        u8                      timeout;
        u8                      retry_cnt;
        u8                      rnr_retry;
        u8                      alt_port_num;
        u8                      alt_timeout;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_send_flags {
        IB_SEND_FENCE           = 1,
        IB_SEND_SIGNALED        = (1<<1),
        IB_SEND_SOLICITED       = (1<<2),
        IB_SEND_INLINE          = (1<<3)
};

struct ib_sge {
        u64     addr;
        u32     length;
        u32     lkey;
};
struct ib_send_wr {
        struct ib_send_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
        enum ib_wr_opcode       opcode;
        int                     send_flags;
        __be32                  imm_data;
        union {
                struct {
                        u64     remote_addr;
                        u32     rkey;
                } rdma;
                struct {
                        u64     remote_addr;
                        u64     compare_add;
                        u64     swap;
                        u32     rkey;
                } atomic;
                struct {
                        struct ib_ah *ah;
                        u32     remote_qpn;
                        u32     remote_qkey;
                        u16     pkey_index;     /* valid for GSI only */
                        u8      port_num;       /* valid for DR SMPs on switch only */
                } ud;
        } wr;
};

struct ib_recv_wr {
        struct ib_recv_wr      *next;
        u64                     wr_id;
        struct ib_sge          *sg_list;
        int                     num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE   = 1,
        IB_ACCESS_REMOTE_WRITE  = (1<<1),
        IB_ACCESS_REMOTE_READ   = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND       = (1<<4)
};

struct ib_phys_buf {
        u64     addr;
        u64     size;
};

struct ib_mr_attr {
        struct ib_pd    *pd;
        u64             device_virt_addr;
        u64             size;
        int             mr_access_flags;
        u32             lkey;
        u32             rkey;
};

enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS       = 1,
        IB_MR_REREG_PD          = (1<<1),
        IB_MR_REREG_ACCESS      = (1<<2)
};

struct ib_mw_bind {
        struct ib_mr   *mr;
        u64             wr_id;
        u64             addr;
        u32             length;
        int             send_flags;
        int             mw_access_flags;
};

struct ib_fmr_attr {
        int     max_pages;
        int     max_maps;
        u8      page_shift;
};

struct ib_ucontext {
        struct ib_device       *device;
        struct list_head        pd_list;
        struct list_head        mr_list;
        struct list_head        mw_list;
        struct list_head        cq_list;
        struct list_head        qp_list;
        struct list_head        srq_list;
        struct list_head        ah_list;
};

struct ib_uobject {
        u64                     user_handle;    /* handle given to us by userspace */
        struct ib_ucontext     *context;        /* associated user context */
        struct list_head        list;           /* link to context's list */
        u32                     id;             /* index into kernel idr */
};

struct ib_umem {
        unsigned long           user_base;
        unsigned long           virt_base;
        size_t                  length;
        int                     offset;
        int                     page_size;
        int                     writable;
        struct list_head        chunk_list;
};

struct ib_umem_chunk {
        struct list_head        list;
        int                     nents;
        int                     nmap;
        struct scatterlist      page_list[0];
};

struct ib_udata {
        void __user *inbuf;
        void __user *outbuf;
        size_t       inlen;
        size_t       outlen;
};

#define IB_UMEM_MAX_PAGE_CHUNK                                          \
        ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /     \
         ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -       \
          (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
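/*
 * IB_UMEM_MAX_PAGE_CHUNK is the number of struct scatterlist entries
 * that fit in a single page alongside the ib_umem_chunk header: the
 * space remaining in the page divided by the size of one page_list
 * element (computed as the distance between two adjacent entries).
 */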
struct ib_umem_object {
        struct ib_uobject       uobject;
        struct ib_umem          umem;
};

struct ib_pd {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        atomic_t                usecnt;         /* count all resources */
};

struct ib_ah {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
        struct ib_device       *device;
        struct ib_uobject      *uobject;
        ib_comp_handler         comp_handler;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *cq_context;
        int                     cqe;
        atomic_t                usecnt;         /* count number of work queues */
};

struct ib_srq {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *srq_context;
        atomic_t                usecnt;
};

struct ib_qp {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_srq          *srq;
        struct ib_uobject      *uobject;
        void                  (*event_handler)(struct ib_event *, void *);
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
};

struct ib_mr {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
        u32                     lkey;
        u32                     rkey;
        atomic_t                usecnt;         /* count number of MWs */
};

struct ib_mw {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct ib_uobject      *uobject;
        u32                     rkey;
};

struct ib_fmr {
        struct ib_device       *device;
        struct ib_pd           *pd;
        struct list_head        list;
        u32                     lkey;
        u32                     rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY      = 1,
        IB_MAD_IGNORE_BKEY      = 2,
        IB_MAD_IGNORE_ALL       = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
        IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
        IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t                lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache  **pkey_cache;
        struct ib_gid_cache   **gid_cache;
};

struct ib_device {
        struct device          *dma_device;

        char                    name[IB_DEVICE_NAME_MAX];

        struct list_head        event_handler_list;
        spinlock_t              event_handler_lock;

        struct list_head        core_list;
        struct list_head        client_data_list;
        spinlock_t              client_data_lock;

        struct ib_cache         cache;

        u32                     flags;

        int                   (*query_device)(struct ib_device *device,
                                              struct ib_device_attr *device_attr);
        int                   (*query_port)(struct ib_device *device,
                                            u8 port_num,
                                            struct ib_port_attr *port_attr);
        int                   (*query_gid)(struct ib_device *device,
                                           u8 port_num, int index,
                                           union ib_gid *gid);
        int                   (*query_pkey)(struct ib_device *device,
                                            u8 port_num, u16 index, u16 *pkey);
        int                   (*modify_device)(struct ib_device *device,
                                               int device_modify_mask,
                                               struct ib_device_modify *device_modify);
        int                   (*modify_port)(struct ib_device *device,
                                             u8 port_num, int port_modify_mask,
                                             struct ib_port_modify *port_modify);
        struct ib_ucontext *  (*alloc_ucontext)(struct ib_device *device,
                                                struct ib_udata *udata);
        int                   (*dealloc_ucontext)(struct ib_ucontext *context);
        int                   (*mmap)(struct ib_ucontext *context,
                                      struct vm_area_struct *vma);
        struct ib_pd *        (*alloc_pd)(struct ib_device *device,
                                          struct ib_ucontext *context,
                                          struct ib_udata *udata);
        int                   (*dealloc_pd)(struct ib_pd *pd);
        struct ib_ah *        (*create_ah)(struct ib_pd *pd,
                                           struct ib_ah_attr *ah_attr);
        int                   (*modify_ah)(struct ib_ah *ah,
                                           struct ib_ah_attr *ah_attr);
        int                   (*query_ah)(struct ib_ah *ah,
                                          struct ib_ah_attr *ah_attr);
        int                   (*destroy_ah)(struct ib_ah *ah);
        struct ib_srq *       (*create_srq)(struct ib_pd *pd,
                                            struct ib_srq_init_attr *srq_init_attr,
                                            struct ib_udata *udata);
        int                   (*modify_srq)(struct ib_srq *srq,
                                            struct ib_srq_attr *srq_attr,
                                            enum ib_srq_attr_mask srq_attr_mask);
        int                   (*query_srq)(struct ib_srq *srq,
                                           struct ib_srq_attr *srq_attr);
        int                   (*destroy_srq)(struct ib_srq *srq);
        int                   (*post_srq_recv)(struct ib_srq *srq,
                                               struct ib_recv_wr *recv_wr,
                                               struct ib_recv_wr **bad_recv_wr);
        struct ib_qp *        (*create_qp)(struct ib_pd *pd,
                                           struct ib_qp_init_attr *qp_init_attr,
                                           struct ib_udata *udata);
        int                   (*modify_qp)(struct ib_qp *qp,
                                           struct ib_qp_attr *qp_attr,
                                           int qp_attr_mask);
        int                   (*query_qp)(struct ib_qp *qp,
                                          struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask,
                                          struct ib_qp_init_attr *qp_init_attr);
        int                   (*destroy_qp)(struct ib_qp *qp);
        int                   (*post_send)(struct ib_qp *qp,
                                           struct ib_send_wr *send_wr,
                                           struct ib_send_wr **bad_send_wr);
        int                   (*post_recv)(struct ib_qp *qp,
                                           struct ib_recv_wr *recv_wr,
                                           struct ib_recv_wr **bad_recv_wr);
        struct ib_cq *        (*create_cq)(struct ib_device *device, int cqe,
                                           struct ib_ucontext *context,
                                           struct ib_udata *udata);
        int                   (*destroy_cq)(struct ib_cq *cq);
        int                   (*resize_cq)(struct ib_cq *cq, int cqe,
                                           struct ib_udata *udata);
        int                   (*poll_cq)(struct ib_cq *cq, int num_entries,
                                         struct ib_wc *wc);
        int                   (*peek_cq)(struct ib_cq *cq, int wc_cnt);
        int                   (*req_notify_cq)(struct ib_cq *cq,
                                               enum ib_cq_notify cq_notify);
        int                   (*req_ncomp_notif)(struct ib_cq *cq,
                                                 int wc_cnt);
        struct ib_mr *        (*get_dma_mr)(struct ib_pd *pd,
                                            int mr_access_flags);
        struct ib_mr *        (*reg_phys_mr)(struct ib_pd *pd,
                                             struct ib_phys_buf *phys_buf_array,
                                             int num_phys_buf,
                                             int mr_access_flags,
                                             u64 *iova_start);
        struct ib_mr *        (*reg_user_mr)(struct ib_pd *pd,
                                             struct ib_umem *region,
                                             int mr_access_flags,
                                             struct ib_udata *udata);
        int                   (*query_mr)(struct ib_mr *mr,
                                          struct ib_mr_attr *mr_attr);
        int                   (*dereg_mr)(struct ib_mr *mr);
        int                   (*rereg_phys_mr)(struct ib_mr *mr,
                                               int mr_rereg_mask,
                                               struct ib_pd *pd,
                                               struct ib_phys_buf *phys_buf_array,
                                               int num_phys_buf,
                                               int mr_access_flags,
                                               u64 *iova_start);
        struct ib_mw *        (*alloc_mw)(struct ib_pd *pd);
        int                   (*bind_mw)(struct ib_qp *qp,
                                         struct ib_mw *mw,
                                         struct ib_mw_bind *mw_bind);
        int                   (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *       (*alloc_fmr)(struct ib_pd *pd,
                                           int mr_access_flags,
                                           struct ib_fmr_attr *fmr_attr);
        int                   (*map_phys_fmr)(struct ib_fmr *fmr,
                                              u64 *page_list, int list_len,
                                              u64 iova);
        int                   (*unmap_fmr)(struct list_head *fmr_list);
        int                   (*dealloc_fmr)(struct ib_fmr *fmr);
        int                   (*attach_mcast)(struct ib_qp *qp,
                                              union ib_gid *gid,
                                              u16 lid);
        int                   (*detach_mcast)(struct ib_qp *qp,
                                              union ib_gid *gid,
                                              u16 lid);
        int                   (*process_mad)(struct ib_device *device,
                                             int process_mad_flags,
                                             u8 port_num,
                                             struct ib_wc *in_wc,
                                             struct ib_grh *in_grh,
                                             struct ib_mad *in_mad,
                                             struct ib_mad *out_mad);

        struct module          *owner;
        struct class_device     class_dev;
        struct kobject          ports_parent;
        struct list_head        port_list;

        enum {
                IB_DEV_UNINITIALIZED,
                IB_DEV_REGISTERED,
                IB_DEV_UNREGISTERED
        } reg_state;

        u64                     uverbs_cmd_mask;
        int                     uverbs_abi_ver;

        char                    node_desc[64];
        __be64                  node_guid;
        u8                      node_type;
        u8                      phys_port_cnt;
};

struct ib_client {
        char  *name;
        void (*add)   (struct ib_device *);
        void (*remove)(struct ib_device *);

        struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);
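/*
 * Example (illustrative sketch only): a kernel module registering as
 * an IB client.  The add callback runs for every existing and future
 * device; per-device state is stashed with ib_set_client_data(),
 * declared just below.  my_client, my_add, my_remove, and struct
 * my_state are placeholder names.
 *
 *      static struct ib_client my_client;
 *
 *      static void my_add(struct ib_device *device)
 *      {
 *              struct my_state *state =
 *                      kmalloc(sizeof *state, GFP_KERNEL);
 *              ib_set_client_data(device, &my_client, state);
 *      }
 *
 *      static void my_remove(struct ib_device *device)
 *      {
 *              kfree(ib_get_client_data(device, &my_client));
 *      }
 *
 *      static struct ib_client my_client = {
 *              .name   = "my_client",
 *              .add    = my_add,
 *              .remove = my_remove
 *      };
 *
 *      ib_register_client(&my_client);
 */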
void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
                         void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
        return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask);
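/*
 * Example (illustrative sketch only): a low-level driver's modify_qp
 * method would typically validate its input as follows, falling back
 * to the QP's current state when IB_QP_CUR_STATE is not supplied.
 * "qp->state" stands in for wherever the driver tracks QP state.
 *
 *      cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state
 *                                              : qp->state;
 *      new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *      if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *                              attr_mask))
 *              return -EINVAL;
 */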
int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
                  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 * sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 * handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 * handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates an SRQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 * specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
                                   struct ib_recv_wr *recv_wr,
                                   struct ib_recv_wr **bad_recv_wr)
{
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask);
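/*
 * Example (illustrative sketch only): creating an RC QP and moving it
 * from RESET to INIT.  All values shown (CQ pointers, queue depths,
 * P_Key index, port number, access flags) are placeholders chosen for
 * illustration.
 *
 *      struct ib_qp_init_attr init_attr = {
 *              .send_cq     = cq,
 *              .recv_cq     = cq,
 *              .cap         = { .max_send_wr  = 16,
 *                               .max_recv_wr  = 16,
 *                               .max_send_sge = 1,
 *                               .max_recv_sge = 1 },
 *              .sq_sig_type = IB_SIGNAL_ALL_WR,
 *              .qp_type     = IB_QPT_RC
 *      };
 *      struct ib_qp_attr attr = {
 *              .qp_state        = IB_QPS_INIT,
 *              .pkey_index      = 0,
 *              .port_num        = 1,
 *              .qp_access_flags = IB_ACCESS_REMOTE_WRITE
 *      };
 *      struct ib_qp *qp;
 *      int ret;
 *
 *      qp = ib_create_qp(pd, &init_attr);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 *
 *      ret = ib_modify_qp(qp, &attr,
 *                         IB_QP_STATE | IB_QP_PKEY_INDEX |
 *                         IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */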
/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
                               struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        return qp->device->post_send(qp, send_wr, bad_send_wr);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
                               struct ib_recv_wr *recv_wr,
                               struct ib_recv_wr **bad_recv_wr)
{
        return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
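/*
 * Example (illustrative sketch only): posting a single signaled send.
 * dma_addr, length and mr are placeholders for a DMA-mapped buffer
 * and the memory region covering it; the QP must already be in RTS.
 *
 *      struct ib_sge sge = {
 *              .addr   = dma_addr,
 *              .length = length,
 *              .lkey   = mr->lkey
 *      };
 *      struct ib_send_wr wr = {
 *              .wr_id      = 1,
 *              .sg_list    = &sge,
 *              .num_sge    = 1,
 *              .opcode     = IB_WR_SEND,
 *              .send_flags = IB_SEND_SIGNALED
 *      };
 *      struct ib_send_wr *bad_wr;
 *
 *      if (ib_post_send(qp, &wr, &bad_wr))
 *              printk(KERN_ERR "failed to post send\n");
 */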
/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
 *   occur on the next solicited event.  If set to %IB_CQ_NEXT_COMP,
 *   notification will occur on the next completion.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify cq_notify)
{
        return cq->device->req_notify_cq(cq, cq_notify);
}

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}
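/*
 * Example (illustrative sketch only): the usual completion-handler
 * pattern -- rearm the CQ first, then poll until it is drained.
 * Rearming before draining means a completion that arrives during the
 * drain still generates a new event (at worst a spurious one), so
 * nothing is lost.  my_comp_handler, handle_recv, and handle_send are
 * placeholder names.
 *
 *      static void my_comp_handler(struct ib_cq *cq, void *cq_context)
 *      {
 *              struct ib_wc wc;
 *
 *              ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *              while (ib_poll_cq(cq, 1, &wc) > 0) {
 *                      if (wc.status != IB_WC_SUCCESS)
 *                              continue;
 *                      if (wc.opcode & IB_WC_RECV)
 *                              handle_recv(&wc);
 *                      else
 *                              handle_send(&wc);
 *              }
 *      }
 *
 *      cq = ib_create_cq(device, my_comp_handler, NULL, NULL, 64);
 *      ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */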
/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister memory region operation
 *   followed by a register physical memory region operation.  Where
 *   possible, resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        /* XXX reference counting in corresponding MR? */
        return mw->device->bind_mw ?
                mw->device->bind_mw(qp, mw, mw_bind) :
                -ENOSYS;
}
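/*
 * Example (illustrative sketch only): binding a memory window to part
 * of an existing memory region.  mr must have been registered with
 * IB_ACCESS_MW_BIND; addr and length describe the window within it.
 * After a successful bind, mw->rkey is the key to hand to the remote
 * peer; use_rkey() is a placeholder.
 *
 *      struct ib_mw_bind bind = {
 *              .mr              = mr,
 *              .wr_id           = 2,
 *              .addr            = addr,
 *              .length          = length,
 *              .send_flags      = IB_SEND_SIGNALED,
 *              .mw_access_flags = IB_ACCESS_REMOTE_READ
 *      };
 *
 *      if (!ib_bind_mw(qp, mw, &bind))
 *              use_rkey(mw->rkey);
 */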
/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */