1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #if !defined(IB_VERBS_H) 40 #define IB_VERBS_H 41 42 #include <linux/types.h> 43 #include <linux/device.h> 44 #include <linux/mm.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/kref.h> 47 #include <linux/list.h> 48 #include <linux/rwsem.h> 49 #include <linux/scatterlist.h> 50 #include <linux/workqueue.h> 51 52 #include <linux/atomic.h> 53 #include <asm/uaccess.h> 54 55 extern struct workqueue_struct *ib_wq; 56 57 union ib_gid { 58 u8 raw[16]; 59 struct { 60 __be64 subnet_prefix; 61 __be64 interface_id; 62 } global; 63 }; 64 65 enum rdma_node_type { 66 /* IB values map to NodeInfo:NodeType. 
*/ 67 RDMA_NODE_IB_CA = 1, 68 RDMA_NODE_IB_SWITCH, 69 RDMA_NODE_IB_ROUTER, 70 RDMA_NODE_RNIC 71 }; 72 73 enum rdma_transport_type { 74 RDMA_TRANSPORT_IB, 75 RDMA_TRANSPORT_IWARP 76 }; 77 78 enum rdma_transport_type 79 rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__; 80 81 enum rdma_link_layer { 82 IB_LINK_LAYER_UNSPECIFIED, 83 IB_LINK_LAYER_INFINIBAND, 84 IB_LINK_LAYER_ETHERNET, 85 }; 86 87 enum ib_device_cap_flags { 88 IB_DEVICE_RESIZE_MAX_WR = 1, 89 IB_DEVICE_BAD_PKEY_CNTR = (1<<1), 90 IB_DEVICE_BAD_QKEY_CNTR = (1<<2), 91 IB_DEVICE_RAW_MULTI = (1<<3), 92 IB_DEVICE_AUTO_PATH_MIG = (1<<4), 93 IB_DEVICE_CHANGE_PHY_PORT = (1<<5), 94 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6), 95 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7), 96 IB_DEVICE_SHUTDOWN_PORT = (1<<8), 97 IB_DEVICE_INIT_TYPE = (1<<9), 98 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10), 99 IB_DEVICE_SYS_IMAGE_GUID = (1<<11), 100 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12), 101 IB_DEVICE_SRQ_RESIZE = (1<<13), 102 IB_DEVICE_N_NOTIFY_CQ = (1<<14), 103 IB_DEVICE_LOCAL_DMA_LKEY = (1<<15), 104 IB_DEVICE_RESERVED = (1<<16), /* old SEND_W_INV */ 105 IB_DEVICE_MEM_WINDOW = (1<<17), 106 /* 107 * Devices should set IB_DEVICE_UD_IP_SUM if they support 108 * insertion of UDP and TCP checksum on outgoing UD IPoIB 109 * messages and can verify the validity of checksum for 110 * incoming messages. Setting this flag implies that the 111 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 112 */ 113 IB_DEVICE_UD_IP_CSUM = (1<<18), 114 IB_DEVICE_UD_TSO = (1<<19), 115 IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21), 116 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), 117 }; 118 119 enum ib_atomic_cap { 120 IB_ATOMIC_NONE, 121 IB_ATOMIC_HCA, 122 IB_ATOMIC_GLOB 123 }; 124 125 struct ib_device_attr { 126 u64 fw_ver; 127 __be64 sys_image_guid; 128 u64 max_mr_size; 129 u64 page_size_cap; 130 u32 vendor_id; 131 u32 vendor_part_id; 132 u32 hw_ver; 133 int max_qp; 134 int max_qp_wr; 135 int device_cap_flags; 136 int max_sge; 137 int max_sge_rd; 138 int max_cq; 139 int max_cqe; 140 int max_mr; 141 int max_pd; 142 int max_qp_rd_atom; 143 int max_ee_rd_atom; 144 int max_res_rd_atom; 145 int max_qp_init_rd_atom; 146 int max_ee_init_rd_atom; 147 enum ib_atomic_cap atomic_cap; 148 enum ib_atomic_cap masked_atomic_cap; 149 int max_ee; 150 int max_rdd; 151 int max_mw; 152 int max_raw_ipv6_qp; 153 int max_raw_ethy_qp; 154 int max_mcast_grp; 155 int max_mcast_qp_attach; 156 int max_total_mcast_qp_attach; 157 int max_ah; 158 int max_fmr; 159 int max_map_per_fmr; 160 int max_srq; 161 int max_srq_wr; 162 int max_srq_sge; 163 unsigned int max_fast_reg_page_list_len; 164 u16 max_pkeys; 165 u8 local_ca_ack_delay; 166 }; 167 168 enum ib_mtu { 169 IB_MTU_256 = 1, 170 IB_MTU_512 = 2, 171 IB_MTU_1024 = 3, 172 IB_MTU_2048 = 4, 173 IB_MTU_4096 = 5 174 }; 175 176 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 177 { 178 switch (mtu) { 179 case IB_MTU_256: return 256; 180 case IB_MTU_512: return 512; 181 case IB_MTU_1024: return 1024; 182 case IB_MTU_2048: return 2048; 183 case IB_MTU_4096: return 4096; 184 default: return -1; 185 } 186 } 187 188 enum ib_port_state { 189 IB_PORT_NOP = 0, 190 IB_PORT_DOWN = 1, 191 IB_PORT_INIT = 2, 192 IB_PORT_ARMED = 3, 193 IB_PORT_ACTIVE = 4, 194 IB_PORT_ACTIVE_DEFER = 5 195 }; 196 197 enum ib_port_cap_flags { 198 IB_PORT_SM = 1 << 1, 199 IB_PORT_NOTICE_SUP = 1 << 2, 200 IB_PORT_TRAP_SUP = 1 << 3, 201 IB_PORT_OPT_IPD_SUP = 1 << 4, 202 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 203 IB_PORT_SL_MAP_SUP = 1 << 6, 204 IB_PORT_MKEY_NVRAM = 1 << 7, 205 IB_PORT_PKEY_NVRAM = 
1 << 8, 206 IB_PORT_LED_INFO_SUP = 1 << 9, 207 IB_PORT_SM_DISABLED = 1 << 10, 208 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, 209 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, 210 IB_PORT_CM_SUP = 1 << 16, 211 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, 212 IB_PORT_REINIT_SUP = 1 << 18, 213 IB_PORT_DEVICE_MGMT_SUP = 1 << 19, 214 IB_PORT_VENDOR_CLASS_SUP = 1 << 20, 215 IB_PORT_DR_NOTICE_SUP = 1 << 21, 216 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 217 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 218 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 219 IB_PORT_CLIENT_REG_SUP = 1 << 25 220 }; 221 222 enum ib_port_width { 223 IB_WIDTH_1X = 1, 224 IB_WIDTH_4X = 2, 225 IB_WIDTH_8X = 4, 226 IB_WIDTH_12X = 8 227 }; 228 229 static inline int ib_width_enum_to_int(enum ib_port_width width) 230 { 231 switch (width) { 232 case IB_WIDTH_1X: return 1; 233 case IB_WIDTH_4X: return 4; 234 case IB_WIDTH_8X: return 8; 235 case IB_WIDTH_12X: return 12; 236 default: return -1; 237 } 238 } 239 240 struct ib_protocol_stats { 241 /* TBD... */ 242 }; 243 244 struct iw_protocol_stats { 245 u64 ipInReceives; 246 u64 ipInHdrErrors; 247 u64 ipInTooBigErrors; 248 u64 ipInNoRoutes; 249 u64 ipInAddrErrors; 250 u64 ipInUnknownProtos; 251 u64 ipInTruncatedPkts; 252 u64 ipInDiscards; 253 u64 ipInDelivers; 254 u64 ipOutForwDatagrams; 255 u64 ipOutRequests; 256 u64 ipOutDiscards; 257 u64 ipOutNoRoutes; 258 u64 ipReasmTimeout; 259 u64 ipReasmReqds; 260 u64 ipReasmOKs; 261 u64 ipReasmFails; 262 u64 ipFragOKs; 263 u64 ipFragFails; 264 u64 ipFragCreates; 265 u64 ipInMcastPkts; 266 u64 ipOutMcastPkts; 267 u64 ipInBcastPkts; 268 u64 ipOutBcastPkts; 269 270 u64 tcpRtoAlgorithm; 271 u64 tcpRtoMin; 272 u64 tcpRtoMax; 273 u64 tcpMaxConn; 274 u64 tcpActiveOpens; 275 u64 tcpPassiveOpens; 276 u64 tcpAttemptFails; 277 u64 tcpEstabResets; 278 u64 tcpCurrEstab; 279 u64 tcpInSegs; 280 u64 tcpOutSegs; 281 u64 tcpRetransSegs; 282 u64 tcpInErrs; 283 u64 tcpOutRsts; 284 }; 285 286 union rdma_protocol_stats { 287 struct ib_protocol_stats ib; 288 struct iw_protocol_stats iw; 289 }; 290 291 struct ib_port_attr { 292 enum ib_port_state state; 293 enum ib_mtu max_mtu; 294 enum ib_mtu active_mtu; 295 int gid_tbl_len; 296 u32 port_cap_flags; 297 u32 max_msg_sz; 298 u32 bad_pkey_cntr; 299 u32 qkey_viol_cntr; 300 u16 pkey_tbl_len; 301 u16 lid; 302 u16 sm_lid; 303 u8 lmc; 304 u8 max_vl_num; 305 u8 sm_sl; 306 u8 subnet_timeout; 307 u8 init_type_reply; 308 u8 active_width; 309 u8 active_speed; 310 u8 phys_state; 311 }; 312 313 enum ib_device_modify_flags { 314 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 315 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 316 }; 317 318 struct ib_device_modify { 319 u64 sys_image_guid; 320 char node_desc[64]; 321 }; 322 323 enum ib_port_modify_flags { 324 IB_PORT_SHUTDOWN = 1, 325 IB_PORT_INIT_TYPE = (1<<2), 326 IB_PORT_RESET_QKEY_CNTR = (1<<3) 327 }; 328 329 struct ib_port_modify { 330 u32 set_port_cap_mask; 331 u32 clr_port_cap_mask; 332 u8 init_type; 333 }; 334 335 enum ib_event_type { 336 IB_EVENT_CQ_ERR, 337 IB_EVENT_QP_FATAL, 338 IB_EVENT_QP_REQ_ERR, 339 IB_EVENT_QP_ACCESS_ERR, 340 IB_EVENT_COMM_EST, 341 IB_EVENT_SQ_DRAINED, 342 IB_EVENT_PATH_MIG, 343 IB_EVENT_PATH_MIG_ERR, 344 IB_EVENT_DEVICE_FATAL, 345 IB_EVENT_PORT_ACTIVE, 346 IB_EVENT_PORT_ERR, 347 IB_EVENT_LID_CHANGE, 348 IB_EVENT_PKEY_CHANGE, 349 IB_EVENT_SM_CHANGE, 350 IB_EVENT_SRQ_ERR, 351 IB_EVENT_SRQ_LIMIT_REACHED, 352 IB_EVENT_QP_LAST_WQE_REACHED, 353 IB_EVENT_CLIENT_REREGISTER, 354 IB_EVENT_GID_CHANGE, 355 }; 356 357 struct ib_event { 358 struct ib_device *device; 359 union { 360 struct ib_cq *cq; 361 struct ib_qp 
*qp; 362 struct ib_srq *srq; 363 u8 port_num; 364 } element; 365 enum ib_event_type event; 366 }; 367 368 struct ib_event_handler { 369 struct ib_device *device; 370 void (*handler)(struct ib_event_handler *, struct ib_event *); 371 struct list_head list; 372 }; 373 374 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 375 do { \ 376 (_ptr)->device = _device; \ 377 (_ptr)->handler = _handler; \ 378 INIT_LIST_HEAD(&(_ptr)->list); \ 379 } while (0) 380 381 struct ib_global_route { 382 union ib_gid dgid; 383 u32 flow_label; 384 u8 sgid_index; 385 u8 hop_limit; 386 u8 traffic_class; 387 }; 388 389 struct ib_grh { 390 __be32 version_tclass_flow; 391 __be16 paylen; 392 u8 next_hdr; 393 u8 hop_limit; 394 union ib_gid sgid; 395 union ib_gid dgid; 396 }; 397 398 enum { 399 IB_MULTICAST_QPN = 0xffffff 400 }; 401 402 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 403 404 enum ib_ah_flags { 405 IB_AH_GRH = 1 406 }; 407 408 enum ib_rate { 409 IB_RATE_PORT_CURRENT = 0, 410 IB_RATE_2_5_GBPS = 2, 411 IB_RATE_5_GBPS = 5, 412 IB_RATE_10_GBPS = 3, 413 IB_RATE_20_GBPS = 6, 414 IB_RATE_30_GBPS = 4, 415 IB_RATE_40_GBPS = 7, 416 IB_RATE_60_GBPS = 8, 417 IB_RATE_80_GBPS = 9, 418 IB_RATE_120_GBPS = 10 419 }; 420 421 /** 422 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 423 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 424 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 425 * @rate: rate to convert. 426 */ 427 int ib_rate_to_mult(enum ib_rate rate) __attribute_const__; 428 429 /** 430 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate 431 * enum. 432 * @mult: multiple to convert. 433 */ 434 enum ib_rate mult_to_ib_rate(int mult) __attribute_const__; 435 436 struct ib_ah_attr { 437 struct ib_global_route grh; 438 u16 dlid; 439 u8 sl; 440 u8 src_path_bits; 441 u8 static_rate; 442 u8 ah_flags; 443 u8 port_num; 444 }; 445 446 enum ib_wc_status { 447 IB_WC_SUCCESS, 448 IB_WC_LOC_LEN_ERR, 449 IB_WC_LOC_QP_OP_ERR, 450 IB_WC_LOC_EEC_OP_ERR, 451 IB_WC_LOC_PROT_ERR, 452 IB_WC_WR_FLUSH_ERR, 453 IB_WC_MW_BIND_ERR, 454 IB_WC_BAD_RESP_ERR, 455 IB_WC_LOC_ACCESS_ERR, 456 IB_WC_REM_INV_REQ_ERR, 457 IB_WC_REM_ACCESS_ERR, 458 IB_WC_REM_OP_ERR, 459 IB_WC_RETRY_EXC_ERR, 460 IB_WC_RNR_RETRY_EXC_ERR, 461 IB_WC_LOC_RDD_VIOL_ERR, 462 IB_WC_REM_INV_RD_REQ_ERR, 463 IB_WC_REM_ABORT_ERR, 464 IB_WC_INV_EECN_ERR, 465 IB_WC_INV_EEC_STATE_ERR, 466 IB_WC_FATAL_ERR, 467 IB_WC_RESP_TIMEOUT_ERR, 468 IB_WC_GENERAL_ERR 469 }; 470 471 enum ib_wc_opcode { 472 IB_WC_SEND, 473 IB_WC_RDMA_WRITE, 474 IB_WC_RDMA_READ, 475 IB_WC_COMP_SWAP, 476 IB_WC_FETCH_ADD, 477 IB_WC_BIND_MW, 478 IB_WC_LSO, 479 IB_WC_LOCAL_INV, 480 IB_WC_FAST_REG_MR, 481 IB_WC_MASKED_COMP_SWAP, 482 IB_WC_MASKED_FETCH_ADD, 483 /* 484 * Set value of IB_WC_RECV so consumers can test if a completion is a 485 * receive by testing (opcode & IB_WC_RECV). 
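 *
 * A minimal sketch of that test (handle_recv_completion() and
 * handle_send_completion() are hypothetical consumer helpers; "wc" is
 * assumed to have been returned by ib_poll_cq()):
 *
 *	if (wc.opcode & IB_WC_RECV)
 *		handle_recv_completion(&wc);
 *	else
 *		handle_send_completion(&wc);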
486 */ 487 IB_WC_RECV = 1 << 7, 488 IB_WC_RECV_RDMA_WITH_IMM 489 }; 490 491 enum ib_wc_flags { 492 IB_WC_GRH = 1, 493 IB_WC_WITH_IMM = (1<<1), 494 IB_WC_WITH_INVALIDATE = (1<<2), 495 }; 496 497 struct ib_wc { 498 u64 wr_id; 499 enum ib_wc_status status; 500 enum ib_wc_opcode opcode; 501 u32 vendor_err; 502 u32 byte_len; 503 struct ib_qp *qp; 504 union { 505 __be32 imm_data; 506 u32 invalidate_rkey; 507 } ex; 508 u32 src_qp; 509 int wc_flags; 510 u16 pkey_index; 511 u16 slid; 512 u8 sl; 513 u8 dlid_path_bits; 514 u8 port_num; /* valid only for DR SMPs on switches */ 515 int csum_ok; 516 }; 517 518 enum ib_cq_notify_flags { 519 IB_CQ_SOLICITED = 1 << 0, 520 IB_CQ_NEXT_COMP = 1 << 1, 521 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 522 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 523 }; 524 525 enum ib_srq_attr_mask { 526 IB_SRQ_MAX_WR = 1 << 0, 527 IB_SRQ_LIMIT = 1 << 1, 528 }; 529 530 struct ib_srq_attr { 531 u32 max_wr; 532 u32 max_sge; 533 u32 srq_limit; 534 }; 535 536 struct ib_srq_init_attr { 537 void (*event_handler)(struct ib_event *, void *); 538 void *srq_context; 539 struct ib_srq_attr attr; 540 }; 541 542 struct ib_qp_cap { 543 u32 max_send_wr; 544 u32 max_recv_wr; 545 u32 max_send_sge; 546 u32 max_recv_sge; 547 u32 max_inline_data; 548 }; 549 550 enum ib_sig_type { 551 IB_SIGNAL_ALL_WR, 552 IB_SIGNAL_REQ_WR 553 }; 554 555 enum ib_qp_type { 556 /* 557 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 558 * here (and in that order) since the MAD layer uses them as 559 * indices into a 2-entry table. 560 */ 561 IB_QPT_SMI, 562 IB_QPT_GSI, 563 564 IB_QPT_RC, 565 IB_QPT_UC, 566 IB_QPT_UD, 567 IB_QPT_RAW_IPV6, 568 IB_QPT_RAW_ETHERTYPE 569 }; 570 571 enum ib_qp_create_flags { 572 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 573 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 574 }; 575 576 struct ib_qp_init_attr { 577 void (*event_handler)(struct ib_event *, void *); 578 void *qp_context; 579 struct ib_cq *send_cq; 580 struct ib_cq *recv_cq; 581 struct ib_srq *srq; 582 struct ib_qp_cap cap; 583 enum ib_sig_type sq_sig_type; 584 enum ib_qp_type qp_type; 585 enum ib_qp_create_flags create_flags; 586 u8 port_num; /* special QP types only */ 587 }; 588 589 enum ib_rnr_timeout { 590 IB_RNR_TIMER_655_36 = 0, 591 IB_RNR_TIMER_000_01 = 1, 592 IB_RNR_TIMER_000_02 = 2, 593 IB_RNR_TIMER_000_03 = 3, 594 IB_RNR_TIMER_000_04 = 4, 595 IB_RNR_TIMER_000_06 = 5, 596 IB_RNR_TIMER_000_08 = 6, 597 IB_RNR_TIMER_000_12 = 7, 598 IB_RNR_TIMER_000_16 = 8, 599 IB_RNR_TIMER_000_24 = 9, 600 IB_RNR_TIMER_000_32 = 10, 601 IB_RNR_TIMER_000_48 = 11, 602 IB_RNR_TIMER_000_64 = 12, 603 IB_RNR_TIMER_000_96 = 13, 604 IB_RNR_TIMER_001_28 = 14, 605 IB_RNR_TIMER_001_92 = 15, 606 IB_RNR_TIMER_002_56 = 16, 607 IB_RNR_TIMER_003_84 = 17, 608 IB_RNR_TIMER_005_12 = 18, 609 IB_RNR_TIMER_007_68 = 19, 610 IB_RNR_TIMER_010_24 = 20, 611 IB_RNR_TIMER_015_36 = 21, 612 IB_RNR_TIMER_020_48 = 22, 613 IB_RNR_TIMER_030_72 = 23, 614 IB_RNR_TIMER_040_96 = 24, 615 IB_RNR_TIMER_061_44 = 25, 616 IB_RNR_TIMER_081_92 = 26, 617 IB_RNR_TIMER_122_88 = 27, 618 IB_RNR_TIMER_163_84 = 28, 619 IB_RNR_TIMER_245_76 = 29, 620 IB_RNR_TIMER_327_68 = 30, 621 IB_RNR_TIMER_491_52 = 31 622 }; 623 624 enum ib_qp_attr_mask { 625 IB_QP_STATE = 1, 626 IB_QP_CUR_STATE = (1<<1), 627 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 628 IB_QP_ACCESS_FLAGS = (1<<3), 629 IB_QP_PKEY_INDEX = (1<<4), 630 IB_QP_PORT = (1<<5), 631 IB_QP_QKEY = (1<<6), 632 IB_QP_AV = (1<<7), 633 IB_QP_PATH_MTU = (1<<8), 634 IB_QP_TIMEOUT = (1<<9), 635 IB_QP_RETRY_CNT = (1<<10), 636 IB_QP_RNR_RETRY = 
(1<<11), 637 IB_QP_RQ_PSN = (1<<12), 638 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 639 IB_QP_ALT_PATH = (1<<14), 640 IB_QP_MIN_RNR_TIMER = (1<<15), 641 IB_QP_SQ_PSN = (1<<16), 642 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 643 IB_QP_PATH_MIG_STATE = (1<<18), 644 IB_QP_CAP = (1<<19), 645 IB_QP_DEST_QPN = (1<<20) 646 }; 647 648 enum ib_qp_state { 649 IB_QPS_RESET, 650 IB_QPS_INIT, 651 IB_QPS_RTR, 652 IB_QPS_RTS, 653 IB_QPS_SQD, 654 IB_QPS_SQE, 655 IB_QPS_ERR 656 }; 657 658 enum ib_mig_state { 659 IB_MIG_MIGRATED, 660 IB_MIG_REARM, 661 IB_MIG_ARMED 662 }; 663 664 struct ib_qp_attr { 665 enum ib_qp_state qp_state; 666 enum ib_qp_state cur_qp_state; 667 enum ib_mtu path_mtu; 668 enum ib_mig_state path_mig_state; 669 u32 qkey; 670 u32 rq_psn; 671 u32 sq_psn; 672 u32 dest_qp_num; 673 int qp_access_flags; 674 struct ib_qp_cap cap; 675 struct ib_ah_attr ah_attr; 676 struct ib_ah_attr alt_ah_attr; 677 u16 pkey_index; 678 u16 alt_pkey_index; 679 u8 en_sqd_async_notify; 680 u8 sq_draining; 681 u8 max_rd_atomic; 682 u8 max_dest_rd_atomic; 683 u8 min_rnr_timer; 684 u8 port_num; 685 u8 timeout; 686 u8 retry_cnt; 687 u8 rnr_retry; 688 u8 alt_port_num; 689 u8 alt_timeout; 690 }; 691 692 enum ib_wr_opcode { 693 IB_WR_RDMA_WRITE, 694 IB_WR_RDMA_WRITE_WITH_IMM, 695 IB_WR_SEND, 696 IB_WR_SEND_WITH_IMM, 697 IB_WR_RDMA_READ, 698 IB_WR_ATOMIC_CMP_AND_SWP, 699 IB_WR_ATOMIC_FETCH_AND_ADD, 700 IB_WR_LSO, 701 IB_WR_SEND_WITH_INV, 702 IB_WR_RDMA_READ_WITH_INV, 703 IB_WR_LOCAL_INV, 704 IB_WR_FAST_REG_MR, 705 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 706 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 707 }; 708 709 enum ib_send_flags { 710 IB_SEND_FENCE = 1, 711 IB_SEND_SIGNALED = (1<<1), 712 IB_SEND_SOLICITED = (1<<2), 713 IB_SEND_INLINE = (1<<3), 714 IB_SEND_IP_CSUM = (1<<4) 715 }; 716 717 struct ib_sge { 718 u64 addr; 719 u32 length; 720 u32 lkey; 721 }; 722 723 struct ib_fast_reg_page_list { 724 struct ib_device *device; 725 u64 *page_list; 726 unsigned int max_page_list_len; 727 }; 728 729 struct ib_send_wr { 730 struct ib_send_wr *next; 731 u64 wr_id; 732 struct ib_sge *sg_list; 733 int num_sge; 734 enum ib_wr_opcode opcode; 735 int send_flags; 736 union { 737 __be32 imm_data; 738 u32 invalidate_rkey; 739 } ex; 740 union { 741 struct { 742 u64 remote_addr; 743 u32 rkey; 744 } rdma; 745 struct { 746 u64 remote_addr; 747 u64 compare_add; 748 u64 swap; 749 u64 compare_add_mask; 750 u64 swap_mask; 751 u32 rkey; 752 } atomic; 753 struct { 754 struct ib_ah *ah; 755 void *header; 756 int hlen; 757 int mss; 758 u32 remote_qpn; 759 u32 remote_qkey; 760 u16 pkey_index; /* valid for GSI only */ 761 u8 port_num; /* valid for DR SMPs on switch only */ 762 } ud; 763 struct { 764 u64 iova_start; 765 struct ib_fast_reg_page_list *page_list; 766 unsigned int page_shift; 767 unsigned int page_list_len; 768 u32 length; 769 int access_flags; 770 u32 rkey; 771 } fast_reg; 772 } wr; 773 }; 774 775 struct ib_recv_wr { 776 struct ib_recv_wr *next; 777 u64 wr_id; 778 struct ib_sge *sg_list; 779 int num_sge; 780 }; 781 782 enum ib_access_flags { 783 IB_ACCESS_LOCAL_WRITE = 1, 784 IB_ACCESS_REMOTE_WRITE = (1<<1), 785 IB_ACCESS_REMOTE_READ = (1<<2), 786 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 787 IB_ACCESS_MW_BIND = (1<<4) 788 }; 789 790 struct ib_phys_buf { 791 u64 addr; 792 u64 size; 793 }; 794 795 struct ib_mr_attr { 796 struct ib_pd *pd; 797 u64 device_virt_addr; 798 u64 size; 799 int mr_access_flags; 800 u32 lkey; 801 u32 rkey; 802 }; 803 804 enum ib_mr_rereg_flags { 805 IB_MR_REREG_TRANS = 1, 806 IB_MR_REREG_PD = (1<<1), 807 IB_MR_REREG_ACCESS = (1<<2) 808 }; 809 810 struct 
ib_mw_bind { 811 struct ib_mr *mr; 812 u64 wr_id; 813 u64 addr; 814 u32 length; 815 int send_flags; 816 int mw_access_flags; 817 }; 818 819 struct ib_fmr_attr { 820 int max_pages; 821 int max_maps; 822 u8 page_shift; 823 }; 824 825 struct ib_ucontext { 826 struct ib_device *device; 827 struct list_head pd_list; 828 struct list_head mr_list; 829 struct list_head mw_list; 830 struct list_head cq_list; 831 struct list_head qp_list; 832 struct list_head srq_list; 833 struct list_head ah_list; 834 int closing; 835 }; 836 837 struct ib_uobject { 838 u64 user_handle; /* handle given to us by userspace */ 839 struct ib_ucontext *context; /* associated user context */ 840 void *object; /* containing object */ 841 struct list_head list; /* link to context's list */ 842 int id; /* index into kernel idr */ 843 struct kref ref; 844 struct rw_semaphore mutex; /* protects .live */ 845 int live; 846 }; 847 848 struct ib_udata { 849 void __user *inbuf; 850 void __user *outbuf; 851 size_t inlen; 852 size_t outlen; 853 }; 854 855 struct ib_pd { 856 struct ib_device *device; 857 struct ib_uobject *uobject; 858 atomic_t usecnt; /* count all resources */ 859 }; 860 861 struct ib_ah { 862 struct ib_device *device; 863 struct ib_pd *pd; 864 struct ib_uobject *uobject; 865 }; 866 867 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 868 869 struct ib_cq { 870 struct ib_device *device; 871 struct ib_uobject *uobject; 872 ib_comp_handler comp_handler; 873 void (*event_handler)(struct ib_event *, void *); 874 void *cq_context; 875 int cqe; 876 atomic_t usecnt; /* count number of work queues */ 877 }; 878 879 struct ib_srq { 880 struct ib_device *device; 881 struct ib_pd *pd; 882 struct ib_uobject *uobject; 883 void (*event_handler)(struct ib_event *, void *); 884 void *srq_context; 885 atomic_t usecnt; 886 }; 887 888 struct ib_qp { 889 struct ib_device *device; 890 struct ib_pd *pd; 891 struct ib_cq *send_cq; 892 struct ib_cq *recv_cq; 893 struct ib_srq *srq; 894 struct ib_uobject *uobject; 895 void (*event_handler)(struct ib_event *, void *); 896 void *qp_context; 897 u32 qp_num; 898 enum ib_qp_type qp_type; 899 }; 900 901 struct ib_mr { 902 struct ib_device *device; 903 struct ib_pd *pd; 904 struct ib_uobject *uobject; 905 u32 lkey; 906 u32 rkey; 907 atomic_t usecnt; /* count number of MWs */ 908 }; 909 910 struct ib_mw { 911 struct ib_device *device; 912 struct ib_pd *pd; 913 struct ib_uobject *uobject; 914 u32 rkey; 915 }; 916 917 struct ib_fmr { 918 struct ib_device *device; 919 struct ib_pd *pd; 920 struct list_head list; 921 u32 lkey; 922 u32 rkey; 923 }; 924 925 struct ib_mad; 926 struct ib_grh; 927 928 enum ib_process_mad_flags { 929 IB_MAD_IGNORE_MKEY = 1, 930 IB_MAD_IGNORE_BKEY = 2, 931 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 932 }; 933 934 enum ib_mad_result { 935 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 936 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 937 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 938 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 939 }; 940 941 #define IB_DEVICE_NAME_MAX 64 942 943 struct ib_cache { 944 rwlock_t lock; 945 struct ib_event_handler event_handler; 946 struct ib_pkey_cache **pkey_cache; 947 struct ib_gid_cache **gid_cache; 948 u8 *lmc_cache; 949 }; 950 951 struct ib_dma_mapping_ops { 952 int (*mapping_error)(struct ib_device *dev, 953 u64 dma_addr); 954 u64 (*map_single)(struct ib_device *dev, 955 void *ptr, size_t size, 956 enum 
dma_data_direction direction); 957 void (*unmap_single)(struct ib_device *dev, 958 u64 addr, size_t size, 959 enum dma_data_direction direction); 960 u64 (*map_page)(struct ib_device *dev, 961 struct page *page, unsigned long offset, 962 size_t size, 963 enum dma_data_direction direction); 964 void (*unmap_page)(struct ib_device *dev, 965 u64 addr, size_t size, 966 enum dma_data_direction direction); 967 int (*map_sg)(struct ib_device *dev, 968 struct scatterlist *sg, int nents, 969 enum dma_data_direction direction); 970 void (*unmap_sg)(struct ib_device *dev, 971 struct scatterlist *sg, int nents, 972 enum dma_data_direction direction); 973 u64 (*dma_address)(struct ib_device *dev, 974 struct scatterlist *sg); 975 unsigned int (*dma_len)(struct ib_device *dev, 976 struct scatterlist *sg); 977 void (*sync_single_for_cpu)(struct ib_device *dev, 978 u64 dma_handle, 979 size_t size, 980 enum dma_data_direction dir); 981 void (*sync_single_for_device)(struct ib_device *dev, 982 u64 dma_handle, 983 size_t size, 984 enum dma_data_direction dir); 985 void *(*alloc_coherent)(struct ib_device *dev, 986 size_t size, 987 u64 *dma_handle, 988 gfp_t flag); 989 void (*free_coherent)(struct ib_device *dev, 990 size_t size, void *cpu_addr, 991 u64 dma_handle); 992 }; 993 994 struct iw_cm_verbs; 995 996 struct ib_device { 997 struct device *dma_device; 998 999 char name[IB_DEVICE_NAME_MAX]; 1000 1001 struct list_head event_handler_list; 1002 spinlock_t event_handler_lock; 1003 1004 spinlock_t client_data_lock; 1005 struct list_head core_list; 1006 struct list_head client_data_list; 1007 1008 struct ib_cache cache; 1009 int *pkey_tbl_len; 1010 int *gid_tbl_len; 1011 1012 int num_comp_vectors; 1013 1014 struct iw_cm_verbs *iwcm; 1015 1016 int (*get_protocol_stats)(struct ib_device *device, 1017 union rdma_protocol_stats *stats); 1018 int (*query_device)(struct ib_device *device, 1019 struct ib_device_attr *device_attr); 1020 int (*query_port)(struct ib_device *device, 1021 u8 port_num, 1022 struct ib_port_attr *port_attr); 1023 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 1024 u8 port_num); 1025 int (*query_gid)(struct ib_device *device, 1026 u8 port_num, int index, 1027 union ib_gid *gid); 1028 int (*query_pkey)(struct ib_device *device, 1029 u8 port_num, u16 index, u16 *pkey); 1030 int (*modify_device)(struct ib_device *device, 1031 int device_modify_mask, 1032 struct ib_device_modify *device_modify); 1033 int (*modify_port)(struct ib_device *device, 1034 u8 port_num, int port_modify_mask, 1035 struct ib_port_modify *port_modify); 1036 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 1037 struct ib_udata *udata); 1038 int (*dealloc_ucontext)(struct ib_ucontext *context); 1039 int (*mmap)(struct ib_ucontext *context, 1040 struct vm_area_struct *vma); 1041 struct ib_pd * (*alloc_pd)(struct ib_device *device, 1042 struct ib_ucontext *context, 1043 struct ib_udata *udata); 1044 int (*dealloc_pd)(struct ib_pd *pd); 1045 struct ib_ah * (*create_ah)(struct ib_pd *pd, 1046 struct ib_ah_attr *ah_attr); 1047 int (*modify_ah)(struct ib_ah *ah, 1048 struct ib_ah_attr *ah_attr); 1049 int (*query_ah)(struct ib_ah *ah, 1050 struct ib_ah_attr *ah_attr); 1051 int (*destroy_ah)(struct ib_ah *ah); 1052 struct ib_srq * (*create_srq)(struct ib_pd *pd, 1053 struct ib_srq_init_attr *srq_init_attr, 1054 struct ib_udata *udata); 1055 int (*modify_srq)(struct ib_srq *srq, 1056 struct ib_srq_attr *srq_attr, 1057 enum ib_srq_attr_mask srq_attr_mask, 1058 struct ib_udata *udata); 1059 int 
(*query_srq)(struct ib_srq *srq, 1060 struct ib_srq_attr *srq_attr); 1061 int (*destroy_srq)(struct ib_srq *srq); 1062 int (*post_srq_recv)(struct ib_srq *srq, 1063 struct ib_recv_wr *recv_wr, 1064 struct ib_recv_wr **bad_recv_wr); 1065 struct ib_qp * (*create_qp)(struct ib_pd *pd, 1066 struct ib_qp_init_attr *qp_init_attr, 1067 struct ib_udata *udata); 1068 int (*modify_qp)(struct ib_qp *qp, 1069 struct ib_qp_attr *qp_attr, 1070 int qp_attr_mask, 1071 struct ib_udata *udata); 1072 int (*query_qp)(struct ib_qp *qp, 1073 struct ib_qp_attr *qp_attr, 1074 int qp_attr_mask, 1075 struct ib_qp_init_attr *qp_init_attr); 1076 int (*destroy_qp)(struct ib_qp *qp); 1077 int (*post_send)(struct ib_qp *qp, 1078 struct ib_send_wr *send_wr, 1079 struct ib_send_wr **bad_send_wr); 1080 int (*post_recv)(struct ib_qp *qp, 1081 struct ib_recv_wr *recv_wr, 1082 struct ib_recv_wr **bad_recv_wr); 1083 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, 1084 int comp_vector, 1085 struct ib_ucontext *context, 1086 struct ib_udata *udata); 1087 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 1088 u16 cq_period); 1089 int (*destroy_cq)(struct ib_cq *cq); 1090 int (*resize_cq)(struct ib_cq *cq, int cqe, 1091 struct ib_udata *udata); 1092 int (*poll_cq)(struct ib_cq *cq, int num_entries, 1093 struct ib_wc *wc); 1094 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 1095 int (*req_notify_cq)(struct ib_cq *cq, 1096 enum ib_cq_notify_flags flags); 1097 int (*req_ncomp_notif)(struct ib_cq *cq, 1098 int wc_cnt); 1099 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 1100 int mr_access_flags); 1101 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd, 1102 struct ib_phys_buf *phys_buf_array, 1103 int num_phys_buf, 1104 int mr_access_flags, 1105 u64 *iova_start); 1106 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 1107 u64 start, u64 length, 1108 u64 virt_addr, 1109 int mr_access_flags, 1110 struct ib_udata *udata); 1111 int (*query_mr)(struct ib_mr *mr, 1112 struct ib_mr_attr *mr_attr); 1113 int (*dereg_mr)(struct ib_mr *mr); 1114 struct ib_mr * (*alloc_fast_reg_mr)(struct ib_pd *pd, 1115 int max_page_list_len); 1116 struct ib_fast_reg_page_list * (*alloc_fast_reg_page_list)(struct ib_device *device, 1117 int page_list_len); 1118 void (*free_fast_reg_page_list)(struct ib_fast_reg_page_list *page_list); 1119 int (*rereg_phys_mr)(struct ib_mr *mr, 1120 int mr_rereg_mask, 1121 struct ib_pd *pd, 1122 struct ib_phys_buf *phys_buf_array, 1123 int num_phys_buf, 1124 int mr_access_flags, 1125 u64 *iova_start); 1126 struct ib_mw * (*alloc_mw)(struct ib_pd *pd); 1127 int (*bind_mw)(struct ib_qp *qp, 1128 struct ib_mw *mw, 1129 struct ib_mw_bind *mw_bind); 1130 int (*dealloc_mw)(struct ib_mw *mw); 1131 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 1132 int mr_access_flags, 1133 struct ib_fmr_attr *fmr_attr); 1134 int (*map_phys_fmr)(struct ib_fmr *fmr, 1135 u64 *page_list, int list_len, 1136 u64 iova); 1137 int (*unmap_fmr)(struct list_head *fmr_list); 1138 int (*dealloc_fmr)(struct ib_fmr *fmr); 1139 int (*attach_mcast)(struct ib_qp *qp, 1140 union ib_gid *gid, 1141 u16 lid); 1142 int (*detach_mcast)(struct ib_qp *qp, 1143 union ib_gid *gid, 1144 u16 lid); 1145 int (*process_mad)(struct ib_device *device, 1146 int process_mad_flags, 1147 u8 port_num, 1148 struct ib_wc *in_wc, 1149 struct ib_grh *in_grh, 1150 struct ib_mad *in_mad, 1151 struct ib_mad *out_mad); 1152 1153 struct ib_dma_mapping_ops *dma_ops; 1154 1155 struct module *owner; 1156 struct device dev; 1157 struct kobject *ports_parent; 1158 struct list_head port_list; 1159 
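	/*
	 * Registration state of the device; set by ib_register_device()
	 * and ib_unregister_device().
	 */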
1160 enum { 1161 IB_DEV_UNINITIALIZED, 1162 IB_DEV_REGISTERED, 1163 IB_DEV_UNREGISTERED 1164 } reg_state; 1165 1166 int uverbs_abi_ver; 1167 u64 uverbs_cmd_mask; 1168 1169 char node_desc[64]; 1170 __be64 node_guid; 1171 u32 local_dma_lkey; 1172 u8 node_type; 1173 u8 phys_port_cnt; 1174 }; 1175 1176 struct ib_client { 1177 char *name; 1178 void (*add) (struct ib_device *); 1179 void (*remove)(struct ib_device *); 1180 1181 struct list_head list; 1182 }; 1183 1184 struct ib_device *ib_alloc_device(size_t size); 1185 void ib_dealloc_device(struct ib_device *device); 1186 1187 int ib_register_device(struct ib_device *device, 1188 int (*port_callback)(struct ib_device *, 1189 u8, struct kobject *)); 1190 void ib_unregister_device(struct ib_device *device); 1191 1192 int ib_register_client (struct ib_client *client); 1193 void ib_unregister_client(struct ib_client *client); 1194 1195 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 1196 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 1197 void *data); 1198 1199 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 1200 { 1201 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 1202 } 1203 1204 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 1205 { 1206 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 1207 } 1208 1209 /** 1210 * ib_modify_qp_is_ok - Check that the supplied attribute mask 1211 * contains all required attributes and no attributes not allowed for 1212 * the given QP state transition. 1213 * @cur_state: Current QP state 1214 * @next_state: Next QP state 1215 * @type: QP type 1216 * @mask: Mask of supplied QP attributes 1217 * 1218 * This function is a helper function that a low-level driver's 1219 * modify_qp method can use to validate the consumer's input. It 1220 * checks that cur_state and next_state are valid QP states, that a 1221 * transition from cur_state to next_state is allowed by the IB spec, 1222 * and that the attribute mask supplied is allowed for the transition. 1223 */ 1224 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 1225 enum ib_qp_type type, enum ib_qp_attr_mask mask); 1226 1227 int ib_register_event_handler (struct ib_event_handler *event_handler); 1228 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 1229 void ib_dispatch_event(struct ib_event *event); 1230 1231 int ib_query_device(struct ib_device *device, 1232 struct ib_device_attr *device_attr); 1233 1234 int ib_query_port(struct ib_device *device, 1235 u8 port_num, struct ib_port_attr *port_attr); 1236 1237 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 1238 u8 port_num); 1239 1240 int ib_query_gid(struct ib_device *device, 1241 u8 port_num, int index, union ib_gid *gid); 1242 1243 int ib_query_pkey(struct ib_device *device, 1244 u8 port_num, u16 index, u16 *pkey); 1245 1246 int ib_modify_device(struct ib_device *device, 1247 int device_modify_mask, 1248 struct ib_device_modify *device_modify); 1249 1250 int ib_modify_port(struct ib_device *device, 1251 u8 port_num, int port_modify_mask, 1252 struct ib_port_modify *port_modify); 1253 1254 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 1255 u8 *port_num, u16 *index); 1256 1257 int ib_find_pkey(struct ib_device *device, 1258 u8 port_num, u16 pkey, u16 *index); 1259 1260 /** 1261 * ib_alloc_pd - Allocates an unused protection domain. 
1262 * @device: The device on which to allocate the protection domain. 1263 * 1264 * A protection domain object provides an association between QPs, shared 1265 * receive queues, address handles, memory regions, and memory windows. 1266 */ 1267 struct ib_pd *ib_alloc_pd(struct ib_device *device); 1268 1269 /** 1270 * ib_dealloc_pd - Deallocates a protection domain. 1271 * @pd: The protection domain to deallocate. 1272 */ 1273 int ib_dealloc_pd(struct ib_pd *pd); 1274 1275 /** 1276 * ib_create_ah - Creates an address handle for the given address vector. 1277 * @pd: The protection domain associated with the address handle. 1278 * @ah_attr: The attributes of the address vector. 1279 * 1280 * The address handle is used to reference a local or global destination 1281 * in all UD QP post sends. 1282 */ 1283 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 1284 1285 /** 1286 * ib_init_ah_from_wc - Initializes address handle attributes from a 1287 * work completion. 1288 * @device: Device on which the received message arrived. 1289 * @port_num: Port on which the received message arrived. 1290 * @wc: Work completion associated with the received message. 1291 * @grh: References the received global route header. This parameter is 1292 * ignored unless the work completion indicates that the GRH is valid. 1293 * @ah_attr: Returned attributes that can be used when creating an address 1294 * handle for replying to the message. 1295 */ 1296 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, 1297 struct ib_grh *grh, struct ib_ah_attr *ah_attr); 1298 1299 /** 1300 * ib_create_ah_from_wc - Creates an address handle associated with the 1301 * sender of the specified work completion. 1302 * @pd: The protection domain associated with the address handle. 1303 * @wc: Work completion information associated with a received message. 1304 * @grh: References the received global route header. This parameter is 1305 * ignored unless the work completion indicates that the GRH is valid. 1306 * @port_num: The outbound port number to associate with the address. 1307 * 1308 * The address handle is used to reference a local or global destination 1309 * in all UD QP post sends. 1310 */ 1311 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc, 1312 struct ib_grh *grh, u8 port_num); 1313 1314 /** 1315 * ib_modify_ah - Modifies the address vector associated with an address 1316 * handle. 1317 * @ah: The address handle to modify. 1318 * @ah_attr: The new address vector attributes to associate with the 1319 * address handle. 1320 */ 1321 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 1322 1323 /** 1324 * ib_query_ah - Queries the address vector associated with an address 1325 * handle. 1326 * @ah: The address handle to query. 1327 * @ah_attr: The address vector attributes associated with the address 1328 * handle. 1329 */ 1330 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 1331 1332 /** 1333 * ib_destroy_ah - Destroys an address handle. 1334 * @ah: The address handle to destroy. 1335 */ 1336 int ib_destroy_ah(struct ib_ah *ah); 1337 1338 /** 1339 * ib_create_srq - Creates a SRQ associated with the specified protection 1340 * domain. 1341 * @pd: The protection domain associated with the SRQ. 1342 * @srq_init_attr: A list of initial attributes required to create the 1343 * SRQ. If SRQ creation succeeds, then the attributes are updated to 1344 * the actual capabilities of the created SRQ. 
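 *
 * A minimal creation sketch (error handling omitted; "pd",
 * "my_srq_event_handler" and "my_ctx" are assumptions of the caller):
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.event_handler	= my_srq_event_handler,
 *		.srq_context	= my_ctx,
 *		.attr		= { .max_wr = 256, .max_sge = 1 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);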
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
                                   struct ib_recv_wr *recv_wr,
                                   struct ib_recv_wr **bad_recv_wr)
{
        return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
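 *
 * A minimal query sketch (error handling omitted; "qp" is assumed to be
 * an existing QP); on return, attr.qp_state and attr.cap hold the
 * current values:
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *
 *	ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_CAP, &init_attr);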
1433 */ 1434 int ib_query_qp(struct ib_qp *qp, 1435 struct ib_qp_attr *qp_attr, 1436 int qp_attr_mask, 1437 struct ib_qp_init_attr *qp_init_attr); 1438 1439 /** 1440 * ib_destroy_qp - Destroys the specified QP. 1441 * @qp: The QP to destroy. 1442 */ 1443 int ib_destroy_qp(struct ib_qp *qp); 1444 1445 /** 1446 * ib_post_send - Posts a list of work requests to the send queue of 1447 * the specified QP. 1448 * @qp: The QP to post the work request on. 1449 * @send_wr: A list of work requests to post on the send queue. 1450 * @bad_send_wr: On an immediate failure, this parameter will reference 1451 * the work request that failed to be posted on the QP. 1452 * 1453 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 1454 * error is returned, the QP state shall not be affected, 1455 * ib_post_send() will return an immediate error after queueing any 1456 * earlier work requests in the list. 1457 */ 1458 static inline int ib_post_send(struct ib_qp *qp, 1459 struct ib_send_wr *send_wr, 1460 struct ib_send_wr **bad_send_wr) 1461 { 1462 return qp->device->post_send(qp, send_wr, bad_send_wr); 1463 } 1464 1465 /** 1466 * ib_post_recv - Posts a list of work requests to the receive queue of 1467 * the specified QP. 1468 * @qp: The QP to post the work request on. 1469 * @recv_wr: A list of work requests to post on the receive queue. 1470 * @bad_recv_wr: On an immediate failure, this parameter will reference 1471 * the work request that failed to be posted on the QP. 1472 */ 1473 static inline int ib_post_recv(struct ib_qp *qp, 1474 struct ib_recv_wr *recv_wr, 1475 struct ib_recv_wr **bad_recv_wr) 1476 { 1477 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 1478 } 1479 1480 /** 1481 * ib_create_cq - Creates a CQ on the specified device. 1482 * @device: The device on which to create the CQ. 1483 * @comp_handler: A user-specified callback that is invoked when a 1484 * completion event occurs on the CQ. 1485 * @event_handler: A user-specified callback that is invoked when an 1486 * asynchronous event not associated with a completion occurs on the CQ. 1487 * @cq_context: Context associated with the CQ returned to the user via 1488 * the associated completion and event handlers. 1489 * @cqe: The minimum size of the CQ. 1490 * @comp_vector - Completion vector used to signal completion events. 1491 * Must be >= 0 and < context->num_comp_vectors. 1492 * 1493 * Users can examine the cq structure to determine the actual CQ size. 1494 */ 1495 struct ib_cq *ib_create_cq(struct ib_device *device, 1496 ib_comp_handler comp_handler, 1497 void (*event_handler)(struct ib_event *, void *), 1498 void *cq_context, int cqe, int comp_vector); 1499 1500 /** 1501 * ib_resize_cq - Modifies the capacity of the CQ. 1502 * @cq: The CQ to resize. 1503 * @cqe: The minimum size of the CQ. 1504 * 1505 * Users can examine the cq structure to determine the actual CQ size. 1506 */ 1507 int ib_resize_cq(struct ib_cq *cq, int cqe); 1508 1509 /** 1510 * ib_modify_cq - Modifies moderation params of the CQ 1511 * @cq: The CQ to modify. 1512 * @cq_count: number of CQEs that will trigger an event 1513 * @cq_period: max period of time in usec before triggering an event 1514 * 1515 */ 1516 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 1517 1518 /** 1519 * ib_destroy_cq - Destroys the specified CQ. 1520 * @cq: The CQ to destroy. 
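 *
 * A CQ is typically created with ib_create_cq(), armed with
 * ib_req_notify_cq(), drained with ib_poll_cq() and finally released
 * here.  A minimal lifecycle sketch ("device", "my_comp_handler" and
 * "my_ctx" are assumptions of the caller; error handling omitted):
 *
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_ctx, 256, 0);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	...
 *	ib_destroy_cq(cq);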
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
                             struct ib_wc *wc)
{
        return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
                                   enum ib_cq_notify_flags flags)
{
        return cq->device->req_notify_cq(cq, flags);
}

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
        return cq->device->req_ncomp_notif ?
                cq->device->req_ncomp_notif(cq, wc_cnt) :
                -ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
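 *
 * A typical kernel consumer obtains one DMA MR per PD and uses its
 * local key for all locally mapped buffers; a minimal sketch ("pd",
 * "dev", "buf" and "len" are assumptions of the caller; error handling
 * omitted):
 *
 *	struct ib_sge sge;
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *
 *	sge.addr   = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	sge.length = len;
 *	sge.lkey   = mr->lkey;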
1606 * 1607 * Note that the ib_dma_*() functions defined below must be used 1608 * to create/destroy addresses used with the Lkey or Rkey returned 1609 * by ib_get_dma_mr(). 1610 */ 1611 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 1612 1613 /** 1614 * ib_dma_mapping_error - check a DMA addr for error 1615 * @dev: The device for which the dma_addr was created 1616 * @dma_addr: The DMA address to check 1617 */ 1618 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 1619 { 1620 if (dev->dma_ops) 1621 return dev->dma_ops->mapping_error(dev, dma_addr); 1622 return dma_mapping_error(dev->dma_device, dma_addr); 1623 } 1624 1625 /** 1626 * ib_dma_map_single - Map a kernel virtual address to DMA address 1627 * @dev: The device for which the dma_addr is to be created 1628 * @cpu_addr: The kernel virtual address 1629 * @size: The size of the region in bytes 1630 * @direction: The direction of the DMA 1631 */ 1632 static inline u64 ib_dma_map_single(struct ib_device *dev, 1633 void *cpu_addr, size_t size, 1634 enum dma_data_direction direction) 1635 { 1636 if (dev->dma_ops) 1637 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 1638 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 1639 } 1640 1641 /** 1642 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 1643 * @dev: The device for which the DMA address was created 1644 * @addr: The DMA address 1645 * @size: The size of the region in bytes 1646 * @direction: The direction of the DMA 1647 */ 1648 static inline void ib_dma_unmap_single(struct ib_device *dev, 1649 u64 addr, size_t size, 1650 enum dma_data_direction direction) 1651 { 1652 if (dev->dma_ops) 1653 dev->dma_ops->unmap_single(dev, addr, size, direction); 1654 else 1655 dma_unmap_single(dev->dma_device, addr, size, direction); 1656 } 1657 1658 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 1659 void *cpu_addr, size_t size, 1660 enum dma_data_direction direction, 1661 struct dma_attrs *attrs) 1662 { 1663 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 1664 direction, attrs); 1665 } 1666 1667 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 1668 u64 addr, size_t size, 1669 enum dma_data_direction direction, 1670 struct dma_attrs *attrs) 1671 { 1672 return dma_unmap_single_attrs(dev->dma_device, addr, size, 1673 direction, attrs); 1674 } 1675 1676 /** 1677 * ib_dma_map_page - Map a physical page to DMA address 1678 * @dev: The device for which the dma_addr is to be created 1679 * @page: The page to be mapped 1680 * @offset: The offset within the page 1681 * @size: The size of the region in bytes 1682 * @direction: The direction of the DMA 1683 */ 1684 static inline u64 ib_dma_map_page(struct ib_device *dev, 1685 struct page *page, 1686 unsigned long offset, 1687 size_t size, 1688 enum dma_data_direction direction) 1689 { 1690 if (dev->dma_ops) 1691 return dev->dma_ops->map_page(dev, page, offset, size, direction); 1692 return dma_map_page(dev->dma_device, page, offset, size, direction); 1693 } 1694 1695 /** 1696 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 1697 * @dev: The device for which the DMA address was created 1698 * @addr: The DMA address 1699 * @size: The size of the region in bytes 1700 * @direction: The direction of the DMA 1701 */ 1702 static inline void ib_dma_unmap_page(struct ib_device *dev, 1703 u64 addr, size_t size, 1704 enum dma_data_direction direction) 1705 { 1706 if (dev->dma_ops) 1707 
dev->dma_ops->unmap_page(dev, addr, size, direction); 1708 else 1709 dma_unmap_page(dev->dma_device, addr, size, direction); 1710 } 1711 1712 /** 1713 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 1714 * @dev: The device for which the DMA addresses are to be created 1715 * @sg: The array of scatter/gather entries 1716 * @nents: The number of scatter/gather entries 1717 * @direction: The direction of the DMA 1718 */ 1719 static inline int ib_dma_map_sg(struct ib_device *dev, 1720 struct scatterlist *sg, int nents, 1721 enum dma_data_direction direction) 1722 { 1723 if (dev->dma_ops) 1724 return dev->dma_ops->map_sg(dev, sg, nents, direction); 1725 return dma_map_sg(dev->dma_device, sg, nents, direction); 1726 } 1727 1728 /** 1729 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 1730 * @dev: The device for which the DMA addresses were created 1731 * @sg: The array of scatter/gather entries 1732 * @nents: The number of scatter/gather entries 1733 * @direction: The direction of the DMA 1734 */ 1735 static inline void ib_dma_unmap_sg(struct ib_device *dev, 1736 struct scatterlist *sg, int nents, 1737 enum dma_data_direction direction) 1738 { 1739 if (dev->dma_ops) 1740 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 1741 else 1742 dma_unmap_sg(dev->dma_device, sg, nents, direction); 1743 } 1744 1745 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 1746 struct scatterlist *sg, int nents, 1747 enum dma_data_direction direction, 1748 struct dma_attrs *attrs) 1749 { 1750 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 1751 } 1752 1753 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 1754 struct scatterlist *sg, int nents, 1755 enum dma_data_direction direction, 1756 struct dma_attrs *attrs) 1757 { 1758 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 1759 } 1760 /** 1761 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 1762 * @dev: The device for which the DMA addresses were created 1763 * @sg: The scatter/gather entry 1764 */ 1765 static inline u64 ib_sg_dma_address(struct ib_device *dev, 1766 struct scatterlist *sg) 1767 { 1768 if (dev->dma_ops) 1769 return dev->dma_ops->dma_address(dev, sg); 1770 return sg_dma_address(sg); 1771 } 1772 1773 /** 1774 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 1775 * @dev: The device for which the DMA addresses were created 1776 * @sg: The scatter/gather entry 1777 */ 1778 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 1779 struct scatterlist *sg) 1780 { 1781 if (dev->dma_ops) 1782 return dev->dma_ops->dma_len(dev, sg); 1783 return sg_dma_len(sg); 1784 } 1785 1786 /** 1787 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 1788 * @dev: The device for which the DMA address was created 1789 * @addr: The DMA address 1790 * @size: The size of the region in bytes 1791 * @dir: The direction of the DMA 1792 */ 1793 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 1794 u64 addr, 1795 size_t size, 1796 enum dma_data_direction dir) 1797 { 1798 if (dev->dma_ops) 1799 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 1800 else 1801 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 1802 } 1803 1804 /** 1805 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 1806 * @dev: The device for which the DMA address was created 1807 * @addr: The DMA address 1808 * @size: The size of the region in bytes 1809 * @dir: The direction of the DMA 
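 *
 * A minimal sketch of the usual map/sync pattern for a buffer that the
 * CPU fills and the device then reads ("dev", "buf" and "len" are
 * assumptions of the caller):
 *
 *	u64 dma = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	...
 *	ib_dma_sync_single_for_cpu(dev, dma, len, DMA_TO_DEVICE);
 *	... CPU updates the buffer ...
 *	ib_dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);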
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs the functions of deregister memory
 *   region followed by register physical memory region.  Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister memory region operation
 *   followed by a register physical memory region operation.  Where
 *   possible, resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

/**
 * ib_alloc_fast_reg_page_list - Allocates a page list array
 * @device: ib device pointer.
 * @page_list_len: size of the page list array to be allocated.
 *
 * This allocates and returns a struct ib_fast_reg_page_list * and a
 * page_list array that is at least page_list_len in size.  The actual
 * size is returned in max_page_list_len.  The caller is responsible
 * for initializing the contents of the page_list array before posting
 * a send work request with the IB_WR_FAST_REG_MR opcode.
 *
 * The page_list array entries must be translated using one of the
 * ib_dma_*() functions just like the addresses passed to
 * ib_map_phys_fmr().  Once the ib_post_send() is issued, the struct
 * ib_fast_reg_page_list must not be modified by the caller until the
 * IB_WR_FAST_REG_MR work request completes.
 */
struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(
			struct ib_device *device, int page_list_len);

/**
 * ib_free_fast_reg_page_list - Deallocates a previously allocated
 *   page list array.
 * @page_list: struct ib_fast_reg_page_list pointer to be deallocated.
 */
void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 *   R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}
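/*
 * Illustrative sketch (not part of this header): a typical fast-registration
 * consumer allocates the MR and page list once, and before each registration
 * fills the page_list array with ib_dma_*() addresses, calls
 * ib_update_fast_reg_key() to pick a fresh key, and posts an
 * IB_WR_FAST_REG_MR send work request that references both.  The helper
 * name example_fast_reg_setup() and EXAMPLE_MAX_PAGES are invented for this
 * example; construction of the work request itself is omitted.
 *
 *	#define EXAMPLE_MAX_PAGES	32
 *
 *	static int example_fast_reg_setup(struct ib_pd *pd,
 *					  struct ib_device *dev,
 *					  struct ib_mr **mr,
 *					  struct ib_fast_reg_page_list **pl)
 *	{
 *		*mr = ib_alloc_fast_reg_mr(pd, EXAMPLE_MAX_PAGES);
 *		if (IS_ERR(*mr))
 *			return PTR_ERR(*mr);
 *
 *		*pl = ib_alloc_fast_reg_page_list(dev, EXAMPLE_MAX_PAGES);
 *		if (IS_ERR(*pl)) {
 *			ib_dereg_mr(*mr);
 *			return PTR_ERR(*pl);
 *		}
 *		return 0;
 *	}
 *
 * The page list must not be modified between ib_post_send() and the
 * corresponding completion, as noted above.
 */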
/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

#endif /* IB_VERBS_H */
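/*
 * Illustrative sketch (not part of this header): the fast memory region
 * (FMR) interface declared above is used as allocate -> map -> (post work
 * requests using the region's keys) -> unmap -> deallocate.  The attribute
 * values and variable names below are invented for this example, and the
 * ib_fmr_attr and ib_fmr field names are assumed to match the definitions
 * earlier in this file.
 *
 *	struct ib_fmr_attr fmr_attr = {
 *		.max_pages	= 64,
 *		.max_maps	= 32,
 *		.page_shift	= PAGE_SHIFT,
 *	};
 *	LIST_HEAD(fmr_list);
 *	struct ib_fmr *fmr;
 *	int ret;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *			   &fmr_attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *
 *	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *	if (ret)
 *		goto out_dealloc;
 *
 *	... post work requests referencing the mapped region ...
 *
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 * out_dealloc:
 *	ib_dealloc_fmr(fmr);
 */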