1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 3 * 4 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 5 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 6 * Copyright (c) 2004 Intel Corporation. All rights reserved. 7 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 8 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 9 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 10 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 11 * 12 * This software is available to you under a choice of one of two 13 * licenses. You may choose to be licensed under the terms of the GNU 14 * General Public License (GPL) Version 2, available from the file 15 * COPYING in the main directory of this source tree, or the 16 * OpenIB.org BSD license below: 17 * 18 * Redistribution and use in source and binary forms, with or 19 * without modification, are permitted provided that the following 20 * conditions are met: 21 * 22 * - Redistributions of source code must retain the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer. 25 * 26 * - Redistributions in binary form must reproduce the above 27 * copyright notice, this list of conditions and the following 28 * disclaimer in the documentation and/or other materials 29 * provided with the distribution. 30 * 31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 35 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 36 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 37 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 38 * SOFTWARE. 39 */ 40 41 #if !defined(IB_VERBS_H) 42 #define IB_VERBS_H 43 44 #include <linux/types.h> 45 #include <linux/device.h> 46 #include <linux/mm.h> 47 #include <linux/dma-mapping.h> 48 #include <linux/kref.h> 49 #include <linux/list.h> 50 #include <linux/rwsem.h> 51 #include <linux/scatterlist.h> 52 #include <linux/workqueue.h> 53 #include <linux/socket.h> 54 #include <linux/if_ether.h> 55 #include <net/ipv6.h> 56 #include <net/ip.h> 57 #include <linux/string.h> 58 #include <linux/slab.h> 59 #include <linux/rcupdate.h> 60 #include <linux/netdevice.h> 61 #include <linux/xarray.h> 62 #include <netinet/ip.h> 63 #include <uapi/rdma/ib_user_verbs.h> 64 #include <rdma/signature.h> 65 #include <uapi/rdma/rdma_user_ioctl.h> 66 #include <uapi/rdma/ib_user_ioctl_verbs.h> 67 68 #include <asm/atomic.h> 69 #include <asm/uaccess.h> 70 71 struct ib_uqp_object; 72 struct ib_usrq_object; 73 struct ib_uwq_object; 74 struct ifla_vf_info; 75 struct ifla_vf_stats; 76 struct ib_uverbs_file; 77 struct uverbs_attr_bundle; 78 79 enum ib_uverbs_advise_mr_advice; 80 81 extern struct workqueue_struct *ib_wq; 82 extern struct workqueue_struct *ib_comp_wq; 83 84 struct ib_ucq_object; 85 86 union ib_gid { 87 u8 raw[16]; 88 struct { 89 __be64 subnet_prefix; 90 __be64 interface_id; 91 } global; 92 }; 93 94 extern union ib_gid zgid; 95 96 enum ib_gid_type { 97 /* If link layer is Ethernet, this is RoCE V1 */ 98 IB_GID_TYPE_IB = 0, 99 IB_GID_TYPE_ROCE = 0, 100 IB_GID_TYPE_ROCE_UDP_ENCAP = 1, 101 IB_GID_TYPE_SIZE 102 }; 103 104 #define ROCE_V2_UDP_DPORT 4791 105 struct ib_gid_attr { 106 enum ib_gid_type gid_type; 107 if_t ndev; 108 }; 109 110 enum rdma_node_type { 111 /* IB values map to NodeInfo:NodeType. */ 112 RDMA_NODE_IB_CA = 1, 113 RDMA_NODE_IB_SWITCH, 114 RDMA_NODE_IB_ROUTER, 115 RDMA_NODE_RNIC, 116 RDMA_NODE_USNIC, 117 RDMA_NODE_USNIC_UDP, 118 }; 119 120 enum { 121 /* set the local administered indication */ 122 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, 123 }; 124 125 enum rdma_transport_type { 126 RDMA_TRANSPORT_IB, 127 RDMA_TRANSPORT_IWARP, 128 RDMA_TRANSPORT_USNIC, 129 RDMA_TRANSPORT_USNIC_UDP 130 }; 131 132 enum rdma_protocol_type { 133 RDMA_PROTOCOL_IB, 134 RDMA_PROTOCOL_IBOE, 135 RDMA_PROTOCOL_IWARP, 136 RDMA_PROTOCOL_USNIC_UDP 137 }; 138 139 __attribute_const__ enum rdma_transport_type 140 rdma_node_get_transport(enum rdma_node_type node_type); 141 142 enum rdma_network_type { 143 RDMA_NETWORK_IB, 144 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, 145 RDMA_NETWORK_IPV4, 146 RDMA_NETWORK_IPV6 147 }; 148 149 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type) 150 { 151 if (network_type == RDMA_NETWORK_IPV4 || 152 network_type == RDMA_NETWORK_IPV6) 153 return IB_GID_TYPE_ROCE_UDP_ENCAP; 154 155 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ 156 return IB_GID_TYPE_IB; 157 } 158 159 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type, 160 union ib_gid *gid) 161 { 162 if (gid_type == IB_GID_TYPE_IB) 163 return RDMA_NETWORK_IB; 164 165 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) 166 return RDMA_NETWORK_IPV4; 167 else 168 return RDMA_NETWORK_IPV6; 169 } 170 171 enum rdma_link_layer { 172 IB_LINK_LAYER_UNSPECIFIED, 173 IB_LINK_LAYER_INFINIBAND, 174 IB_LINK_LAYER_ETHERNET, 175 }; 176 177 enum ib_device_cap_flags { 178 IB_DEVICE_RESIZE_MAX_WR = (1 << 0), 179 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1), 180 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2), 181 IB_DEVICE_RAW_MULTI = (1 << 3), 182 IB_DEVICE_AUTO_PATH_MIG = (1 << 4), 183 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5), 184 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), 185 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), 186 IB_DEVICE_SHUTDOWN_PORT = (1 << 8), 187 IB_DEVICE_INIT_TYPE = (1 << 9), 188 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), 189 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), 190 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), 191 IB_DEVICE_SRQ_RESIZE = (1 << 13), 192 IB_DEVICE_N_NOTIFY_CQ = (1 << 14), 193 194 /* 195 * This device supports a per-device lkey or stag that can be 196 * used without performing a memory registration for the local 197 * memory. Note that ULPs should never check this flag, but 198 * instead of use the local_dma_lkey flag in the ib_pd structure, 199 * which will always contain a usable lkey. 200 */ 201 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), 202 IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16), 203 IB_DEVICE_MEM_WINDOW = (1 << 17), 204 /* 205 * Devices should set IB_DEVICE_UD_IP_SUM if they support 206 * insertion of UDP and TCP checksum on outgoing UD IPoIB 207 * messages and can verify the validity of checksum for 208 * incoming messages. Setting this flag implies that the 209 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 210 */ 211 IB_DEVICE_UD_IP_CSUM = (1 << 18), 212 IB_DEVICE_UD_TSO = (1 << 19), 213 IB_DEVICE_XRC = (1 << 20), 214 215 /* 216 * This device supports the IB "base memory management extension", 217 * which includes support for fast registrations (IB_WR_REG_MR, 218 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should 219 * also be set by any iWarp device which must support FRs to comply 220 * to the iWarp verbs spec. iWarp devices also support the 221 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the 222 * stag. 223 */ 224 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), 225 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), 226 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), 227 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), 228 IB_DEVICE_RC_IP_CSUM = (1 << 25), 229 IB_DEVICE_RAW_IP_CSUM = (1 << 26), 230 /* 231 * Devices should set IB_DEVICE_CROSS_CHANNEL if they 232 * support execution of WQEs that involve synchronization 233 * of I/O operations with single completion queue managed 234 * by hardware. 235 */ 236 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 237 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 238 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 239 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), 240 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 241 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), 242 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), 243 IB_DEVICE_KNOWSEPOCH = (1ULL << 35), 244 }; 245 246 enum ib_atomic_cap { 247 IB_ATOMIC_NONE, 248 IB_ATOMIC_HCA, 249 IB_ATOMIC_GLOB 250 }; 251 252 enum ib_odp_general_cap_bits { 253 IB_ODP_SUPPORT = 1 << 0, 254 }; 255 256 enum ib_odp_transport_cap_bits { 257 IB_ODP_SUPPORT_SEND = 1 << 0, 258 IB_ODP_SUPPORT_RECV = 1 << 1, 259 IB_ODP_SUPPORT_WRITE = 1 << 2, 260 IB_ODP_SUPPORT_READ = 1 << 3, 261 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 262 }; 263 264 struct ib_odp_caps { 265 uint64_t general_caps; 266 struct { 267 uint32_t rc_odp_caps; 268 uint32_t uc_odp_caps; 269 uint32_t ud_odp_caps; 270 uint32_t xrc_odp_caps; 271 } per_transport_caps; 272 }; 273 274 struct ib_rss_caps { 275 /* Corresponding bit will be set if qp type from 276 * 'enum ib_qp_type' is supported, e.g. 277 * supported_qpts |= 1 << IB_QPT_UD 278 */ 279 u32 supported_qpts; 280 u32 max_rwq_indirection_tables; 281 u32 max_rwq_indirection_table_size; 282 }; 283 284 enum ib_tm_cap_flags { 285 /* Support tag matching with rendezvous offload for RC transport */ 286 IB_TM_CAP_RNDV_RC = 1 << 0, 287 }; 288 289 struct ib_tm_caps { 290 /* Max size of RNDV header */ 291 u32 max_rndv_hdr_size; 292 /* Max number of entries in tag matching list */ 293 u32 max_num_tags; 294 /* From enum ib_tm_cap_flags */ 295 u32 flags; 296 /* Max number of outstanding list operations */ 297 u32 max_ops; 298 /* Max number of SGE in tag matching entry */ 299 u32 max_sge; 300 }; 301 302 enum ib_cq_creation_flags { 303 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 304 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, 305 }; 306 307 struct ib_cq_init_attr { 308 unsigned int cqe; 309 u32 comp_vector; 310 u32 flags; 311 }; 312 313 enum ib_cq_attr_mask { 314 IB_CQ_MODERATE = 1 << 0, 315 }; 316 317 struct ib_cq_caps { 318 u16 max_cq_moderation_count; 319 u16 max_cq_moderation_period; 320 }; 321 322 struct ib_dm_mr_attr { 323 u64 length; 324 u64 offset; 325 u32 access_flags; 326 }; 327 328 struct ib_dm_alloc_attr { 329 u64 length; 330 u32 alignment; 331 u32 flags; 332 }; 333 334 struct ib_device_attr { 335 u64 fw_ver; 336 __be64 sys_image_guid; 337 u64 max_mr_size; 338 u64 page_size_cap; 339 u32 vendor_id; 340 u32 vendor_part_id; 341 u32 hw_ver; 342 int max_qp; 343 int max_qp_wr; 344 u64 device_cap_flags; 345 int max_sge; 346 int max_sge_rd; 347 int max_cq; 348 int max_cqe; 349 int max_mr; 350 int max_pd; 351 int max_qp_rd_atom; 352 int max_ee_rd_atom; 353 int max_res_rd_atom; 354 int max_qp_init_rd_atom; 355 int max_ee_init_rd_atom; 356 enum ib_atomic_cap atomic_cap; 357 enum ib_atomic_cap masked_atomic_cap; 358 int max_ee; 359 int max_rdd; 360 int max_mw; 361 int max_raw_ipv6_qp; 362 int max_raw_ethy_qp; 363 int max_mcast_grp; 364 int max_mcast_qp_attach; 365 int max_total_mcast_qp_attach; 366 int max_ah; 367 int max_fmr; 368 int max_map_per_fmr; 369 int max_srq; 370 int max_srq_wr; 371 union { 372 int max_srq_sge; 373 int max_send_sge; 374 int max_recv_sge; 375 }; 376 unsigned int max_fast_reg_page_list_len; 377 u16 max_pkeys; 378 u8 local_ca_ack_delay; 379 int sig_prot_cap; 380 int sig_guard_cap; 381 struct ib_odp_caps odp_caps; 382 uint64_t timestamp_mask; 383 uint64_t hca_core_clock; /* in KHZ */ 384 struct ib_rss_caps rss_caps; 385 u32 max_wq_type_rq; 386 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ 387 struct ib_tm_caps tm_caps; 388 struct ib_cq_caps cq_caps; 389 u64 max_dm_size; 390 /* Max entries for sgl for optimized performance per READ */ 391 u32 max_sgl_rd; 392 }; 393 394 enum ib_mtu { 395 IB_MTU_256 = 1, 396 IB_MTU_512 = 2, 397 IB_MTU_1024 = 3, 398 IB_MTU_2048 = 4, 399 IB_MTU_4096 = 5 400 }; 401 402 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 403 { 404 switch (mtu) { 405 case IB_MTU_256: return 256; 406 case IB_MTU_512: return 512; 407 case IB_MTU_1024: return 1024; 408 case IB_MTU_2048: return 2048; 409 case IB_MTU_4096: return 4096; 410 default: return -1; 411 } 412 } 413 414 enum ib_port_state { 415 IB_PORT_NOP = 0, 416 IB_PORT_DOWN = 1, 417 IB_PORT_INIT = 2, 418 IB_PORT_ARMED = 3, 419 IB_PORT_ACTIVE = 4, 420 IB_PORT_ACTIVE_DEFER = 5, 421 IB_PORT_DUMMY = -1, /* force enum signed */ 422 }; 423 424 enum ib_port_cap_flags { 425 IB_PORT_SM = 1 << 1, 426 IB_PORT_NOTICE_SUP = 1 << 2, 427 IB_PORT_TRAP_SUP = 1 << 3, 428 IB_PORT_OPT_IPD_SUP = 1 << 4, 429 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 430 IB_PORT_SL_MAP_SUP = 1 << 6, 431 IB_PORT_MKEY_NVRAM = 1 << 7, 432 IB_PORT_PKEY_NVRAM = 1 << 8, 433 IB_PORT_LED_INFO_SUP = 1 << 9, 434 IB_PORT_SM_DISABLED = 1 << 10, 435 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, 436 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, 437 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, 438 IB_PORT_CM_SUP = 1 << 16, 439 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, 440 IB_PORT_REINIT_SUP = 1 << 18, 441 IB_PORT_DEVICE_MGMT_SUP = 1 << 19, 442 IB_PORT_VENDOR_CLASS_SUP = 1 << 20, 443 IB_PORT_DR_NOTICE_SUP = 1 << 21, 444 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 445 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 446 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 447 IB_PORT_CLIENT_REG_SUP = 1 << 25, 448 IB_PORT_IP_BASED_GIDS = 1 << 26, 449 }; 450 451 enum ib_port_phys_state { 452 IB_PORT_PHYS_STATE_SLEEP = 1, 453 IB_PORT_PHYS_STATE_POLLING = 2, 454 IB_PORT_PHYS_STATE_DISABLED = 3, 455 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4, 456 IB_PORT_PHYS_STATE_LINK_UP = 5, 457 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6, 458 IB_PORT_PHYS_STATE_PHY_TEST = 7, 459 }; 460 461 enum ib_port_width { 462 IB_WIDTH_1X = 1, 463 IB_WIDTH_2X = 16, 464 IB_WIDTH_4X = 2, 465 IB_WIDTH_8X = 4, 466 IB_WIDTH_12X = 8 467 }; 468 469 static inline int ib_width_enum_to_int(enum ib_port_width width) 470 { 471 switch (width) { 472 case IB_WIDTH_1X: return 1; 473 case IB_WIDTH_2X: return 2; 474 case IB_WIDTH_4X: return 4; 475 case IB_WIDTH_8X: return 8; 476 case IB_WIDTH_12X: return 12; 477 default: return -1; 478 } 479 } 480 481 enum ib_port_speed { 482 IB_SPEED_SDR = 1, 483 IB_SPEED_DDR = 2, 484 IB_SPEED_QDR = 4, 485 IB_SPEED_FDR10 = 8, 486 IB_SPEED_FDR = 16, 487 IB_SPEED_EDR = 32, 488 IB_SPEED_HDR = 64, 489 IB_SPEED_NDR = 128 490 }; 491 492 /** 493 * struct rdma_hw_stats 494 * @lock - Mutex to protect parallel write access to lifespan and values 495 * of counters, which are 64bits and not guaranteeed to be written 496 * atomicaly on 32bits systems. 497 * @timestamp - Used by the core code to track when the last update was 498 * @lifespan - Used by the core code to determine how old the counters 499 * should be before being updated again. Stored in jiffies, defaults 500 * to 10 milliseconds, drivers can override the default be specifying 501 * their own value during their allocation routine. 502 * @name - Array of pointers to static names used for the counters in 503 * directory. 504 * @num_counters - How many hardware counters there are. If name is 505 * shorter than this number, a kernel oops will result. Driver authors 506 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters) 507 * in their code to prevent this. 508 * @value - Array of u64 counters that are accessed by the sysfs code and 509 * filled in by the drivers get_stats routine 510 */ 511 struct rdma_hw_stats { 512 struct mutex lock; /* Protect lifespan and values[] */ 513 unsigned long timestamp; 514 unsigned long lifespan; 515 const char * const *names; 516 int num_counters; 517 u64 value[]; 518 }; 519 520 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10 521 /** 522 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct 523 * for drivers. 524 * @names - Array of static const char * 525 * @num_counters - How many elements in array 526 * @lifespan - How many milliseconds between updates 527 */ 528 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( 529 const char * const *names, int num_counters, 530 unsigned long lifespan) 531 { 532 struct rdma_hw_stats *stats; 533 534 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64), 535 GFP_KERNEL); 536 if (!stats) 537 return NULL; 538 stats->names = names; 539 stats->num_counters = num_counters; 540 stats->lifespan = msecs_to_jiffies(lifespan); 541 542 return stats; 543 } 544 545 546 /* Define bits for the various functionality this port needs to be supported by 547 * the core. 548 */ 549 /* Management 0x00000FFF */ 550 #define RDMA_CORE_CAP_IB_MAD 0x00000001 551 #define RDMA_CORE_CAP_IB_SMI 0x00000002 552 #define RDMA_CORE_CAP_IB_CM 0x00000004 553 #define RDMA_CORE_CAP_IW_CM 0x00000008 554 #define RDMA_CORE_CAP_IB_SA 0x00000010 555 #define RDMA_CORE_CAP_OPA_MAD 0x00000020 556 557 /* Address format 0x000FF000 */ 558 #define RDMA_CORE_CAP_AF_IB 0x00001000 559 #define RDMA_CORE_CAP_ETH_AH 0x00002000 560 561 /* Protocol 0xFFF00000 */ 562 #define RDMA_CORE_CAP_PROT_IB 0x00100000 563 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 564 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 565 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 566 567 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 568 | RDMA_CORE_CAP_IB_MAD \ 569 | RDMA_CORE_CAP_IB_SMI \ 570 | RDMA_CORE_CAP_IB_CM \ 571 | RDMA_CORE_CAP_IB_SA \ 572 | RDMA_CORE_CAP_AF_IB) 573 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 574 | RDMA_CORE_CAP_IB_MAD \ 575 | RDMA_CORE_CAP_IB_CM \ 576 | RDMA_CORE_CAP_AF_IB \ 577 | RDMA_CORE_CAP_ETH_AH) 578 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ 579 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ 580 | RDMA_CORE_CAP_IB_MAD \ 581 | RDMA_CORE_CAP_IB_CM \ 582 | RDMA_CORE_CAP_AF_IB \ 583 | RDMA_CORE_CAP_ETH_AH) 584 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 585 | RDMA_CORE_CAP_IW_CM) 586 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 587 | RDMA_CORE_CAP_OPA_MAD) 588 589 struct ib_port_attr { 590 u64 subnet_prefix; 591 enum ib_port_state state; 592 enum ib_mtu max_mtu; 593 enum ib_mtu active_mtu; 594 int gid_tbl_len; 595 unsigned int ip_gids:1; 596 /* This is the value from PortInfo CapabilityMask, defined by IBA */ 597 u32 port_cap_flags; 598 u32 max_msg_sz; 599 u32 bad_pkey_cntr; 600 u32 qkey_viol_cntr; 601 u16 pkey_tbl_len; 602 u16 lid; 603 u16 sm_lid; 604 u8 lmc; 605 u8 max_vl_num; 606 u8 sm_sl; 607 u8 subnet_timeout; 608 u8 init_type_reply; 609 u8 active_width; 610 u8 active_speed; 611 u8 phys_state; 612 bool grh_required; 613 }; 614 615 enum ib_device_modify_flags { 616 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 617 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 618 }; 619 620 #define IB_DEVICE_NODE_DESC_MAX 64 621 622 struct ib_device_modify { 623 u64 sys_image_guid; 624 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 625 }; 626 627 enum ib_port_modify_flags { 628 IB_PORT_SHUTDOWN = 1, 629 IB_PORT_INIT_TYPE = (1<<2), 630 IB_PORT_RESET_QKEY_CNTR = (1<<3) 631 }; 632 633 struct ib_port_modify { 634 u32 set_port_cap_mask; 635 u32 clr_port_cap_mask; 636 u8 init_type; 637 }; 638 639 enum ib_event_type { 640 IB_EVENT_CQ_ERR, 641 IB_EVENT_QP_FATAL, 642 IB_EVENT_QP_REQ_ERR, 643 IB_EVENT_QP_ACCESS_ERR, 644 IB_EVENT_COMM_EST, 645 IB_EVENT_SQ_DRAINED, 646 IB_EVENT_PATH_MIG, 647 IB_EVENT_PATH_MIG_ERR, 648 IB_EVENT_DEVICE_FATAL, 649 IB_EVENT_PORT_ACTIVE, 650 IB_EVENT_PORT_ERR, 651 IB_EVENT_LID_CHANGE, 652 IB_EVENT_PKEY_CHANGE, 653 IB_EVENT_SM_CHANGE, 654 IB_EVENT_SRQ_ERR, 655 IB_EVENT_SRQ_LIMIT_REACHED, 656 IB_EVENT_QP_LAST_WQE_REACHED, 657 IB_EVENT_CLIENT_REREGISTER, 658 IB_EVENT_GID_CHANGE, 659 IB_EVENT_WQ_FATAL, 660 }; 661 662 const char *__attribute_const__ ib_event_msg(enum ib_event_type event); 663 664 struct ib_event { 665 struct ib_device *device; 666 union { 667 struct ib_cq *cq; 668 struct ib_qp *qp; 669 struct ib_srq *srq; 670 struct ib_wq *wq; 671 u8 port_num; 672 } element; 673 enum ib_event_type event; 674 }; 675 676 struct ib_event_handler { 677 struct ib_device *device; 678 void (*handler)(struct ib_event_handler *, struct ib_event *); 679 struct list_head list; 680 }; 681 682 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 683 do { \ 684 (_ptr)->device = _device; \ 685 (_ptr)->handler = _handler; \ 686 INIT_LIST_HEAD(&(_ptr)->list); \ 687 } while (0) 688 689 struct ib_global_route { 690 union ib_gid dgid; 691 u32 flow_label; 692 u8 sgid_index; 693 u8 hop_limit; 694 u8 traffic_class; 695 }; 696 697 struct ib_grh { 698 __be32 version_tclass_flow; 699 __be16 paylen; 700 u8 next_hdr; 701 u8 hop_limit; 702 union ib_gid sgid; 703 union ib_gid dgid; 704 }; 705 706 union rdma_network_hdr { 707 struct ib_grh ibgrh; 708 struct { 709 /* The IB spec states that if it's IPv4, the header 710 * is located in the last 20 bytes of the header. 711 */ 712 u8 reserved[20]; 713 struct ip roce4grh; 714 }; 715 }; 716 717 enum { 718 IB_MULTICAST_QPN = 0xffffff 719 }; 720 721 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 722 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) 723 724 enum ib_ah_flags { 725 IB_AH_GRH = 1 726 }; 727 728 enum ib_rate { 729 IB_RATE_PORT_CURRENT = 0, 730 IB_RATE_2_5_GBPS = 2, 731 IB_RATE_5_GBPS = 5, 732 IB_RATE_10_GBPS = 3, 733 IB_RATE_20_GBPS = 6, 734 IB_RATE_30_GBPS = 4, 735 IB_RATE_40_GBPS = 7, 736 IB_RATE_60_GBPS = 8, 737 IB_RATE_80_GBPS = 9, 738 IB_RATE_120_GBPS = 10, 739 IB_RATE_14_GBPS = 11, 740 IB_RATE_56_GBPS = 12, 741 IB_RATE_112_GBPS = 13, 742 IB_RATE_168_GBPS = 14, 743 IB_RATE_25_GBPS = 15, 744 IB_RATE_100_GBPS = 16, 745 IB_RATE_200_GBPS = 17, 746 IB_RATE_300_GBPS = 18, 747 IB_RATE_28_GBPS = 19, 748 IB_RATE_50_GBPS = 20, 749 IB_RATE_400_GBPS = 21, 750 IB_RATE_600_GBPS = 22, 751 }; 752 753 /** 754 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 755 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 756 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 757 * @rate: rate to convert. 758 */ 759 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 760 761 /** 762 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 763 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 764 * @rate: rate to convert. 765 */ 766 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 767 768 769 /** 770 * enum ib_mr_type - memory region type 771 * @IB_MR_TYPE_MEM_REG: memory region that is used for 772 * normal registration 773 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to 774 * register any arbitrary sg lists (without 775 * the normal mr constraints - see 776 * ib_map_mr_sg) 777 * @IB_MR_TYPE_DM: memory region that is used for device 778 * memory registration 779 * @IB_MR_TYPE_USER: memory region that is used for the user-space 780 * application 781 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations 782 * without address translations (VA=PA) 783 * @IB_MR_TYPE_INTEGRITY: memory region that is used for 784 * data integrity operations 785 */ 786 enum ib_mr_type { 787 IB_MR_TYPE_MEM_REG, 788 IB_MR_TYPE_SG_GAPS, 789 IB_MR_TYPE_DM, 790 IB_MR_TYPE_USER, 791 IB_MR_TYPE_DMA, 792 IB_MR_TYPE_INTEGRITY, 793 }; 794 795 enum ib_mr_status_check { 796 IB_MR_CHECK_SIG_STATUS = 1, 797 }; 798 799 /** 800 * struct ib_mr_status - Memory region status container 801 * 802 * @fail_status: Bitmask of MR checks status. For each 803 * failed check a corresponding status bit is set. 804 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS 805 * failure. 806 */ 807 struct ib_mr_status { 808 u32 fail_status; 809 struct ib_sig_err sig_err; 810 }; 811 812 /** 813 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate 814 * enum. 815 * @mult: multiple to convert. 816 */ 817 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); 818 819 struct ib_ah_attr { 820 struct ib_global_route grh; 821 u16 dlid; 822 u8 sl; 823 u8 src_path_bits; 824 u8 static_rate; 825 u8 ah_flags; 826 u8 port_num; 827 u8 dmac[ETH_ALEN]; 828 }; 829 830 enum ib_wc_status { 831 IB_WC_SUCCESS, 832 IB_WC_LOC_LEN_ERR, 833 IB_WC_LOC_QP_OP_ERR, 834 IB_WC_LOC_EEC_OP_ERR, 835 IB_WC_LOC_PROT_ERR, 836 IB_WC_WR_FLUSH_ERR, 837 IB_WC_MW_BIND_ERR, 838 IB_WC_BAD_RESP_ERR, 839 IB_WC_LOC_ACCESS_ERR, 840 IB_WC_REM_INV_REQ_ERR, 841 IB_WC_REM_ACCESS_ERR, 842 IB_WC_REM_OP_ERR, 843 IB_WC_RETRY_EXC_ERR, 844 IB_WC_RNR_RETRY_EXC_ERR, 845 IB_WC_LOC_RDD_VIOL_ERR, 846 IB_WC_REM_INV_RD_REQ_ERR, 847 IB_WC_REM_ABORT_ERR, 848 IB_WC_INV_EECN_ERR, 849 IB_WC_INV_EEC_STATE_ERR, 850 IB_WC_FATAL_ERR, 851 IB_WC_RESP_TIMEOUT_ERR, 852 IB_WC_GENERAL_ERR 853 }; 854 855 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); 856 857 enum ib_wc_opcode { 858 IB_WC_SEND, 859 IB_WC_RDMA_WRITE, 860 IB_WC_RDMA_READ, 861 IB_WC_COMP_SWAP, 862 IB_WC_FETCH_ADD, 863 IB_WC_LSO, 864 IB_WC_LOCAL_INV, 865 IB_WC_REG_MR, 866 IB_WC_MASKED_COMP_SWAP, 867 IB_WC_MASKED_FETCH_ADD, 868 /* 869 * Set value of IB_WC_RECV so consumers can test if a completion is a 870 * receive by testing (opcode & IB_WC_RECV). 871 */ 872 IB_WC_RECV = 1 << 7, 873 IB_WC_RECV_RDMA_WITH_IMM, 874 IB_WC_DUMMY = -1, /* force enum signed */ 875 }; 876 877 enum ib_wc_flags { 878 IB_WC_GRH = 1, 879 IB_WC_WITH_IMM = (1<<1), 880 IB_WC_WITH_INVALIDATE = (1<<2), 881 IB_WC_IP_CSUM_OK = (1<<3), 882 IB_WC_WITH_SMAC = (1<<4), 883 IB_WC_WITH_VLAN = (1<<5), 884 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6), 885 }; 886 887 struct ib_wc { 888 union { 889 u64 wr_id; 890 struct ib_cqe *wr_cqe; 891 }; 892 enum ib_wc_status status; 893 enum ib_wc_opcode opcode; 894 u32 vendor_err; 895 u32 byte_len; 896 struct ib_qp *qp; 897 union { 898 __be32 imm_data; 899 u32 invalidate_rkey; 900 } ex; 901 u32 src_qp; 902 int wc_flags; 903 u16 pkey_index; 904 u16 slid; 905 u8 sl; 906 u8 dlid_path_bits; 907 u8 port_num; /* valid only for DR SMPs on switches */ 908 u8 smac[ETH_ALEN]; 909 u16 vlan_id; 910 u8 network_hdr_type; 911 }; 912 913 enum ib_cq_notify_flags { 914 IB_CQ_SOLICITED = 1 << 0, 915 IB_CQ_NEXT_COMP = 1 << 1, 916 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 917 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 918 }; 919 920 enum ib_srq_type { 921 IB_SRQT_BASIC, 922 IB_SRQT_XRC, 923 IB_SRQT_TM, 924 }; 925 926 static inline bool ib_srq_has_cq(enum ib_srq_type srq_type) 927 { 928 return srq_type == IB_SRQT_XRC || 929 srq_type == IB_SRQT_TM; 930 } 931 932 enum ib_srq_attr_mask { 933 IB_SRQ_MAX_WR = 1 << 0, 934 IB_SRQ_LIMIT = 1 << 1, 935 }; 936 937 struct ib_srq_attr { 938 u32 max_wr; 939 u32 max_sge; 940 u32 srq_limit; 941 }; 942 943 struct ib_srq_init_attr { 944 void (*event_handler)(struct ib_event *, void *); 945 void *srq_context; 946 struct ib_srq_attr attr; 947 enum ib_srq_type srq_type; 948 949 struct { 950 struct ib_cq *cq; 951 union { 952 struct { 953 struct ib_xrcd *xrcd; 954 } xrc; 955 956 struct { 957 u32 max_num_tags; 958 } tag_matching; 959 }; 960 } ext; 961 }; 962 963 struct ib_qp_cap { 964 u32 max_send_wr; 965 u32 max_recv_wr; 966 u32 max_send_sge; 967 u32 max_recv_sge; 968 u32 max_inline_data; 969 970 /* 971 * Maximum number of rdma_rw_ctx structures in flight at a time. 972 * ib_create_qp() will calculate the right amount of neededed WRs 973 * and MRs based on this. 974 */ 975 u32 max_rdma_ctxs; 976 }; 977 978 enum ib_sig_type { 979 IB_SIGNAL_ALL_WR, 980 IB_SIGNAL_REQ_WR 981 }; 982 983 enum ib_qp_type { 984 /* 985 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 986 * here (and in that order) since the MAD layer uses them as 987 * indices into a 2-entry table. 988 */ 989 IB_QPT_SMI, 990 IB_QPT_GSI, 991 992 IB_QPT_RC, 993 IB_QPT_UC, 994 IB_QPT_UD, 995 IB_QPT_RAW_IPV6, 996 IB_QPT_RAW_ETHERTYPE, 997 IB_QPT_RAW_PACKET = 8, 998 IB_QPT_XRC_INI = 9, 999 IB_QPT_XRC_TGT, 1000 IB_QPT_MAX, 1001 IB_QPT_DRIVER = 0xFF, 1002 /* Reserve a range for qp types internal to the low level driver. 1003 * These qp types will not be visible at the IB core layer, so the 1004 * IB_QPT_MAX usages should not be affected in the core layer 1005 */ 1006 IB_QPT_RESERVED1 = 0x1000, 1007 IB_QPT_RESERVED2, 1008 IB_QPT_RESERVED3, 1009 IB_QPT_RESERVED4, 1010 IB_QPT_RESERVED5, 1011 IB_QPT_RESERVED6, 1012 IB_QPT_RESERVED7, 1013 IB_QPT_RESERVED8, 1014 IB_QPT_RESERVED9, 1015 IB_QPT_RESERVED10, 1016 }; 1017 1018 enum ib_qp_create_flags { 1019 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 1020 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 1021 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, 1022 IB_QP_CREATE_MANAGED_SEND = 1 << 3, 1023 IB_QP_CREATE_MANAGED_RECV = 1 << 4, 1024 IB_QP_CREATE_NETIF_QP = 1 << 5, 1025 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 1026 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, 1027 IB_QP_CREATE_SCATTER_FCS = 1 << 8, 1028 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, 1029 IB_QP_CREATE_SOURCE_QPN = 1 << 10, 1030 IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11, 1031 /* reserve bits 26-31 for low level drivers' internal use */ 1032 IB_QP_CREATE_RESERVED_START = 1 << 26, 1033 IB_QP_CREATE_RESERVED_END = 1 << 31, 1034 }; 1035 1036 /* 1037 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 1038 * callback to destroy the passed in QP. 1039 */ 1040 1041 struct ib_qp_init_attr { 1042 void (*event_handler)(struct ib_event *, void *); 1043 void *qp_context; 1044 struct ib_cq *send_cq; 1045 struct ib_cq *recv_cq; 1046 struct ib_srq *srq; 1047 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1048 struct ib_qp_cap cap; 1049 enum ib_sig_type sq_sig_type; 1050 enum ib_qp_type qp_type; 1051 enum ib_qp_create_flags create_flags; 1052 1053 /* 1054 * Only needed for special QP types, or when using the RW API. 1055 */ 1056 u8 port_num; 1057 struct ib_rwq_ind_table *rwq_ind_tbl; 1058 u32 source_qpn; 1059 }; 1060 1061 struct ib_qp_open_attr { 1062 void (*event_handler)(struct ib_event *, void *); 1063 void *qp_context; 1064 u32 qp_num; 1065 enum ib_qp_type qp_type; 1066 }; 1067 1068 enum ib_rnr_timeout { 1069 IB_RNR_TIMER_655_36 = 0, 1070 IB_RNR_TIMER_000_01 = 1, 1071 IB_RNR_TIMER_000_02 = 2, 1072 IB_RNR_TIMER_000_03 = 3, 1073 IB_RNR_TIMER_000_04 = 4, 1074 IB_RNR_TIMER_000_06 = 5, 1075 IB_RNR_TIMER_000_08 = 6, 1076 IB_RNR_TIMER_000_12 = 7, 1077 IB_RNR_TIMER_000_16 = 8, 1078 IB_RNR_TIMER_000_24 = 9, 1079 IB_RNR_TIMER_000_32 = 10, 1080 IB_RNR_TIMER_000_48 = 11, 1081 IB_RNR_TIMER_000_64 = 12, 1082 IB_RNR_TIMER_000_96 = 13, 1083 IB_RNR_TIMER_001_28 = 14, 1084 IB_RNR_TIMER_001_92 = 15, 1085 IB_RNR_TIMER_002_56 = 16, 1086 IB_RNR_TIMER_003_84 = 17, 1087 IB_RNR_TIMER_005_12 = 18, 1088 IB_RNR_TIMER_007_68 = 19, 1089 IB_RNR_TIMER_010_24 = 20, 1090 IB_RNR_TIMER_015_36 = 21, 1091 IB_RNR_TIMER_020_48 = 22, 1092 IB_RNR_TIMER_030_72 = 23, 1093 IB_RNR_TIMER_040_96 = 24, 1094 IB_RNR_TIMER_061_44 = 25, 1095 IB_RNR_TIMER_081_92 = 26, 1096 IB_RNR_TIMER_122_88 = 27, 1097 IB_RNR_TIMER_163_84 = 28, 1098 IB_RNR_TIMER_245_76 = 29, 1099 IB_RNR_TIMER_327_68 = 30, 1100 IB_RNR_TIMER_491_52 = 31 1101 }; 1102 1103 enum ib_qp_attr_mask { 1104 IB_QP_STATE = 1, 1105 IB_QP_CUR_STATE = (1<<1), 1106 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 1107 IB_QP_ACCESS_FLAGS = (1<<3), 1108 IB_QP_PKEY_INDEX = (1<<4), 1109 IB_QP_PORT = (1<<5), 1110 IB_QP_QKEY = (1<<6), 1111 IB_QP_AV = (1<<7), 1112 IB_QP_PATH_MTU = (1<<8), 1113 IB_QP_TIMEOUT = (1<<9), 1114 IB_QP_RETRY_CNT = (1<<10), 1115 IB_QP_RNR_RETRY = (1<<11), 1116 IB_QP_RQ_PSN = (1<<12), 1117 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 1118 IB_QP_ALT_PATH = (1<<14), 1119 IB_QP_MIN_RNR_TIMER = (1<<15), 1120 IB_QP_SQ_PSN = (1<<16), 1121 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 1122 IB_QP_PATH_MIG_STATE = (1<<18), 1123 IB_QP_CAP = (1<<19), 1124 IB_QP_DEST_QPN = (1<<20), 1125 IB_QP_RESERVED1 = (1<<21), 1126 IB_QP_RESERVED2 = (1<<22), 1127 IB_QP_RESERVED3 = (1<<23), 1128 IB_QP_RESERVED4 = (1<<24), 1129 IB_QP_RATE_LIMIT = (1<<25), 1130 }; 1131 1132 enum ib_qp_state { 1133 IB_QPS_RESET, 1134 IB_QPS_INIT, 1135 IB_QPS_RTR, 1136 IB_QPS_RTS, 1137 IB_QPS_SQD, 1138 IB_QPS_SQE, 1139 IB_QPS_ERR, 1140 IB_QPS_DUMMY = -1, /* force enum signed */ 1141 }; 1142 1143 enum ib_mig_state { 1144 IB_MIG_MIGRATED, 1145 IB_MIG_REARM, 1146 IB_MIG_ARMED 1147 }; 1148 1149 enum ib_mw_type { 1150 IB_MW_TYPE_1 = 1, 1151 IB_MW_TYPE_2 = 2 1152 }; 1153 1154 struct ib_qp_attr { 1155 enum ib_qp_state qp_state; 1156 enum ib_qp_state cur_qp_state; 1157 enum ib_mtu path_mtu; 1158 enum ib_mig_state path_mig_state; 1159 u32 qkey; 1160 u32 rq_psn; 1161 u32 sq_psn; 1162 u32 dest_qp_num; 1163 int qp_access_flags; 1164 struct ib_qp_cap cap; 1165 struct ib_ah_attr ah_attr; 1166 struct ib_ah_attr alt_ah_attr; 1167 u16 pkey_index; 1168 u16 alt_pkey_index; 1169 u8 en_sqd_async_notify; 1170 u8 sq_draining; 1171 u8 max_rd_atomic; 1172 u8 max_dest_rd_atomic; 1173 u8 min_rnr_timer; 1174 u8 port_num; 1175 u8 timeout; 1176 u8 retry_cnt; 1177 u8 rnr_retry; 1178 u8 alt_port_num; 1179 u8 alt_timeout; 1180 u32 rate_limit; 1181 }; 1182 1183 enum ib_wr_opcode { 1184 IB_WR_RDMA_WRITE, 1185 IB_WR_RDMA_WRITE_WITH_IMM, 1186 IB_WR_SEND, 1187 IB_WR_SEND_WITH_IMM, 1188 IB_WR_RDMA_READ, 1189 IB_WR_ATOMIC_CMP_AND_SWP, 1190 IB_WR_ATOMIC_FETCH_AND_ADD, 1191 IB_WR_LSO, 1192 IB_WR_SEND_WITH_INV, 1193 IB_WR_RDMA_READ_WITH_INV, 1194 IB_WR_LOCAL_INV, 1195 IB_WR_REG_MR, 1196 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1197 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1198 IB_WR_REG_SIG_MR, 1199 /* reserve values for low level drivers' internal use. 1200 * These values will not be used at all in the ib core layer. 1201 */ 1202 IB_WR_RESERVED1 = 0xf0, 1203 IB_WR_RESERVED2, 1204 IB_WR_RESERVED3, 1205 IB_WR_RESERVED4, 1206 IB_WR_RESERVED5, 1207 IB_WR_RESERVED6, 1208 IB_WR_RESERVED7, 1209 IB_WR_RESERVED8, 1210 IB_WR_RESERVED9, 1211 IB_WR_RESERVED10, 1212 IB_WR_DUMMY = -1, /* force enum signed */ 1213 }; 1214 1215 enum ib_send_flags { 1216 IB_SEND_FENCE = 1, 1217 IB_SEND_SIGNALED = (1<<1), 1218 IB_SEND_SOLICITED = (1<<2), 1219 IB_SEND_INLINE = (1<<3), 1220 IB_SEND_IP_CSUM = (1<<4), 1221 1222 /* reserve bits 26-31 for low level drivers' internal use */ 1223 IB_SEND_RESERVED_START = (1 << 26), 1224 IB_SEND_RESERVED_END = (1 << 31), 1225 }; 1226 1227 struct ib_sge { 1228 u64 addr; 1229 u32 length; 1230 u32 lkey; 1231 }; 1232 1233 struct ib_cqe { 1234 void (*done)(struct ib_cq *cq, struct ib_wc *wc); 1235 }; 1236 1237 struct ib_send_wr { 1238 struct ib_send_wr *next; 1239 union { 1240 u64 wr_id; 1241 struct ib_cqe *wr_cqe; 1242 }; 1243 struct ib_sge *sg_list; 1244 int num_sge; 1245 enum ib_wr_opcode opcode; 1246 int send_flags; 1247 union { 1248 __be32 imm_data; 1249 u32 invalidate_rkey; 1250 } ex; 1251 }; 1252 1253 struct ib_rdma_wr { 1254 struct ib_send_wr wr; 1255 u64 remote_addr; 1256 u32 rkey; 1257 }; 1258 1259 static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr) 1260 { 1261 return container_of(wr, struct ib_rdma_wr, wr); 1262 } 1263 1264 struct ib_atomic_wr { 1265 struct ib_send_wr wr; 1266 u64 remote_addr; 1267 u64 compare_add; 1268 u64 swap; 1269 u64 compare_add_mask; 1270 u64 swap_mask; 1271 u32 rkey; 1272 }; 1273 1274 static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr) 1275 { 1276 return container_of(wr, struct ib_atomic_wr, wr); 1277 } 1278 1279 struct ib_ud_wr { 1280 struct ib_send_wr wr; 1281 struct ib_ah *ah; 1282 void *header; 1283 int hlen; 1284 int mss; 1285 u32 remote_qpn; 1286 u32 remote_qkey; 1287 u16 pkey_index; /* valid for GSI only */ 1288 u8 port_num; /* valid for DR SMPs on switch only */ 1289 }; 1290 1291 static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr) 1292 { 1293 return container_of(wr, struct ib_ud_wr, wr); 1294 } 1295 1296 struct ib_reg_wr { 1297 struct ib_send_wr wr; 1298 struct ib_mr *mr; 1299 u32 key; 1300 int access; 1301 }; 1302 1303 static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr) 1304 { 1305 return container_of(wr, struct ib_reg_wr, wr); 1306 } 1307 1308 struct ib_sig_handover_wr { 1309 struct ib_send_wr wr; 1310 struct ib_sig_attrs *sig_attrs; 1311 struct ib_mr *sig_mr; 1312 int access_flags; 1313 struct ib_sge *prot; 1314 }; 1315 1316 static inline const struct ib_sig_handover_wr *sig_handover_wr(const struct ib_send_wr *wr) 1317 { 1318 return container_of(wr, struct ib_sig_handover_wr, wr); 1319 } 1320 1321 struct ib_recv_wr { 1322 struct ib_recv_wr *next; 1323 union { 1324 u64 wr_id; 1325 struct ib_cqe *wr_cqe; 1326 }; 1327 struct ib_sge *sg_list; 1328 int num_sge; 1329 }; 1330 1331 enum ib_access_flags { 1332 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE, 1333 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE, 1334 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ, 1335 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC, 1336 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND, 1337 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED, 1338 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND, 1339 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB, 1340 IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING, 1341 1342 IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE, 1343 IB_ACCESS_SUPPORTED = 1344 ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL, 1345 }; 1346 1347 /* 1348 * XXX: these are apparently used for ->rereg_user_mr, no idea why they 1349 * are hidden here instead of a uapi header! 1350 */ 1351 enum ib_mr_rereg_flags { 1352 IB_MR_REREG_TRANS = 1, 1353 IB_MR_REREG_PD = (1<<1), 1354 IB_MR_REREG_ACCESS = (1<<2), 1355 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1356 }; 1357 1358 struct ib_fmr_attr { 1359 int max_pages; 1360 int max_maps; 1361 u8 page_shift; 1362 }; 1363 1364 struct ib_umem; 1365 1366 enum rdma_remove_reason { 1367 /* 1368 * Userspace requested uobject deletion or initial try 1369 * to remove uobject via cleanup. Call could fail 1370 */ 1371 RDMA_REMOVE_DESTROY, 1372 /* Context deletion. This call should delete the actual object itself */ 1373 RDMA_REMOVE_CLOSE, 1374 /* Driver is being hot-unplugged. This call should delete the actual object itself */ 1375 RDMA_REMOVE_DRIVER_REMOVE, 1376 /* uobj is being cleaned-up before being committed */ 1377 RDMA_REMOVE_ABORT, 1378 }; 1379 1380 struct ib_rdmacg_object { 1381 }; 1382 1383 struct ib_ucontext { 1384 struct ib_device *device; 1385 struct ib_uverbs_file *ufile; 1386 /* 1387 * 'closing' can be read by the driver only during a destroy callback, 1388 * it is set when we are closing the file descriptor and indicates 1389 * that mm_sem may be locked. 1390 */ 1391 bool closing; 1392 1393 bool cleanup_retryable; 1394 1395 struct ib_rdmacg_object cg_obj; 1396 /* 1397 * Implementation details of the RDMA core, don't use in drivers: 1398 */ 1399 struct xarray mmap_xa; 1400 }; 1401 1402 struct ib_uobject { 1403 u64 user_handle; /* handle given to us by userspace */ 1404 /* ufile & ucontext owning this object */ 1405 struct ib_uverbs_file *ufile; 1406 /* FIXME, save memory: ufile->context == context */ 1407 struct ib_ucontext *context; /* associated user context */ 1408 void *object; /* containing object */ 1409 struct list_head list; /* link to context's list */ 1410 struct ib_rdmacg_object cg_obj; /* rdmacg object */ 1411 int id; /* index into kernel idr */ 1412 struct kref ref; 1413 atomic_t usecnt; /* protects exclusive access */ 1414 struct rcu_head rcu; /* kfree_rcu() overhead */ 1415 1416 const struct uverbs_api_object *uapi_object; 1417 }; 1418 1419 struct ib_udata { 1420 const u8 __user *inbuf; 1421 u8 __user *outbuf; 1422 size_t inlen; 1423 size_t outlen; 1424 }; 1425 1426 struct ib_pd { 1427 u32 local_dma_lkey; 1428 u32 flags; 1429 struct ib_device *device; 1430 struct ib_uobject *uobject; 1431 atomic_t usecnt; /* count all resources */ 1432 1433 u32 unsafe_global_rkey; 1434 1435 /* 1436 * Implementation details of the RDMA core, don't use in drivers: 1437 */ 1438 struct ib_mr *__internal_mr; 1439 }; 1440 1441 struct ib_xrcd { 1442 struct ib_device *device; 1443 atomic_t usecnt; /* count all exposed resources */ 1444 struct inode *inode; 1445 1446 struct mutex tgt_qp_mutex; 1447 struct list_head tgt_qp_list; 1448 }; 1449 1450 struct ib_ah { 1451 struct ib_device *device; 1452 struct ib_pd *pd; 1453 struct ib_uobject *uobject; 1454 }; 1455 1456 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1457 1458 enum ib_poll_context { 1459 IB_POLL_DIRECT, /* caller context, no hw completions */ 1460 IB_POLL_SOFTIRQ, /* poll from softirq context */ 1461 IB_POLL_WORKQUEUE, /* poll from workqueue */ 1462 }; 1463 1464 struct ib_cq { 1465 struct ib_device *device; 1466 struct ib_ucq_object *uobject; 1467 ib_comp_handler comp_handler; 1468 void (*event_handler)(struct ib_event *, void *); 1469 void *cq_context; 1470 int cqe; 1471 atomic_t usecnt; /* count number of work queues */ 1472 enum ib_poll_context poll_ctx; 1473 struct work_struct work; 1474 }; 1475 1476 struct ib_srq { 1477 struct ib_device *device; 1478 struct ib_pd *pd; 1479 struct ib_usrq_object *uobject; 1480 void (*event_handler)(struct ib_event *, void *); 1481 void *srq_context; 1482 enum ib_srq_type srq_type; 1483 atomic_t usecnt; 1484 1485 struct { 1486 struct ib_cq *cq; 1487 union { 1488 struct { 1489 struct ib_xrcd *xrcd; 1490 u32 srq_num; 1491 } xrc; 1492 }; 1493 } ext; 1494 }; 1495 1496 enum ib_wq_type { 1497 IB_WQT_RQ 1498 }; 1499 1500 enum ib_wq_state { 1501 IB_WQS_RESET, 1502 IB_WQS_RDY, 1503 IB_WQS_ERR 1504 }; 1505 1506 struct ib_wq { 1507 struct ib_device *device; 1508 struct ib_uwq_object *uobject; 1509 void *wq_context; 1510 void (*event_handler)(struct ib_event *, void *); 1511 struct ib_pd *pd; 1512 struct ib_cq *cq; 1513 u32 wq_num; 1514 enum ib_wq_state state; 1515 enum ib_wq_type wq_type; 1516 atomic_t usecnt; 1517 }; 1518 1519 enum ib_wq_flags { 1520 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, 1521 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, 1522 IB_WQ_FLAGS_DELAY_DROP = 1 << 2, 1523 IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3, 1524 }; 1525 1526 struct ib_wq_init_attr { 1527 void *wq_context; 1528 enum ib_wq_type wq_type; 1529 u32 max_wr; 1530 u32 max_sge; 1531 struct ib_cq *cq; 1532 void (*event_handler)(struct ib_event *, void *); 1533 u32 create_flags; /* Use enum ib_wq_flags */ 1534 }; 1535 1536 enum ib_wq_attr_mask { 1537 IB_WQ_STATE = 1 << 0, 1538 IB_WQ_CUR_STATE = 1 << 1, 1539 IB_WQ_FLAGS = 1 << 2, 1540 }; 1541 1542 struct ib_wq_attr { 1543 enum ib_wq_state wq_state; 1544 enum ib_wq_state curr_wq_state; 1545 u32 flags; /* Use enum ib_wq_flags */ 1546 u32 flags_mask; /* Use enum ib_wq_flags */ 1547 }; 1548 1549 struct ib_rwq_ind_table { 1550 struct ib_device *device; 1551 struct ib_uobject *uobject; 1552 atomic_t usecnt; 1553 u32 ind_tbl_num; 1554 u32 log_ind_tbl_size; 1555 struct ib_wq **ind_tbl; 1556 }; 1557 1558 struct ib_rwq_ind_table_init_attr { 1559 u32 log_ind_tbl_size; 1560 /* Each entry is a pointer to Receive Work Queue */ 1561 struct ib_wq **ind_tbl; 1562 }; 1563 1564 /* 1565 * @max_write_sge: Maximum SGE elements per RDMA WRITE request. 1566 * @max_read_sge: Maximum SGE elements per RDMA READ request. 1567 */ 1568 struct ib_qp { 1569 struct ib_device *device; 1570 struct ib_pd *pd; 1571 struct ib_cq *send_cq; 1572 struct ib_cq *recv_cq; 1573 spinlock_t mr_lock; 1574 struct ib_srq *srq; 1575 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1576 struct list_head xrcd_list; 1577 1578 /* count times opened, mcast attaches, flow attaches */ 1579 atomic_t usecnt; 1580 struct list_head open_list; 1581 struct ib_qp *real_qp; 1582 struct ib_uqp_object *uobject; 1583 void (*event_handler)(struct ib_event *, void *); 1584 void *qp_context; 1585 u32 qp_num; 1586 u32 max_write_sge; 1587 u32 max_read_sge; 1588 enum ib_qp_type qp_type; 1589 struct ib_rwq_ind_table *rwq_ind_tbl; 1590 u8 port; 1591 }; 1592 1593 struct ib_dm { 1594 struct ib_device *device; 1595 u32 length; 1596 u32 flags; 1597 struct ib_uobject *uobject; 1598 atomic_t usecnt; 1599 }; 1600 1601 struct ib_mr { 1602 struct ib_device *device; 1603 struct ib_pd *pd; 1604 u32 lkey; 1605 u32 rkey; 1606 u64 iova; 1607 u64 length; 1608 unsigned int page_size; 1609 enum ib_mr_type type; 1610 bool need_inval; 1611 union { 1612 struct ib_uobject *uobject; /* user */ 1613 struct list_head qp_entry; /* FR */ 1614 }; 1615 1616 struct ib_dm *dm; 1617 struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */ 1618 }; 1619 1620 struct ib_mw { 1621 struct ib_device *device; 1622 struct ib_pd *pd; 1623 struct ib_uobject *uobject; 1624 u32 rkey; 1625 enum ib_mw_type type; 1626 }; 1627 1628 struct ib_fmr { 1629 struct ib_device *device; 1630 struct ib_pd *pd; 1631 struct list_head list; 1632 u32 lkey; 1633 u32 rkey; 1634 }; 1635 1636 /* Supported steering options */ 1637 enum ib_flow_attr_type { 1638 /* steering according to rule specifications */ 1639 IB_FLOW_ATTR_NORMAL = 0x0, 1640 /* default unicast and multicast rule - 1641 * receive all Eth traffic which isn't steered to any QP 1642 */ 1643 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1644 /* default multicast rule - 1645 * receive all Eth multicast traffic which isn't steered to any QP 1646 */ 1647 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1648 /* sniffer rule - receive all port traffic */ 1649 IB_FLOW_ATTR_SNIFFER = 0x3 1650 }; 1651 1652 /* Supported steering header types */ 1653 enum ib_flow_spec_type { 1654 /* L2 headers*/ 1655 IB_FLOW_SPEC_ETH = 0x20, 1656 IB_FLOW_SPEC_IB = 0x22, 1657 /* L3 header*/ 1658 IB_FLOW_SPEC_IPV4 = 0x30, 1659 IB_FLOW_SPEC_IPV6 = 0x31, 1660 IB_FLOW_SPEC_ESP = 0x34, 1661 /* L4 headers*/ 1662 IB_FLOW_SPEC_TCP = 0x40, 1663 IB_FLOW_SPEC_UDP = 0x41, 1664 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, 1665 IB_FLOW_SPEC_GRE = 0x51, 1666 IB_FLOW_SPEC_MPLS = 0x60, 1667 IB_FLOW_SPEC_INNER = 0x100, 1668 /* Actions */ 1669 IB_FLOW_SPEC_ACTION_TAG = 0x1000, 1670 IB_FLOW_SPEC_ACTION_DROP = 0x1001, 1671 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002, 1672 IB_FLOW_SPEC_ACTION_COUNT = 0x1003, 1673 }; 1674 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1675 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10 1676 1677 /* Flow steering rule priority is set according to it's domain. 1678 * Lower domain value means higher priority. 1679 */ 1680 enum ib_flow_domain { 1681 IB_FLOW_DOMAIN_USER, 1682 IB_FLOW_DOMAIN_ETHTOOL, 1683 IB_FLOW_DOMAIN_RFS, 1684 IB_FLOW_DOMAIN_NIC, 1685 IB_FLOW_DOMAIN_NUM /* Must be last */ 1686 }; 1687 1688 enum ib_flow_flags { 1689 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ 1690 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ 1691 }; 1692 1693 struct ib_flow_eth_filter { 1694 u8 dst_mac[6]; 1695 u8 src_mac[6]; 1696 __be16 ether_type; 1697 __be16 vlan_tag; 1698 /* Must be last */ 1699 u8 real_sz[0]; 1700 }; 1701 1702 struct ib_flow_spec_eth { 1703 enum ib_flow_spec_type type; 1704 u16 size; 1705 struct ib_flow_eth_filter val; 1706 struct ib_flow_eth_filter mask; 1707 }; 1708 1709 struct ib_flow_ib_filter { 1710 __be16 dlid; 1711 __u8 sl; 1712 /* Must be last */ 1713 u8 real_sz[0]; 1714 }; 1715 1716 struct ib_flow_spec_ib { 1717 enum ib_flow_spec_type type; 1718 u16 size; 1719 struct ib_flow_ib_filter val; 1720 struct ib_flow_ib_filter mask; 1721 }; 1722 1723 /* IPv4 header flags */ 1724 enum ib_ipv4_flags { 1725 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */ 1726 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the 1727 last have this flag set */ 1728 }; 1729 1730 struct ib_flow_ipv4_filter { 1731 __be32 src_ip; 1732 __be32 dst_ip; 1733 u8 proto; 1734 u8 tos; 1735 u8 ttl; 1736 u8 flags; 1737 /* Must be last */ 1738 u8 real_sz[0]; 1739 }; 1740 1741 struct ib_flow_spec_ipv4 { 1742 enum ib_flow_spec_type type; 1743 u16 size; 1744 struct ib_flow_ipv4_filter val; 1745 struct ib_flow_ipv4_filter mask; 1746 }; 1747 1748 struct ib_flow_ipv6_filter { 1749 u8 src_ip[16]; 1750 u8 dst_ip[16]; 1751 __be32 flow_label; 1752 u8 next_hdr; 1753 u8 traffic_class; 1754 u8 hop_limit; 1755 /* Must be last */ 1756 u8 real_sz[0]; 1757 }; 1758 1759 struct ib_flow_spec_ipv6 { 1760 enum ib_flow_spec_type type; 1761 u16 size; 1762 struct ib_flow_ipv6_filter val; 1763 struct ib_flow_ipv6_filter mask; 1764 }; 1765 1766 struct ib_flow_tcp_udp_filter { 1767 __be16 dst_port; 1768 __be16 src_port; 1769 /* Must be last */ 1770 u8 real_sz[0]; 1771 }; 1772 1773 struct ib_flow_spec_tcp_udp { 1774 enum ib_flow_spec_type type; 1775 u16 size; 1776 struct ib_flow_tcp_udp_filter val; 1777 struct ib_flow_tcp_udp_filter mask; 1778 }; 1779 1780 struct ib_flow_tunnel_filter { 1781 __be32 tunnel_id; 1782 u8 real_sz[0]; 1783 }; 1784 1785 /* ib_flow_spec_tunnel describes the Vxlan tunnel 1786 * the tunnel_id from val has the vni value 1787 */ 1788 struct ib_flow_spec_tunnel { 1789 u32 type; 1790 u16 size; 1791 struct ib_flow_tunnel_filter val; 1792 struct ib_flow_tunnel_filter mask; 1793 }; 1794 1795 struct ib_flow_esp_filter { 1796 __be32 spi; 1797 __be32 seq; 1798 /* Must be last */ 1799 u8 real_sz[0]; 1800 }; 1801 1802 struct ib_flow_spec_esp { 1803 u32 type; 1804 u16 size; 1805 struct ib_flow_esp_filter val; 1806 struct ib_flow_esp_filter mask; 1807 }; 1808 1809 struct ib_flow_gre_filter { 1810 __be16 c_ks_res0_ver; 1811 __be16 protocol; 1812 __be32 key; 1813 /* Must be last */ 1814 u8 real_sz[0]; 1815 }; 1816 1817 struct ib_flow_spec_gre { 1818 u32 type; 1819 u16 size; 1820 struct ib_flow_gre_filter val; 1821 struct ib_flow_gre_filter mask; 1822 }; 1823 1824 struct ib_flow_mpls_filter { 1825 __be32 tag; 1826 /* Must be last */ 1827 u8 real_sz[0]; 1828 }; 1829 1830 struct ib_flow_spec_mpls { 1831 u32 type; 1832 u16 size; 1833 struct ib_flow_mpls_filter val; 1834 struct ib_flow_mpls_filter mask; 1835 }; 1836 1837 struct ib_flow_spec_action_tag { 1838 enum ib_flow_spec_type type; 1839 u16 size; 1840 u32 tag_id; 1841 }; 1842 1843 struct ib_flow_spec_action_drop { 1844 enum ib_flow_spec_type type; 1845 u16 size; 1846 }; 1847 1848 struct ib_flow_spec_action_handle { 1849 enum ib_flow_spec_type type; 1850 u16 size; 1851 struct ib_flow_action *act; 1852 }; 1853 1854 enum ib_counters_description { 1855 IB_COUNTER_PACKETS, 1856 IB_COUNTER_BYTES, 1857 }; 1858 1859 struct ib_flow_spec_action_count { 1860 enum ib_flow_spec_type type; 1861 u16 size; 1862 struct ib_counters *counters; 1863 }; 1864 1865 union ib_flow_spec { 1866 struct { 1867 u32 type; 1868 u16 size; 1869 }; 1870 struct ib_flow_spec_eth eth; 1871 struct ib_flow_spec_ib ib; 1872 struct ib_flow_spec_ipv4 ipv4; 1873 struct ib_flow_spec_tcp_udp tcp_udp; 1874 struct ib_flow_spec_ipv6 ipv6; 1875 struct ib_flow_spec_tunnel tunnel; 1876 struct ib_flow_spec_esp esp; 1877 struct ib_flow_spec_gre gre; 1878 struct ib_flow_spec_mpls mpls; 1879 struct ib_flow_spec_action_tag flow_tag; 1880 struct ib_flow_spec_action_drop drop; 1881 struct ib_flow_spec_action_handle action; 1882 struct ib_flow_spec_action_count flow_count; 1883 }; 1884 1885 struct ib_flow_attr { 1886 enum ib_flow_attr_type type; 1887 u16 size; 1888 u16 priority; 1889 u32 flags; 1890 u8 num_of_specs; 1891 u8 port; 1892 union ib_flow_spec flows[0]; 1893 }; 1894 1895 struct ib_flow { 1896 struct ib_qp *qp; 1897 struct ib_device *device; 1898 struct ib_uobject *uobject; 1899 }; 1900 1901 enum ib_flow_action_type { 1902 IB_FLOW_ACTION_UNSPECIFIED, 1903 IB_FLOW_ACTION_ESP = 1, 1904 }; 1905 1906 struct ib_flow_action_attrs_esp_keymats { 1907 enum ib_uverbs_flow_action_esp_keymat protocol; 1908 union { 1909 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; 1910 } keymat; 1911 }; 1912 1913 struct ib_flow_action_attrs_esp_replays { 1914 enum ib_uverbs_flow_action_esp_replay protocol; 1915 union { 1916 struct ib_uverbs_flow_action_esp_replay_bmp bmp; 1917 } replay; 1918 }; 1919 1920 enum ib_flow_action_attrs_esp_flags { 1921 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags 1922 * This is done in order to share the same flags between user-space and 1923 * kernel and spare an unnecessary translation. 1924 */ 1925 1926 /* Kernel flags */ 1927 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, 1928 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, 1929 }; 1930 1931 struct ib_flow_spec_list { 1932 struct ib_flow_spec_list *next; 1933 union ib_flow_spec spec; 1934 }; 1935 1936 struct ib_flow_action_attrs_esp { 1937 struct ib_flow_action_attrs_esp_keymats *keymat; 1938 struct ib_flow_action_attrs_esp_replays *replay; 1939 struct ib_flow_spec_list *encap; 1940 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. 1941 * Value of 0 is a valid value. 1942 */ 1943 u32 esn; 1944 u32 spi; 1945 u32 seq; 1946 u32 tfc_pad; 1947 /* Use enum ib_flow_action_attrs_esp_flags */ 1948 u64 flags; 1949 u64 hard_limit_pkts; 1950 }; 1951 1952 struct ib_flow_action { 1953 struct ib_device *device; 1954 struct ib_uobject *uobject; 1955 enum ib_flow_action_type type; 1956 atomic_t usecnt; 1957 }; 1958 1959 1960 struct ib_mad_hdr; 1961 struct ib_grh; 1962 1963 enum ib_process_mad_flags { 1964 IB_MAD_IGNORE_MKEY = 1, 1965 IB_MAD_IGNORE_BKEY = 2, 1966 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 1967 }; 1968 1969 enum ib_mad_result { 1970 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 1971 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 1972 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 1973 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 1974 }; 1975 1976 #define IB_DEVICE_NAME_MAX 64 1977 1978 struct ib_cache { 1979 rwlock_t lock; 1980 struct ib_event_handler event_handler; 1981 struct ib_pkey_cache **pkey_cache; 1982 struct ib_gid_table **gid_cache; 1983 u8 *lmc_cache; 1984 }; 1985 1986 struct ib_dma_mapping_ops { 1987 int (*mapping_error)(struct ib_device *dev, 1988 u64 dma_addr); 1989 u64 (*map_single)(struct ib_device *dev, 1990 void *ptr, size_t size, 1991 enum dma_data_direction direction); 1992 void (*unmap_single)(struct ib_device *dev, 1993 u64 addr, size_t size, 1994 enum dma_data_direction direction); 1995 u64 (*map_page)(struct ib_device *dev, 1996 struct page *page, unsigned long offset, 1997 size_t size, 1998 enum dma_data_direction direction); 1999 void (*unmap_page)(struct ib_device *dev, 2000 u64 addr, size_t size, 2001 enum dma_data_direction direction); 2002 int (*map_sg)(struct ib_device *dev, 2003 struct scatterlist *sg, int nents, 2004 enum dma_data_direction direction); 2005 void (*unmap_sg)(struct ib_device *dev, 2006 struct scatterlist *sg, int nents, 2007 enum dma_data_direction direction); 2008 int (*map_sg_attrs)(struct ib_device *dev, 2009 struct scatterlist *sg, int nents, 2010 enum dma_data_direction direction, 2011 struct dma_attrs *attrs); 2012 void (*unmap_sg_attrs)(struct ib_device *dev, 2013 struct scatterlist *sg, int nents, 2014 enum dma_data_direction direction, 2015 struct dma_attrs *attrs); 2016 void (*sync_single_for_cpu)(struct ib_device *dev, 2017 u64 dma_handle, 2018 size_t size, 2019 enum dma_data_direction dir); 2020 void (*sync_single_for_device)(struct ib_device *dev, 2021 u64 dma_handle, 2022 size_t size, 2023 enum dma_data_direction dir); 2024 void *(*alloc_coherent)(struct ib_device *dev, 2025 size_t size, 2026 u64 *dma_handle, 2027 gfp_t flag); 2028 void (*free_coherent)(struct ib_device *dev, 2029 size_t size, void *cpu_addr, 2030 u64 dma_handle); 2031 }; 2032 2033 struct iw_cm_verbs; 2034 2035 struct ib_port_immutable { 2036 int pkey_tbl_len; 2037 int gid_tbl_len; 2038 u32 core_cap_flags; 2039 u32 max_mad_size; 2040 }; 2041 2042 struct ib_counters { 2043 struct ib_device *device; 2044 struct ib_uobject *uobject; 2045 /* num of objects attached */ 2046 atomic_t usecnt; 2047 }; 2048 2049 struct ib_counters_read_attr { 2050 u64 *counters_buff; 2051 u32 ncounters; 2052 u32 flags; /* use enum ib_read_counters_flags */ 2053 }; 2054 2055 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \ 2056 .size_##ib_struct = \ 2057 (sizeof(struct drv_struct) + \ 2058 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \ 2059 BUILD_BUG_ON_ZERO( \ 2060 !__same_type(((struct drv_struct *)NULL)->member, \ 2061 struct ib_struct))) 2062 2063 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ 2064 ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp)) 2065 2066 #define rdma_zalloc_drv_obj(ib_dev, ib_type) \ 2067 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL) 2068 2069 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct 2070 2071 struct rdma_user_mmap_entry { 2072 struct kref ref; 2073 struct ib_ucontext *ucontext; 2074 unsigned long start_pgoff; 2075 size_t npages; 2076 bool driver_removed; 2077 }; 2078 2079 /* Return the offset (in bytes) the user should pass to libc's mmap() */ 2080 static inline u64 2081 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry) 2082 { 2083 return (u64)entry->start_pgoff << PAGE_SHIFT; 2084 } 2085 2086 struct ib_device_ops { 2087 enum rdma_driver_id driver_id; 2088 DECLARE_RDMA_OBJ_SIZE(ib_ah); 2089 DECLARE_RDMA_OBJ_SIZE(ib_cq); 2090 DECLARE_RDMA_OBJ_SIZE(ib_pd); 2091 DECLARE_RDMA_OBJ_SIZE(ib_srq); 2092 DECLARE_RDMA_OBJ_SIZE(ib_ucontext); 2093 }; 2094 2095 #define INIT_IB_DEVICE_OPS(pop, driver, DRIVER) do { \ 2096 (pop)[0] .driver_id = RDMA_DRIVER_##DRIVER; \ 2097 (pop)[0] INIT_RDMA_OBJ_SIZE(ib_ah, driver##_ib_ah, ibah); \ 2098 (pop)[0] INIT_RDMA_OBJ_SIZE(ib_cq, driver##_ib_cq, ibcq); \ 2099 (pop)[0] INIT_RDMA_OBJ_SIZE(ib_pd, driver##_ib_pd, ibpd); \ 2100 (pop)[0] INIT_RDMA_OBJ_SIZE(ib_srq, driver##_ib_srq, ibsrq); \ 2101 (pop)[0] INIT_RDMA_OBJ_SIZE(ib_ucontext, driver##_ib_ucontext, ibucontext); \ 2102 } while (0) 2103 2104 struct ib_device { 2105 struct device *dma_device; 2106 struct ib_device_ops ops; 2107 2108 char name[IB_DEVICE_NAME_MAX]; 2109 2110 struct list_head event_handler_list; 2111 spinlock_t event_handler_lock; 2112 2113 spinlock_t client_data_lock; 2114 struct list_head core_list; 2115 /* Access to the client_data_list is protected by the client_data_lock 2116 * spinlock and the lists_rwsem read-write semaphore */ 2117 struct list_head client_data_list; 2118 2119 struct ib_cache cache; 2120 /** 2121 * port_immutable is indexed by port number 2122 */ 2123 struct ib_port_immutable *port_immutable; 2124 2125 int num_comp_vectors; 2126 2127 struct iw_cm_verbs *iwcm; 2128 2129 /** 2130 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 2131 * driver initialized data. The struct is kfree()'ed by the sysfs 2132 * core when the device is removed. A lifespan of -1 in the return 2133 * struct tells the core to set a default lifespan. 2134 */ 2135 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 2136 u8 port_num); 2137 /** 2138 * get_hw_stats - Fill in the counter value(s) in the stats struct. 2139 * @index - The index in the value array we wish to have updated, or 2140 * num_counters if we want all stats updated 2141 * Return codes - 2142 * < 0 - Error, no counters updated 2143 * index - Updated the single counter pointed to by index 2144 * num_counters - Updated all counters (will reset the timestamp 2145 * and prevent further calls for lifespan milliseconds) 2146 * Drivers are allowed to update all counters in leiu of just the 2147 * one given in index at their option 2148 */ 2149 int (*get_hw_stats)(struct ib_device *device, 2150 struct rdma_hw_stats *stats, 2151 u8 port, int index); 2152 int (*query_device)(struct ib_device *device, 2153 struct ib_device_attr *device_attr, 2154 struct ib_udata *udata); 2155 int (*query_port)(struct ib_device *device, 2156 u8 port_num, 2157 struct ib_port_attr *port_attr); 2158 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 2159 u8 port_num); 2160 /* When calling get_netdev, the HW vendor's driver should return the 2161 * net device of device @device at port @port_num or NULL if such 2162 * a net device doesn't exist. The vendor driver should call dev_hold 2163 * on this net device. The HW vendor's device driver must guarantee 2164 * that this function returns NULL before the net device reaches 2165 * NETDEV_UNREGISTER_FINAL state. 2166 */ 2167 if_t (*get_netdev)(struct ib_device *device, 2168 u8 port_num); 2169 int (*query_gid)(struct ib_device *device, 2170 u8 port_num, int index, 2171 union ib_gid *gid); 2172 /* When calling add_gid, the HW vendor's driver should 2173 * add the gid of device @device at gid index @index of 2174 * port @port_num to be @gid. Meta-info of that gid (for example, 2175 * the network device related to this gid is available 2176 * at @attr. @context allows the HW vendor driver to store extra 2177 * information together with a GID entry. The HW vendor may allocate 2178 * memory to contain this information and store it in @context when a 2179 * new GID entry is written to. Params are consistent until the next 2180 * call of add_gid or delete_gid. The function should return 0 on 2181 * success or error otherwise. The function could be called 2182 * concurrently for different ports. This function is only called 2183 * when roce_gid_table is used. 2184 */ 2185 int (*add_gid)(struct ib_device *device, 2186 u8 port_num, 2187 unsigned int index, 2188 const union ib_gid *gid, 2189 const struct ib_gid_attr *attr, 2190 void **context); 2191 /* When calling del_gid, the HW vendor's driver should delete the 2192 * gid of device @device at gid index @index of port @port_num. 2193 * Upon the deletion of a GID entry, the HW vendor must free any 2194 * allocated memory. The caller will clear @context afterwards. 2195 * This function is only called when roce_gid_table is used. 2196 */ 2197 int (*del_gid)(struct ib_device *device, 2198 u8 port_num, 2199 unsigned int index, 2200 void **context); 2201 int (*query_pkey)(struct ib_device *device, 2202 u8 port_num, u16 index, u16 *pkey); 2203 int (*modify_device)(struct ib_device *device, 2204 int device_modify_mask, 2205 struct ib_device_modify *device_modify); 2206 int (*modify_port)(struct ib_device *device, 2207 u8 port_num, int port_modify_mask, 2208 struct ib_port_modify *port_modify); 2209 int (*alloc_ucontext)(struct ib_ucontext *uctx, 2210 struct ib_udata *udata); 2211 void (*dealloc_ucontext)(struct ib_ucontext *context); 2212 int (*mmap)(struct ib_ucontext *context, 2213 struct vm_area_struct *vma); 2214 int (*alloc_pd)(struct ib_pd *pd, 2215 struct ib_udata *udata); 2216 void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); 2217 int (*create_ah)(struct ib_ah *ah, struct ib_ah_attr *ah_attr, 2218 u32 flags, struct ib_udata *udata); 2219 int (*modify_ah)(struct ib_ah *ah, 2220 struct ib_ah_attr *ah_attr); 2221 int (*query_ah)(struct ib_ah *ah, 2222 struct ib_ah_attr *ah_attr); 2223 void (*destroy_ah)(struct ib_ah *ah, u32 flags); 2224 int (*create_srq)(struct ib_srq *srq, 2225 struct ib_srq_init_attr *srq_init_attr, 2226 struct ib_udata *udata); 2227 int (*modify_srq)(struct ib_srq *srq, 2228 struct ib_srq_attr *srq_attr, 2229 enum ib_srq_attr_mask srq_attr_mask, 2230 struct ib_udata *udata); 2231 int (*query_srq)(struct ib_srq *srq, 2232 struct ib_srq_attr *srq_attr); 2233 void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); 2234 int (*post_srq_recv)(struct ib_srq *srq, 2235 const struct ib_recv_wr *recv_wr, 2236 const struct ib_recv_wr **bad_recv_wr); 2237 struct ib_qp * (*create_qp)(struct ib_pd *pd, 2238 struct ib_qp_init_attr *qp_init_attr, 2239 struct ib_udata *udata); 2240 int (*modify_qp)(struct ib_qp *qp, 2241 struct ib_qp_attr *qp_attr, 2242 int qp_attr_mask, 2243 struct ib_udata *udata); 2244 int (*query_qp)(struct ib_qp *qp, 2245 struct ib_qp_attr *qp_attr, 2246 int qp_attr_mask, 2247 struct ib_qp_init_attr *qp_init_attr); 2248 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); 2249 int (*post_send)(struct ib_qp *qp, 2250 const struct ib_send_wr *send_wr, 2251 const struct ib_send_wr **bad_send_wr); 2252 int (*post_recv)(struct ib_qp *qp, 2253 const struct ib_recv_wr *recv_wr, 2254 const struct ib_recv_wr **bad_recv_wr); 2255 int (*create_cq)(struct ib_cq *, 2256 const struct ib_cq_init_attr *attr, 2257 struct ib_udata *udata); 2258 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 2259 u16 cq_period); 2260 void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); 2261 int (*resize_cq)(struct ib_cq *cq, int cqe, 2262 struct ib_udata *udata); 2263 int (*poll_cq)(struct ib_cq *cq, int num_entries, 2264 struct ib_wc *wc); 2265 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 2266 int (*req_notify_cq)(struct ib_cq *cq, 2267 enum ib_cq_notify_flags flags); 2268 int (*req_ncomp_notif)(struct ib_cq *cq, 2269 int wc_cnt); 2270 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 2271 int mr_access_flags); 2272 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 2273 u64 start, u64 length, 2274 u64 virt_addr, 2275 int mr_access_flags, 2276 struct ib_udata *udata); 2277 int (*rereg_user_mr)(struct ib_mr *mr, 2278 int flags, 2279 u64 start, u64 length, 2280 u64 virt_addr, 2281 int mr_access_flags, 2282 struct ib_pd *pd, 2283 struct ib_udata *udata); 2284 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata); 2285 struct ib_mr * (*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, 2286 u32 max_num_sg, struct ib_udata *udata); 2287 int (*advise_mr)(struct ib_pd *pd, 2288 enum ib_uverbs_advise_mr_advice advice, u32 flags, 2289 const struct ib_sge *sg_list, u32 num_sge, 2290 struct uverbs_attr_bundle *attrs); 2291 int (*map_mr_sg)(struct ib_mr *mr, 2292 struct scatterlist *sg, 2293 int sg_nents, 2294 unsigned int *sg_offset); 2295 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 2296 enum ib_mw_type type, 2297 struct ib_udata *udata); 2298 int (*dealloc_mw)(struct ib_mw *mw); 2299 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 2300 int mr_access_flags, 2301 struct ib_fmr_attr *fmr_attr); 2302 int (*map_phys_fmr)(struct ib_fmr *fmr, 2303 u64 *page_list, int list_len, 2304 u64 iova); 2305 int (*unmap_fmr)(struct list_head *fmr_list); 2306 int (*dealloc_fmr)(struct ib_fmr *fmr); 2307 int (*attach_mcast)(struct ib_qp *qp, 2308 union ib_gid *gid, 2309 u16 lid); 2310 int (*detach_mcast)(struct ib_qp *qp, 2311 union ib_gid *gid, 2312 u16 lid); 2313 int (*process_mad)(struct ib_device *device, 2314 int process_mad_flags, 2315 u8 port_num, 2316 const struct ib_wc *in_wc, 2317 const struct ib_grh *in_grh, 2318 const struct ib_mad_hdr *in_mad, 2319 size_t in_mad_size, 2320 struct ib_mad_hdr *out_mad, 2321 size_t *out_mad_size, 2322 u16 *out_mad_pkey_index); 2323 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 2324 struct ib_udata *udata); 2325 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); 2326 struct ib_flow * (*create_flow)(struct ib_qp *qp, 2327 struct ib_flow_attr 2328 *flow_attr, 2329 int domain, struct ib_udata *udata); 2330 int (*destroy_flow)(struct ib_flow *flow_id); 2331 struct ib_flow_action *(*create_flow_action_esp)( 2332 struct ib_device *device, 2333 const struct ib_flow_action_attrs_esp *attr, 2334 struct uverbs_attr_bundle *attrs); 2335 int (*destroy_flow_action)(struct ib_flow_action *action); 2336 int (*modify_flow_action_esp)( 2337 struct ib_flow_action *action, 2338 const struct ib_flow_action_attrs_esp *attr, 2339 struct uverbs_attr_bundle *attrs); 2340 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2341 struct ib_mr_status *mr_status); 2342 /** 2343 * This will be called once refcount of an entry in mmap_xa reaches 2344 * zero. The type of the memory that was mapped may differ between 2345 * entries and is opaque to the rdma_user_mmap interface. 2346 * Therefore needs to be implemented by the driver in mmap_free. 2347 */ 2348 void (*mmap_free)(struct rdma_user_mmap_entry *entry); 2349 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2350 void (*drain_rq)(struct ib_qp *qp); 2351 void (*drain_sq)(struct ib_qp *qp); 2352 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2353 int state); 2354 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2355 struct ifla_vf_info *ivf); 2356 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2357 struct ifla_vf_stats *stats); 2358 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2359 int type); 2360 struct ib_wq * (*create_wq)(struct ib_pd *pd, 2361 struct ib_wq_init_attr *init_attr, 2362 struct ib_udata *udata); 2363 void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); 2364 int (*modify_wq)(struct ib_wq *wq, 2365 struct ib_wq_attr *attr, 2366 u32 wq_attr_mask, 2367 struct ib_udata *udata); 2368 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, 2369 struct ib_rwq_ind_table_init_attr *init_attr, 2370 struct ib_udata *udata); 2371 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2372 struct ib_dm *(*alloc_dm)(struct ib_device *device, 2373 struct ib_ucontext *context, 2374 struct ib_dm_alloc_attr *attr, 2375 struct uverbs_attr_bundle *attrs); 2376 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs); 2377 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, 2378 struct ib_dm_mr_attr *attr, 2379 struct uverbs_attr_bundle *attrs); 2380 struct ib_counters *(*create_counters)( 2381 struct ib_device *device, struct uverbs_attr_bundle *attrs); 2382 int (*destroy_counters)(struct ib_counters *counters); 2383 int (*read_counters)(struct ib_counters *counters, 2384 struct ib_counters_read_attr *counters_read_attr, 2385 struct uverbs_attr_bundle *attrs); 2386 struct ib_dma_mapping_ops *dma_ops; 2387 2388 struct module *owner; 2389 struct device dev; 2390 struct kobject *ports_parent; 2391 struct list_head port_list; 2392 2393 enum { 2394 IB_DEV_UNINITIALIZED, 2395 IB_DEV_REGISTERED, 2396 IB_DEV_UNREGISTERED 2397 } reg_state; 2398 2399 int uverbs_abi_ver; 2400 u64 uverbs_cmd_mask; 2401 u64 uverbs_ex_cmd_mask; 2402 2403 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2404 __be64 node_guid; 2405 u32 local_dma_lkey; 2406 u16 is_switch:1; 2407 u8 node_type; 2408 u8 phys_port_cnt; 2409 struct ib_device_attr attrs; 2410 struct attribute_group *hw_stats_ag; 2411 struct rdma_hw_stats *hw_stats; 2412 2413 const struct uapi_definition *driver_def; 2414 2415 /** 2416 * The following mandatory functions are used only at device 2417 * registration. Keep functions such as these at the end of this 2418 * structure to avoid cache line misses when accessing struct ib_device 2419 * in fast paths. 2420 */ 2421 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 2422 void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); 2423 }; 2424 2425 struct ib_client { 2426 char *name; 2427 void (*add) (struct ib_device *); 2428 void (*remove)(struct ib_device *, void *client_data); 2429 2430 /* Returns the net_dev belonging to this ib_client and matching the 2431 * given parameters. 2432 * @dev: An RDMA device that the net_dev use for communication. 2433 * @port: A physical port number on the RDMA device. 2434 * @pkey: P_Key that the net_dev uses if applicable. 2435 * @gid: A GID that the net_dev uses to communicate. 2436 * @addr: An IP address the net_dev is configured with. 2437 * @client_data: The device's client data set by ib_set_client_data(). 2438 * 2439 * An ib_client that implements a net_dev on top of RDMA devices 2440 * (such as IP over IB) should implement this callback, allowing the 2441 * rdma_cm module to find the right net_dev for a given request. 2442 * 2443 * The caller is responsible for calling dev_put on the returned 2444 * netdev. */ 2445 if_t (*get_net_dev_by_params)( 2446 struct ib_device *dev, 2447 u8 port, 2448 u16 pkey, 2449 const union ib_gid *gid, 2450 const struct sockaddr *addr, 2451 void *client_data); 2452 struct list_head list; 2453 }; 2454 2455 struct ib_device *ib_alloc_device(size_t size); 2456 void ib_dealloc_device(struct ib_device *device); 2457 2458 void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); 2459 2460 int ib_register_device(struct ib_device *device, 2461 int (*port_callback)(struct ib_device *, 2462 u8, struct kobject *)); 2463 void ib_unregister_device(struct ib_device *device); 2464 2465 int ib_register_client (struct ib_client *client); 2466 void ib_unregister_client(struct ib_client *client); 2467 2468 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 2469 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2470 void *data); 2471 2472 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, 2473 unsigned long pfn, unsigned long size, pgprot_t prot, 2474 struct rdma_user_mmap_entry *entry); 2475 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, 2476 struct rdma_user_mmap_entry *entry, 2477 size_t length); 2478 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext, 2479 struct rdma_user_mmap_entry *entry, 2480 size_t length, u32 min_pgoff, 2481 u32 max_pgoff); 2482 2483 struct rdma_user_mmap_entry * 2484 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext, 2485 unsigned long pgoff); 2486 struct rdma_user_mmap_entry * 2487 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext, 2488 struct vm_area_struct *vma); 2489 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry); 2490 2491 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry); 2492 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2493 { 2494 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2495 } 2496 2497 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2498 { 2499 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2500 } 2501 2502 static inline bool ib_is_buffer_cleared(const void __user *p, 2503 size_t len) 2504 { 2505 bool ret; 2506 u8 *buf; 2507 2508 if (len > USHRT_MAX) 2509 return false; 2510 2511 buf = memdup_user(p, len); 2512 if (IS_ERR(buf)) 2513 return false; 2514 2515 ret = !memchr_inv(buf, 0, len); 2516 kfree(buf); 2517 return ret; 2518 } 2519 2520 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2521 size_t offset, 2522 size_t len) 2523 { 2524 return ib_is_buffer_cleared(udata->inbuf + offset, len); 2525 } 2526 2527 /** 2528 * ib_is_destroy_retryable - Check whether the uobject destruction 2529 * is retryable. 2530 * @ret: The initial destruction return code 2531 * @why: remove reason 2532 * @uobj: The uobject that is destroyed 2533 * 2534 * This function is a helper function that IB layer and low-level drivers 2535 * can use to consider whether the destruction of the given uobject is 2536 * retry-able. 2537 * It checks the original return code, if it wasn't success the destruction 2538 * is retryable according to the ucontext state (i.e. cleanup_retryable) and 2539 * the remove reason. (i.e. why). 2540 * Must be called with the object locked for destroy. 2541 */ 2542 static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why, 2543 struct ib_uobject *uobj) 2544 { 2545 return ret && (why == RDMA_REMOVE_DESTROY || 2546 uobj->context->cleanup_retryable); 2547 } 2548 2549 /** 2550 * ib_destroy_usecnt - Called during destruction to check the usecnt 2551 * @usecnt: The usecnt atomic 2552 * @why: remove reason 2553 * @uobj: The uobject that is destroyed 2554 * 2555 * Non-zero usecnts will block destruction unless destruction was triggered by 2556 * a ucontext cleanup. 2557 */ 2558 static inline int ib_destroy_usecnt(atomic_t *usecnt, 2559 enum rdma_remove_reason why, 2560 struct ib_uobject *uobj) 2561 { 2562 if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj)) 2563 return -EBUSY; 2564 return 0; 2565 } 2566 2567 /** 2568 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2569 * contains all required attributes and no attributes not allowed for 2570 * the given QP state transition. 2571 * @cur_state: Current QP state 2572 * @next_state: Next QP state 2573 * @type: QP type 2574 * @mask: Mask of supplied QP attributes 2575 * 2576 * This function is a helper function that a low-level driver's 2577 * modify_qp method can use to validate the consumer's input. It 2578 * checks that cur_state and next_state are valid QP states, that a 2579 * transition from cur_state to next_state is allowed by the IB spec, 2580 * and that the attribute mask supplied is allowed for the transition. 2581 */ 2582 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2583 enum ib_qp_type type, enum ib_qp_attr_mask mask); 2584 2585 int ib_register_event_handler (struct ib_event_handler *event_handler); 2586 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 2587 void ib_dispatch_event(struct ib_event *event); 2588 2589 int ib_query_port(struct ib_device *device, 2590 u8 port_num, struct ib_port_attr *port_attr); 2591 2592 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2593 u8 port_num); 2594 2595 /** 2596 * rdma_cap_ib_switch - Check if the device is IB switch 2597 * @device: Device to check 2598 * 2599 * Device driver is responsible for setting is_switch bit on 2600 * in ib_device structure at init time. 2601 * 2602 * Return: true if the device is IB switch. 2603 */ 2604 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2605 { 2606 return device->is_switch; 2607 } 2608 2609 /** 2610 * rdma_start_port - Return the first valid port number for the device 2611 * specified 2612 * 2613 * @device: Device to be checked 2614 * 2615 * Return start port number 2616 */ 2617 static inline u8 rdma_start_port(const struct ib_device *device) 2618 { 2619 return rdma_cap_ib_switch(device) ? 0 : 1; 2620 } 2621 2622 /** 2623 * rdma_end_port - Return the last valid port number for the device 2624 * specified 2625 * 2626 * @device: Device to be checked 2627 * 2628 * Return last port number 2629 */ 2630 static inline u8 rdma_end_port(const struct ib_device *device) 2631 { 2632 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; 2633 } 2634 2635 static inline int rdma_is_port_valid(const struct ib_device *device, 2636 unsigned int port) 2637 { 2638 return (port >= rdma_start_port(device) && 2639 port <= rdma_end_port(device)); 2640 } 2641 2642 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 2643 { 2644 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; 2645 } 2646 2647 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2648 { 2649 return device->port_immutable[port_num].core_cap_flags & 2650 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 2651 } 2652 2653 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 2654 { 2655 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 2656 } 2657 2658 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 2659 { 2660 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 2661 } 2662 2663 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 2664 { 2665 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; 2666 } 2667 2668 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2669 { 2670 return rdma_protocol_ib(device, port_num) || 2671 rdma_protocol_roce(device, port_num); 2672 } 2673 2674 /** 2675 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 2676 * Management Datagrams. 2677 * @device: Device to check 2678 * @port_num: Port number to check 2679 * 2680 * Management Datagrams (MAD) are a required part of the InfiniBand 2681 * specification and are supported on all InfiniBand devices. A slightly 2682 * extended version are also supported on OPA interfaces. 2683 * 2684 * Return: true if the port supports sending/receiving of MAD packets. 2685 */ 2686 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 2687 { 2688 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; 2689 } 2690 2691 /** 2692 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 2693 * Management Datagrams. 2694 * @device: Device to check 2695 * @port_num: Port number to check 2696 * 2697 * Intel OmniPath devices extend and/or replace the InfiniBand Management 2698 * datagrams with their own versions. These OPA MADs share many but not all of 2699 * the characteristics of InfiniBand MADs. 2700 * 2701 * OPA MADs differ in the following ways: 2702 * 2703 * 1) MADs are variable size up to 2K 2704 * IBTA defined MADs remain fixed at 256 bytes 2705 * 2) OPA SMPs must carry valid PKeys 2706 * 3) OPA SMP packets are a different format 2707 * 2708 * Return: true if the port supports OPA MAD packet formats. 2709 */ 2710 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 2711 { 2712 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) 2713 == RDMA_CORE_CAP_OPA_MAD; 2714 } 2715 2716 /** 2717 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 2718 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 2719 * @device: Device to check 2720 * @port_num: Port number to check 2721 * 2722 * Each InfiniBand node is required to provide a Subnet Management Agent 2723 * that the subnet manager can access. Prior to the fabric being fully 2724 * configured by the subnet manager, the SMA is accessed via a well known 2725 * interface called the Subnet Management Interface (SMI). This interface 2726 * uses directed route packets to communicate with the SM to get around the 2727 * chicken and egg problem of the SM needing to know what's on the fabric 2728 * in order to configure the fabric, and needing to configure the fabric in 2729 * order to send packets to the devices on the fabric. These directed 2730 * route packets do not need the fabric fully configured in order to reach 2731 * their destination. The SMI is the only method allowed to send 2732 * directed route packets on an InfiniBand fabric. 2733 * 2734 * Return: true if the port provides an SMI. 2735 */ 2736 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 2737 { 2738 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; 2739 } 2740 2741 /** 2742 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 2743 * Communication Manager. 2744 * @device: Device to check 2745 * @port_num: Port number to check 2746 * 2747 * The InfiniBand Communication Manager is one of many pre-defined General 2748 * Service Agents (GSA) that are accessed via the General Service 2749 * Interface (GSI). It's role is to facilitate establishment of connections 2750 * between nodes as well as other management related tasks for established 2751 * connections. 2752 * 2753 * Return: true if the port supports an IB CM (this does not guarantee that 2754 * a CM is actually running however). 2755 */ 2756 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 2757 { 2758 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; 2759 } 2760 2761 /** 2762 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 2763 * Communication Manager. 2764 * @device: Device to check 2765 * @port_num: Port number to check 2766 * 2767 * Similar to above, but specific to iWARP connections which have a different 2768 * managment protocol than InfiniBand. 2769 * 2770 * Return: true if the port supports an iWARP CM (this does not guarantee that 2771 * a CM is actually running however). 2772 */ 2773 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 2774 { 2775 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; 2776 } 2777 2778 /** 2779 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 2780 * Subnet Administration. 2781 * @device: Device to check 2782 * @port_num: Port number to check 2783 * 2784 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 2785 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 2786 * fabrics, devices should resolve routes to other hosts by contacting the 2787 * SA to query the proper route. 2788 * 2789 * Return: true if the port should act as a client to the fabric Subnet 2790 * Administration interface. This does not imply that the SA service is 2791 * running locally. 2792 */ 2793 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 2794 { 2795 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; 2796 } 2797 2798 /** 2799 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 2800 * Multicast. 2801 * @device: Device to check 2802 * @port_num: Port number to check 2803 * 2804 * InfiniBand multicast registration is more complex than normal IPv4 or 2805 * IPv6 multicast registration. Each Host Channel Adapter must register 2806 * with the Subnet Manager when it wishes to join a multicast group. It 2807 * should do so only once regardless of how many queue pairs it subscribes 2808 * to this group. And it should leave the group only after all queue pairs 2809 * attached to the group have been detached. 2810 * 2811 * Return: true if the port must undertake the additional adminstrative 2812 * overhead of registering/unregistering with the SM and tracking of the 2813 * total number of queue pairs attached to the multicast group. 2814 */ 2815 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 2816 { 2817 return rdma_cap_ib_sa(device, port_num); 2818 } 2819 2820 /** 2821 * rdma_cap_af_ib - Check if the port of device has the capability 2822 * Native Infiniband Address. 2823 * @device: Device to check 2824 * @port_num: Port number to check 2825 * 2826 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 2827 * GID. RoCE uses a different mechanism, but still generates a GID via 2828 * a prescribed mechanism and port specific data. 2829 * 2830 * Return: true if the port uses a GID address to identify devices on the 2831 * network. 2832 */ 2833 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 2834 { 2835 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; 2836 } 2837 2838 /** 2839 * rdma_cap_eth_ah - Check if the port of device has the capability 2840 * Ethernet Address Handle. 2841 * @device: Device to check 2842 * @port_num: Port number to check 2843 * 2844 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 2845 * to fabricate GIDs over Ethernet/IP specific addresses native to the 2846 * port. Normally, packet headers are generated by the sending host 2847 * adapter, but when sending connectionless datagrams, we must manually 2848 * inject the proper headers for the fabric we are communicating over. 2849 * 2850 * Return: true if we are running as a RoCE port and must force the 2851 * addition of a Global Route Header built from our Ethernet Address 2852 * Handle into our header list for connectionless packets. 2853 */ 2854 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2855 { 2856 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2857 } 2858 2859 /** 2860 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2861 * 2862 * @device: Device 2863 * @port_num: Port number 2864 * 2865 * This MAD size includes the MAD headers and MAD payload. No other headers 2866 * are included. 2867 * 2868 * Return the max MAD size required by the Port. Will return 0 if the port 2869 * does not support MADs 2870 */ 2871 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 2872 { 2873 return device->port_immutable[port_num].max_mad_size; 2874 } 2875 2876 /** 2877 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 2878 * @device: Device to check 2879 * @port_num: Port number to check 2880 * 2881 * RoCE GID table mechanism manages the various GIDs for a device. 2882 * 2883 * NOTE: if allocating the port's GID table has failed, this call will still 2884 * return true, but any RoCE GID table API will fail. 2885 * 2886 * Return: true if the port uses RoCE GID table mechanism in order to manage 2887 * its GIDs. 2888 */ 2889 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 2890 u8 port_num) 2891 { 2892 return rdma_protocol_roce(device, port_num) && 2893 device->add_gid && device->del_gid; 2894 } 2895 2896 /* 2897 * Check if the device supports READ W/ INVALIDATE. 2898 */ 2899 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 2900 { 2901 /* 2902 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 2903 * has support for it yet. 2904 */ 2905 return rdma_protocol_iwarp(dev, port_num); 2906 } 2907 2908 int ib_query_gid(struct ib_device *device, 2909 u8 port_num, int index, union ib_gid *gid, 2910 struct ib_gid_attr *attr); 2911 2912 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2913 int state); 2914 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2915 struct ifla_vf_info *info); 2916 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2917 struct ifla_vf_stats *stats); 2918 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2919 int type); 2920 2921 int ib_query_pkey(struct ib_device *device, 2922 u8 port_num, u16 index, u16 *pkey); 2923 2924 int ib_modify_device(struct ib_device *device, 2925 int device_modify_mask, 2926 struct ib_device_modify *device_modify); 2927 2928 int ib_modify_port(struct ib_device *device, 2929 u8 port_num, int port_modify_mask, 2930 struct ib_port_modify *port_modify); 2931 2932 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2933 enum ib_gid_type gid_type, if_t ndev, 2934 u8 *port_num, u16 *index); 2935 2936 int ib_find_pkey(struct ib_device *device, 2937 u8 port_num, u16 pkey, u16 *index); 2938 2939 enum ib_pd_flags { 2940 /* 2941 * Create a memory registration for all memory in the system and place 2942 * the rkey for it into pd->unsafe_global_rkey. This can be used by 2943 * ULPs to avoid the overhead of dynamic MRs. 2944 * 2945 * This flag is generally considered unsafe and must only be used in 2946 * extremly trusted environments. Every use of it will log a warning 2947 * in the kernel log. 2948 */ 2949 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 2950 }; 2951 2952 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 2953 const char *caller); 2954 #define ib_alloc_pd(device, flags) \ 2955 __ib_alloc_pd((device), (flags), __func__) 2956 2957 /** 2958 * ib_dealloc_pd_user - Deallocate kernel/user PD 2959 * @pd: The protection domain 2960 * @udata: Valid user data or NULL for kernel objects 2961 */ 2962 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); 2963 2964 /** 2965 * ib_dealloc_pd - Deallocate kernel PD 2966 * @pd: The protection domain 2967 * 2968 * NOTE: for user PD use ib_dealloc_pd_user with valid udata! 2969 */ 2970 static inline void ib_dealloc_pd(struct ib_pd *pd) 2971 { 2972 ib_dealloc_pd_user(pd, NULL); 2973 } 2974 2975 enum rdma_create_ah_flags { 2976 /* In a sleepable context */ 2977 RDMA_CREATE_AH_SLEEPABLE = BIT(0), 2978 }; 2979 2980 /** 2981 * ib_create_ah - Creates an address handle for the given address vector. 2982 * @pd: The protection domain associated with the address handle. 2983 * @ah_attr: The attributes of the address vector. 2984 * @flags: Create address handle flags (see enum rdma_create_ah_flags). 2985 * 2986 * The address handle is used to reference a local or global destination 2987 * in all UD QP post sends. 2988 */ 2989 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, 2990 u32 flags); 2991 2992 /** 2993 * ib_create_user_ah - Creates an address handle for the given address vector. 2994 * It resolves destination mac address for ah attribute of RoCE type. 2995 * @pd: The protection domain associated with the address handle. 2996 * @ah_attr: The attributes of the address vector. 2997 * @udata: pointer to user's input output buffer information need by 2998 * provider driver. 2999 * 3000 * It returns 0 on success and returns appropriate error code on error. 3001 * The address handle is used to reference a local or global destination 3002 * in all UD QP post sends. 3003 */ 3004 struct ib_ah *ib_create_user_ah(struct ib_pd *pd, 3005 struct ib_ah_attr *ah_attr, 3006 struct ib_udata *udata); 3007 3008 /** 3009 * ib_init_ah_from_wc - Initializes address handle attributes from a 3010 * work completion. 3011 * @device: Device on which the received message arrived. 3012 * @port_num: Port on which the received message arrived. 3013 * @wc: Work completion associated with the received message. 3014 * @grh: References the received global route header. This parameter is 3015 * ignored unless the work completion indicates that the GRH is valid. 3016 * @ah_attr: Returned attributes that can be used when creating an address 3017 * handle for replying to the message. 3018 */ 3019 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 3020 const struct ib_wc *wc, const struct ib_grh *grh, 3021 struct ib_ah_attr *ah_attr); 3022 3023 /** 3024 * ib_create_ah_from_wc - Creates an address handle associated with the 3025 * sender of the specified work completion. 3026 * @pd: The protection domain associated with the address handle. 3027 * @wc: Work completion information associated with a received message. 3028 * @grh: References the received global route header. This parameter is 3029 * ignored unless the work completion indicates that the GRH is valid. 3030 * @port_num: The outbound port number to associate with the address. 3031 * 3032 * The address handle is used to reference a local or global destination 3033 * in all UD QP post sends. 3034 */ 3035 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 3036 const struct ib_grh *grh, u8 port_num); 3037 3038 /** 3039 * ib_modify_ah - Modifies the address vector associated with an address 3040 * handle. 3041 * @ah: The address handle to modify. 3042 * @ah_attr: The new address vector attributes to associate with the 3043 * address handle. 3044 */ 3045 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 3046 3047 /** 3048 * ib_query_ah - Queries the address vector associated with an address 3049 * handle. 3050 * @ah: The address handle to query. 3051 * @ah_attr: The address vector attributes associated with the address 3052 * handle. 3053 */ 3054 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 3055 3056 enum rdma_destroy_ah_flags { 3057 /* In a sleepable context */ 3058 RDMA_DESTROY_AH_SLEEPABLE = BIT(0), 3059 }; 3060 3061 /** 3062 * ib_destroy_ah_user - Destroys an address handle. 3063 * @ah: The address handle to destroy. 3064 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). 3065 * @udata: Valid user data or NULL for kernel objects 3066 */ 3067 int ib_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata); 3068 3069 /** 3070 * rdma_destroy_ah - Destroys an kernel address handle. 3071 * @ah: The address handle to destroy. 3072 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). 3073 * 3074 * NOTE: for user ah use ib_destroy_ah_user with valid udata! 3075 */ 3076 static inline int ib_destroy_ah(struct ib_ah *ah, u32 flags) 3077 { 3078 return ib_destroy_ah_user(ah, flags, NULL); 3079 } 3080 3081 /** 3082 * ib_create_srq - Creates a SRQ associated with the specified protection 3083 * domain. 3084 * @pd: The protection domain associated with the SRQ. 3085 * @srq_init_attr: A list of initial attributes required to create the 3086 * SRQ. If SRQ creation succeeds, then the attributes are updated to 3087 * the actual capabilities of the created SRQ. 3088 * 3089 * srq_attr->max_wr and srq_attr->max_sge are read the determine the 3090 * requested size of the SRQ, and set to the actual values allocated 3091 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 3092 * will always be at least as large as the requested values. 3093 */ 3094 struct ib_srq *ib_create_srq(struct ib_pd *pd, 3095 struct ib_srq_init_attr *srq_init_attr); 3096 3097 /** 3098 * ib_modify_srq - Modifies the attributes for the specified SRQ. 3099 * @srq: The SRQ to modify. 3100 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 3101 * the current values of selected SRQ attributes are returned. 3102 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 3103 * are being modified. 3104 * 3105 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 3106 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 3107 * the number of receives queued drops below the limit. 3108 */ 3109 int ib_modify_srq(struct ib_srq *srq, 3110 struct ib_srq_attr *srq_attr, 3111 enum ib_srq_attr_mask srq_attr_mask); 3112 3113 /** 3114 * ib_query_srq - Returns the attribute list and current values for the 3115 * specified SRQ. 3116 * @srq: The SRQ to query. 3117 * @srq_attr: The attributes of the specified SRQ. 3118 */ 3119 int ib_query_srq(struct ib_srq *srq, 3120 struct ib_srq_attr *srq_attr); 3121 3122 /** 3123 * ib_destroy_srq_user - Destroys the specified SRQ. 3124 * @srq: The SRQ to destroy. 3125 * @udata: Valid user data or NULL for kernel objects 3126 */ 3127 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata); 3128 3129 /** 3130 * ib_destroy_srq - Destroys the specified kernel SRQ. 3131 * @srq: The SRQ to destroy. 3132 * 3133 * NOTE: for user srq use ib_destroy_srq_user with valid udata! 3134 */ 3135 static inline int ib_destroy_srq(struct ib_srq *srq) 3136 { 3137 return ib_destroy_srq_user(srq, NULL); 3138 } 3139 3140 /** 3141 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 3142 * @srq: The SRQ to post the work request on. 3143 * @recv_wr: A list of work requests to post on the receive queue. 3144 * @bad_recv_wr: On an immediate failure, this parameter will reference 3145 * the work request that failed to be posted on the QP. 3146 */ 3147 static inline int ib_post_srq_recv(struct ib_srq *srq, 3148 const struct ib_recv_wr *recv_wr, 3149 const struct ib_recv_wr **bad_recv_wr) 3150 { 3151 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 3152 } 3153 3154 /** 3155 * ib_create_qp - Creates a QP associated with the specified protection 3156 * domain. 3157 * @pd: The protection domain associated with the QP. 3158 * @qp_init_attr: A list of initial attributes required to create the 3159 * QP. If QP creation succeeds, then the attributes are updated to 3160 * the actual capabilities of the created QP. 3161 */ 3162 struct ib_qp *ib_create_qp(struct ib_pd *pd, 3163 struct ib_qp_init_attr *qp_init_attr); 3164 3165 /** 3166 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. 3167 * @qp: The QP to modify. 3168 * @attr: On input, specifies the QP attributes to modify. On output, 3169 * the current values of selected QP attributes are returned. 3170 * @attr_mask: A bit-mask used to specify which attributes of the QP 3171 * are being modified. 3172 * @udata: pointer to user's input output buffer information 3173 * are being modified. 3174 * It returns 0 on success and returns appropriate error code on error. 3175 */ 3176 int ib_modify_qp_with_udata(struct ib_qp *qp, 3177 struct ib_qp_attr *attr, 3178 int attr_mask, 3179 struct ib_udata *udata); 3180 3181 /** 3182 * ib_modify_qp - Modifies the attributes for the specified QP and then 3183 * transitions the QP to the given state. 3184 * @qp: The QP to modify. 3185 * @qp_attr: On input, specifies the QP attributes to modify. On output, 3186 * the current values of selected QP attributes are returned. 3187 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 3188 * are being modified. 3189 */ 3190 int ib_modify_qp(struct ib_qp *qp, 3191 struct ib_qp_attr *qp_attr, 3192 int qp_attr_mask); 3193 3194 /** 3195 * ib_query_qp - Returns the attribute list and current values for the 3196 * specified QP. 3197 * @qp: The QP to query. 3198 * @qp_attr: The attributes of the specified QP. 3199 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 3200 * @qp_init_attr: Additional attributes of the selected QP. 3201 * 3202 * The qp_attr_mask may be used to limit the query to gathering only the 3203 * selected attributes. 3204 */ 3205 int ib_query_qp(struct ib_qp *qp, 3206 struct ib_qp_attr *qp_attr, 3207 int qp_attr_mask, 3208 struct ib_qp_init_attr *qp_init_attr); 3209 3210 /** 3211 * ib_destroy_qp - Destroys the specified QP. 3212 * @qp: The QP to destroy. 3213 * @udata: Valid udata or NULL for kernel objects 3214 */ 3215 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata); 3216 3217 /** 3218 * ib_destroy_qp - Destroys the specified kernel QP. 3219 * @qp: The QP to destroy. 3220 * 3221 * NOTE: for user qp use ib_destroy_qp_user with valid udata! 3222 */ 3223 static inline int ib_destroy_qp(struct ib_qp *qp) 3224 { 3225 return ib_destroy_qp_user(qp, NULL); 3226 } 3227 3228 /** 3229 * ib_open_qp - Obtain a reference to an existing sharable QP. 3230 * @xrcd - XRC domain 3231 * @qp_open_attr: Attributes identifying the QP to open. 3232 * 3233 * Returns a reference to a sharable QP. 3234 */ 3235 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 3236 struct ib_qp_open_attr *qp_open_attr); 3237 3238 /** 3239 * ib_close_qp - Release an external reference to a QP. 3240 * @qp: The QP handle to release 3241 * 3242 * The opened QP handle is released by the caller. The underlying 3243 * shared QP is not destroyed until all internal references are released. 3244 */ 3245 int ib_close_qp(struct ib_qp *qp); 3246 3247 /** 3248 * ib_post_send - Posts a list of work requests to the send queue of 3249 * the specified QP. 3250 * @qp: The QP to post the work request on. 3251 * @send_wr: A list of work requests to post on the send queue. 3252 * @bad_send_wr: On an immediate failure, this parameter will reference 3253 * the work request that failed to be posted on the QP. 3254 * 3255 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 3256 * error is returned, the QP state shall not be affected, 3257 * ib_post_send() will return an immediate error after queueing any 3258 * earlier work requests in the list. 3259 */ 3260 static inline int ib_post_send(struct ib_qp *qp, 3261 const struct ib_send_wr *send_wr, 3262 const struct ib_send_wr **bad_send_wr) 3263 { 3264 return qp->device->post_send(qp, send_wr, bad_send_wr); 3265 } 3266 3267 /** 3268 * ib_post_recv - Posts a list of work requests to the receive queue of 3269 * the specified QP. 3270 * @qp: The QP to post the work request on. 3271 * @recv_wr: A list of work requests to post on the receive queue. 3272 * @bad_recv_wr: On an immediate failure, this parameter will reference 3273 * the work request that failed to be posted on the QP. 3274 */ 3275 static inline int ib_post_recv(struct ib_qp *qp, 3276 const struct ib_recv_wr *recv_wr, 3277 const struct ib_recv_wr **bad_recv_wr) 3278 { 3279 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 3280 } 3281 3282 struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, 3283 int nr_cqe, int comp_vector, 3284 enum ib_poll_context poll_ctx, 3285 const char *caller, struct ib_udata *udata); 3286 3287 /** 3288 * ib_alloc_cq_user: Allocate kernel/user CQ 3289 * @dev: The IB device 3290 * @private: Private data attached to the CQE 3291 * @nr_cqe: Number of CQEs in the CQ 3292 * @comp_vector: Completion vector used for the IRQs 3293 * @poll_ctx: Context used for polling the CQ 3294 * @udata: Valid user data or NULL for kernel objects 3295 */ 3296 static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev, 3297 void *private, int nr_cqe, 3298 int comp_vector, 3299 enum ib_poll_context poll_ctx, 3300 struct ib_udata *udata) 3301 { 3302 return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, 3303 "ibcore", udata); 3304 } 3305 3306 /** 3307 * ib_alloc_cq: Allocate kernel CQ 3308 * @dev: The IB device 3309 * @private: Private data attached to the CQE 3310 * @nr_cqe: Number of CQEs in the CQ 3311 * @comp_vector: Completion vector used for the IRQs 3312 * @poll_ctx: Context used for polling the CQ 3313 * 3314 * NOTE: for user cq use ib_alloc_cq_user with valid udata! 3315 */ 3316 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 3317 int nr_cqe, int comp_vector, 3318 enum ib_poll_context poll_ctx) 3319 { 3320 return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, 3321 NULL); 3322 } 3323 3324 /** 3325 * ib_free_cq_user - Free kernel/user CQ 3326 * @cq: The CQ to free 3327 * @udata: Valid user data or NULL for kernel objects 3328 */ 3329 void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata); 3330 3331 /** 3332 * ib_free_cq - Free kernel CQ 3333 * @cq: The CQ to free 3334 * 3335 * NOTE: for user cq use ib_free_cq_user with valid udata! 3336 */ 3337 static inline void ib_free_cq(struct ib_cq *cq) 3338 { 3339 ib_free_cq_user(cq, NULL); 3340 } 3341 3342 /** 3343 * ib_create_cq - Creates a CQ on the specified device. 3344 * @device: The device on which to create the CQ. 3345 * @comp_handler: A user-specified callback that is invoked when a 3346 * completion event occurs on the CQ. 3347 * @event_handler: A user-specified callback that is invoked when an 3348 * asynchronous event not associated with a completion occurs on the CQ. 3349 * @cq_context: Context associated with the CQ returned to the user via 3350 * the associated completion and event handlers. 3351 * @cq_attr: The attributes the CQ should be created upon. 3352 * 3353 * Users can examine the cq structure to determine the actual CQ size. 3354 */ 3355 struct ib_cq *__ib_create_cq(struct ib_device *device, 3356 ib_comp_handler comp_handler, 3357 void (*event_handler)(struct ib_event *, void *), 3358 void *cq_context, 3359 const struct ib_cq_init_attr *cq_attr, 3360 const char *caller); 3361 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \ 3362 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), "ibcore") 3363 3364 /** 3365 * ib_resize_cq - Modifies the capacity of the CQ. 3366 * @cq: The CQ to resize. 3367 * @cqe: The minimum size of the CQ. 3368 * 3369 * Users can examine the cq structure to determine the actual CQ size. 3370 */ 3371 int ib_resize_cq(struct ib_cq *cq, int cqe); 3372 3373 /** 3374 * ib_modify_cq - Modifies moderation params of the CQ 3375 * @cq: The CQ to modify. 3376 * @cq_count: number of CQEs that will trigger an event 3377 * @cq_period: max period of time in usec before triggering an event 3378 * 3379 */ 3380 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 3381 3382 /** 3383 * ib_destroy_cq_user - Destroys the specified CQ. 3384 * @cq: The CQ to destroy. 3385 * @udata: Valid user data or NULL for kernel objects 3386 */ 3387 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); 3388 3389 /** 3390 * ib_destroy_cq - Destroys the specified kernel CQ. 3391 * @cq: The CQ to destroy. 3392 * 3393 * NOTE: for user cq use ib_destroy_cq_user with valid udata! 3394 */ 3395 static inline void ib_destroy_cq(struct ib_cq *cq) 3396 { 3397 ib_destroy_cq_user(cq, NULL); 3398 } 3399 3400 /** 3401 * ib_poll_cq - poll a CQ for completion(s) 3402 * @cq:the CQ being polled 3403 * @num_entries:maximum number of completions to return 3404 * @wc:array of at least @num_entries &struct ib_wc where completions 3405 * will be returned 3406 * 3407 * Poll a CQ for (possibly multiple) completions. If the return value 3408 * is < 0, an error occurred. If the return value is >= 0, it is the 3409 * number of completions returned. If the return value is 3410 * non-negative and < num_entries, then the CQ was emptied. 3411 */ 3412 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 3413 struct ib_wc *wc) 3414 { 3415 return cq->device->poll_cq(cq, num_entries, wc); 3416 } 3417 3418 /** 3419 * ib_peek_cq - Returns the number of unreaped completions currently 3420 * on the specified CQ. 3421 * @cq: The CQ to peek. 3422 * @wc_cnt: A minimum number of unreaped completions to check for. 3423 * 3424 * If the number of unreaped completions is greater than or equal to wc_cnt, 3425 * this function returns wc_cnt, otherwise, it returns the actual number of 3426 * unreaped completions. 3427 */ 3428 int ib_peek_cq(struct ib_cq *cq, int wc_cnt); 3429 3430 /** 3431 * ib_req_notify_cq - Request completion notification on a CQ. 3432 * @cq: The CQ to generate an event for. 3433 * @flags: 3434 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 3435 * to request an event on the next solicited event or next work 3436 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 3437 * may also be |ed in to request a hint about missed events, as 3438 * described below. 3439 * 3440 * Return Value: 3441 * < 0 means an error occurred while requesting notification 3442 * == 0 means notification was requested successfully, and if 3443 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 3444 * were missed and it is safe to wait for another event. In 3445 * this case is it guaranteed that any work completions added 3446 * to the CQ since the last CQ poll will trigger a completion 3447 * notification event. 3448 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 3449 * in. It means that the consumer must poll the CQ again to 3450 * make sure it is empty to avoid missing an event because of a 3451 * race between requesting notification and an entry being 3452 * added to the CQ. This return value means it is possible 3453 * (but not guaranteed) that a work completion has been added 3454 * to the CQ since the last poll without triggering a 3455 * completion notification event. 3456 */ 3457 static inline int ib_req_notify_cq(struct ib_cq *cq, 3458 enum ib_cq_notify_flags flags) 3459 { 3460 return cq->device->req_notify_cq(cq, flags); 3461 } 3462 3463 /** 3464 * ib_req_ncomp_notif - Request completion notification when there are 3465 * at least the specified number of unreaped completions on the CQ. 3466 * @cq: The CQ to generate an event for. 3467 * @wc_cnt: The number of unreaped completions that should be on the 3468 * CQ before an event is generated. 3469 */ 3470 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3471 { 3472 return cq->device->req_ncomp_notif ? 3473 cq->device->req_ncomp_notif(cq, wc_cnt) : 3474 -ENOSYS; 3475 } 3476 3477 /** 3478 * ib_dma_mapping_error - check a DMA addr for error 3479 * @dev: The device for which the dma_addr was created 3480 * @dma_addr: The DMA address to check 3481 */ 3482 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3483 { 3484 if (dev->dma_ops) 3485 return dev->dma_ops->mapping_error(dev, dma_addr); 3486 return dma_mapping_error(dev->dma_device, dma_addr); 3487 } 3488 3489 /** 3490 * ib_dma_map_single - Map a kernel virtual address to DMA address 3491 * @dev: The device for which the dma_addr is to be created 3492 * @cpu_addr: The kernel virtual address 3493 * @size: The size of the region in bytes 3494 * @direction: The direction of the DMA 3495 */ 3496 static inline u64 ib_dma_map_single(struct ib_device *dev, 3497 void *cpu_addr, size_t size, 3498 enum dma_data_direction direction) 3499 { 3500 if (dev->dma_ops) 3501 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 3502 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3503 } 3504 3505 /** 3506 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 3507 * @dev: The device for which the DMA address was created 3508 * @addr: The DMA address 3509 * @size: The size of the region in bytes 3510 * @direction: The direction of the DMA 3511 */ 3512 static inline void ib_dma_unmap_single(struct ib_device *dev, 3513 u64 addr, size_t size, 3514 enum dma_data_direction direction) 3515 { 3516 if (dev->dma_ops) 3517 dev->dma_ops->unmap_single(dev, addr, size, direction); 3518 else 3519 dma_unmap_single(dev->dma_device, addr, size, direction); 3520 } 3521 3522 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 3523 void *cpu_addr, size_t size, 3524 enum dma_data_direction direction, 3525 struct dma_attrs *dma_attrs) 3526 { 3527 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 3528 direction, dma_attrs); 3529 } 3530 3531 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 3532 u64 addr, size_t size, 3533 enum dma_data_direction direction, 3534 struct dma_attrs *dma_attrs) 3535 { 3536 return dma_unmap_single_attrs(dev->dma_device, addr, size, 3537 direction, dma_attrs); 3538 } 3539 3540 /** 3541 * ib_dma_map_page - Map a physical page to DMA address 3542 * @dev: The device for which the dma_addr is to be created 3543 * @page: The page to be mapped 3544 * @offset: The offset within the page 3545 * @size: The size of the region in bytes 3546 * @direction: The direction of the DMA 3547 */ 3548 static inline u64 ib_dma_map_page(struct ib_device *dev, 3549 struct page *page, 3550 unsigned long offset, 3551 size_t size, 3552 enum dma_data_direction direction) 3553 { 3554 if (dev->dma_ops) 3555 return dev->dma_ops->map_page(dev, page, offset, size, direction); 3556 return dma_map_page(dev->dma_device, page, offset, size, direction); 3557 } 3558 3559 /** 3560 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 3561 * @dev: The device for which the DMA address was created 3562 * @addr: The DMA address 3563 * @size: The size of the region in bytes 3564 * @direction: The direction of the DMA 3565 */ 3566 static inline void ib_dma_unmap_page(struct ib_device *dev, 3567 u64 addr, size_t size, 3568 enum dma_data_direction direction) 3569 { 3570 if (dev->dma_ops) 3571 dev->dma_ops->unmap_page(dev, addr, size, direction); 3572 else 3573 dma_unmap_page(dev->dma_device, addr, size, direction); 3574 } 3575 3576 /** 3577 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 3578 * @dev: The device for which the DMA addresses are to be created 3579 * @sg: The array of scatter/gather entries 3580 * @nents: The number of scatter/gather entries 3581 * @direction: The direction of the DMA 3582 */ 3583 static inline int ib_dma_map_sg(struct ib_device *dev, 3584 struct scatterlist *sg, int nents, 3585 enum dma_data_direction direction) 3586 { 3587 if (dev->dma_ops) 3588 return dev->dma_ops->map_sg(dev, sg, nents, direction); 3589 return dma_map_sg(dev->dma_device, sg, nents, direction); 3590 } 3591 3592 /** 3593 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 3594 * @dev: The device for which the DMA addresses were created 3595 * @sg: The array of scatter/gather entries 3596 * @nents: The number of scatter/gather entries 3597 * @direction: The direction of the DMA 3598 */ 3599 static inline void ib_dma_unmap_sg(struct ib_device *dev, 3600 struct scatterlist *sg, int nents, 3601 enum dma_data_direction direction) 3602 { 3603 if (dev->dma_ops) 3604 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 3605 else 3606 dma_unmap_sg(dev->dma_device, sg, nents, direction); 3607 } 3608 3609 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3610 struct scatterlist *sg, int nents, 3611 enum dma_data_direction direction, 3612 struct dma_attrs *dma_attrs) 3613 { 3614 if (dev->dma_ops) 3615 return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, 3616 dma_attrs); 3617 else 3618 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3619 dma_attrs); 3620 } 3621 3622 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3623 struct scatterlist *sg, int nents, 3624 enum dma_data_direction direction, 3625 struct dma_attrs *dma_attrs) 3626 { 3627 if (dev->dma_ops) 3628 return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, 3629 dma_attrs); 3630 else 3631 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, 3632 dma_attrs); 3633 } 3634 /** 3635 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3636 * @dev: The device for which the DMA addresses were created 3637 * @sg: The scatter/gather entry 3638 * 3639 * Note: this function is obsolete. To do: change all occurrences of 3640 * ib_sg_dma_address() into sg_dma_address(). 3641 */ 3642 static inline u64 ib_sg_dma_address(struct ib_device *dev, 3643 struct scatterlist *sg) 3644 { 3645 return sg_dma_address(sg); 3646 } 3647 3648 /** 3649 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 3650 * @dev: The device for which the DMA addresses were created 3651 * @sg: The scatter/gather entry 3652 * 3653 * Note: this function is obsolete. To do: change all occurrences of 3654 * ib_sg_dma_len() into sg_dma_len(). 3655 */ 3656 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 3657 struct scatterlist *sg) 3658 { 3659 return sg_dma_len(sg); 3660 } 3661 3662 /** 3663 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 3664 * @dev: The device for which the DMA address was created 3665 * @addr: The DMA address 3666 * @size: The size of the region in bytes 3667 * @dir: The direction of the DMA 3668 */ 3669 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 3670 u64 addr, 3671 size_t size, 3672 enum dma_data_direction dir) 3673 { 3674 if (dev->dma_ops) 3675 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 3676 else 3677 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3678 } 3679 3680 /** 3681 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 3682 * @dev: The device for which the DMA address was created 3683 * @addr: The DMA address 3684 * @size: The size of the region in bytes 3685 * @dir: The direction of the DMA 3686 */ 3687 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 3688 u64 addr, 3689 size_t size, 3690 enum dma_data_direction dir) 3691 { 3692 if (dev->dma_ops) 3693 dev->dma_ops->sync_single_for_device(dev, addr, size, dir); 3694 else 3695 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3696 } 3697 3698 /** 3699 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 3700 * @dev: The device for which the DMA address is requested 3701 * @size: The size of the region to allocate in bytes 3702 * @dma_handle: A pointer for returning the DMA address of the region 3703 * @flag: memory allocator flags 3704 */ 3705 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3706 size_t size, 3707 u64 *dma_handle, 3708 gfp_t flag) 3709 { 3710 if (dev->dma_ops) 3711 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); 3712 else { 3713 dma_addr_t handle; 3714 void *ret; 3715 3716 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); 3717 *dma_handle = handle; 3718 return ret; 3719 } 3720 } 3721 3722 /** 3723 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 3724 * @dev: The device for which the DMA addresses were allocated 3725 * @size: The size of the region 3726 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 3727 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 3728 */ 3729 static inline void ib_dma_free_coherent(struct ib_device *dev, 3730 size_t size, void *cpu_addr, 3731 u64 dma_handle) 3732 { 3733 if (dev->dma_ops) 3734 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 3735 else 3736 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 3737 } 3738 3739 /** 3740 * ib_dereg_mr - Deregisters a memory region and removes it from the 3741 * HCA translation table. 3742 * @mr: The memory region to deregister. 3743 * 3744 * This function can fail, if the memory region has memory windows bound to it. 3745 */ 3746 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata); 3747 3748 /** 3749 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the 3750 * HCA translation table. 3751 * @mr: The memory region to deregister. 3752 * 3753 * This function can fail, if the memory region has memory windows bound to it. 3754 * 3755 * NOTE: for user mr use ib_dereg_mr_user with valid udata! 3756 */ 3757 static inline int ib_dereg_mr(struct ib_mr *mr) 3758 { 3759 return ib_dereg_mr_user(mr, NULL); 3760 } 3761 3762 struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type, 3763 u32 max_num_sg, struct ib_udata *udata); 3764 3765 static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 3766 enum ib_mr_type mr_type, u32 max_num_sg) 3767 { 3768 return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL); 3769 } 3770 3771 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, 3772 u32 max_num_data_sg, 3773 u32 max_num_meta_sg); 3774 3775 /** 3776 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 3777 * R_Key and L_Key. 3778 * @mr - struct ib_mr pointer to be updated. 3779 * @newkey - new key to be used. 3780 */ 3781 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 3782 { 3783 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 3784 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 3785 } 3786 3787 /** 3788 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 3789 * for calculating a new rkey for type 2 memory windows. 3790 * @rkey - the rkey to increment. 3791 */ 3792 static inline u32 ib_inc_rkey(u32 rkey) 3793 { 3794 const u32 mask = 0x000000ff; 3795 return ((rkey + 1) & mask) | (rkey & ~mask); 3796 } 3797 3798 /** 3799 * ib_alloc_fmr - Allocates a unmapped fast memory region. 3800 * @pd: The protection domain associated with the unmapped region. 3801 * @mr_access_flags: Specifies the memory access rights. 3802 * @fmr_attr: Attributes of the unmapped region. 3803 * 3804 * A fast memory region must be mapped before it can be used as part of 3805 * a work request. 3806 */ 3807 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 3808 int mr_access_flags, 3809 struct ib_fmr_attr *fmr_attr); 3810 3811 /** 3812 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 3813 * @fmr: The fast memory region to associate with the pages. 3814 * @page_list: An array of physical pages to map to the fast memory region. 3815 * @list_len: The number of pages in page_list. 3816 * @iova: The I/O virtual address to use with the mapped region. 3817 */ 3818 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 3819 u64 *page_list, int list_len, 3820 u64 iova) 3821 { 3822 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3823 } 3824 3825 /** 3826 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 3827 * @fmr_list: A linked list of fast memory regions to unmap. 3828 */ 3829 int ib_unmap_fmr(struct list_head *fmr_list); 3830 3831 /** 3832 * ib_dealloc_fmr - Deallocates a fast memory region. 3833 * @fmr: The fast memory region to deallocate. 3834 */ 3835 int ib_dealloc_fmr(struct ib_fmr *fmr); 3836 3837 /** 3838 * ib_attach_mcast - Attaches the specified QP to a multicast group. 3839 * @qp: QP to attach to the multicast group. The QP must be type 3840 * IB_QPT_UD. 3841 * @gid: Multicast group GID. 3842 * @lid: Multicast group LID in host byte order. 3843 * 3844 * In order to send and receive multicast packets, subnet 3845 * administration must have created the multicast group and configured 3846 * the fabric appropriately. The port associated with the specified 3847 * QP must also be a member of the multicast group. 3848 */ 3849 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3850 3851 /** 3852 * ib_detach_mcast - Detaches the specified QP from a multicast group. 3853 * @qp: QP to detach from the multicast group. 3854 * @gid: Multicast group GID. 3855 * @lid: Multicast group LID in host byte order. 3856 */ 3857 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3858 3859 /** 3860 * ib_alloc_xrcd - Allocates an XRC domain. 3861 * @device: The device on which to allocate the XRC domain. 3862 * @caller: Module name for kernel consumers 3863 */ 3864 struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller); 3865 #define ib_alloc_xrcd(device) \ 3866 __ib_alloc_xrcd((device), "ibcore") 3867 3868 /** 3869 * ib_dealloc_xrcd - Deallocates an XRC domain. 3870 * @xrcd: The XRC domain to deallocate. 3871 * @udata: Valid user data or NULL for kernel object 3872 */ 3873 int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata); 3874 3875 static inline int ib_check_mr_access(int flags) 3876 { 3877 /* 3878 * Local write permission is required if remote write or 3879 * remote atomic permission is also requested. 3880 */ 3881 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 3882 !(flags & IB_ACCESS_LOCAL_WRITE)) 3883 return -EINVAL; 3884 3885 if (flags & ~IB_ACCESS_SUPPORTED) 3886 return -EINVAL; 3887 3888 return 0; 3889 } 3890 3891 static inline bool ib_access_writable(int access_flags) 3892 { 3893 /* 3894 * We have writable memory backing the MR if any of the following 3895 * access flags are set. "Local write" and "remote write" obviously 3896 * require write access. "Remote atomic" can do things like fetch and 3897 * add, which will modify memory, and "MW bind" can change permissions 3898 * by binding a window. 3899 */ 3900 return access_flags & 3901 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | 3902 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND); 3903 } 3904 3905 /** 3906 * ib_check_mr_status: lightweight check of MR status. 3907 * This routine may provide status checks on a selected 3908 * ib_mr. first use is for signature status check. 3909 * 3910 * @mr: A memory region. 3911 * @check_mask: Bitmask of which checks to perform from 3912 * ib_mr_status_check enumeration. 3913 * @mr_status: The container of relevant status checks. 3914 * failed checks will be indicated in the status bitmask 3915 * and the relevant info shall be in the error item. 3916 */ 3917 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3918 struct ib_mr_status *mr_status); 3919 3920 if_t ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3921 u16 pkey, const union ib_gid *gid, 3922 const struct sockaddr *addr); 3923 struct ib_wq *ib_create_wq(struct ib_pd *pd, 3924 struct ib_wq_init_attr *init_attr); 3925 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); 3926 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 3927 u32 wq_attr_mask); 3928 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 3929 struct ib_rwq_ind_table_init_attr* 3930 wq_ind_table_init_attr); 3931 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 3932 3933 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3934 unsigned int *sg_offset, unsigned int page_size); 3935 3936 static inline int 3937 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3938 unsigned int *sg_offset, unsigned int page_size) 3939 { 3940 int n; 3941 3942 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 3943 mr->iova = 0; 3944 3945 return n; 3946 } 3947 3948 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 3949 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 3950 3951 void ib_drain_rq(struct ib_qp *qp); 3952 void ib_drain_sq(struct ib_qp *qp); 3953 void ib_drain_qp(struct ib_qp *qp); 3954 3955 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile); 3956 3957 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs); 3958 3959 int ib_resolve_eth_dmac(struct ib_device *device, 3960 struct ib_ah_attr *ah_attr); 3961 #endif /* IB_VERBS_H */ 3962