1 /* 2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 8 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 9 * 10 * This software is available to you under a choice of one of two 11 * licenses. You may choose to be licensed under the terms of the GNU 12 * General Public License (GPL) Version 2, available from the file 13 * COPYING in the main directory of this source tree, or the 14 * OpenIB.org BSD license below: 15 * 16 * Redistribution and use in source and binary forms, with or 17 * without modification, are permitted provided that the following 18 * conditions are met: 19 * 20 * - Redistributions of source code must retain the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer. 23 * 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials 27 * provided with the distribution. 28 * 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 36 * SOFTWARE. 37 */ 38 39 #if !defined(IB_VERBS_H) 40 #define IB_VERBS_H 41 42 #include <linux/types.h> 43 #include <linux/device.h> 44 #include <linux/mm.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/kref.h> 47 #include <linux/list.h> 48 #include <linux/rwsem.h> 49 #include <linux/scatterlist.h> 50 #include <linux/workqueue.h> 51 #include <linux/socket.h> 52 #include <linux/irq_poll.h> 53 #include <uapi/linux/if_ether.h> 54 #include <net/ipv6.h> 55 #include <net/ip.h> 56 #include <linux/string.h> 57 #include <linux/slab.h> 58 #include <linux/netdevice.h> 59 60 #include <linux/if_link.h> 61 #include <linux/atomic.h> 62 #include <linux/mmu_notifier.h> 63 #include <linux/uaccess.h> 64 #include <linux/cgroup_rdma.h> 65 66 extern struct workqueue_struct *ib_wq; 67 extern struct workqueue_struct *ib_comp_wq; 68 69 union ib_gid { 70 u8 raw[16]; 71 struct { 72 __be64 subnet_prefix; 73 __be64 interface_id; 74 } global; 75 }; 76 77 extern union ib_gid zgid; 78 79 enum ib_gid_type { 80 /* If link layer is Ethernet, this is RoCE V1 */ 81 IB_GID_TYPE_IB = 0, 82 IB_GID_TYPE_ROCE = 0, 83 IB_GID_TYPE_ROCE_UDP_ENCAP = 1, 84 IB_GID_TYPE_SIZE 85 }; 86 87 #define ROCE_V2_UDP_DPORT 4791 88 struct ib_gid_attr { 89 enum ib_gid_type gid_type; 90 struct net_device *ndev; 91 }; 92 93 enum rdma_node_type { 94 /* IB values map to NodeInfo:NodeType. */ 95 RDMA_NODE_IB_CA = 1, 96 RDMA_NODE_IB_SWITCH, 97 RDMA_NODE_IB_ROUTER, 98 RDMA_NODE_RNIC, 99 RDMA_NODE_USNIC, 100 RDMA_NODE_USNIC_UDP, 101 }; 102 103 enum { 104 /* set the local administered indication */ 105 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2, 106 }; 107 108 enum rdma_transport_type { 109 RDMA_TRANSPORT_IB, 110 RDMA_TRANSPORT_IWARP, 111 RDMA_TRANSPORT_USNIC, 112 RDMA_TRANSPORT_USNIC_UDP 113 }; 114 115 enum rdma_protocol_type { 116 RDMA_PROTOCOL_IB, 117 RDMA_PROTOCOL_IBOE, 118 RDMA_PROTOCOL_IWARP, 119 RDMA_PROTOCOL_USNIC_UDP 120 }; 121 122 __attribute_const__ enum rdma_transport_type 123 rdma_node_get_transport(enum rdma_node_type node_type); 124 125 enum rdma_network_type { 126 RDMA_NETWORK_IB, 127 RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, 128 RDMA_NETWORK_IPV4, 129 RDMA_NETWORK_IPV6 130 }; 131 132 static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type) 133 { 134 if (network_type == RDMA_NETWORK_IPV4 || 135 network_type == RDMA_NETWORK_IPV6) 136 return IB_GID_TYPE_ROCE_UDP_ENCAP; 137 138 /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ 139 return IB_GID_TYPE_IB; 140 } 141 142 static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type, 143 union ib_gid *gid) 144 { 145 if (gid_type == IB_GID_TYPE_IB) 146 return RDMA_NETWORK_IB; 147 148 if (ipv6_addr_v4mapped((struct in6_addr *)gid)) 149 return RDMA_NETWORK_IPV4; 150 else 151 return RDMA_NETWORK_IPV6; 152 } 153 154 enum rdma_link_layer { 155 IB_LINK_LAYER_UNSPECIFIED, 156 IB_LINK_LAYER_INFINIBAND, 157 IB_LINK_LAYER_ETHERNET, 158 }; 159 160 enum ib_device_cap_flags { 161 IB_DEVICE_RESIZE_MAX_WR = (1 << 0), 162 IB_DEVICE_BAD_PKEY_CNTR = (1 << 1), 163 IB_DEVICE_BAD_QKEY_CNTR = (1 << 2), 164 IB_DEVICE_RAW_MULTI = (1 << 3), 165 IB_DEVICE_AUTO_PATH_MIG = (1 << 4), 166 IB_DEVICE_CHANGE_PHY_PORT = (1 << 5), 167 IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6), 168 IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7), 169 IB_DEVICE_SHUTDOWN_PORT = (1 << 8), 170 IB_DEVICE_INIT_TYPE = (1 << 9), 171 IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10), 172 IB_DEVICE_SYS_IMAGE_GUID = (1 << 11), 173 IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12), 174 IB_DEVICE_SRQ_RESIZE = (1 << 13), 175 IB_DEVICE_N_NOTIFY_CQ = (1 << 14), 176 177 /* 178 * This device supports a per-device lkey or stag that can be 179 * used without performing a memory registration for the local 180 * memory. Note that ULPs should never check this flag, but 181 * instead of use the local_dma_lkey flag in the ib_pd structure, 182 * which will always contain a usable lkey. 183 */ 184 IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15), 185 IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16), 186 IB_DEVICE_MEM_WINDOW = (1 << 17), 187 /* 188 * Devices should set IB_DEVICE_UD_IP_SUM if they support 189 * insertion of UDP and TCP checksum on outgoing UD IPoIB 190 * messages and can verify the validity of checksum for 191 * incoming messages. Setting this flag implies that the 192 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 193 */ 194 IB_DEVICE_UD_IP_CSUM = (1 << 18), 195 IB_DEVICE_UD_TSO = (1 << 19), 196 IB_DEVICE_XRC = (1 << 20), 197 198 /* 199 * This device supports the IB "base memory management extension", 200 * which includes support for fast registrations (IB_WR_REG_MR, 201 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should 202 * also be set by any iWarp device which must support FRs to comply 203 * to the iWarp verbs spec. iWarp devices also support the 204 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the 205 * stag. 206 */ 207 IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21), 208 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22), 209 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23), 210 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24), 211 IB_DEVICE_RC_IP_CSUM = (1 << 25), 212 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */ 213 IB_DEVICE_RAW_IP_CSUM = (1 << 26), 214 /* 215 * Devices should set IB_DEVICE_CROSS_CHANNEL if they 216 * support execution of WQEs that involve synchronization 217 * of I/O operations with single completion queue managed 218 * by hardware. 219 */ 220 IB_DEVICE_CROSS_CHANNEL = (1 << 27), 221 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), 222 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), 223 IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), 224 IB_DEVICE_SG_GAPS_REG = (1ULL << 32), 225 IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), 226 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ 227 IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), 228 IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35), 229 }; 230 231 enum ib_signature_prot_cap { 232 IB_PROT_T10DIF_TYPE_1 = 1, 233 IB_PROT_T10DIF_TYPE_2 = 1 << 1, 234 IB_PROT_T10DIF_TYPE_3 = 1 << 2, 235 }; 236 237 enum ib_signature_guard_cap { 238 IB_GUARD_T10DIF_CRC = 1, 239 IB_GUARD_T10DIF_CSUM = 1 << 1, 240 }; 241 242 enum ib_atomic_cap { 243 IB_ATOMIC_NONE, 244 IB_ATOMIC_HCA, 245 IB_ATOMIC_GLOB 246 }; 247 248 enum ib_odp_general_cap_bits { 249 IB_ODP_SUPPORT = 1 << 0, 250 IB_ODP_SUPPORT_IMPLICIT = 1 << 1, 251 }; 252 253 enum ib_odp_transport_cap_bits { 254 IB_ODP_SUPPORT_SEND = 1 << 0, 255 IB_ODP_SUPPORT_RECV = 1 << 1, 256 IB_ODP_SUPPORT_WRITE = 1 << 2, 257 IB_ODP_SUPPORT_READ = 1 << 3, 258 IB_ODP_SUPPORT_ATOMIC = 1 << 4, 259 }; 260 261 struct ib_odp_caps { 262 uint64_t general_caps; 263 struct { 264 uint32_t rc_odp_caps; 265 uint32_t uc_odp_caps; 266 uint32_t ud_odp_caps; 267 } per_transport_caps; 268 }; 269 270 struct ib_rss_caps { 271 /* Corresponding bit will be set if qp type from 272 * 'enum ib_qp_type' is supported, e.g. 273 * supported_qpts |= 1 << IB_QPT_UD 274 */ 275 u32 supported_qpts; 276 u32 max_rwq_indirection_tables; 277 u32 max_rwq_indirection_table_size; 278 }; 279 280 enum ib_cq_creation_flags { 281 IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0, 282 IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1, 283 }; 284 285 struct ib_cq_init_attr { 286 unsigned int cqe; 287 int comp_vector; 288 u32 flags; 289 }; 290 291 struct ib_device_attr { 292 u64 fw_ver; 293 __be64 sys_image_guid; 294 u64 max_mr_size; 295 u64 page_size_cap; 296 u32 vendor_id; 297 u32 vendor_part_id; 298 u32 hw_ver; 299 int max_qp; 300 int max_qp_wr; 301 u64 device_cap_flags; 302 int max_sge; 303 int max_sge_rd; 304 int max_cq; 305 int max_cqe; 306 int max_mr; 307 int max_pd; 308 int max_qp_rd_atom; 309 int max_ee_rd_atom; 310 int max_res_rd_atom; 311 int max_qp_init_rd_atom; 312 int max_ee_init_rd_atom; 313 enum ib_atomic_cap atomic_cap; 314 enum ib_atomic_cap masked_atomic_cap; 315 int max_ee; 316 int max_rdd; 317 int max_mw; 318 int max_raw_ipv6_qp; 319 int max_raw_ethy_qp; 320 int max_mcast_grp; 321 int max_mcast_qp_attach; 322 int max_total_mcast_qp_attach; 323 int max_ah; 324 int max_fmr; 325 int max_map_per_fmr; 326 int max_srq; 327 int max_srq_wr; 328 int max_srq_sge; 329 unsigned int max_fast_reg_page_list_len; 330 u16 max_pkeys; 331 u8 local_ca_ack_delay; 332 int sig_prot_cap; 333 int sig_guard_cap; 334 struct ib_odp_caps odp_caps; 335 uint64_t timestamp_mask; 336 uint64_t hca_core_clock; /* in KHZ */ 337 struct ib_rss_caps rss_caps; 338 u32 max_wq_type_rq; 339 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ 340 }; 341 342 enum ib_mtu { 343 IB_MTU_256 = 1, 344 IB_MTU_512 = 2, 345 IB_MTU_1024 = 3, 346 IB_MTU_2048 = 4, 347 IB_MTU_4096 = 5 348 }; 349 350 static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) 351 { 352 switch (mtu) { 353 case IB_MTU_256: return 256; 354 case IB_MTU_512: return 512; 355 case IB_MTU_1024: return 1024; 356 case IB_MTU_2048: return 2048; 357 case IB_MTU_4096: return 4096; 358 default: return -1; 359 } 360 } 361 362 static inline enum ib_mtu ib_mtu_int_to_enum(int mtu) 363 { 364 if (mtu >= 4096) 365 return IB_MTU_4096; 366 else if (mtu >= 2048) 367 return IB_MTU_2048; 368 else if (mtu >= 1024) 369 return IB_MTU_1024; 370 else if (mtu >= 512) 371 return IB_MTU_512; 372 else 373 return IB_MTU_256; 374 } 375 376 enum ib_port_state { 377 IB_PORT_NOP = 0, 378 IB_PORT_DOWN = 1, 379 IB_PORT_INIT = 2, 380 IB_PORT_ARMED = 3, 381 IB_PORT_ACTIVE = 4, 382 IB_PORT_ACTIVE_DEFER = 5 383 }; 384 385 enum ib_port_cap_flags { 386 IB_PORT_SM = 1 << 1, 387 IB_PORT_NOTICE_SUP = 1 << 2, 388 IB_PORT_TRAP_SUP = 1 << 3, 389 IB_PORT_OPT_IPD_SUP = 1 << 4, 390 IB_PORT_AUTO_MIGR_SUP = 1 << 5, 391 IB_PORT_SL_MAP_SUP = 1 << 6, 392 IB_PORT_MKEY_NVRAM = 1 << 7, 393 IB_PORT_PKEY_NVRAM = 1 << 8, 394 IB_PORT_LED_INFO_SUP = 1 << 9, 395 IB_PORT_SM_DISABLED = 1 << 10, 396 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, 397 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, 398 IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14, 399 IB_PORT_CM_SUP = 1 << 16, 400 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, 401 IB_PORT_REINIT_SUP = 1 << 18, 402 IB_PORT_DEVICE_MGMT_SUP = 1 << 19, 403 IB_PORT_VENDOR_CLASS_SUP = 1 << 20, 404 IB_PORT_DR_NOTICE_SUP = 1 << 21, 405 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, 406 IB_PORT_BOOT_MGMT_SUP = 1 << 23, 407 IB_PORT_LINK_LATENCY_SUP = 1 << 24, 408 IB_PORT_CLIENT_REG_SUP = 1 << 25, 409 IB_PORT_IP_BASED_GIDS = 1 << 26, 410 }; 411 412 enum ib_port_width { 413 IB_WIDTH_1X = 1, 414 IB_WIDTH_4X = 2, 415 IB_WIDTH_8X = 4, 416 IB_WIDTH_12X = 8 417 }; 418 419 static inline int ib_width_enum_to_int(enum ib_port_width width) 420 { 421 switch (width) { 422 case IB_WIDTH_1X: return 1; 423 case IB_WIDTH_4X: return 4; 424 case IB_WIDTH_8X: return 8; 425 case IB_WIDTH_12X: return 12; 426 default: return -1; 427 } 428 } 429 430 enum ib_port_speed { 431 IB_SPEED_SDR = 1, 432 IB_SPEED_DDR = 2, 433 IB_SPEED_QDR = 4, 434 IB_SPEED_FDR10 = 8, 435 IB_SPEED_FDR = 16, 436 IB_SPEED_EDR = 32, 437 IB_SPEED_HDR = 64 438 }; 439 440 /** 441 * struct rdma_hw_stats 442 * @timestamp - Used by the core code to track when the last update was 443 * @lifespan - Used by the core code to determine how old the counters 444 * should be before being updated again. Stored in jiffies, defaults 445 * to 10 milliseconds, drivers can override the default be specifying 446 * their own value during their allocation routine. 447 * @name - Array of pointers to static names used for the counters in 448 * directory. 449 * @num_counters - How many hardware counters there are. If name is 450 * shorter than this number, a kernel oops will result. Driver authors 451 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@name) < num_counters) 452 * in their code to prevent this. 453 * @value - Array of u64 counters that are accessed by the sysfs code and 454 * filled in by the drivers get_stats routine 455 */ 456 struct rdma_hw_stats { 457 unsigned long timestamp; 458 unsigned long lifespan; 459 const char * const *names; 460 int num_counters; 461 u64 value[]; 462 }; 463 464 #define RDMA_HW_STATS_DEFAULT_LIFESPAN 10 465 /** 466 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct 467 * for drivers. 468 * @names - Array of static const char * 469 * @num_counters - How many elements in array 470 * @lifespan - How many milliseconds between updates 471 */ 472 static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct( 473 const char * const *names, int num_counters, 474 unsigned long lifespan) 475 { 476 struct rdma_hw_stats *stats; 477 478 stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64), 479 GFP_KERNEL); 480 if (!stats) 481 return NULL; 482 stats->names = names; 483 stats->num_counters = num_counters; 484 stats->lifespan = msecs_to_jiffies(lifespan); 485 486 return stats; 487 } 488 489 490 /* Define bits for the various functionality this port needs to be supported by 491 * the core. 492 */ 493 /* Management 0x00000FFF */ 494 #define RDMA_CORE_CAP_IB_MAD 0x00000001 495 #define RDMA_CORE_CAP_IB_SMI 0x00000002 496 #define RDMA_CORE_CAP_IB_CM 0x00000004 497 #define RDMA_CORE_CAP_IW_CM 0x00000008 498 #define RDMA_CORE_CAP_IB_SA 0x00000010 499 #define RDMA_CORE_CAP_OPA_MAD 0x00000020 500 501 /* Address format 0x000FF000 */ 502 #define RDMA_CORE_CAP_AF_IB 0x00001000 503 #define RDMA_CORE_CAP_ETH_AH 0x00002000 504 #define RDMA_CORE_CAP_OPA_AH 0x00004000 505 506 /* Protocol 0xFFF00000 */ 507 #define RDMA_CORE_CAP_PROT_IB 0x00100000 508 #define RDMA_CORE_CAP_PROT_ROCE 0x00200000 509 #define RDMA_CORE_CAP_PROT_IWARP 0x00400000 510 #define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000 511 #define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000 512 #define RDMA_CORE_CAP_PROT_USNIC 0x02000000 513 514 #define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ 515 | RDMA_CORE_CAP_IB_MAD \ 516 | RDMA_CORE_CAP_IB_SMI \ 517 | RDMA_CORE_CAP_IB_CM \ 518 | RDMA_CORE_CAP_IB_SA \ 519 | RDMA_CORE_CAP_AF_IB) 520 #define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ 521 | RDMA_CORE_CAP_IB_MAD \ 522 | RDMA_CORE_CAP_IB_CM \ 523 | RDMA_CORE_CAP_AF_IB \ 524 | RDMA_CORE_CAP_ETH_AH) 525 #define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \ 526 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \ 527 | RDMA_CORE_CAP_IB_MAD \ 528 | RDMA_CORE_CAP_IB_CM \ 529 | RDMA_CORE_CAP_AF_IB \ 530 | RDMA_CORE_CAP_ETH_AH) 531 #define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ 532 | RDMA_CORE_CAP_IW_CM) 533 #define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \ 534 | RDMA_CORE_CAP_OPA_MAD) 535 536 #define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET) 537 538 #define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC) 539 540 struct ib_port_attr { 541 u64 subnet_prefix; 542 enum ib_port_state state; 543 enum ib_mtu max_mtu; 544 enum ib_mtu active_mtu; 545 int gid_tbl_len; 546 u32 port_cap_flags; 547 u32 max_msg_sz; 548 u32 bad_pkey_cntr; 549 u32 qkey_viol_cntr; 550 u16 pkey_tbl_len; 551 u16 lid; 552 u16 sm_lid; 553 u8 lmc; 554 u8 max_vl_num; 555 u8 sm_sl; 556 u8 subnet_timeout; 557 u8 init_type_reply; 558 u8 active_width; 559 u8 active_speed; 560 u8 phys_state; 561 bool grh_required; 562 }; 563 564 enum ib_device_modify_flags { 565 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0, 566 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1 567 }; 568 569 #define IB_DEVICE_NODE_DESC_MAX 64 570 571 struct ib_device_modify { 572 u64 sys_image_guid; 573 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 574 }; 575 576 enum ib_port_modify_flags { 577 IB_PORT_SHUTDOWN = 1, 578 IB_PORT_INIT_TYPE = (1<<2), 579 IB_PORT_RESET_QKEY_CNTR = (1<<3) 580 }; 581 582 struct ib_port_modify { 583 u32 set_port_cap_mask; 584 u32 clr_port_cap_mask; 585 u8 init_type; 586 }; 587 588 enum ib_event_type { 589 IB_EVENT_CQ_ERR, 590 IB_EVENT_QP_FATAL, 591 IB_EVENT_QP_REQ_ERR, 592 IB_EVENT_QP_ACCESS_ERR, 593 IB_EVENT_COMM_EST, 594 IB_EVENT_SQ_DRAINED, 595 IB_EVENT_PATH_MIG, 596 IB_EVENT_PATH_MIG_ERR, 597 IB_EVENT_DEVICE_FATAL, 598 IB_EVENT_PORT_ACTIVE, 599 IB_EVENT_PORT_ERR, 600 IB_EVENT_LID_CHANGE, 601 IB_EVENT_PKEY_CHANGE, 602 IB_EVENT_SM_CHANGE, 603 IB_EVENT_SRQ_ERR, 604 IB_EVENT_SRQ_LIMIT_REACHED, 605 IB_EVENT_QP_LAST_WQE_REACHED, 606 IB_EVENT_CLIENT_REREGISTER, 607 IB_EVENT_GID_CHANGE, 608 IB_EVENT_WQ_FATAL, 609 }; 610 611 const char *__attribute_const__ ib_event_msg(enum ib_event_type event); 612 613 struct ib_event { 614 struct ib_device *device; 615 union { 616 struct ib_cq *cq; 617 struct ib_qp *qp; 618 struct ib_srq *srq; 619 struct ib_wq *wq; 620 u8 port_num; 621 } element; 622 enum ib_event_type event; 623 }; 624 625 struct ib_event_handler { 626 struct ib_device *device; 627 void (*handler)(struct ib_event_handler *, struct ib_event *); 628 struct list_head list; 629 }; 630 631 #define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ 632 do { \ 633 (_ptr)->device = _device; \ 634 (_ptr)->handler = _handler; \ 635 INIT_LIST_HEAD(&(_ptr)->list); \ 636 } while (0) 637 638 struct ib_global_route { 639 union ib_gid dgid; 640 u32 flow_label; 641 u8 sgid_index; 642 u8 hop_limit; 643 u8 traffic_class; 644 }; 645 646 struct ib_grh { 647 __be32 version_tclass_flow; 648 __be16 paylen; 649 u8 next_hdr; 650 u8 hop_limit; 651 union ib_gid sgid; 652 union ib_gid dgid; 653 }; 654 655 union rdma_network_hdr { 656 struct ib_grh ibgrh; 657 struct { 658 /* The IB spec states that if it's IPv4, the header 659 * is located in the last 20 bytes of the header. 660 */ 661 u8 reserved[20]; 662 struct iphdr roce4grh; 663 }; 664 }; 665 666 enum { 667 IB_MULTICAST_QPN = 0xffffff 668 }; 669 670 #define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF) 671 #define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000) 672 673 enum ib_ah_flags { 674 IB_AH_GRH = 1 675 }; 676 677 enum ib_rate { 678 IB_RATE_PORT_CURRENT = 0, 679 IB_RATE_2_5_GBPS = 2, 680 IB_RATE_5_GBPS = 5, 681 IB_RATE_10_GBPS = 3, 682 IB_RATE_20_GBPS = 6, 683 IB_RATE_30_GBPS = 4, 684 IB_RATE_40_GBPS = 7, 685 IB_RATE_60_GBPS = 8, 686 IB_RATE_80_GBPS = 9, 687 IB_RATE_120_GBPS = 10, 688 IB_RATE_14_GBPS = 11, 689 IB_RATE_56_GBPS = 12, 690 IB_RATE_112_GBPS = 13, 691 IB_RATE_168_GBPS = 14, 692 IB_RATE_25_GBPS = 15, 693 IB_RATE_100_GBPS = 16, 694 IB_RATE_200_GBPS = 17, 695 IB_RATE_300_GBPS = 18 696 }; 697 698 /** 699 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 700 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 701 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 702 * @rate: rate to convert. 703 */ 704 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate); 705 706 /** 707 * ib_rate_to_mbps - Convert the IB rate enum to Mbps. 708 * For example, IB_RATE_2_5_GBPS will be converted to 2500. 709 * @rate: rate to convert. 710 */ 711 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate); 712 713 714 /** 715 * enum ib_mr_type - memory region type 716 * @IB_MR_TYPE_MEM_REG: memory region that is used for 717 * normal registration 718 * @IB_MR_TYPE_SIGNATURE: memory region that is used for 719 * signature operations (data-integrity 720 * capable regions) 721 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to 722 * register any arbitrary sg lists (without 723 * the normal mr constraints - see 724 * ib_map_mr_sg) 725 */ 726 enum ib_mr_type { 727 IB_MR_TYPE_MEM_REG, 728 IB_MR_TYPE_SIGNATURE, 729 IB_MR_TYPE_SG_GAPS, 730 }; 731 732 /** 733 * Signature types 734 * IB_SIG_TYPE_NONE: Unprotected. 735 * IB_SIG_TYPE_T10_DIF: Type T10-DIF 736 */ 737 enum ib_signature_type { 738 IB_SIG_TYPE_NONE, 739 IB_SIG_TYPE_T10_DIF, 740 }; 741 742 /** 743 * Signature T10-DIF block-guard types 744 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules. 745 * IB_T10DIF_CSUM: Corresponds to IP checksum rules. 746 */ 747 enum ib_t10_dif_bg_type { 748 IB_T10DIF_CRC, 749 IB_T10DIF_CSUM 750 }; 751 752 /** 753 * struct ib_t10_dif_domain - Parameters specific for T10-DIF 754 * domain. 755 * @bg_type: T10-DIF block guard type (CRC|CSUM) 756 * @pi_interval: protection information interval. 757 * @bg: seed of guard computation. 758 * @app_tag: application tag of guard block 759 * @ref_tag: initial guard block reference tag. 760 * @ref_remap: Indicate wethear the reftag increments each block 761 * @app_escape: Indicate to skip block check if apptag=0xffff 762 * @ref_escape: Indicate to skip block check if reftag=0xffffffff 763 * @apptag_check_mask: check bitmask of application tag. 764 */ 765 struct ib_t10_dif_domain { 766 enum ib_t10_dif_bg_type bg_type; 767 u16 pi_interval; 768 u16 bg; 769 u16 app_tag; 770 u32 ref_tag; 771 bool ref_remap; 772 bool app_escape; 773 bool ref_escape; 774 u16 apptag_check_mask; 775 }; 776 777 /** 778 * struct ib_sig_domain - Parameters for signature domain 779 * @sig_type: specific signauture type 780 * @sig: union of all signature domain attributes that may 781 * be used to set domain layout. 782 */ 783 struct ib_sig_domain { 784 enum ib_signature_type sig_type; 785 union { 786 struct ib_t10_dif_domain dif; 787 } sig; 788 }; 789 790 /** 791 * struct ib_sig_attrs - Parameters for signature handover operation 792 * @check_mask: bitmask for signature byte check (8 bytes) 793 * @mem: memory domain layout desciptor. 794 * @wire: wire domain layout desciptor. 795 */ 796 struct ib_sig_attrs { 797 u8 check_mask; 798 struct ib_sig_domain mem; 799 struct ib_sig_domain wire; 800 }; 801 802 enum ib_sig_err_type { 803 IB_SIG_BAD_GUARD, 804 IB_SIG_BAD_REFTAG, 805 IB_SIG_BAD_APPTAG, 806 }; 807 808 /** 809 * struct ib_sig_err - signature error descriptor 810 */ 811 struct ib_sig_err { 812 enum ib_sig_err_type err_type; 813 u32 expected; 814 u32 actual; 815 u64 sig_err_offset; 816 u32 key; 817 }; 818 819 enum ib_mr_status_check { 820 IB_MR_CHECK_SIG_STATUS = 1, 821 }; 822 823 /** 824 * struct ib_mr_status - Memory region status container 825 * 826 * @fail_status: Bitmask of MR checks status. For each 827 * failed check a corresponding status bit is set. 828 * @sig_err: Additional info for IB_MR_CEHCK_SIG_STATUS 829 * failure. 830 */ 831 struct ib_mr_status { 832 u32 fail_status; 833 struct ib_sig_err sig_err; 834 }; 835 836 /** 837 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate 838 * enum. 839 * @mult: multiple to convert. 840 */ 841 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); 842 843 enum rdma_ah_attr_type { 844 RDMA_AH_ATTR_TYPE_IB, 845 RDMA_AH_ATTR_TYPE_ROCE, 846 RDMA_AH_ATTR_TYPE_OPA, 847 }; 848 849 struct ib_ah_attr { 850 u16 dlid; 851 u8 src_path_bits; 852 }; 853 854 struct roce_ah_attr { 855 u8 dmac[ETH_ALEN]; 856 }; 857 858 struct opa_ah_attr { 859 u32 dlid; 860 u8 src_path_bits; 861 }; 862 863 struct rdma_ah_attr { 864 struct ib_global_route grh; 865 u8 sl; 866 u8 static_rate; 867 u8 port_num; 868 u8 ah_flags; 869 enum rdma_ah_attr_type type; 870 union { 871 struct ib_ah_attr ib; 872 struct roce_ah_attr roce; 873 struct opa_ah_attr opa; 874 }; 875 }; 876 877 enum ib_wc_status { 878 IB_WC_SUCCESS, 879 IB_WC_LOC_LEN_ERR, 880 IB_WC_LOC_QP_OP_ERR, 881 IB_WC_LOC_EEC_OP_ERR, 882 IB_WC_LOC_PROT_ERR, 883 IB_WC_WR_FLUSH_ERR, 884 IB_WC_MW_BIND_ERR, 885 IB_WC_BAD_RESP_ERR, 886 IB_WC_LOC_ACCESS_ERR, 887 IB_WC_REM_INV_REQ_ERR, 888 IB_WC_REM_ACCESS_ERR, 889 IB_WC_REM_OP_ERR, 890 IB_WC_RETRY_EXC_ERR, 891 IB_WC_RNR_RETRY_EXC_ERR, 892 IB_WC_LOC_RDD_VIOL_ERR, 893 IB_WC_REM_INV_RD_REQ_ERR, 894 IB_WC_REM_ABORT_ERR, 895 IB_WC_INV_EECN_ERR, 896 IB_WC_INV_EEC_STATE_ERR, 897 IB_WC_FATAL_ERR, 898 IB_WC_RESP_TIMEOUT_ERR, 899 IB_WC_GENERAL_ERR 900 }; 901 902 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); 903 904 enum ib_wc_opcode { 905 IB_WC_SEND, 906 IB_WC_RDMA_WRITE, 907 IB_WC_RDMA_READ, 908 IB_WC_COMP_SWAP, 909 IB_WC_FETCH_ADD, 910 IB_WC_LSO, 911 IB_WC_LOCAL_INV, 912 IB_WC_REG_MR, 913 IB_WC_MASKED_COMP_SWAP, 914 IB_WC_MASKED_FETCH_ADD, 915 /* 916 * Set value of IB_WC_RECV so consumers can test if a completion is a 917 * receive by testing (opcode & IB_WC_RECV). 918 */ 919 IB_WC_RECV = 1 << 7, 920 IB_WC_RECV_RDMA_WITH_IMM 921 }; 922 923 enum ib_wc_flags { 924 IB_WC_GRH = 1, 925 IB_WC_WITH_IMM = (1<<1), 926 IB_WC_WITH_INVALIDATE = (1<<2), 927 IB_WC_IP_CSUM_OK = (1<<3), 928 IB_WC_WITH_SMAC = (1<<4), 929 IB_WC_WITH_VLAN = (1<<5), 930 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6), 931 }; 932 933 struct ib_wc { 934 union { 935 u64 wr_id; 936 struct ib_cqe *wr_cqe; 937 }; 938 enum ib_wc_status status; 939 enum ib_wc_opcode opcode; 940 u32 vendor_err; 941 u32 byte_len; 942 struct ib_qp *qp; 943 union { 944 __be32 imm_data; 945 u32 invalidate_rkey; 946 } ex; 947 u32 src_qp; 948 int wc_flags; 949 u16 pkey_index; 950 u16 slid; 951 u8 sl; 952 u8 dlid_path_bits; 953 u8 port_num; /* valid only for DR SMPs on switches */ 954 u8 smac[ETH_ALEN]; 955 u16 vlan_id; 956 u8 network_hdr_type; 957 }; 958 959 enum ib_cq_notify_flags { 960 IB_CQ_SOLICITED = 1 << 0, 961 IB_CQ_NEXT_COMP = 1 << 1, 962 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP, 963 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2, 964 }; 965 966 enum ib_srq_type { 967 IB_SRQT_BASIC, 968 IB_SRQT_XRC 969 }; 970 971 enum ib_srq_attr_mask { 972 IB_SRQ_MAX_WR = 1 << 0, 973 IB_SRQ_LIMIT = 1 << 1, 974 }; 975 976 struct ib_srq_attr { 977 u32 max_wr; 978 u32 max_sge; 979 u32 srq_limit; 980 }; 981 982 struct ib_srq_init_attr { 983 void (*event_handler)(struct ib_event *, void *); 984 void *srq_context; 985 struct ib_srq_attr attr; 986 enum ib_srq_type srq_type; 987 988 union { 989 struct { 990 struct ib_xrcd *xrcd; 991 struct ib_cq *cq; 992 } xrc; 993 } ext; 994 }; 995 996 struct ib_qp_cap { 997 u32 max_send_wr; 998 u32 max_recv_wr; 999 u32 max_send_sge; 1000 u32 max_recv_sge; 1001 u32 max_inline_data; 1002 1003 /* 1004 * Maximum number of rdma_rw_ctx structures in flight at a time. 1005 * ib_create_qp() will calculate the right amount of neededed WRs 1006 * and MRs based on this. 1007 */ 1008 u32 max_rdma_ctxs; 1009 }; 1010 1011 enum ib_sig_type { 1012 IB_SIGNAL_ALL_WR, 1013 IB_SIGNAL_REQ_WR 1014 }; 1015 1016 enum ib_qp_type { 1017 /* 1018 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries 1019 * here (and in that order) since the MAD layer uses them as 1020 * indices into a 2-entry table. 1021 */ 1022 IB_QPT_SMI, 1023 IB_QPT_GSI, 1024 1025 IB_QPT_RC, 1026 IB_QPT_UC, 1027 IB_QPT_UD, 1028 IB_QPT_RAW_IPV6, 1029 IB_QPT_RAW_ETHERTYPE, 1030 IB_QPT_RAW_PACKET = 8, 1031 IB_QPT_XRC_INI = 9, 1032 IB_QPT_XRC_TGT, 1033 IB_QPT_MAX, 1034 /* Reserve a range for qp types internal to the low level driver. 1035 * These qp types will not be visible at the IB core layer, so the 1036 * IB_QPT_MAX usages should not be affected in the core layer 1037 */ 1038 IB_QPT_RESERVED1 = 0x1000, 1039 IB_QPT_RESERVED2, 1040 IB_QPT_RESERVED3, 1041 IB_QPT_RESERVED4, 1042 IB_QPT_RESERVED5, 1043 IB_QPT_RESERVED6, 1044 IB_QPT_RESERVED7, 1045 IB_QPT_RESERVED8, 1046 IB_QPT_RESERVED9, 1047 IB_QPT_RESERVED10, 1048 }; 1049 1050 enum ib_qp_create_flags { 1051 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, 1052 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, 1053 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, 1054 IB_QP_CREATE_MANAGED_SEND = 1 << 3, 1055 IB_QP_CREATE_MANAGED_RECV = 1 << 4, 1056 IB_QP_CREATE_NETIF_QP = 1 << 5, 1057 IB_QP_CREATE_SIGNATURE_EN = 1 << 6, 1058 IB_QP_CREATE_USE_GFP_NOIO = 1 << 7, 1059 IB_QP_CREATE_SCATTER_FCS = 1 << 8, 1060 IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, 1061 /* reserve bits 26-31 for low level drivers' internal use */ 1062 IB_QP_CREATE_RESERVED_START = 1 << 26, 1063 IB_QP_CREATE_RESERVED_END = 1 << 31, 1064 }; 1065 1066 /* 1067 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 1068 * callback to destroy the passed in QP. 1069 */ 1070 1071 struct ib_qp_init_attr { 1072 void (*event_handler)(struct ib_event *, void *); 1073 void *qp_context; 1074 struct ib_cq *send_cq; 1075 struct ib_cq *recv_cq; 1076 struct ib_srq *srq; 1077 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1078 struct ib_qp_cap cap; 1079 enum ib_sig_type sq_sig_type; 1080 enum ib_qp_type qp_type; 1081 enum ib_qp_create_flags create_flags; 1082 1083 /* 1084 * Only needed for special QP types, or when using the RW API. 1085 */ 1086 u8 port_num; 1087 struct ib_rwq_ind_table *rwq_ind_tbl; 1088 }; 1089 1090 struct ib_qp_open_attr { 1091 void (*event_handler)(struct ib_event *, void *); 1092 void *qp_context; 1093 u32 qp_num; 1094 enum ib_qp_type qp_type; 1095 }; 1096 1097 enum ib_rnr_timeout { 1098 IB_RNR_TIMER_655_36 = 0, 1099 IB_RNR_TIMER_000_01 = 1, 1100 IB_RNR_TIMER_000_02 = 2, 1101 IB_RNR_TIMER_000_03 = 3, 1102 IB_RNR_TIMER_000_04 = 4, 1103 IB_RNR_TIMER_000_06 = 5, 1104 IB_RNR_TIMER_000_08 = 6, 1105 IB_RNR_TIMER_000_12 = 7, 1106 IB_RNR_TIMER_000_16 = 8, 1107 IB_RNR_TIMER_000_24 = 9, 1108 IB_RNR_TIMER_000_32 = 10, 1109 IB_RNR_TIMER_000_48 = 11, 1110 IB_RNR_TIMER_000_64 = 12, 1111 IB_RNR_TIMER_000_96 = 13, 1112 IB_RNR_TIMER_001_28 = 14, 1113 IB_RNR_TIMER_001_92 = 15, 1114 IB_RNR_TIMER_002_56 = 16, 1115 IB_RNR_TIMER_003_84 = 17, 1116 IB_RNR_TIMER_005_12 = 18, 1117 IB_RNR_TIMER_007_68 = 19, 1118 IB_RNR_TIMER_010_24 = 20, 1119 IB_RNR_TIMER_015_36 = 21, 1120 IB_RNR_TIMER_020_48 = 22, 1121 IB_RNR_TIMER_030_72 = 23, 1122 IB_RNR_TIMER_040_96 = 24, 1123 IB_RNR_TIMER_061_44 = 25, 1124 IB_RNR_TIMER_081_92 = 26, 1125 IB_RNR_TIMER_122_88 = 27, 1126 IB_RNR_TIMER_163_84 = 28, 1127 IB_RNR_TIMER_245_76 = 29, 1128 IB_RNR_TIMER_327_68 = 30, 1129 IB_RNR_TIMER_491_52 = 31 1130 }; 1131 1132 enum ib_qp_attr_mask { 1133 IB_QP_STATE = 1, 1134 IB_QP_CUR_STATE = (1<<1), 1135 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2), 1136 IB_QP_ACCESS_FLAGS = (1<<3), 1137 IB_QP_PKEY_INDEX = (1<<4), 1138 IB_QP_PORT = (1<<5), 1139 IB_QP_QKEY = (1<<6), 1140 IB_QP_AV = (1<<7), 1141 IB_QP_PATH_MTU = (1<<8), 1142 IB_QP_TIMEOUT = (1<<9), 1143 IB_QP_RETRY_CNT = (1<<10), 1144 IB_QP_RNR_RETRY = (1<<11), 1145 IB_QP_RQ_PSN = (1<<12), 1146 IB_QP_MAX_QP_RD_ATOMIC = (1<<13), 1147 IB_QP_ALT_PATH = (1<<14), 1148 IB_QP_MIN_RNR_TIMER = (1<<15), 1149 IB_QP_SQ_PSN = (1<<16), 1150 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17), 1151 IB_QP_PATH_MIG_STATE = (1<<18), 1152 IB_QP_CAP = (1<<19), 1153 IB_QP_DEST_QPN = (1<<20), 1154 IB_QP_RESERVED1 = (1<<21), 1155 IB_QP_RESERVED2 = (1<<22), 1156 IB_QP_RESERVED3 = (1<<23), 1157 IB_QP_RESERVED4 = (1<<24), 1158 IB_QP_RATE_LIMIT = (1<<25), 1159 }; 1160 1161 enum ib_qp_state { 1162 IB_QPS_RESET, 1163 IB_QPS_INIT, 1164 IB_QPS_RTR, 1165 IB_QPS_RTS, 1166 IB_QPS_SQD, 1167 IB_QPS_SQE, 1168 IB_QPS_ERR 1169 }; 1170 1171 enum ib_mig_state { 1172 IB_MIG_MIGRATED, 1173 IB_MIG_REARM, 1174 IB_MIG_ARMED 1175 }; 1176 1177 enum ib_mw_type { 1178 IB_MW_TYPE_1 = 1, 1179 IB_MW_TYPE_2 = 2 1180 }; 1181 1182 struct ib_qp_attr { 1183 enum ib_qp_state qp_state; 1184 enum ib_qp_state cur_qp_state; 1185 enum ib_mtu path_mtu; 1186 enum ib_mig_state path_mig_state; 1187 u32 qkey; 1188 u32 rq_psn; 1189 u32 sq_psn; 1190 u32 dest_qp_num; 1191 int qp_access_flags; 1192 struct ib_qp_cap cap; 1193 struct rdma_ah_attr ah_attr; 1194 struct rdma_ah_attr alt_ah_attr; 1195 u16 pkey_index; 1196 u16 alt_pkey_index; 1197 u8 en_sqd_async_notify; 1198 u8 sq_draining; 1199 u8 max_rd_atomic; 1200 u8 max_dest_rd_atomic; 1201 u8 min_rnr_timer; 1202 u8 port_num; 1203 u8 timeout; 1204 u8 retry_cnt; 1205 u8 rnr_retry; 1206 u8 alt_port_num; 1207 u8 alt_timeout; 1208 u32 rate_limit; 1209 }; 1210 1211 enum ib_wr_opcode { 1212 IB_WR_RDMA_WRITE, 1213 IB_WR_RDMA_WRITE_WITH_IMM, 1214 IB_WR_SEND, 1215 IB_WR_SEND_WITH_IMM, 1216 IB_WR_RDMA_READ, 1217 IB_WR_ATOMIC_CMP_AND_SWP, 1218 IB_WR_ATOMIC_FETCH_AND_ADD, 1219 IB_WR_LSO, 1220 IB_WR_SEND_WITH_INV, 1221 IB_WR_RDMA_READ_WITH_INV, 1222 IB_WR_LOCAL_INV, 1223 IB_WR_REG_MR, 1224 IB_WR_MASKED_ATOMIC_CMP_AND_SWP, 1225 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD, 1226 IB_WR_REG_SIG_MR, 1227 /* reserve values for low level drivers' internal use. 1228 * These values will not be used at all in the ib core layer. 1229 */ 1230 IB_WR_RESERVED1 = 0xf0, 1231 IB_WR_RESERVED2, 1232 IB_WR_RESERVED3, 1233 IB_WR_RESERVED4, 1234 IB_WR_RESERVED5, 1235 IB_WR_RESERVED6, 1236 IB_WR_RESERVED7, 1237 IB_WR_RESERVED8, 1238 IB_WR_RESERVED9, 1239 IB_WR_RESERVED10, 1240 }; 1241 1242 enum ib_send_flags { 1243 IB_SEND_FENCE = 1, 1244 IB_SEND_SIGNALED = (1<<1), 1245 IB_SEND_SOLICITED = (1<<2), 1246 IB_SEND_INLINE = (1<<3), 1247 IB_SEND_IP_CSUM = (1<<4), 1248 1249 /* reserve bits 26-31 for low level drivers' internal use */ 1250 IB_SEND_RESERVED_START = (1 << 26), 1251 IB_SEND_RESERVED_END = (1 << 31), 1252 }; 1253 1254 struct ib_sge { 1255 u64 addr; 1256 u32 length; 1257 u32 lkey; 1258 }; 1259 1260 struct ib_cqe { 1261 void (*done)(struct ib_cq *cq, struct ib_wc *wc); 1262 }; 1263 1264 struct ib_send_wr { 1265 struct ib_send_wr *next; 1266 union { 1267 u64 wr_id; 1268 struct ib_cqe *wr_cqe; 1269 }; 1270 struct ib_sge *sg_list; 1271 int num_sge; 1272 enum ib_wr_opcode opcode; 1273 int send_flags; 1274 union { 1275 __be32 imm_data; 1276 u32 invalidate_rkey; 1277 } ex; 1278 }; 1279 1280 struct ib_rdma_wr { 1281 struct ib_send_wr wr; 1282 u64 remote_addr; 1283 u32 rkey; 1284 }; 1285 1286 static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr) 1287 { 1288 return container_of(wr, struct ib_rdma_wr, wr); 1289 } 1290 1291 struct ib_atomic_wr { 1292 struct ib_send_wr wr; 1293 u64 remote_addr; 1294 u64 compare_add; 1295 u64 swap; 1296 u64 compare_add_mask; 1297 u64 swap_mask; 1298 u32 rkey; 1299 }; 1300 1301 static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr) 1302 { 1303 return container_of(wr, struct ib_atomic_wr, wr); 1304 } 1305 1306 struct ib_ud_wr { 1307 struct ib_send_wr wr; 1308 struct ib_ah *ah; 1309 void *header; 1310 int hlen; 1311 int mss; 1312 u32 remote_qpn; 1313 u32 remote_qkey; 1314 u16 pkey_index; /* valid for GSI only */ 1315 u8 port_num; /* valid for DR SMPs on switch only */ 1316 }; 1317 1318 static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr) 1319 { 1320 return container_of(wr, struct ib_ud_wr, wr); 1321 } 1322 1323 struct ib_reg_wr { 1324 struct ib_send_wr wr; 1325 struct ib_mr *mr; 1326 u32 key; 1327 int access; 1328 }; 1329 1330 static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr) 1331 { 1332 return container_of(wr, struct ib_reg_wr, wr); 1333 } 1334 1335 struct ib_sig_handover_wr { 1336 struct ib_send_wr wr; 1337 struct ib_sig_attrs *sig_attrs; 1338 struct ib_mr *sig_mr; 1339 int access_flags; 1340 struct ib_sge *prot; 1341 }; 1342 1343 static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr) 1344 { 1345 return container_of(wr, struct ib_sig_handover_wr, wr); 1346 } 1347 1348 struct ib_recv_wr { 1349 struct ib_recv_wr *next; 1350 union { 1351 u64 wr_id; 1352 struct ib_cqe *wr_cqe; 1353 }; 1354 struct ib_sge *sg_list; 1355 int num_sge; 1356 }; 1357 1358 enum ib_access_flags { 1359 IB_ACCESS_LOCAL_WRITE = 1, 1360 IB_ACCESS_REMOTE_WRITE = (1<<1), 1361 IB_ACCESS_REMOTE_READ = (1<<2), 1362 IB_ACCESS_REMOTE_ATOMIC = (1<<3), 1363 IB_ACCESS_MW_BIND = (1<<4), 1364 IB_ZERO_BASED = (1<<5), 1365 IB_ACCESS_ON_DEMAND = (1<<6), 1366 IB_ACCESS_HUGETLB = (1<<7), 1367 }; 1368 1369 /* 1370 * XXX: these are apparently used for ->rereg_user_mr, no idea why they 1371 * are hidden here instead of a uapi header! 1372 */ 1373 enum ib_mr_rereg_flags { 1374 IB_MR_REREG_TRANS = 1, 1375 IB_MR_REREG_PD = (1<<1), 1376 IB_MR_REREG_ACCESS = (1<<2), 1377 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) 1378 }; 1379 1380 struct ib_fmr_attr { 1381 int max_pages; 1382 int max_maps; 1383 u8 page_shift; 1384 }; 1385 1386 struct ib_umem; 1387 1388 enum rdma_remove_reason { 1389 /* Userspace requested uobject deletion. Call could fail */ 1390 RDMA_REMOVE_DESTROY, 1391 /* Context deletion. This call should delete the actual object itself */ 1392 RDMA_REMOVE_CLOSE, 1393 /* Driver is being hot-unplugged. This call should delete the actual object itself */ 1394 RDMA_REMOVE_DRIVER_REMOVE, 1395 /* Context is being cleaned-up, but commit was just completed */ 1396 RDMA_REMOVE_DURING_CLEANUP, 1397 }; 1398 1399 struct ib_rdmacg_object { 1400 #ifdef CONFIG_CGROUP_RDMA 1401 struct rdma_cgroup *cg; /* owner rdma cgroup */ 1402 #endif 1403 }; 1404 1405 struct ib_ucontext { 1406 struct ib_device *device; 1407 struct ib_uverbs_file *ufile; 1408 int closing; 1409 1410 /* locking the uobjects_list */ 1411 struct mutex uobjects_lock; 1412 struct list_head uobjects; 1413 /* protects cleanup process from other actions */ 1414 struct rw_semaphore cleanup_rwsem; 1415 enum rdma_remove_reason cleanup_reason; 1416 1417 struct pid *tgid; 1418 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1419 struct rb_root umem_tree; 1420 /* 1421 * Protects .umem_rbroot and tree, as well as odp_mrs_count and 1422 * mmu notifiers registration. 1423 */ 1424 struct rw_semaphore umem_rwsem; 1425 void (*invalidate_range)(struct ib_umem *umem, 1426 unsigned long start, unsigned long end); 1427 1428 struct mmu_notifier mn; 1429 atomic_t notifier_count; 1430 /* A list of umems that don't have private mmu notifier counters yet. */ 1431 struct list_head no_private_counters; 1432 int odp_mrs_count; 1433 #endif 1434 1435 struct ib_rdmacg_object cg_obj; 1436 }; 1437 1438 struct ib_uobject { 1439 u64 user_handle; /* handle given to us by userspace */ 1440 struct ib_ucontext *context; /* associated user context */ 1441 void *object; /* containing object */ 1442 struct list_head list; /* link to context's list */ 1443 struct ib_rdmacg_object cg_obj; /* rdmacg object */ 1444 int id; /* index into kernel idr */ 1445 struct kref ref; 1446 atomic_t usecnt; /* protects exclusive access */ 1447 struct rcu_head rcu; /* kfree_rcu() overhead */ 1448 1449 const struct uverbs_obj_type *type; 1450 }; 1451 1452 struct ib_uobject_file { 1453 struct ib_uobject uobj; 1454 /* ufile contains the lock between context release and file close */ 1455 struct ib_uverbs_file *ufile; 1456 }; 1457 1458 struct ib_udata { 1459 const void __user *inbuf; 1460 void __user *outbuf; 1461 size_t inlen; 1462 size_t outlen; 1463 }; 1464 1465 struct ib_pd { 1466 u32 local_dma_lkey; 1467 u32 flags; 1468 struct ib_device *device; 1469 struct ib_uobject *uobject; 1470 atomic_t usecnt; /* count all resources */ 1471 1472 u32 unsafe_global_rkey; 1473 1474 /* 1475 * Implementation details of the RDMA core, don't use in drivers: 1476 */ 1477 struct ib_mr *__internal_mr; 1478 }; 1479 1480 struct ib_xrcd { 1481 struct ib_device *device; 1482 atomic_t usecnt; /* count all exposed resources */ 1483 struct inode *inode; 1484 1485 struct mutex tgt_qp_mutex; 1486 struct list_head tgt_qp_list; 1487 }; 1488 1489 struct ib_ah { 1490 struct ib_device *device; 1491 struct ib_pd *pd; 1492 struct ib_uobject *uobject; 1493 enum rdma_ah_attr_type type; 1494 }; 1495 1496 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1497 1498 enum ib_poll_context { 1499 IB_POLL_DIRECT, /* caller context, no hw completions */ 1500 IB_POLL_SOFTIRQ, /* poll from softirq context */ 1501 IB_POLL_WORKQUEUE, /* poll from workqueue */ 1502 }; 1503 1504 struct ib_cq { 1505 struct ib_device *device; 1506 struct ib_uobject *uobject; 1507 ib_comp_handler comp_handler; 1508 void (*event_handler)(struct ib_event *, void *); 1509 void *cq_context; 1510 int cqe; 1511 atomic_t usecnt; /* count number of work queues */ 1512 enum ib_poll_context poll_ctx; 1513 struct ib_wc *wc; 1514 union { 1515 struct irq_poll iop; 1516 struct work_struct work; 1517 }; 1518 }; 1519 1520 struct ib_srq { 1521 struct ib_device *device; 1522 struct ib_pd *pd; 1523 struct ib_uobject *uobject; 1524 void (*event_handler)(struct ib_event *, void *); 1525 void *srq_context; 1526 enum ib_srq_type srq_type; 1527 atomic_t usecnt; 1528 1529 union { 1530 struct { 1531 struct ib_xrcd *xrcd; 1532 struct ib_cq *cq; 1533 u32 srq_num; 1534 } xrc; 1535 } ext; 1536 }; 1537 1538 enum ib_raw_packet_caps { 1539 /* Strip cvlan from incoming packet and report it in the matching work 1540 * completion is supported. 1541 */ 1542 IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0), 1543 /* Scatter FCS field of an incoming packet to host memory is supported. 1544 */ 1545 IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1), 1546 /* Checksum offloads are supported (for both send and receive). */ 1547 IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2), 1548 }; 1549 1550 enum ib_wq_type { 1551 IB_WQT_RQ 1552 }; 1553 1554 enum ib_wq_state { 1555 IB_WQS_RESET, 1556 IB_WQS_RDY, 1557 IB_WQS_ERR 1558 }; 1559 1560 struct ib_wq { 1561 struct ib_device *device; 1562 struct ib_uobject *uobject; 1563 void *wq_context; 1564 void (*event_handler)(struct ib_event *, void *); 1565 struct ib_pd *pd; 1566 struct ib_cq *cq; 1567 u32 wq_num; 1568 enum ib_wq_state state; 1569 enum ib_wq_type wq_type; 1570 atomic_t usecnt; 1571 }; 1572 1573 enum ib_wq_flags { 1574 IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, 1575 IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, 1576 }; 1577 1578 struct ib_wq_init_attr { 1579 void *wq_context; 1580 enum ib_wq_type wq_type; 1581 u32 max_wr; 1582 u32 max_sge; 1583 struct ib_cq *cq; 1584 void (*event_handler)(struct ib_event *, void *); 1585 u32 create_flags; /* Use enum ib_wq_flags */ 1586 }; 1587 1588 enum ib_wq_attr_mask { 1589 IB_WQ_STATE = 1 << 0, 1590 IB_WQ_CUR_STATE = 1 << 1, 1591 IB_WQ_FLAGS = 1 << 2, 1592 }; 1593 1594 struct ib_wq_attr { 1595 enum ib_wq_state wq_state; 1596 enum ib_wq_state curr_wq_state; 1597 u32 flags; /* Use enum ib_wq_flags */ 1598 u32 flags_mask; /* Use enum ib_wq_flags */ 1599 }; 1600 1601 struct ib_rwq_ind_table { 1602 struct ib_device *device; 1603 struct ib_uobject *uobject; 1604 atomic_t usecnt; 1605 u32 ind_tbl_num; 1606 u32 log_ind_tbl_size; 1607 struct ib_wq **ind_tbl; 1608 }; 1609 1610 struct ib_rwq_ind_table_init_attr { 1611 u32 log_ind_tbl_size; 1612 /* Each entry is a pointer to Receive Work Queue */ 1613 struct ib_wq **ind_tbl; 1614 }; 1615 1616 /* 1617 * @max_write_sge: Maximum SGE elements per RDMA WRITE request. 1618 * @max_read_sge: Maximum SGE elements per RDMA READ request. 1619 */ 1620 struct ib_qp { 1621 struct ib_device *device; 1622 struct ib_pd *pd; 1623 struct ib_cq *send_cq; 1624 struct ib_cq *recv_cq; 1625 spinlock_t mr_lock; 1626 int mrs_used; 1627 struct list_head rdma_mrs; 1628 struct list_head sig_mrs; 1629 struct ib_srq *srq; 1630 struct ib_xrcd *xrcd; /* XRC TGT QPs only */ 1631 struct list_head xrcd_list; 1632 1633 /* count times opened, mcast attaches, flow attaches */ 1634 atomic_t usecnt; 1635 struct list_head open_list; 1636 struct ib_qp *real_qp; 1637 struct ib_uobject *uobject; 1638 void (*event_handler)(struct ib_event *, void *); 1639 void *qp_context; 1640 u32 qp_num; 1641 u32 max_write_sge; 1642 u32 max_read_sge; 1643 enum ib_qp_type qp_type; 1644 struct ib_rwq_ind_table *rwq_ind_tbl; 1645 }; 1646 1647 struct ib_mr { 1648 struct ib_device *device; 1649 struct ib_pd *pd; 1650 u32 lkey; 1651 u32 rkey; 1652 u64 iova; 1653 u32 length; 1654 unsigned int page_size; 1655 bool need_inval; 1656 union { 1657 struct ib_uobject *uobject; /* user */ 1658 struct list_head qp_entry; /* FR */ 1659 }; 1660 }; 1661 1662 struct ib_mw { 1663 struct ib_device *device; 1664 struct ib_pd *pd; 1665 struct ib_uobject *uobject; 1666 u32 rkey; 1667 enum ib_mw_type type; 1668 }; 1669 1670 struct ib_fmr { 1671 struct ib_device *device; 1672 struct ib_pd *pd; 1673 struct list_head list; 1674 u32 lkey; 1675 u32 rkey; 1676 }; 1677 1678 /* Supported steering options */ 1679 enum ib_flow_attr_type { 1680 /* steering according to rule specifications */ 1681 IB_FLOW_ATTR_NORMAL = 0x0, 1682 /* default unicast and multicast rule - 1683 * receive all Eth traffic which isn't steered to any QP 1684 */ 1685 IB_FLOW_ATTR_ALL_DEFAULT = 0x1, 1686 /* default multicast rule - 1687 * receive all Eth multicast traffic which isn't steered to any QP 1688 */ 1689 IB_FLOW_ATTR_MC_DEFAULT = 0x2, 1690 /* sniffer rule - receive all port traffic */ 1691 IB_FLOW_ATTR_SNIFFER = 0x3 1692 }; 1693 1694 /* Supported steering header types */ 1695 enum ib_flow_spec_type { 1696 /* L2 headers*/ 1697 IB_FLOW_SPEC_ETH = 0x20, 1698 IB_FLOW_SPEC_IB = 0x22, 1699 /* L3 header*/ 1700 IB_FLOW_SPEC_IPV4 = 0x30, 1701 IB_FLOW_SPEC_IPV6 = 0x31, 1702 /* L4 headers*/ 1703 IB_FLOW_SPEC_TCP = 0x40, 1704 IB_FLOW_SPEC_UDP = 0x41, 1705 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50, 1706 IB_FLOW_SPEC_INNER = 0x100, 1707 /* Actions */ 1708 IB_FLOW_SPEC_ACTION_TAG = 0x1000, 1709 IB_FLOW_SPEC_ACTION_DROP = 0x1001, 1710 }; 1711 #define IB_FLOW_SPEC_LAYER_MASK 0xF0 1712 #define IB_FLOW_SPEC_SUPPORT_LAYERS 8 1713 1714 /* Flow steering rule priority is set according to it's domain. 1715 * Lower domain value means higher priority. 1716 */ 1717 enum ib_flow_domain { 1718 IB_FLOW_DOMAIN_USER, 1719 IB_FLOW_DOMAIN_ETHTOOL, 1720 IB_FLOW_DOMAIN_RFS, 1721 IB_FLOW_DOMAIN_NIC, 1722 IB_FLOW_DOMAIN_NUM /* Must be last */ 1723 }; 1724 1725 enum ib_flow_flags { 1726 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ 1727 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2 /* Must be last */ 1728 }; 1729 1730 struct ib_flow_eth_filter { 1731 u8 dst_mac[6]; 1732 u8 src_mac[6]; 1733 __be16 ether_type; 1734 __be16 vlan_tag; 1735 /* Must be last */ 1736 u8 real_sz[0]; 1737 }; 1738 1739 struct ib_flow_spec_eth { 1740 u32 type; 1741 u16 size; 1742 struct ib_flow_eth_filter val; 1743 struct ib_flow_eth_filter mask; 1744 }; 1745 1746 struct ib_flow_ib_filter { 1747 __be16 dlid; 1748 __u8 sl; 1749 /* Must be last */ 1750 u8 real_sz[0]; 1751 }; 1752 1753 struct ib_flow_spec_ib { 1754 u32 type; 1755 u16 size; 1756 struct ib_flow_ib_filter val; 1757 struct ib_flow_ib_filter mask; 1758 }; 1759 1760 /* IPv4 header flags */ 1761 enum ib_ipv4_flags { 1762 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */ 1763 IB_IPV4_MORE_FRAG = 0X4 /* For All fragmented packets except the 1764 last have this flag set */ 1765 }; 1766 1767 struct ib_flow_ipv4_filter { 1768 __be32 src_ip; 1769 __be32 dst_ip; 1770 u8 proto; 1771 u8 tos; 1772 u8 ttl; 1773 u8 flags; 1774 /* Must be last */ 1775 u8 real_sz[0]; 1776 }; 1777 1778 struct ib_flow_spec_ipv4 { 1779 u32 type; 1780 u16 size; 1781 struct ib_flow_ipv4_filter val; 1782 struct ib_flow_ipv4_filter mask; 1783 }; 1784 1785 struct ib_flow_ipv6_filter { 1786 u8 src_ip[16]; 1787 u8 dst_ip[16]; 1788 __be32 flow_label; 1789 u8 next_hdr; 1790 u8 traffic_class; 1791 u8 hop_limit; 1792 /* Must be last */ 1793 u8 real_sz[0]; 1794 }; 1795 1796 struct ib_flow_spec_ipv6 { 1797 u32 type; 1798 u16 size; 1799 struct ib_flow_ipv6_filter val; 1800 struct ib_flow_ipv6_filter mask; 1801 }; 1802 1803 struct ib_flow_tcp_udp_filter { 1804 __be16 dst_port; 1805 __be16 src_port; 1806 /* Must be last */ 1807 u8 real_sz[0]; 1808 }; 1809 1810 struct ib_flow_spec_tcp_udp { 1811 u32 type; 1812 u16 size; 1813 struct ib_flow_tcp_udp_filter val; 1814 struct ib_flow_tcp_udp_filter mask; 1815 }; 1816 1817 struct ib_flow_tunnel_filter { 1818 __be32 tunnel_id; 1819 u8 real_sz[0]; 1820 }; 1821 1822 /* ib_flow_spec_tunnel describes the Vxlan tunnel 1823 * the tunnel_id from val has the vni value 1824 */ 1825 struct ib_flow_spec_tunnel { 1826 u32 type; 1827 u16 size; 1828 struct ib_flow_tunnel_filter val; 1829 struct ib_flow_tunnel_filter mask; 1830 }; 1831 1832 struct ib_flow_spec_action_tag { 1833 enum ib_flow_spec_type type; 1834 u16 size; 1835 u32 tag_id; 1836 }; 1837 1838 struct ib_flow_spec_action_drop { 1839 enum ib_flow_spec_type type; 1840 u16 size; 1841 }; 1842 1843 union ib_flow_spec { 1844 struct { 1845 u32 type; 1846 u16 size; 1847 }; 1848 struct ib_flow_spec_eth eth; 1849 struct ib_flow_spec_ib ib; 1850 struct ib_flow_spec_ipv4 ipv4; 1851 struct ib_flow_spec_tcp_udp tcp_udp; 1852 struct ib_flow_spec_ipv6 ipv6; 1853 struct ib_flow_spec_tunnel tunnel; 1854 struct ib_flow_spec_action_tag flow_tag; 1855 struct ib_flow_spec_action_drop drop; 1856 }; 1857 1858 struct ib_flow_attr { 1859 enum ib_flow_attr_type type; 1860 u16 size; 1861 u16 priority; 1862 u32 flags; 1863 u8 num_of_specs; 1864 u8 port; 1865 /* Following are the optional layers according to user request 1866 * struct ib_flow_spec_xxx 1867 * struct ib_flow_spec_yyy 1868 */ 1869 }; 1870 1871 struct ib_flow { 1872 struct ib_qp *qp; 1873 struct ib_uobject *uobject; 1874 }; 1875 1876 struct ib_mad_hdr; 1877 struct ib_grh; 1878 1879 enum ib_process_mad_flags { 1880 IB_MAD_IGNORE_MKEY = 1, 1881 IB_MAD_IGNORE_BKEY = 2, 1882 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 1883 }; 1884 1885 enum ib_mad_result { 1886 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 1887 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 1888 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 1889 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 1890 }; 1891 1892 #define IB_DEVICE_NAME_MAX 64 1893 1894 struct ib_port_cache { 1895 struct ib_pkey_cache *pkey; 1896 struct ib_gid_table *gid; 1897 u8 lmc; 1898 enum ib_port_state port_state; 1899 }; 1900 1901 struct ib_cache { 1902 rwlock_t lock; 1903 struct ib_event_handler event_handler; 1904 struct ib_port_cache *ports; 1905 }; 1906 1907 struct iw_cm_verbs; 1908 1909 struct ib_port_immutable { 1910 int pkey_tbl_len; 1911 int gid_tbl_len; 1912 u32 core_cap_flags; 1913 u32 max_mad_size; 1914 }; 1915 1916 /* rdma netdev type - specifies protocol type */ 1917 enum rdma_netdev_t { 1918 RDMA_NETDEV_OPA_VNIC, 1919 RDMA_NETDEV_IPOIB, 1920 }; 1921 1922 /** 1923 * struct rdma_netdev - rdma netdev 1924 * For cases where netstack interfacing is required. 1925 */ 1926 struct rdma_netdev { 1927 void *clnt_priv; 1928 struct ib_device *hca; 1929 u8 port_num; 1930 1931 /* control functions */ 1932 void (*set_id)(struct net_device *netdev, int id); 1933 /* send packet */ 1934 int (*send)(struct net_device *dev, struct sk_buff *skb, 1935 struct ib_ah *address, u32 dqpn); 1936 /* multicast */ 1937 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca, 1938 union ib_gid *gid, u16 mlid, 1939 int set_qkey, u32 qkey); 1940 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca, 1941 union ib_gid *gid, u16 mlid); 1942 }; 1943 1944 struct ib_device { 1945 /* Do not access @dma_device directly from ULP nor from HW drivers. */ 1946 struct device *dma_device; 1947 1948 char name[IB_DEVICE_NAME_MAX]; 1949 1950 struct list_head event_handler_list; 1951 spinlock_t event_handler_lock; 1952 1953 spinlock_t client_data_lock; 1954 struct list_head core_list; 1955 /* Access to the client_data_list is protected by the client_data_lock 1956 * spinlock and the lists_rwsem read-write semaphore */ 1957 struct list_head client_data_list; 1958 1959 struct ib_cache cache; 1960 /** 1961 * port_immutable is indexed by port number 1962 */ 1963 struct ib_port_immutable *port_immutable; 1964 1965 int num_comp_vectors; 1966 1967 struct iw_cm_verbs *iwcm; 1968 1969 /** 1970 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the 1971 * driver initialized data. The struct is kfree()'ed by the sysfs 1972 * core when the device is removed. A lifespan of -1 in the return 1973 * struct tells the core to set a default lifespan. 1974 */ 1975 struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device, 1976 u8 port_num); 1977 /** 1978 * get_hw_stats - Fill in the counter value(s) in the stats struct. 1979 * @index - The index in the value array we wish to have updated, or 1980 * num_counters if we want all stats updated 1981 * Return codes - 1982 * < 0 - Error, no counters updated 1983 * index - Updated the single counter pointed to by index 1984 * num_counters - Updated all counters (will reset the timestamp 1985 * and prevent further calls for lifespan milliseconds) 1986 * Drivers are allowed to update all counters in leiu of just the 1987 * one given in index at their option 1988 */ 1989 int (*get_hw_stats)(struct ib_device *device, 1990 struct rdma_hw_stats *stats, 1991 u8 port, int index); 1992 int (*query_device)(struct ib_device *device, 1993 struct ib_device_attr *device_attr, 1994 struct ib_udata *udata); 1995 int (*query_port)(struct ib_device *device, 1996 u8 port_num, 1997 struct ib_port_attr *port_attr); 1998 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 1999 u8 port_num); 2000 /* When calling get_netdev, the HW vendor's driver should return the 2001 * net device of device @device at port @port_num or NULL if such 2002 * a net device doesn't exist. The vendor driver should call dev_hold 2003 * on this net device. The HW vendor's device driver must guarantee 2004 * that this function returns NULL before the net device reaches 2005 * NETDEV_UNREGISTER_FINAL state. 2006 */ 2007 struct net_device *(*get_netdev)(struct ib_device *device, 2008 u8 port_num); 2009 int (*query_gid)(struct ib_device *device, 2010 u8 port_num, int index, 2011 union ib_gid *gid); 2012 /* When calling add_gid, the HW vendor's driver should 2013 * add the gid of device @device at gid index @index of 2014 * port @port_num to be @gid. Meta-info of that gid (for example, 2015 * the network device related to this gid is available 2016 * at @attr. @context allows the HW vendor driver to store extra 2017 * information together with a GID entry. The HW vendor may allocate 2018 * memory to contain this information and store it in @context when a 2019 * new GID entry is written to. Params are consistent until the next 2020 * call of add_gid or delete_gid. The function should return 0 on 2021 * success or error otherwise. The function could be called 2022 * concurrently for different ports. This function is only called 2023 * when roce_gid_table is used. 2024 */ 2025 int (*add_gid)(struct ib_device *device, 2026 u8 port_num, 2027 unsigned int index, 2028 const union ib_gid *gid, 2029 const struct ib_gid_attr *attr, 2030 void **context); 2031 /* When calling del_gid, the HW vendor's driver should delete the 2032 * gid of device @device at gid index @index of port @port_num. 2033 * Upon the deletion of a GID entry, the HW vendor must free any 2034 * allocated memory. The caller will clear @context afterwards. 2035 * This function is only called when roce_gid_table is used. 2036 */ 2037 int (*del_gid)(struct ib_device *device, 2038 u8 port_num, 2039 unsigned int index, 2040 void **context); 2041 int (*query_pkey)(struct ib_device *device, 2042 u8 port_num, u16 index, u16 *pkey); 2043 int (*modify_device)(struct ib_device *device, 2044 int device_modify_mask, 2045 struct ib_device_modify *device_modify); 2046 int (*modify_port)(struct ib_device *device, 2047 u8 port_num, int port_modify_mask, 2048 struct ib_port_modify *port_modify); 2049 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, 2050 struct ib_udata *udata); 2051 int (*dealloc_ucontext)(struct ib_ucontext *context); 2052 int (*mmap)(struct ib_ucontext *context, 2053 struct vm_area_struct *vma); 2054 struct ib_pd * (*alloc_pd)(struct ib_device *device, 2055 struct ib_ucontext *context, 2056 struct ib_udata *udata); 2057 int (*dealloc_pd)(struct ib_pd *pd); 2058 struct ib_ah * (*create_ah)(struct ib_pd *pd, 2059 struct rdma_ah_attr *ah_attr, 2060 struct ib_udata *udata); 2061 int (*modify_ah)(struct ib_ah *ah, 2062 struct rdma_ah_attr *ah_attr); 2063 int (*query_ah)(struct ib_ah *ah, 2064 struct rdma_ah_attr *ah_attr); 2065 int (*destroy_ah)(struct ib_ah *ah); 2066 struct ib_srq * (*create_srq)(struct ib_pd *pd, 2067 struct ib_srq_init_attr *srq_init_attr, 2068 struct ib_udata *udata); 2069 int (*modify_srq)(struct ib_srq *srq, 2070 struct ib_srq_attr *srq_attr, 2071 enum ib_srq_attr_mask srq_attr_mask, 2072 struct ib_udata *udata); 2073 int (*query_srq)(struct ib_srq *srq, 2074 struct ib_srq_attr *srq_attr); 2075 int (*destroy_srq)(struct ib_srq *srq); 2076 int (*post_srq_recv)(struct ib_srq *srq, 2077 struct ib_recv_wr *recv_wr, 2078 struct ib_recv_wr **bad_recv_wr); 2079 struct ib_qp * (*create_qp)(struct ib_pd *pd, 2080 struct ib_qp_init_attr *qp_init_attr, 2081 struct ib_udata *udata); 2082 int (*modify_qp)(struct ib_qp *qp, 2083 struct ib_qp_attr *qp_attr, 2084 int qp_attr_mask, 2085 struct ib_udata *udata); 2086 int (*query_qp)(struct ib_qp *qp, 2087 struct ib_qp_attr *qp_attr, 2088 int qp_attr_mask, 2089 struct ib_qp_init_attr *qp_init_attr); 2090 int (*destroy_qp)(struct ib_qp *qp); 2091 int (*post_send)(struct ib_qp *qp, 2092 struct ib_send_wr *send_wr, 2093 struct ib_send_wr **bad_send_wr); 2094 int (*post_recv)(struct ib_qp *qp, 2095 struct ib_recv_wr *recv_wr, 2096 struct ib_recv_wr **bad_recv_wr); 2097 struct ib_cq * (*create_cq)(struct ib_device *device, 2098 const struct ib_cq_init_attr *attr, 2099 struct ib_ucontext *context, 2100 struct ib_udata *udata); 2101 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 2102 u16 cq_period); 2103 int (*destroy_cq)(struct ib_cq *cq); 2104 int (*resize_cq)(struct ib_cq *cq, int cqe, 2105 struct ib_udata *udata); 2106 int (*poll_cq)(struct ib_cq *cq, int num_entries, 2107 struct ib_wc *wc); 2108 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 2109 int (*req_notify_cq)(struct ib_cq *cq, 2110 enum ib_cq_notify_flags flags); 2111 int (*req_ncomp_notif)(struct ib_cq *cq, 2112 int wc_cnt); 2113 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 2114 int mr_access_flags); 2115 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd, 2116 u64 start, u64 length, 2117 u64 virt_addr, 2118 int mr_access_flags, 2119 struct ib_udata *udata); 2120 int (*rereg_user_mr)(struct ib_mr *mr, 2121 int flags, 2122 u64 start, u64 length, 2123 u64 virt_addr, 2124 int mr_access_flags, 2125 struct ib_pd *pd, 2126 struct ib_udata *udata); 2127 int (*dereg_mr)(struct ib_mr *mr); 2128 struct ib_mr * (*alloc_mr)(struct ib_pd *pd, 2129 enum ib_mr_type mr_type, 2130 u32 max_num_sg); 2131 int (*map_mr_sg)(struct ib_mr *mr, 2132 struct scatterlist *sg, 2133 int sg_nents, 2134 unsigned int *sg_offset); 2135 struct ib_mw * (*alloc_mw)(struct ib_pd *pd, 2136 enum ib_mw_type type, 2137 struct ib_udata *udata); 2138 int (*dealloc_mw)(struct ib_mw *mw); 2139 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, 2140 int mr_access_flags, 2141 struct ib_fmr_attr *fmr_attr); 2142 int (*map_phys_fmr)(struct ib_fmr *fmr, 2143 u64 *page_list, int list_len, 2144 u64 iova); 2145 int (*unmap_fmr)(struct list_head *fmr_list); 2146 int (*dealloc_fmr)(struct ib_fmr *fmr); 2147 int (*attach_mcast)(struct ib_qp *qp, 2148 union ib_gid *gid, 2149 u16 lid); 2150 int (*detach_mcast)(struct ib_qp *qp, 2151 union ib_gid *gid, 2152 u16 lid); 2153 int (*process_mad)(struct ib_device *device, 2154 int process_mad_flags, 2155 u8 port_num, 2156 const struct ib_wc *in_wc, 2157 const struct ib_grh *in_grh, 2158 const struct ib_mad_hdr *in_mad, 2159 size_t in_mad_size, 2160 struct ib_mad_hdr *out_mad, 2161 size_t *out_mad_size, 2162 u16 *out_mad_pkey_index); 2163 struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device, 2164 struct ib_ucontext *ucontext, 2165 struct ib_udata *udata); 2166 int (*dealloc_xrcd)(struct ib_xrcd *xrcd); 2167 struct ib_flow * (*create_flow)(struct ib_qp *qp, 2168 struct ib_flow_attr 2169 *flow_attr, 2170 int domain); 2171 int (*destroy_flow)(struct ib_flow *flow_id); 2172 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2173 struct ib_mr_status *mr_status); 2174 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2175 void (*drain_rq)(struct ib_qp *qp); 2176 void (*drain_sq)(struct ib_qp *qp); 2177 int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port, 2178 int state); 2179 int (*get_vf_config)(struct ib_device *device, int vf, u8 port, 2180 struct ifla_vf_info *ivf); 2181 int (*get_vf_stats)(struct ib_device *device, int vf, u8 port, 2182 struct ifla_vf_stats *stats); 2183 int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid, 2184 int type); 2185 struct ib_wq * (*create_wq)(struct ib_pd *pd, 2186 struct ib_wq_init_attr *init_attr, 2187 struct ib_udata *udata); 2188 int (*destroy_wq)(struct ib_wq *wq); 2189 int (*modify_wq)(struct ib_wq *wq, 2190 struct ib_wq_attr *attr, 2191 u32 wq_attr_mask, 2192 struct ib_udata *udata); 2193 struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *device, 2194 struct ib_rwq_ind_table_init_attr *init_attr, 2195 struct ib_udata *udata); 2196 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2197 /** 2198 * rdma netdev operations 2199 * 2200 * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it 2201 * doesn't support the specified rdma netdev type. 2202 */ 2203 struct net_device *(*alloc_rdma_netdev)( 2204 struct ib_device *device, 2205 u8 port_num, 2206 enum rdma_netdev_t type, 2207 const char *name, 2208 unsigned char name_assign_type, 2209 void (*setup)(struct net_device *)); 2210 void (*free_rdma_netdev)(struct net_device *netdev); 2211 2212 struct module *owner; 2213 struct device dev; 2214 struct kobject *ports_parent; 2215 struct list_head port_list; 2216 2217 enum { 2218 IB_DEV_UNINITIALIZED, 2219 IB_DEV_REGISTERED, 2220 IB_DEV_UNREGISTERED 2221 } reg_state; 2222 2223 int uverbs_abi_ver; 2224 u64 uverbs_cmd_mask; 2225 u64 uverbs_ex_cmd_mask; 2226 2227 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2228 __be64 node_guid; 2229 u32 local_dma_lkey; 2230 u16 is_switch:1; 2231 u8 node_type; 2232 u8 phys_port_cnt; 2233 struct ib_device_attr attrs; 2234 struct attribute_group *hw_stats_ag; 2235 struct rdma_hw_stats *hw_stats; 2236 2237 #ifdef CONFIG_CGROUP_RDMA 2238 struct rdmacg_device cg_device; 2239 #endif 2240 2241 /** 2242 * The following mandatory functions are used only at device 2243 * registration. Keep functions such as these at the end of this 2244 * structure to avoid cache line misses when accessing struct ib_device 2245 * in fast paths. 2246 */ 2247 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 2248 void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); 2249 }; 2250 2251 struct ib_client { 2252 char *name; 2253 void (*add) (struct ib_device *); 2254 void (*remove)(struct ib_device *, void *client_data); 2255 2256 /* Returns the net_dev belonging to this ib_client and matching the 2257 * given parameters. 2258 * @dev: An RDMA device that the net_dev use for communication. 2259 * @port: A physical port number on the RDMA device. 2260 * @pkey: P_Key that the net_dev uses if applicable. 2261 * @gid: A GID that the net_dev uses to communicate. 2262 * @addr: An IP address the net_dev is configured with. 2263 * @client_data: The device's client data set by ib_set_client_data(). 2264 * 2265 * An ib_client that implements a net_dev on top of RDMA devices 2266 * (such as IP over IB) should implement this callback, allowing the 2267 * rdma_cm module to find the right net_dev for a given request. 2268 * 2269 * The caller is responsible for calling dev_put on the returned 2270 * netdev. */ 2271 struct net_device *(*get_net_dev_by_params)( 2272 struct ib_device *dev, 2273 u8 port, 2274 u16 pkey, 2275 const union ib_gid *gid, 2276 const struct sockaddr *addr, 2277 void *client_data); 2278 struct list_head list; 2279 }; 2280 2281 struct ib_device *ib_alloc_device(size_t size); 2282 void ib_dealloc_device(struct ib_device *device); 2283 2284 void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); 2285 2286 int ib_register_device(struct ib_device *device, 2287 int (*port_callback)(struct ib_device *, 2288 u8, struct kobject *)); 2289 void ib_unregister_device(struct ib_device *device); 2290 2291 int ib_register_client (struct ib_client *client); 2292 void ib_unregister_client(struct ib_client *client); 2293 2294 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 2295 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2296 void *data); 2297 2298 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2299 { 2300 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2301 } 2302 2303 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2304 { 2305 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2306 } 2307 2308 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2309 size_t offset, 2310 size_t len) 2311 { 2312 const void __user *p = udata->inbuf + offset; 2313 bool ret; 2314 u8 *buf; 2315 2316 if (len > USHRT_MAX) 2317 return false; 2318 2319 buf = memdup_user(p, len); 2320 if (IS_ERR(buf)) 2321 return false; 2322 2323 ret = !memchr_inv(buf, 0, len); 2324 kfree(buf); 2325 return ret; 2326 } 2327 2328 /** 2329 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2330 * contains all required attributes and no attributes not allowed for 2331 * the given QP state transition. 2332 * @cur_state: Current QP state 2333 * @next_state: Next QP state 2334 * @type: QP type 2335 * @mask: Mask of supplied QP attributes 2336 * @ll : link layer of port 2337 * 2338 * This function is a helper function that a low-level driver's 2339 * modify_qp method can use to validate the consumer's input. It 2340 * checks that cur_state and next_state are valid QP states, that a 2341 * transition from cur_state to next_state is allowed by the IB spec, 2342 * and that the attribute mask supplied is allowed for the transition. 2343 */ 2344 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2345 enum ib_qp_type type, enum ib_qp_attr_mask mask, 2346 enum rdma_link_layer ll); 2347 2348 int ib_register_event_handler (struct ib_event_handler *event_handler); 2349 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 2350 void ib_dispatch_event(struct ib_event *event); 2351 2352 int ib_query_port(struct ib_device *device, 2353 u8 port_num, struct ib_port_attr *port_attr); 2354 2355 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2356 u8 port_num); 2357 2358 /** 2359 * rdma_cap_ib_switch - Check if the device is IB switch 2360 * @device: Device to check 2361 * 2362 * Device driver is responsible for setting is_switch bit on 2363 * in ib_device structure at init time. 2364 * 2365 * Return: true if the device is IB switch. 2366 */ 2367 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2368 { 2369 return device->is_switch; 2370 } 2371 2372 /** 2373 * rdma_start_port - Return the first valid port number for the device 2374 * specified 2375 * 2376 * @device: Device to be checked 2377 * 2378 * Return start port number 2379 */ 2380 static inline u8 rdma_start_port(const struct ib_device *device) 2381 { 2382 return rdma_cap_ib_switch(device) ? 0 : 1; 2383 } 2384 2385 /** 2386 * rdma_end_port - Return the last valid port number for the device 2387 * specified 2388 * 2389 * @device: Device to be checked 2390 * 2391 * Return last port number 2392 */ 2393 static inline u8 rdma_end_port(const struct ib_device *device) 2394 { 2395 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; 2396 } 2397 2398 static inline int rdma_is_port_valid(const struct ib_device *device, 2399 unsigned int port) 2400 { 2401 return (port >= rdma_start_port(device) && 2402 port <= rdma_end_port(device)); 2403 } 2404 2405 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 2406 { 2407 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; 2408 } 2409 2410 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2411 { 2412 return device->port_immutable[port_num].core_cap_flags & 2413 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 2414 } 2415 2416 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 2417 { 2418 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 2419 } 2420 2421 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 2422 { 2423 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 2424 } 2425 2426 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 2427 { 2428 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; 2429 } 2430 2431 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2432 { 2433 return rdma_protocol_ib(device, port_num) || 2434 rdma_protocol_roce(device, port_num); 2435 } 2436 2437 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num) 2438 { 2439 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_RAW_PACKET; 2440 } 2441 2442 static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num) 2443 { 2444 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_USNIC; 2445 } 2446 2447 /** 2448 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 2449 * Management Datagrams. 2450 * @device: Device to check 2451 * @port_num: Port number to check 2452 * 2453 * Management Datagrams (MAD) are a required part of the InfiniBand 2454 * specification and are supported on all InfiniBand devices. A slightly 2455 * extended version are also supported on OPA interfaces. 2456 * 2457 * Return: true if the port supports sending/receiving of MAD packets. 2458 */ 2459 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 2460 { 2461 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; 2462 } 2463 2464 /** 2465 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 2466 * Management Datagrams. 2467 * @device: Device to check 2468 * @port_num: Port number to check 2469 * 2470 * Intel OmniPath devices extend and/or replace the InfiniBand Management 2471 * datagrams with their own versions. These OPA MADs share many but not all of 2472 * the characteristics of InfiniBand MADs. 2473 * 2474 * OPA MADs differ in the following ways: 2475 * 2476 * 1) MADs are variable size up to 2K 2477 * IBTA defined MADs remain fixed at 256 bytes 2478 * 2) OPA SMPs must carry valid PKeys 2479 * 3) OPA SMP packets are a different format 2480 * 2481 * Return: true if the port supports OPA MAD packet formats. 2482 */ 2483 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 2484 { 2485 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) 2486 == RDMA_CORE_CAP_OPA_MAD; 2487 } 2488 2489 /** 2490 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 2491 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 2492 * @device: Device to check 2493 * @port_num: Port number to check 2494 * 2495 * Each InfiniBand node is required to provide a Subnet Management Agent 2496 * that the subnet manager can access. Prior to the fabric being fully 2497 * configured by the subnet manager, the SMA is accessed via a well known 2498 * interface called the Subnet Management Interface (SMI). This interface 2499 * uses directed route packets to communicate with the SM to get around the 2500 * chicken and egg problem of the SM needing to know what's on the fabric 2501 * in order to configure the fabric, and needing to configure the fabric in 2502 * order to send packets to the devices on the fabric. These directed 2503 * route packets do not need the fabric fully configured in order to reach 2504 * their destination. The SMI is the only method allowed to send 2505 * directed route packets on an InfiniBand fabric. 2506 * 2507 * Return: true if the port provides an SMI. 2508 */ 2509 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 2510 { 2511 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; 2512 } 2513 2514 /** 2515 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 2516 * Communication Manager. 2517 * @device: Device to check 2518 * @port_num: Port number to check 2519 * 2520 * The InfiniBand Communication Manager is one of many pre-defined General 2521 * Service Agents (GSA) that are accessed via the General Service 2522 * Interface (GSI). It's role is to facilitate establishment of connections 2523 * between nodes as well as other management related tasks for established 2524 * connections. 2525 * 2526 * Return: true if the port supports an IB CM (this does not guarantee that 2527 * a CM is actually running however). 2528 */ 2529 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 2530 { 2531 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; 2532 } 2533 2534 /** 2535 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 2536 * Communication Manager. 2537 * @device: Device to check 2538 * @port_num: Port number to check 2539 * 2540 * Similar to above, but specific to iWARP connections which have a different 2541 * managment protocol than InfiniBand. 2542 * 2543 * Return: true if the port supports an iWARP CM (this does not guarantee that 2544 * a CM is actually running however). 2545 */ 2546 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 2547 { 2548 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; 2549 } 2550 2551 /** 2552 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 2553 * Subnet Administration. 2554 * @device: Device to check 2555 * @port_num: Port number to check 2556 * 2557 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 2558 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 2559 * fabrics, devices should resolve routes to other hosts by contacting the 2560 * SA to query the proper route. 2561 * 2562 * Return: true if the port should act as a client to the fabric Subnet 2563 * Administration interface. This does not imply that the SA service is 2564 * running locally. 2565 */ 2566 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 2567 { 2568 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; 2569 } 2570 2571 /** 2572 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 2573 * Multicast. 2574 * @device: Device to check 2575 * @port_num: Port number to check 2576 * 2577 * InfiniBand multicast registration is more complex than normal IPv4 or 2578 * IPv6 multicast registration. Each Host Channel Adapter must register 2579 * with the Subnet Manager when it wishes to join a multicast group. It 2580 * should do so only once regardless of how many queue pairs it subscribes 2581 * to this group. And it should leave the group only after all queue pairs 2582 * attached to the group have been detached. 2583 * 2584 * Return: true if the port must undertake the additional adminstrative 2585 * overhead of registering/unregistering with the SM and tracking of the 2586 * total number of queue pairs attached to the multicast group. 2587 */ 2588 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 2589 { 2590 return rdma_cap_ib_sa(device, port_num); 2591 } 2592 2593 /** 2594 * rdma_cap_af_ib - Check if the port of device has the capability 2595 * Native Infiniband Address. 2596 * @device: Device to check 2597 * @port_num: Port number to check 2598 * 2599 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 2600 * GID. RoCE uses a different mechanism, but still generates a GID via 2601 * a prescribed mechanism and port specific data. 2602 * 2603 * Return: true if the port uses a GID address to identify devices on the 2604 * network. 2605 */ 2606 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 2607 { 2608 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; 2609 } 2610 2611 /** 2612 * rdma_cap_eth_ah - Check if the port of device has the capability 2613 * Ethernet Address Handle. 2614 * @device: Device to check 2615 * @port_num: Port number to check 2616 * 2617 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 2618 * to fabricate GIDs over Ethernet/IP specific addresses native to the 2619 * port. Normally, packet headers are generated by the sending host 2620 * adapter, but when sending connectionless datagrams, we must manually 2621 * inject the proper headers for the fabric we are communicating over. 2622 * 2623 * Return: true if we are running as a RoCE port and must force the 2624 * addition of a Global Route Header built from our Ethernet Address 2625 * Handle into our header list for connectionless packets. 2626 */ 2627 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2628 { 2629 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2630 } 2631 2632 /** 2633 * rdma_cap_opa_ah - Check if the port of device supports 2634 * OPA Address handles 2635 * @device: Device to check 2636 * @port_num: Port number to check 2637 * 2638 * Return: true if we are running on an OPA device which supports 2639 * the extended OPA addressing. 2640 */ 2641 static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num) 2642 { 2643 return (device->port_immutable[port_num].core_cap_flags & 2644 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; 2645 } 2646 2647 /** 2648 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2649 * 2650 * @device: Device 2651 * @port_num: Port number 2652 * 2653 * This MAD size includes the MAD headers and MAD payload. No other headers 2654 * are included. 2655 * 2656 * Return the max MAD size required by the Port. Will return 0 if the port 2657 * does not support MADs 2658 */ 2659 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 2660 { 2661 return device->port_immutable[port_num].max_mad_size; 2662 } 2663 2664 /** 2665 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 2666 * @device: Device to check 2667 * @port_num: Port number to check 2668 * 2669 * RoCE GID table mechanism manages the various GIDs for a device. 2670 * 2671 * NOTE: if allocating the port's GID table has failed, this call will still 2672 * return true, but any RoCE GID table API will fail. 2673 * 2674 * Return: true if the port uses RoCE GID table mechanism in order to manage 2675 * its GIDs. 2676 */ 2677 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 2678 u8 port_num) 2679 { 2680 return rdma_protocol_roce(device, port_num) && 2681 device->add_gid && device->del_gid; 2682 } 2683 2684 /* 2685 * Check if the device supports READ W/ INVALIDATE. 2686 */ 2687 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 2688 { 2689 /* 2690 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 2691 * has support for it yet. 2692 */ 2693 return rdma_protocol_iwarp(dev, port_num); 2694 } 2695 2696 int ib_query_gid(struct ib_device *device, 2697 u8 port_num, int index, union ib_gid *gid, 2698 struct ib_gid_attr *attr); 2699 2700 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2701 int state); 2702 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2703 struct ifla_vf_info *info); 2704 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2705 struct ifla_vf_stats *stats); 2706 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2707 int type); 2708 2709 int ib_query_pkey(struct ib_device *device, 2710 u8 port_num, u16 index, u16 *pkey); 2711 2712 int ib_modify_device(struct ib_device *device, 2713 int device_modify_mask, 2714 struct ib_device_modify *device_modify); 2715 2716 int ib_modify_port(struct ib_device *device, 2717 u8 port_num, int port_modify_mask, 2718 struct ib_port_modify *port_modify); 2719 2720 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2721 enum ib_gid_type gid_type, struct net_device *ndev, 2722 u8 *port_num, u16 *index); 2723 2724 int ib_find_pkey(struct ib_device *device, 2725 u8 port_num, u16 pkey, u16 *index); 2726 2727 enum ib_pd_flags { 2728 /* 2729 * Create a memory registration for all memory in the system and place 2730 * the rkey for it into pd->unsafe_global_rkey. This can be used by 2731 * ULPs to avoid the overhead of dynamic MRs. 2732 * 2733 * This flag is generally considered unsafe and must only be used in 2734 * extremly trusted environments. Every use of it will log a warning 2735 * in the kernel log. 2736 */ 2737 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 2738 }; 2739 2740 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 2741 const char *caller); 2742 #define ib_alloc_pd(device, flags) \ 2743 __ib_alloc_pd((device), (flags), __func__) 2744 void ib_dealloc_pd(struct ib_pd *pd); 2745 2746 /** 2747 * rdma_create_ah - Creates an address handle for the given address vector. 2748 * @pd: The protection domain associated with the address handle. 2749 * @ah_attr: The attributes of the address vector. 2750 * 2751 * The address handle is used to reference a local or global destination 2752 * in all UD QP post sends. 2753 */ 2754 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr); 2755 2756 /** 2757 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header 2758 * work completion. 2759 * @hdr: the L3 header to parse 2760 * @net_type: type of header to parse 2761 * @sgid: place to store source gid 2762 * @dgid: place to store destination gid 2763 */ 2764 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 2765 enum rdma_network_type net_type, 2766 union ib_gid *sgid, union ib_gid *dgid); 2767 2768 /** 2769 * ib_get_rdma_header_version - Get the header version 2770 * @hdr: the L3 header to parse 2771 */ 2772 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); 2773 2774 /** 2775 * ib_init_ah_from_wc - Initializes address handle attributes from a 2776 * work completion. 2777 * @device: Device on which the received message arrived. 2778 * @port_num: Port on which the received message arrived. 2779 * @wc: Work completion associated with the received message. 2780 * @grh: References the received global route header. This parameter is 2781 * ignored unless the work completion indicates that the GRH is valid. 2782 * @ah_attr: Returned attributes that can be used when creating an address 2783 * handle for replying to the message. 2784 */ 2785 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 2786 const struct ib_wc *wc, const struct ib_grh *grh, 2787 struct rdma_ah_attr *ah_attr); 2788 2789 /** 2790 * ib_create_ah_from_wc - Creates an address handle associated with the 2791 * sender of the specified work completion. 2792 * @pd: The protection domain associated with the address handle. 2793 * @wc: Work completion information associated with a received message. 2794 * @grh: References the received global route header. This parameter is 2795 * ignored unless the work completion indicates that the GRH is valid. 2796 * @port_num: The outbound port number to associate with the address. 2797 * 2798 * The address handle is used to reference a local or global destination 2799 * in all UD QP post sends. 2800 */ 2801 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 2802 const struct ib_grh *grh, u8 port_num); 2803 2804 /** 2805 * rdma_modify_ah - Modifies the address vector associated with an address 2806 * handle. 2807 * @ah: The address handle to modify. 2808 * @ah_attr: The new address vector attributes to associate with the 2809 * address handle. 2810 */ 2811 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2812 2813 /** 2814 * rdma_query_ah - Queries the address vector associated with an address 2815 * handle. 2816 * @ah: The address handle to query. 2817 * @ah_attr: The address vector attributes associated with the address 2818 * handle. 2819 */ 2820 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2821 2822 /** 2823 * rdma_destroy_ah - Destroys an address handle. 2824 * @ah: The address handle to destroy. 2825 */ 2826 int rdma_destroy_ah(struct ib_ah *ah); 2827 2828 /** 2829 * ib_create_srq - Creates a SRQ associated with the specified protection 2830 * domain. 2831 * @pd: The protection domain associated with the SRQ. 2832 * @srq_init_attr: A list of initial attributes required to create the 2833 * SRQ. If SRQ creation succeeds, then the attributes are updated to 2834 * the actual capabilities of the created SRQ. 2835 * 2836 * srq_attr->max_wr and srq_attr->max_sge are read the determine the 2837 * requested size of the SRQ, and set to the actual values allocated 2838 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 2839 * will always be at least as large as the requested values. 2840 */ 2841 struct ib_srq *ib_create_srq(struct ib_pd *pd, 2842 struct ib_srq_init_attr *srq_init_attr); 2843 2844 /** 2845 * ib_modify_srq - Modifies the attributes for the specified SRQ. 2846 * @srq: The SRQ to modify. 2847 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 2848 * the current values of selected SRQ attributes are returned. 2849 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 2850 * are being modified. 2851 * 2852 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 2853 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 2854 * the number of receives queued drops below the limit. 2855 */ 2856 int ib_modify_srq(struct ib_srq *srq, 2857 struct ib_srq_attr *srq_attr, 2858 enum ib_srq_attr_mask srq_attr_mask); 2859 2860 /** 2861 * ib_query_srq - Returns the attribute list and current values for the 2862 * specified SRQ. 2863 * @srq: The SRQ to query. 2864 * @srq_attr: The attributes of the specified SRQ. 2865 */ 2866 int ib_query_srq(struct ib_srq *srq, 2867 struct ib_srq_attr *srq_attr); 2868 2869 /** 2870 * ib_destroy_srq - Destroys the specified SRQ. 2871 * @srq: The SRQ to destroy. 2872 */ 2873 int ib_destroy_srq(struct ib_srq *srq); 2874 2875 /** 2876 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 2877 * @srq: The SRQ to post the work request on. 2878 * @recv_wr: A list of work requests to post on the receive queue. 2879 * @bad_recv_wr: On an immediate failure, this parameter will reference 2880 * the work request that failed to be posted on the QP. 2881 */ 2882 static inline int ib_post_srq_recv(struct ib_srq *srq, 2883 struct ib_recv_wr *recv_wr, 2884 struct ib_recv_wr **bad_recv_wr) 2885 { 2886 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 2887 } 2888 2889 /** 2890 * ib_create_qp - Creates a QP associated with the specified protection 2891 * domain. 2892 * @pd: The protection domain associated with the QP. 2893 * @qp_init_attr: A list of initial attributes required to create the 2894 * QP. If QP creation succeeds, then the attributes are updated to 2895 * the actual capabilities of the created QP. 2896 */ 2897 struct ib_qp *ib_create_qp(struct ib_pd *pd, 2898 struct ib_qp_init_attr *qp_init_attr); 2899 2900 /** 2901 * ib_modify_qp - Modifies the attributes for the specified QP and then 2902 * transitions the QP to the given state. 2903 * @qp: The QP to modify. 2904 * @qp_attr: On input, specifies the QP attributes to modify. On output, 2905 * the current values of selected QP attributes are returned. 2906 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 2907 * are being modified. 2908 */ 2909 int ib_modify_qp(struct ib_qp *qp, 2910 struct ib_qp_attr *qp_attr, 2911 int qp_attr_mask); 2912 2913 /** 2914 * ib_query_qp - Returns the attribute list and current values for the 2915 * specified QP. 2916 * @qp: The QP to query. 2917 * @qp_attr: The attributes of the specified QP. 2918 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 2919 * @qp_init_attr: Additional attributes of the selected QP. 2920 * 2921 * The qp_attr_mask may be used to limit the query to gathering only the 2922 * selected attributes. 2923 */ 2924 int ib_query_qp(struct ib_qp *qp, 2925 struct ib_qp_attr *qp_attr, 2926 int qp_attr_mask, 2927 struct ib_qp_init_attr *qp_init_attr); 2928 2929 /** 2930 * ib_destroy_qp - Destroys the specified QP. 2931 * @qp: The QP to destroy. 2932 */ 2933 int ib_destroy_qp(struct ib_qp *qp); 2934 2935 /** 2936 * ib_open_qp - Obtain a reference to an existing sharable QP. 2937 * @xrcd - XRC domain 2938 * @qp_open_attr: Attributes identifying the QP to open. 2939 * 2940 * Returns a reference to a sharable QP. 2941 */ 2942 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 2943 struct ib_qp_open_attr *qp_open_attr); 2944 2945 /** 2946 * ib_close_qp - Release an external reference to a QP. 2947 * @qp: The QP handle to release 2948 * 2949 * The opened QP handle is released by the caller. The underlying 2950 * shared QP is not destroyed until all internal references are released. 2951 */ 2952 int ib_close_qp(struct ib_qp *qp); 2953 2954 /** 2955 * ib_post_send - Posts a list of work requests to the send queue of 2956 * the specified QP. 2957 * @qp: The QP to post the work request on. 2958 * @send_wr: A list of work requests to post on the send queue. 2959 * @bad_send_wr: On an immediate failure, this parameter will reference 2960 * the work request that failed to be posted on the QP. 2961 * 2962 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 2963 * error is returned, the QP state shall not be affected, 2964 * ib_post_send() will return an immediate error after queueing any 2965 * earlier work requests in the list. 2966 */ 2967 static inline int ib_post_send(struct ib_qp *qp, 2968 struct ib_send_wr *send_wr, 2969 struct ib_send_wr **bad_send_wr) 2970 { 2971 return qp->device->post_send(qp, send_wr, bad_send_wr); 2972 } 2973 2974 /** 2975 * ib_post_recv - Posts a list of work requests to the receive queue of 2976 * the specified QP. 2977 * @qp: The QP to post the work request on. 2978 * @recv_wr: A list of work requests to post on the receive queue. 2979 * @bad_recv_wr: On an immediate failure, this parameter will reference 2980 * the work request that failed to be posted on the QP. 2981 */ 2982 static inline int ib_post_recv(struct ib_qp *qp, 2983 struct ib_recv_wr *recv_wr, 2984 struct ib_recv_wr **bad_recv_wr) 2985 { 2986 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 2987 } 2988 2989 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 2990 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); 2991 void ib_free_cq(struct ib_cq *cq); 2992 int ib_process_cq_direct(struct ib_cq *cq, int budget); 2993 2994 /** 2995 * ib_create_cq - Creates a CQ on the specified device. 2996 * @device: The device on which to create the CQ. 2997 * @comp_handler: A user-specified callback that is invoked when a 2998 * completion event occurs on the CQ. 2999 * @event_handler: A user-specified callback that is invoked when an 3000 * asynchronous event not associated with a completion occurs on the CQ. 3001 * @cq_context: Context associated with the CQ returned to the user via 3002 * the associated completion and event handlers. 3003 * @cq_attr: The attributes the CQ should be created upon. 3004 * 3005 * Users can examine the cq structure to determine the actual CQ size. 3006 */ 3007 struct ib_cq *ib_create_cq(struct ib_device *device, 3008 ib_comp_handler comp_handler, 3009 void (*event_handler)(struct ib_event *, void *), 3010 void *cq_context, 3011 const struct ib_cq_init_attr *cq_attr); 3012 3013 /** 3014 * ib_resize_cq - Modifies the capacity of the CQ. 3015 * @cq: The CQ to resize. 3016 * @cqe: The minimum size of the CQ. 3017 * 3018 * Users can examine the cq structure to determine the actual CQ size. 3019 */ 3020 int ib_resize_cq(struct ib_cq *cq, int cqe); 3021 3022 /** 3023 * ib_modify_cq - Modifies moderation params of the CQ 3024 * @cq: The CQ to modify. 3025 * @cq_count: number of CQEs that will trigger an event 3026 * @cq_period: max period of time in usec before triggering an event 3027 * 3028 */ 3029 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 3030 3031 /** 3032 * ib_destroy_cq - Destroys the specified CQ. 3033 * @cq: The CQ to destroy. 3034 */ 3035 int ib_destroy_cq(struct ib_cq *cq); 3036 3037 /** 3038 * ib_poll_cq - poll a CQ for completion(s) 3039 * @cq:the CQ being polled 3040 * @num_entries:maximum number of completions to return 3041 * @wc:array of at least @num_entries &struct ib_wc where completions 3042 * will be returned 3043 * 3044 * Poll a CQ for (possibly multiple) completions. If the return value 3045 * is < 0, an error occurred. If the return value is >= 0, it is the 3046 * number of completions returned. If the return value is 3047 * non-negative and < num_entries, then the CQ was emptied. 3048 */ 3049 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 3050 struct ib_wc *wc) 3051 { 3052 return cq->device->poll_cq(cq, num_entries, wc); 3053 } 3054 3055 /** 3056 * ib_peek_cq - Returns the number of unreaped completions currently 3057 * on the specified CQ. 3058 * @cq: The CQ to peek. 3059 * @wc_cnt: A minimum number of unreaped completions to check for. 3060 * 3061 * If the number of unreaped completions is greater than or equal to wc_cnt, 3062 * this function returns wc_cnt, otherwise, it returns the actual number of 3063 * unreaped completions. 3064 */ 3065 int ib_peek_cq(struct ib_cq *cq, int wc_cnt); 3066 3067 /** 3068 * ib_req_notify_cq - Request completion notification on a CQ. 3069 * @cq: The CQ to generate an event for. 3070 * @flags: 3071 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 3072 * to request an event on the next solicited event or next work 3073 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 3074 * may also be |ed in to request a hint about missed events, as 3075 * described below. 3076 * 3077 * Return Value: 3078 * < 0 means an error occurred while requesting notification 3079 * == 0 means notification was requested successfully, and if 3080 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 3081 * were missed and it is safe to wait for another event. In 3082 * this case is it guaranteed that any work completions added 3083 * to the CQ since the last CQ poll will trigger a completion 3084 * notification event. 3085 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 3086 * in. It means that the consumer must poll the CQ again to 3087 * make sure it is empty to avoid missing an event because of a 3088 * race between requesting notification and an entry being 3089 * added to the CQ. This return value means it is possible 3090 * (but not guaranteed) that a work completion has been added 3091 * to the CQ since the last poll without triggering a 3092 * completion notification event. 3093 */ 3094 static inline int ib_req_notify_cq(struct ib_cq *cq, 3095 enum ib_cq_notify_flags flags) 3096 { 3097 return cq->device->req_notify_cq(cq, flags); 3098 } 3099 3100 /** 3101 * ib_req_ncomp_notif - Request completion notification when there are 3102 * at least the specified number of unreaped completions on the CQ. 3103 * @cq: The CQ to generate an event for. 3104 * @wc_cnt: The number of unreaped completions that should be on the 3105 * CQ before an event is generated. 3106 */ 3107 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 3108 { 3109 return cq->device->req_ncomp_notif ? 3110 cq->device->req_ncomp_notif(cq, wc_cnt) : 3111 -ENOSYS; 3112 } 3113 3114 /** 3115 * ib_dma_mapping_error - check a DMA addr for error 3116 * @dev: The device for which the dma_addr was created 3117 * @dma_addr: The DMA address to check 3118 */ 3119 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 3120 { 3121 return dma_mapping_error(dev->dma_device, dma_addr); 3122 } 3123 3124 /** 3125 * ib_dma_map_single - Map a kernel virtual address to DMA address 3126 * @dev: The device for which the dma_addr is to be created 3127 * @cpu_addr: The kernel virtual address 3128 * @size: The size of the region in bytes 3129 * @direction: The direction of the DMA 3130 */ 3131 static inline u64 ib_dma_map_single(struct ib_device *dev, 3132 void *cpu_addr, size_t size, 3133 enum dma_data_direction direction) 3134 { 3135 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3136 } 3137 3138 /** 3139 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 3140 * @dev: The device for which the DMA address was created 3141 * @addr: The DMA address 3142 * @size: The size of the region in bytes 3143 * @direction: The direction of the DMA 3144 */ 3145 static inline void ib_dma_unmap_single(struct ib_device *dev, 3146 u64 addr, size_t size, 3147 enum dma_data_direction direction) 3148 { 3149 dma_unmap_single(dev->dma_device, addr, size, direction); 3150 } 3151 3152 /** 3153 * ib_dma_map_page - Map a physical page to DMA address 3154 * @dev: The device for which the dma_addr is to be created 3155 * @page: The page to be mapped 3156 * @offset: The offset within the page 3157 * @size: The size of the region in bytes 3158 * @direction: The direction of the DMA 3159 */ 3160 static inline u64 ib_dma_map_page(struct ib_device *dev, 3161 struct page *page, 3162 unsigned long offset, 3163 size_t size, 3164 enum dma_data_direction direction) 3165 { 3166 return dma_map_page(dev->dma_device, page, offset, size, direction); 3167 } 3168 3169 /** 3170 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 3171 * @dev: The device for which the DMA address was created 3172 * @addr: The DMA address 3173 * @size: The size of the region in bytes 3174 * @direction: The direction of the DMA 3175 */ 3176 static inline void ib_dma_unmap_page(struct ib_device *dev, 3177 u64 addr, size_t size, 3178 enum dma_data_direction direction) 3179 { 3180 dma_unmap_page(dev->dma_device, addr, size, direction); 3181 } 3182 3183 /** 3184 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 3185 * @dev: The device for which the DMA addresses are to be created 3186 * @sg: The array of scatter/gather entries 3187 * @nents: The number of scatter/gather entries 3188 * @direction: The direction of the DMA 3189 */ 3190 static inline int ib_dma_map_sg(struct ib_device *dev, 3191 struct scatterlist *sg, int nents, 3192 enum dma_data_direction direction) 3193 { 3194 return dma_map_sg(dev->dma_device, sg, nents, direction); 3195 } 3196 3197 /** 3198 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 3199 * @dev: The device for which the DMA addresses were created 3200 * @sg: The array of scatter/gather entries 3201 * @nents: The number of scatter/gather entries 3202 * @direction: The direction of the DMA 3203 */ 3204 static inline void ib_dma_unmap_sg(struct ib_device *dev, 3205 struct scatterlist *sg, int nents, 3206 enum dma_data_direction direction) 3207 { 3208 dma_unmap_sg(dev->dma_device, sg, nents, direction); 3209 } 3210 3211 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3212 struct scatterlist *sg, int nents, 3213 enum dma_data_direction direction, 3214 unsigned long dma_attrs) 3215 { 3216 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3217 dma_attrs); 3218 } 3219 3220 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3221 struct scatterlist *sg, int nents, 3222 enum dma_data_direction direction, 3223 unsigned long dma_attrs) 3224 { 3225 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); 3226 } 3227 /** 3228 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3229 * @dev: The device for which the DMA addresses were created 3230 * @sg: The scatter/gather entry 3231 * 3232 * Note: this function is obsolete. To do: change all occurrences of 3233 * ib_sg_dma_address() into sg_dma_address(). 3234 */ 3235 static inline u64 ib_sg_dma_address(struct ib_device *dev, 3236 struct scatterlist *sg) 3237 { 3238 return sg_dma_address(sg); 3239 } 3240 3241 /** 3242 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 3243 * @dev: The device for which the DMA addresses were created 3244 * @sg: The scatter/gather entry 3245 * 3246 * Note: this function is obsolete. To do: change all occurrences of 3247 * ib_sg_dma_len() into sg_dma_len(). 3248 */ 3249 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 3250 struct scatterlist *sg) 3251 { 3252 return sg_dma_len(sg); 3253 } 3254 3255 /** 3256 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 3257 * @dev: The device for which the DMA address was created 3258 * @addr: The DMA address 3259 * @size: The size of the region in bytes 3260 * @dir: The direction of the DMA 3261 */ 3262 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 3263 u64 addr, 3264 size_t size, 3265 enum dma_data_direction dir) 3266 { 3267 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3268 } 3269 3270 /** 3271 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 3272 * @dev: The device for which the DMA address was created 3273 * @addr: The DMA address 3274 * @size: The size of the region in bytes 3275 * @dir: The direction of the DMA 3276 */ 3277 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 3278 u64 addr, 3279 size_t size, 3280 enum dma_data_direction dir) 3281 { 3282 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3283 } 3284 3285 /** 3286 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 3287 * @dev: The device for which the DMA address is requested 3288 * @size: The size of the region to allocate in bytes 3289 * @dma_handle: A pointer for returning the DMA address of the region 3290 * @flag: memory allocator flags 3291 */ 3292 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3293 size_t size, 3294 dma_addr_t *dma_handle, 3295 gfp_t flag) 3296 { 3297 return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); 3298 } 3299 3300 /** 3301 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 3302 * @dev: The device for which the DMA addresses were allocated 3303 * @size: The size of the region 3304 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 3305 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 3306 */ 3307 static inline void ib_dma_free_coherent(struct ib_device *dev, 3308 size_t size, void *cpu_addr, 3309 dma_addr_t dma_handle) 3310 { 3311 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 3312 } 3313 3314 /** 3315 * ib_dereg_mr - Deregisters a memory region and removes it from the 3316 * HCA translation table. 3317 * @mr: The memory region to deregister. 3318 * 3319 * This function can fail, if the memory region has memory windows bound to it. 3320 */ 3321 int ib_dereg_mr(struct ib_mr *mr); 3322 3323 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 3324 enum ib_mr_type mr_type, 3325 u32 max_num_sg); 3326 3327 /** 3328 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 3329 * R_Key and L_Key. 3330 * @mr - struct ib_mr pointer to be updated. 3331 * @newkey - new key to be used. 3332 */ 3333 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 3334 { 3335 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 3336 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 3337 } 3338 3339 /** 3340 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 3341 * for calculating a new rkey for type 2 memory windows. 3342 * @rkey - the rkey to increment. 3343 */ 3344 static inline u32 ib_inc_rkey(u32 rkey) 3345 { 3346 const u32 mask = 0x000000ff; 3347 return ((rkey + 1) & mask) | (rkey & ~mask); 3348 } 3349 3350 /** 3351 * ib_alloc_fmr - Allocates a unmapped fast memory region. 3352 * @pd: The protection domain associated with the unmapped region. 3353 * @mr_access_flags: Specifies the memory access rights. 3354 * @fmr_attr: Attributes of the unmapped region. 3355 * 3356 * A fast memory region must be mapped before it can be used as part of 3357 * a work request. 3358 */ 3359 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 3360 int mr_access_flags, 3361 struct ib_fmr_attr *fmr_attr); 3362 3363 /** 3364 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 3365 * @fmr: The fast memory region to associate with the pages. 3366 * @page_list: An array of physical pages to map to the fast memory region. 3367 * @list_len: The number of pages in page_list. 3368 * @iova: The I/O virtual address to use with the mapped region. 3369 */ 3370 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 3371 u64 *page_list, int list_len, 3372 u64 iova) 3373 { 3374 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3375 } 3376 3377 /** 3378 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 3379 * @fmr_list: A linked list of fast memory regions to unmap. 3380 */ 3381 int ib_unmap_fmr(struct list_head *fmr_list); 3382 3383 /** 3384 * ib_dealloc_fmr - Deallocates a fast memory region. 3385 * @fmr: The fast memory region to deallocate. 3386 */ 3387 int ib_dealloc_fmr(struct ib_fmr *fmr); 3388 3389 /** 3390 * ib_attach_mcast - Attaches the specified QP to a multicast group. 3391 * @qp: QP to attach to the multicast group. The QP must be type 3392 * IB_QPT_UD. 3393 * @gid: Multicast group GID. 3394 * @lid: Multicast group LID in host byte order. 3395 * 3396 * In order to send and receive multicast packets, subnet 3397 * administration must have created the multicast group and configured 3398 * the fabric appropriately. The port associated with the specified 3399 * QP must also be a member of the multicast group. 3400 */ 3401 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3402 3403 /** 3404 * ib_detach_mcast - Detaches the specified QP from a multicast group. 3405 * @qp: QP to detach from the multicast group. 3406 * @gid: Multicast group GID. 3407 * @lid: Multicast group LID in host byte order. 3408 */ 3409 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3410 3411 /** 3412 * ib_alloc_xrcd - Allocates an XRC domain. 3413 * @device: The device on which to allocate the XRC domain. 3414 */ 3415 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); 3416 3417 /** 3418 * ib_dealloc_xrcd - Deallocates an XRC domain. 3419 * @xrcd: The XRC domain to deallocate. 3420 */ 3421 int ib_dealloc_xrcd(struct ib_xrcd *xrcd); 3422 3423 struct ib_flow *ib_create_flow(struct ib_qp *qp, 3424 struct ib_flow_attr *flow_attr, int domain); 3425 int ib_destroy_flow(struct ib_flow *flow_id); 3426 3427 static inline int ib_check_mr_access(int flags) 3428 { 3429 /* 3430 * Local write permission is required if remote write or 3431 * remote atomic permission is also requested. 3432 */ 3433 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 3434 !(flags & IB_ACCESS_LOCAL_WRITE)) 3435 return -EINVAL; 3436 3437 return 0; 3438 } 3439 3440 /** 3441 * ib_check_mr_status: lightweight check of MR status. 3442 * This routine may provide status checks on a selected 3443 * ib_mr. first use is for signature status check. 3444 * 3445 * @mr: A memory region. 3446 * @check_mask: Bitmask of which checks to perform from 3447 * ib_mr_status_check enumeration. 3448 * @mr_status: The container of relevant status checks. 3449 * failed checks will be indicated in the status bitmask 3450 * and the relevant info shall be in the error item. 3451 */ 3452 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3453 struct ib_mr_status *mr_status); 3454 3455 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3456 u16 pkey, const union ib_gid *gid, 3457 const struct sockaddr *addr); 3458 struct ib_wq *ib_create_wq(struct ib_pd *pd, 3459 struct ib_wq_init_attr *init_attr); 3460 int ib_destroy_wq(struct ib_wq *wq); 3461 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 3462 u32 wq_attr_mask); 3463 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 3464 struct ib_rwq_ind_table_init_attr* 3465 wq_ind_table_init_attr); 3466 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 3467 3468 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3469 unsigned int *sg_offset, unsigned int page_size); 3470 3471 static inline int 3472 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3473 unsigned int *sg_offset, unsigned int page_size) 3474 { 3475 int n; 3476 3477 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 3478 mr->iova = 0; 3479 3480 return n; 3481 } 3482 3483 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 3484 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 3485 3486 void ib_drain_rq(struct ib_qp *qp); 3487 void ib_drain_sq(struct ib_qp *qp); 3488 void ib_drain_qp(struct ib_qp *qp); 3489 3490 int ib_resolve_eth_dmac(struct ib_device *device, 3491 struct rdma_ah_attr *ah_attr); 3492 3493 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) 3494 { 3495 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE) 3496 return attr->roce.dmac; 3497 return NULL; 3498 } 3499 3500 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid) 3501 { 3502 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 3503 attr->ib.dlid = (u16)dlid; 3504 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 3505 attr->opa.dlid = dlid; 3506 } 3507 3508 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr) 3509 { 3510 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 3511 return attr->ib.dlid; 3512 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 3513 return attr->opa.dlid; 3514 return 0; 3515 } 3516 3517 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl) 3518 { 3519 attr->sl = sl; 3520 } 3521 3522 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr) 3523 { 3524 return attr->sl; 3525 } 3526 3527 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr, 3528 u8 src_path_bits) 3529 { 3530 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 3531 attr->ib.src_path_bits = src_path_bits; 3532 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 3533 attr->opa.src_path_bits = src_path_bits; 3534 } 3535 3536 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr) 3537 { 3538 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 3539 return attr->ib.src_path_bits; 3540 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 3541 return attr->opa.src_path_bits; 3542 return 0; 3543 } 3544 3545 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num) 3546 { 3547 attr->port_num = port_num; 3548 } 3549 3550 static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) 3551 { 3552 return attr->port_num; 3553 } 3554 3555 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr, 3556 u8 static_rate) 3557 { 3558 attr->static_rate = static_rate; 3559 } 3560 3561 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr) 3562 { 3563 return attr->static_rate; 3564 } 3565 3566 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr, 3567 enum ib_ah_flags flag) 3568 { 3569 attr->ah_flags = flag; 3570 } 3571 3572 static inline enum ib_ah_flags 3573 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr) 3574 { 3575 return attr->ah_flags; 3576 } 3577 3578 static inline const struct ib_global_route 3579 *rdma_ah_read_grh(const struct rdma_ah_attr *attr) 3580 { 3581 return &attr->grh; 3582 } 3583 3584 /*To retrieve and modify the grh */ 3585 static inline struct ib_global_route 3586 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr) 3587 { 3588 return &attr->grh; 3589 } 3590 3591 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid) 3592 { 3593 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 3594 3595 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid)); 3596 } 3597 3598 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr, 3599 __be64 prefix) 3600 { 3601 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 3602 3603 grh->dgid.global.subnet_prefix = prefix; 3604 } 3605 3606 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr, 3607 __be64 if_id) 3608 { 3609 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 3610 3611 grh->dgid.global.interface_id = if_id; 3612 } 3613 3614 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr, 3615 union ib_gid *dgid, u32 flow_label, 3616 u8 sgid_index, u8 hop_limit, 3617 u8 traffic_class) 3618 { 3619 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 3620 3621 attr->ah_flags = IB_AH_GRH; 3622 if (dgid) 3623 grh->dgid = *dgid; 3624 grh->flow_label = flow_label; 3625 grh->sgid_index = sgid_index; 3626 grh->hop_limit = hop_limit; 3627 grh->traffic_class = traffic_class; 3628 } 3629 3630 /*Get AH type */ 3631 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, 3632 u32 port_num) 3633 { 3634 if ((rdma_protocol_roce(dev, port_num)) || 3635 (rdma_protocol_iwarp(dev, port_num))) 3636 return RDMA_AH_ATTR_TYPE_ROCE; 3637 else if ((rdma_protocol_ib(dev, port_num)) && 3638 (rdma_cap_opa_ah(dev, port_num))) 3639 return RDMA_AH_ATTR_TYPE_OPA; 3640 else 3641 return RDMA_AH_ATTR_TYPE_IB; 3642 } 3643 #endif /* IB_VERBS_H */ 3644