/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <netinet/ip.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

struct ifla_vf_info;
struct ifla_vf_stats;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;

union ib_gid {
        u8 raw[16];
        struct {
                __be64 subnet_prefix;
                __be64 interface_id;
        } global;
};

extern union ib_gid zgid;

enum ib_gid_type {
        /* If link layer is Ethernet, this is RoCE V1 */
        IB_GID_TYPE_IB = 0,
        IB_GID_TYPE_ROCE = 0,
        IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
        IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT 4791

struct ib_gid_attr {
        enum ib_gid_type gid_type;
        struct net_device *ndev;
};
enum rdma_node_type {
        /* IB values map to NodeInfo:NodeType. */
        RDMA_NODE_IB_CA = 1,
        RDMA_NODE_IB_SWITCH,
        RDMA_NODE_IB_ROUTER,
        RDMA_NODE_RNIC,
        RDMA_NODE_USNIC,
        RDMA_NODE_USNIC_UDP,
};

enum {
        /* set the local administered indication */
        IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
        RDMA_TRANSPORT_USNIC,
        RDMA_TRANSPORT_USNIC_UDP
};

enum rdma_protocol_type {
        RDMA_PROTOCOL_IB,
        RDMA_PROTOCOL_IBOE,
        RDMA_PROTOCOL_IWARP,
        RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type);

enum rdma_network_type {
        RDMA_NETWORK_IB,
        RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
        RDMA_NETWORK_IPV4,
        RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
        if (network_type == RDMA_NETWORK_IPV4 ||
            network_type == RDMA_NETWORK_IPV6)
                return IB_GID_TYPE_ROCE_UDP_ENCAP;

        /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
        return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
                                                            union ib_gid *gid)
{
        if (gid_type == IB_GID_TYPE_IB)
                return RDMA_NETWORK_IB;

        if (ipv6_addr_v4mapped((struct in6_addr *)gid))
                return RDMA_NETWORK_IPV4;
        else
                return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
        IB_LINK_LAYER_UNSPECIFIED,
        IB_LINK_LAYER_INFINIBAND,
        IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
        IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
        IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
        IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
        IB_DEVICE_RAW_MULTI = (1 << 3),
        IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
        IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
        IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
        IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
        IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
        IB_DEVICE_INIT_TYPE = (1 << 9),
        IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
        IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
        IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
        IB_DEVICE_SRQ_RESIZE = (1 << 13),
        IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

        /*
         * This device supports a per-device lkey or stag that can be
         * used without performing a memory registration for the local
         * memory.  Note that ULPs should never check this flag, but
         * instead use the local_dma_lkey flag in the ib_pd structure,
         * which will always contain a usable lkey.
         */
        IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
        IB_DEVICE_RESERVED /* old SEND_W_INV */ = (1 << 16),
        IB_DEVICE_MEM_WINDOW = (1 << 17),
        /*
         * Devices should set IB_DEVICE_UD_IP_SUM if they support
         * insertion of UDP and TCP checksum on outgoing UD IPoIB
         * messages and can verify the validity of checksum for
         * incoming messages.  Setting this flag implies that the
         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
         */
        IB_DEVICE_UD_IP_CSUM = (1 << 18),
        IB_DEVICE_UD_TSO = (1 << 19),
        IB_DEVICE_XRC = (1 << 20),

        /*
         * This device supports the IB "base memory management extension",
         * which includes support for fast registrations (IB_WR_REG_MR,
         * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
         * also be set by any iWarp device which must support FRs to comply
         * with the iWarp verbs spec.  iWarp devices also support the
         * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
         * stag.
         */
        IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
        IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
        IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
        IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
        IB_DEVICE_RC_IP_CSUM = (1 << 25),
        IB_DEVICE_RAW_IP_CSUM = (1 << 26),
        /*
         * Devices should set IB_DEVICE_CROSS_CHANNEL if they
         * support execution of WQEs that involve synchronization
         * of I/O operations with a single completion queue managed
         * by hardware.
         */
        IB_DEVICE_CROSS_CHANNEL = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
        IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
        IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
        IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
};

enum ib_signature_prot_cap {
        IB_PROT_T10DIF_TYPE_1 = 1,
        IB_PROT_T10DIF_TYPE_2 = 1 << 1,
        IB_PROT_T10DIF_TYPE_3 = 1 << 2,
};

enum ib_signature_guard_cap {
        IB_GUARD_T10DIF_CRC = 1,
        IB_GUARD_T10DIF_CSUM = 1 << 1,
};

enum ib_atomic_cap {
        IB_ATOMIC_NONE,
        IB_ATOMIC_HCA,
        IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
        IB_ODP_SUPPORT = 1 << 0,
};

enum ib_odp_transport_cap_bits {
        IB_ODP_SUPPORT_SEND = 1 << 0,
        IB_ODP_SUPPORT_RECV = 1 << 1,
        IB_ODP_SUPPORT_WRITE = 1 << 2,
        IB_ODP_SUPPORT_READ = 1 << 3,
        IB_ODP_SUPPORT_ATOMIC = 1 << 4,
};

struct ib_odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t rc_odp_caps;
                uint32_t uc_odp_caps;
                uint32_t ud_odp_caps;
        } per_transport_caps;
};

struct ib_rss_caps {
        /* Corresponding bit will be set if qp type from
         * 'enum ib_qp_type' is supported, e.g.
         * supported_qpts |= 1 << IB_QPT_UD
         */
        u32 supported_qpts;
        u32 max_rwq_indirection_tables;
        u32 max_rwq_indirection_table_size;
};

enum ib_cq_creation_flags {
        IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
        IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
};

struct ib_cq_init_attr {
        unsigned int cqe;
        int comp_vector;
        u32 flags;
};

struct ib_device_attr {
        u64 fw_ver;
        __be64 sys_image_guid;
        u64 max_mr_size;
        u64 page_size_cap;
        u32 vendor_id;
        u32 vendor_part_id;
        u32 hw_ver;
        int max_qp;
        int max_qp_wr;
        u64 device_cap_flags;
        int max_sge;
        int max_sge_rd;
        int max_cq;
        int max_cqe;
        int max_mr;
        int max_pd;
        int max_qp_rd_atom;
        int max_ee_rd_atom;
        int max_res_rd_atom;
        int max_qp_init_rd_atom;
        int max_ee_init_rd_atom;
        enum ib_atomic_cap atomic_cap;
        enum ib_atomic_cap masked_atomic_cap;
        int max_ee;
        int max_rdd;
        int max_mw;
        int max_raw_ipv6_qp;
        int max_raw_ethy_qp;
        int max_mcast_grp;
        int max_mcast_qp_attach;
        int max_total_mcast_qp_attach;
        int max_ah;
        int max_fmr;
        int max_map_per_fmr;
        int max_srq;
        int max_srq_wr;
        int max_srq_sge;
        unsigned int max_fast_reg_page_list_len;
        u16 max_pkeys;
        u8 local_ca_ack_delay;
        int sig_prot_cap;
        int sig_guard_cap;
        struct ib_odp_caps odp_caps;
        uint64_t timestamp_mask;
        uint64_t hca_core_clock;        /* in KHZ */
        struct ib_rss_caps rss_caps;
        u32 max_wq_type_rq;
};
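/*
 * Example (illustrative sketch, not part of the ABI): a ULP would normally
 * consult the cached attributes in ib_device.attrs (declared further down in
 * this header) before relying on an optional capability, e.g. fast
 * registration work requests.  "my_ulp_can_use_fastreg" is a hypothetical
 * helper used only for illustration.
 *
 *      static bool my_ulp_can_use_fastreg(struct ib_device *dev)
 *      {
 *              return !!(dev->attrs.device_cap_flags &
 *                        IB_DEVICE_MEM_MGT_EXTENSIONS);
 *      }
 */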
enum ib_mtu {
        IB_MTU_256 = 1,
        IB_MTU_512 = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        switch (mtu) {
        case IB_MTU_256:  return 256;
        case IB_MTU_512:  return 512;
        case IB_MTU_1024: return 1024;
        case IB_MTU_2048: return 2048;
        case IB_MTU_4096: return 4096;
        default:          return -1;
        }
}

enum ib_port_state {
        IB_PORT_NOP = 0,
        IB_PORT_DOWN = 1,
        IB_PORT_INIT = 2,
        IB_PORT_ARMED = 3,
        IB_PORT_ACTIVE = 4,
        IB_PORT_ACTIVE_DEFER = 5,
        IB_PORT_DUMMY = -1,     /* force enum signed */
};

enum ib_port_cap_flags {
        IB_PORT_SM = 1 << 1,
        IB_PORT_NOTICE_SUP = 1 << 2,
        IB_PORT_TRAP_SUP = 1 << 3,
        IB_PORT_OPT_IPD_SUP = 1 << 4,
        IB_PORT_AUTO_MIGR_SUP = 1 << 5,
        IB_PORT_SL_MAP_SUP = 1 << 6,
        IB_PORT_MKEY_NVRAM = 1 << 7,
        IB_PORT_PKEY_NVRAM = 1 << 8,
        IB_PORT_LED_INFO_SUP = 1 << 9,
        IB_PORT_SM_DISABLED = 1 << 10,
        IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
        IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
        IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
        IB_PORT_CM_SUP = 1 << 16,
        IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
        IB_PORT_REINIT_SUP = 1 << 18,
        IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
        IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
        IB_PORT_DR_NOTICE_SUP = 1 << 21,
        IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
        IB_PORT_BOOT_MGMT_SUP = 1 << 23,
        IB_PORT_LINK_LATENCY_SUP = 1 << 24,
        IB_PORT_CLIENT_REG_SUP = 1 << 25,
        IB_PORT_IP_BASED_GIDS = 1 << 26,
};

enum ib_port_width {
        IB_WIDTH_1X = 1,
        IB_WIDTH_4X = 2,
        IB_WIDTH_8X = 4,
        IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        switch (width) {
        case IB_WIDTH_1X:  return 1;
        case IB_WIDTH_4X:  return 4;
        case IB_WIDTH_8X:  return 8;
        case IB_WIDTH_12X: return 12;
        default:           return -1;
        }
}

enum ib_port_speed {
        IB_SPEED_SDR = 1,
        IB_SPEED_DDR = 2,
        IB_SPEED_QDR = 4,
        IB_SPEED_FDR10 = 8,
        IB_SPEED_FDR = 16,
        IB_SPEED_EDR = 32
};

/**
 * struct rdma_hw_stats
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the directory.
 * @num_counters - How many hardware counters there are.  If names is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the driver's get_stats routine
 */
struct rdma_hw_stats {
        unsigned long timestamp;
        unsigned long lifespan;
        const char * const *names;
        int num_counters;
        u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
                const char * const *names, int num_counters,
                unsigned long lifespan)
{
        struct rdma_hw_stats *stats;

        stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
                        GFP_KERNEL);
        if (!stats)
                return NULL;
        stats->names = names;
        stats->num_counters = num_counters;
        stats->lifespan = msecs_to_jiffies(lifespan);

        return stats;
}
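/*
 * Example (illustrative sketch only): a driver that exposes hardware
 * counters typically wires the struct above to its alloc_hw_stats and
 * get_hw_stats callbacks roughly as follows.  The counter names and the
 * "mydrv_*" helpers are hypothetical.
 *
 *      static const char * const mydrv_counter_names[] = {
 *              "rx_packets", "tx_packets",
 *      };
 *
 *      static struct rdma_hw_stats *mydrv_alloc_hw_stats(struct ib_device *ibdev,
 *                                                        u8 port_num)
 *      {
 *              return rdma_alloc_hw_stats_struct(mydrv_counter_names,
 *                              ARRAY_SIZE(mydrv_counter_names),
 *                              RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *      }
 *
 *      static int mydrv_get_hw_stats(struct ib_device *ibdev,
 *                                    struct rdma_hw_stats *stats,
 *                                    u8 port_num, int index)
 *      {
 *              stats->value[0] = mydrv_read_rx_counter(ibdev, port_num);
 *              stats->value[1] = mydrv_read_tx_counter(ibdev, port_num);
 *              return ARRAY_SIZE(mydrv_counter_names);
 *      }
 *
 * Returning num_counters tells the core that all counters were refreshed
 * (see the get_hw_stats documentation in struct ib_device below).
 */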
/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD            0x00000001
#define RDMA_CORE_CAP_IB_SMI            0x00000002
#define RDMA_CORE_CAP_IB_CM             0x00000004
#define RDMA_CORE_CAP_IW_CM             0x00000008
#define RDMA_CORE_CAP_IB_SA             0x00000010
#define RDMA_CORE_CAP_OPA_MAD           0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB             0x00001000
#define RDMA_CORE_CAP_ETH_AH            0x00002000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB           0x00100000
#define RDMA_CORE_CAP_PROT_ROCE         0x00200000
#define RDMA_CORE_CAP_PROT_IWARP        0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
                                        | RDMA_CORE_CAP_IB_MAD \
                                        | RDMA_CORE_CAP_IB_SMI \
                                        | RDMA_CORE_CAP_IB_CM  \
                                        | RDMA_CORE_CAP_IB_SA  \
                                        | RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
                                       (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
                                        | RDMA_CORE_CAP_IB_MAD  \
                                        | RDMA_CORE_CAP_IB_CM   \
                                        | RDMA_CORE_CAP_AF_IB   \
                                        | RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
                                        | RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB \
                                        | RDMA_CORE_CAP_OPA_MAD)

struct ib_port_attr {
        u64 subnet_prefix;
        enum ib_port_state state;
        enum ib_mtu max_mtu;
        enum ib_mtu active_mtu;
        int gid_tbl_len;
        u32 port_cap_flags;
        u32 max_msg_sz;
        u32 bad_pkey_cntr;
        u32 qkey_viol_cntr;
        u16 pkey_tbl_len;
        u16 lid;
        u16 sm_lid;
        u8 lmc;
        u8 max_vl_num;
        u8 sm_sl;
        u8 subnet_timeout;
        u8 init_type_reply;
        u8 active_width;
        u8 active_speed;
        u8 phys_state;
        bool grh_required;
};

enum ib_device_modify_flags {
        IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
        IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
        u64 sys_image_guid;
        char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
        IB_PORT_SHUTDOWN = 1,
        IB_PORT_INIT_TYPE = (1<<2),
        IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
        u32 set_port_cap_mask;
        u32 clr_port_cap_mask;
        u8 init_type;
};

enum ib_event_type {
        IB_EVENT_CQ_ERR,
        IB_EVENT_QP_FATAL,
        IB_EVENT_QP_REQ_ERR,
        IB_EVENT_QP_ACCESS_ERR,
        IB_EVENT_COMM_EST,
        IB_EVENT_SQ_DRAINED,
        IB_EVENT_PATH_MIG,
        IB_EVENT_PATH_MIG_ERR,
        IB_EVENT_DEVICE_FATAL,
        IB_EVENT_PORT_ACTIVE,
        IB_EVENT_PORT_ERR,
        IB_EVENT_LID_CHANGE,
        IB_EVENT_PKEY_CHANGE,
        IB_EVENT_SM_CHANGE,
        IB_EVENT_SRQ_ERR,
        IB_EVENT_SRQ_LIMIT_REACHED,
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
        IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
        struct ib_device *device;
        union {
                struct ib_cq *cq;
                struct ib_qp *qp;
                struct ib_srq *srq;
                struct ib_wq *wq;
                u8 port_num;
        } element;
        enum ib_event_type event;
};

struct ib_event_handler {
        struct ib_device *device;
        void (*handler)(struct ib_event_handler *, struct ib_event *);
        struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
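/*
 * Example (illustrative sketch only): a consumer that wants asynchronous
 * device events initializes a handler with the macro above and registers
 * it with ib_register_event_handler() (declared further down in this
 * header).  "my_event_handler" and "my_eh" are hypothetical names.
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              if (event->event == IB_EVENT_PORT_ACTIVE)
 *                      pr_info("%s: port %u became active\n",
 *                              event->device->name, event->element.port_num);
 *      }
 *
 *      static struct ib_event_handler my_eh;
 *
 *      INIT_IB_EVENT_HANDLER(&my_eh, ibdev, my_event_handler);
 *      ib_register_event_handler(&my_eh);
 */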
struct ib_global_route {
        union ib_gid dgid;
        u32 flow_label;
        u8 sgid_index;
        u8 hop_limit;
        u8 traffic_class;
};

struct ib_grh {
        __be32 version_tclass_flow;
        __be16 paylen;
        u8 next_hdr;
        u8 hop_limit;
        union ib_gid sgid;
        union ib_gid dgid;
};

union rdma_network_hdr {
        struct ib_grh ibgrh;
        struct {
                /* The IB spec states that if it's IPv4, the IPv4 header
                 * is located in the last 20 bytes of the GRH.
                 */
                u8 reserved[20];
                struct ip roce4grh;
        };
};

enum {
        IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE       cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE   cpu_to_be16(0xC000)

enum ib_ah_flags {
        IB_AH_GRH = 1
};

enum ib_rate {
        IB_RATE_PORT_CURRENT = 0,
        IB_RATE_2_5_GBPS = 2,
        IB_RATE_5_GBPS = 5,
        IB_RATE_10_GBPS = 3,
        IB_RATE_20_GBPS = 6,
        IB_RATE_30_GBPS = 4,
        IB_RATE_40_GBPS = 7,
        IB_RATE_60_GBPS = 8,
        IB_RATE_80_GBPS = 9,
        IB_RATE_120_GBPS = 10,
        IB_RATE_14_GBPS = 11,
        IB_RATE_56_GBPS = 12,
        IB_RATE_112_GBPS = 13,
        IB_RATE_168_GBPS = 14,
        IB_RATE_25_GBPS = 15,
        IB_RATE_100_GBPS = 16,
        IB_RATE_200_GBPS = 17,
        IB_RATE_300_GBPS = 18
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:   memory region that is used for normal registration
 * @IB_MR_TYPE_SIGNATURE: memory region that is used for signature operations
 *                        (data-integrity capable regions)
 * @IB_MR_TYPE_SG_GAPS:   memory region that is capable of registering any
 *                        arbitrary sg lists (without the normal mr
 *                        constraints - see ib_map_mr_sg)
 */
enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SIGNATURE,
        IB_MR_TYPE_SG_GAPS,
};

/**
 * Signature types
 * IB_SIG_TYPE_NONE: Unprotected.
 * IB_SIG_TYPE_T10_DIF: Type T10-DIF
 */
enum ib_signature_type {
        IB_SIG_TYPE_NONE,
        IB_SIG_TYPE_T10_DIF,
};

/**
 * Signature T10-DIF block-guard types
 * IB_T10DIF_CRC: Corresponds to T10-PI mandated CRC checksum rules.
 * IB_T10DIF_CSUM: Corresponds to IP checksum rules.
 */
enum ib_t10_dif_bg_type {
        IB_T10DIF_CRC,
        IB_T10DIF_CSUM
};

/**
 * struct ib_t10_dif_domain - Parameters specific for T10-DIF domain.
 * @bg_type: T10-DIF block guard type (CRC|CSUM)
 * @pi_interval: protection information interval.
 * @bg: seed of guard computation.
 * @app_tag: application tag of guard block
 * @ref_tag: initial guard block reference tag.
 * @ref_remap: Indicate whether the reftag increments each block
 * @app_escape: Indicate to skip block check if apptag=0xffff
 * @ref_escape: Indicate to skip block check if reftag=0xffffffff
 * @apptag_check_mask: check bitmask of application tag.
 */
struct ib_t10_dif_domain {
        enum ib_t10_dif_bg_type bg_type;
        u16 pi_interval;
        u16 bg;
        u16 app_tag;
        u32 ref_tag;
        bool ref_remap;
        bool app_escape;
        bool ref_escape;
        u16 apptag_check_mask;
};

/**
 * struct ib_sig_domain - Parameters for signature domain
 * @sig_type: specific signature type
 * @sig: union of all signature domain attributes that may
 *     be used to set domain layout.
 */
struct ib_sig_domain {
        enum ib_signature_type sig_type;
        union {
                struct ib_t10_dif_domain dif;
        } sig;
};

/**
 * struct ib_sig_attrs - Parameters for signature handover operation
 * @check_mask: bitmask for signature byte check (8 bytes)
 * @mem: memory domain layout descriptor.
 * @wire: wire domain layout descriptor.
 */
struct ib_sig_attrs {
        u8 check_mask;
        struct ib_sig_domain mem;
        struct ib_sig_domain wire;
};

enum ib_sig_err_type {
        IB_SIG_BAD_GUARD,
        IB_SIG_BAD_REFTAG,
        IB_SIG_BAD_APPTAG,
};

/**
 * struct ib_sig_err - signature error descriptor
 */
struct ib_sig_err {
        enum ib_sig_err_type err_type;
        u32 expected;
        u32 actual;
        u64 sig_err_offset;
        u32 key;
};

enum ib_mr_status_check {
        IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status.  For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
        u32 fail_status;
        struct ib_sig_err sig_err;
};
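/*
 * Example (illustrative sketch only): a ULP using signature handover
 * (e.g. T10-PI offload) fills a struct ib_sig_attrs describing both
 * domains.  Here the memory domain is unprotected and the wire domain
 * carries DIF with a CRC guard; the 512-byte interval and the helper name
 * are illustrative assumptions.
 *
 *      static void my_fill_sig_attrs(struct ib_sig_attrs *sig_attrs)
 *      {
 *              memset(sig_attrs, 0, sizeof(*sig_attrs));
 *              sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
 *              sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
 *              sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
 *              sig_attrs->wire.sig.dif.pi_interval = 512;
 *              sig_attrs->wire.sig.dif.ref_tag = 0;
 *              sig_attrs->wire.sig.dif.ref_remap = true;
 *              sig_attrs->wire.sig.dif.app_escape = true;
 *              sig_attrs->check_mask = 0xff;
 *      }
 *
 * The resulting attributes are passed to the device via a
 * struct ib_sig_handover_wr (defined later in this header).
 */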
/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct ib_ah_attr {
        struct ib_global_route grh;
        u16 dlid;
        u8 sl;
        u8 src_path_bits;
        u8 static_rate;
        u8 ah_flags;
        u8 port_num;
        u8 dmac[ETH_ALEN];
};

enum ib_wc_status {
        IB_WC_SUCCESS,
        IB_WC_LOC_LEN_ERR,
        IB_WC_LOC_QP_OP_ERR,
        IB_WC_LOC_EEC_OP_ERR,
        IB_WC_LOC_PROT_ERR,
        IB_WC_WR_FLUSH_ERR,
        IB_WC_MW_BIND_ERR,
        IB_WC_BAD_RESP_ERR,
        IB_WC_LOC_ACCESS_ERR,
        IB_WC_REM_INV_REQ_ERR,
        IB_WC_REM_ACCESS_ERR,
        IB_WC_REM_OP_ERR,
        IB_WC_RETRY_EXC_ERR,
        IB_WC_RNR_RETRY_EXC_ERR,
        IB_WC_LOC_RDD_VIOL_ERR,
        IB_WC_REM_INV_RD_REQ_ERR,
        IB_WC_REM_ABORT_ERR,
        IB_WC_INV_EECN_ERR,
        IB_WC_INV_EEC_STATE_ERR,
        IB_WC_FATAL_ERR,
        IB_WC_RESP_TIMEOUT_ERR,
        IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
        IB_WC_SEND,
        IB_WC_RDMA_WRITE,
        IB_WC_RDMA_READ,
        IB_WC_COMP_SWAP,
        IB_WC_FETCH_ADD,
        IB_WC_LSO,
        IB_WC_LOCAL_INV,
        IB_WC_REG_MR,
        IB_WC_MASKED_COMP_SWAP,
        IB_WC_MASKED_FETCH_ADD,
        /*
         * Set value of IB_WC_RECV so consumers can test if a completion is a
         * receive by testing (opcode & IB_WC_RECV).
         */
        IB_WC_RECV = 1 << 7,
        IB_WC_RECV_RDMA_WITH_IMM,
        IB_WC_DUMMY = -1,       /* force enum signed */
};

enum ib_wc_flags {
        IB_WC_GRH = 1,
        IB_WC_WITH_IMM = (1<<1),
        IB_WC_WITH_INVALIDATE = (1<<2),
        IB_WC_IP_CSUM_OK = (1<<3),
        IB_WC_WITH_SMAC = (1<<4),
        IB_WC_WITH_VLAN = (1<<5),
        IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
        union {
                u64 wr_id;
                struct ib_cqe *wr_cqe;
        };
        enum ib_wc_status status;
        enum ib_wc_opcode opcode;
        u32 vendor_err;
        u32 byte_len;
        struct ib_qp *qp;
        union {
                __be32 imm_data;
                u32 invalidate_rkey;
        } ex;
        u32 src_qp;
        int wc_flags;
        u16 pkey_index;
        u16 slid;
        u8 sl;
        u8 dlid_path_bits;
        u8 port_num;    /* valid only for DR SMPs on switches */
        u8 smac[ETH_ALEN];
        u16 vlan_id;
        u8 network_hdr_type;
};

enum ib_cq_notify_flags {
        IB_CQ_SOLICITED = 1 << 0,
        IB_CQ_NEXT_COMP = 1 << 1,
        IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
        IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
        IB_SRQT_BASIC,
        IB_SRQT_XRC
};

enum ib_srq_attr_mask {
        IB_SRQ_MAX_WR = 1 << 0,
        IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
        u32 max_wr;
        u32 max_sge;
        u32 srq_limit;
};

struct ib_srq_init_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *srq_context;
        struct ib_srq_attr attr;
        enum ib_srq_type srq_type;

        union {
                struct {
                        struct ib_xrcd *xrcd;
                        struct ib_cq *cq;
                } xrc;
        } ext;
};

struct ib_qp_cap {
        u32 max_send_wr;
        u32 max_recv_wr;
        u32 max_send_sge;
        u32 max_recv_sge;
        u32 max_inline_data;

        /*
         * Maximum number of rdma_rw_ctx structures in flight at a time.
         * ib_create_qp() will calculate the right amount of needed WRs
         * and MRs based on this.
         */
        u32 max_rdma_ctxs;
};

enum ib_sig_type {
        IB_SIGNAL_ALL_WR,
        IB_SIGNAL_REQ_WR
};
enum ib_qp_type {
        /*
         * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
         * here (and in that order) since the MAD layer uses them as
         * indices into a 2-entry table.
         */
        IB_QPT_SMI,
        IB_QPT_GSI,

        IB_QPT_RC,
        IB_QPT_UC,
        IB_QPT_UD,
        IB_QPT_RAW_IPV6,
        IB_QPT_RAW_ETHERTYPE,
        IB_QPT_RAW_PACKET = 8,
        IB_QPT_XRC_INI = 9,
        IB_QPT_XRC_TGT,
        IB_QPT_MAX,
        /* Reserve a range for qp types internal to the low level driver.
         * These qp types will not be visible at the IB core layer, so the
         * IB_QPT_MAX usages should not be affected in the core layer.
         */
        IB_QPT_RESERVED1 = 0x1000,
        IB_QPT_RESERVED2,
        IB_QPT_RESERVED3,
        IB_QPT_RESERVED4,
        IB_QPT_RESERVED5,
        IB_QPT_RESERVED6,
        IB_QPT_RESERVED7,
        IB_QPT_RESERVED8,
        IB_QPT_RESERVED9,
        IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
        IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
        IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
        IB_QP_CREATE_MANAGED_SEND = 1 << 3,
        IB_QP_CREATE_MANAGED_RECV = 1 << 4,
        IB_QP_CREATE_NETIF_QP = 1 << 5,
        IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
        IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
        IB_QP_CREATE_SCATTER_FCS = 1 << 8,
        /* reserve bits 26-31 for low level drivers' internal use */
        IB_QP_CREATE_RESERVED_START = 1 << 26,
        IB_QP_CREATE_RESERVED_END = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        struct ib_cq *send_cq;
        struct ib_cq *recv_cq;
        struct ib_srq *srq;
        struct ib_xrcd *xrcd;   /* XRC TGT QPs only */
        struct ib_qp_cap cap;
        enum ib_sig_type sq_sig_type;
        enum ib_qp_type qp_type;
        enum ib_qp_create_flags create_flags;

        /*
         * Only needed for special QP types, or when using the RW API.
         */
        u8 port_num;
        struct ib_rwq_ind_table *rwq_ind_tbl;
};
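/*
 * Example (illustrative sketch only): a kernel ULP typically fills
 * struct ib_qp_init_attr and passes it to ib_create_qp() (declared later
 * in this header).  The sizes below are arbitrary illustration values and
 * "my_qp_event_handler" is a hypothetical callback.
 *
 *      struct ib_qp_init_attr init_attr = {
 *              .event_handler = my_qp_event_handler,
 *              .send_cq       = send_cq,
 *              .recv_cq       = recv_cq,
 *              .sq_sig_type   = IB_SIGNAL_REQ_WR,
 *              .qp_type       = IB_QPT_RC,
 *              .cap = {
 *                      .max_send_wr  = 128,
 *                      .max_recv_wr  = 128,
 *                      .max_send_sge = 2,
 *                      .max_recv_sge = 1,
 *              },
 *      };
 *      struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */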
struct ib_qp_open_attr {
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        u32 qp_num;
        enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
        IB_RNR_TIMER_655_36 = 0,
        IB_RNR_TIMER_000_01 = 1,
        IB_RNR_TIMER_000_02 = 2,
        IB_RNR_TIMER_000_03 = 3,
        IB_RNR_TIMER_000_04 = 4,
        IB_RNR_TIMER_000_06 = 5,
        IB_RNR_TIMER_000_08 = 6,
        IB_RNR_TIMER_000_12 = 7,
        IB_RNR_TIMER_000_16 = 8,
        IB_RNR_TIMER_000_24 = 9,
        IB_RNR_TIMER_000_32 = 10,
        IB_RNR_TIMER_000_48 = 11,
        IB_RNR_TIMER_000_64 = 12,
        IB_RNR_TIMER_000_96 = 13,
        IB_RNR_TIMER_001_28 = 14,
        IB_RNR_TIMER_001_92 = 15,
        IB_RNR_TIMER_002_56 = 16,
        IB_RNR_TIMER_003_84 = 17,
        IB_RNR_TIMER_005_12 = 18,
        IB_RNR_TIMER_007_68 = 19,
        IB_RNR_TIMER_010_24 = 20,
        IB_RNR_TIMER_015_36 = 21,
        IB_RNR_TIMER_020_48 = 22,
        IB_RNR_TIMER_030_72 = 23,
        IB_RNR_TIMER_040_96 = 24,
        IB_RNR_TIMER_061_44 = 25,
        IB_RNR_TIMER_081_92 = 26,
        IB_RNR_TIMER_122_88 = 27,
        IB_RNR_TIMER_163_84 = 28,
        IB_RNR_TIMER_245_76 = 29,
        IB_RNR_TIMER_327_68 = 30,
        IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
        IB_QP_STATE = 1,
        IB_QP_CUR_STATE = (1<<1),
        IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
        IB_QP_ACCESS_FLAGS = (1<<3),
        IB_QP_PKEY_INDEX = (1<<4),
        IB_QP_PORT = (1<<5),
        IB_QP_QKEY = (1<<6),
        IB_QP_AV = (1<<7),
        IB_QP_PATH_MTU = (1<<8),
        IB_QP_TIMEOUT = (1<<9),
        IB_QP_RETRY_CNT = (1<<10),
        IB_QP_RNR_RETRY = (1<<11),
        IB_QP_RQ_PSN = (1<<12),
        IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
        IB_QP_ALT_PATH = (1<<14),
        IB_QP_MIN_RNR_TIMER = (1<<15),
        IB_QP_SQ_PSN = (1<<16),
        IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
        IB_QP_PATH_MIG_STATE = (1<<18),
        IB_QP_CAP = (1<<19),
        IB_QP_DEST_QPN = (1<<20),
        IB_QP_RESERVED1 = (1<<21),
        IB_QP_RESERVED2 = (1<<22),
        IB_QP_RESERVED3 = (1<<23),
        IB_QP_RESERVED4 = (1<<24),
};

enum ib_qp_state {
        IB_QPS_RESET,
        IB_QPS_INIT,
        IB_QPS_RTR,
        IB_QPS_RTS,
        IB_QPS_SQD,
        IB_QPS_SQE,
        IB_QPS_ERR,
        IB_QPS_DUMMY = -1,      /* force enum signed */
};

enum ib_mig_state {
        IB_MIG_MIGRATED,
        IB_MIG_REARM,
        IB_MIG_ARMED
};

enum ib_mw_type {
        IB_MW_TYPE_1 = 1,
        IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
        enum ib_qp_state qp_state;
        enum ib_qp_state cur_qp_state;
        enum ib_mtu path_mtu;
        enum ib_mig_state path_mig_state;
        u32 qkey;
        u32 rq_psn;
        u32 sq_psn;
        u32 dest_qp_num;
        int qp_access_flags;
        struct ib_qp_cap cap;
        struct ib_ah_attr ah_attr;
        struct ib_ah_attr alt_ah_attr;
        u16 pkey_index;
        u16 alt_pkey_index;
        u8 en_sqd_async_notify;
        u8 sq_draining;
        u8 max_rd_atomic;
        u8 max_dest_rd_atomic;
        u8 min_rnr_timer;
        u8 port_num;
        u8 timeout;
        u8 retry_cnt;
        u8 rnr_retry;
        u8 alt_port_num;
        u8 alt_timeout;
};

enum ib_wr_opcode {
        IB_WR_RDMA_WRITE,
        IB_WR_RDMA_WRITE_WITH_IMM,
        IB_WR_SEND,
        IB_WR_SEND_WITH_IMM,
        IB_WR_RDMA_READ,
        IB_WR_ATOMIC_CMP_AND_SWP,
        IB_WR_ATOMIC_FETCH_AND_ADD,
        IB_WR_LSO,
        IB_WR_SEND_WITH_INV,
        IB_WR_RDMA_READ_WITH_INV,
        IB_WR_LOCAL_INV,
        IB_WR_REG_MR,
        IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
        IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
        IB_WR_REG_SIG_MR,
        /* reserve values for low level drivers' internal use.
         * These values will not be used at all in the ib core layer.
         */
        IB_WR_RESERVED1 = 0xf0,
        IB_WR_RESERVED2,
        IB_WR_RESERVED3,
        IB_WR_RESERVED4,
        IB_WR_RESERVED5,
        IB_WR_RESERVED6,
        IB_WR_RESERVED7,
        IB_WR_RESERVED8,
        IB_WR_RESERVED9,
        IB_WR_RESERVED10,
        IB_WR_DUMMY = -1,       /* force enum signed */
};

enum ib_send_flags {
        IB_SEND_FENCE = 1,
        IB_SEND_SIGNALED = (1<<1),
        IB_SEND_SOLICITED = (1<<2),
        IB_SEND_INLINE = (1<<3),
        IB_SEND_IP_CSUM = (1<<4),

        /* reserve bits 26-31 for low level drivers' internal use */
        IB_SEND_RESERVED_START = (1 << 26),
        IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
        u64 addr;
        u32 length;
        u32 lkey;
};

struct ib_cqe {
        void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
        struct ib_send_wr *next;
        union {
                u64 wr_id;
                struct ib_cqe *wr_cqe;
        };
        struct ib_sge *sg_list;
        int num_sge;
        enum ib_wr_opcode opcode;
        int send_flags;
        union {
                __be32 imm_data;
                u32 invalidate_rkey;
        } ex;
};

struct ib_rdma_wr {
        struct ib_send_wr wr;
        u64 remote_addr;
        u32 rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
        struct ib_send_wr wr;
        u64 remote_addr;
        u64 compare_add;
        u64 swap;
        u64 compare_add_mask;
        u64 swap_mask;
        u32 rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
        struct ib_send_wr wr;
        struct ib_ah *ah;
        void *header;
        int hlen;
        int mss;
        u32 remote_qpn;
        u32 remote_qkey;
        u16 pkey_index;         /* valid for GSI only */
        u8 port_num;            /* valid for DR SMPs on switch only */
};

static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
        struct ib_send_wr wr;
        struct ib_mr *mr;
        u32 key;
        int access;
};

static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_sig_handover_wr {
        struct ib_send_wr wr;
        struct ib_sig_attrs *sig_attrs;
        struct ib_mr *sig_mr;
        int access_flags;
        struct ib_sge *prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct ib_sig_handover_wr, wr);
}

struct ib_recv_wr {
        struct ib_recv_wr *next;
        union {
                u64 wr_id;
                struct ib_cqe *wr_cqe;
        };
        struct ib_sge *sg_list;
        int num_sge;
};

enum ib_access_flags {
        IB_ACCESS_LOCAL_WRITE = 1,
        IB_ACCESS_REMOTE_WRITE = (1<<1),
        IB_ACCESS_REMOTE_READ = (1<<2),
        IB_ACCESS_REMOTE_ATOMIC = (1<<3),
        IB_ACCESS_MW_BIND = (1<<4),
        IB_ZERO_BASED = (1<<5),
        IB_ACCESS_ON_DEMAND = (1<<6),
};
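/*
 * Example (illustrative sketch only): the extended work request structs
 * above embed a struct ib_send_wr as their first member, so a ULP builds
 * the extended struct and posts &wr.wr through ib_post_send() (declared
 * later in this header).  The buffer address, length and keys below are
 * placeholders.
 *
 *      struct ib_sge sge = {
 *              .addr   = dma_addr,
 *              .length = len,
 *              .lkey   = pd->local_dma_lkey,
 *      };
 *      struct ib_rdma_wr wr = {
 *              .wr = {
 *                      .opcode     = IB_WR_RDMA_WRITE,
 *                      .send_flags = IB_SEND_SIGNALED,
 *                      .sg_list    = &sge,
 *                      .num_sge    = 1,
 *              },
 *              .remote_addr = remote_addr,
 *              .rkey        = remote_rkey,
 *      };
 *      struct ib_send_wr *bad_wr;
 *      int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 *
 * Drivers use rdma_wr()/ud_wr()/reg_wr() etc. in their ->post_send methods
 * to convert the generic struct ib_send_wr back to the extended type via
 * container_of().
 */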
/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
        IB_MR_REREG_TRANS = 1,
        IB_MR_REREG_PD = (1<<1),
        IB_MR_REREG_ACCESS = (1<<2),
        IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
        int max_pages;
        int max_maps;
        u8 page_shift;
};

struct ib_umem;

struct ib_ucontext {
        struct ib_device *device;
        struct list_head pd_list;
        struct list_head mr_list;
        struct list_head mw_list;
        struct list_head cq_list;
        struct list_head qp_list;
        struct list_head srq_list;
        struct list_head ah_list;
        struct list_head xrcd_list;
        struct list_head rule_list;
        struct list_head wq_list;
        struct list_head rwq_ind_tbl_list;
        int closing;

        pid_t tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct rb_root umem_tree;
        /*
         * Protects .umem_rbroot and tree, as well as odp_mrs_count and
         * mmu notifiers registration.
         */
        struct rw_semaphore umem_rwsem;
        void (*invalidate_range)(struct ib_umem *umem,
                                 unsigned long start, unsigned long end);

        struct mmu_notifier mn;
        atomic_t notifier_count;
        /* A list of umems that don't have private mmu notifier counters yet. */
        struct list_head no_private_counters;
        int odp_mrs_count;
#endif
};

struct ib_uobject {
        u64 user_handle;                /* handle given to us by userspace */
        struct ib_ucontext *context;    /* associated user context */
        void *object;                   /* containing object */
        struct list_head list;          /* link to context's list */
        int id;                         /* index into kernel idr */
        struct kref ref;
        struct rw_semaphore mutex;      /* protects .live */
        struct rcu_head rcu;            /* kfree_rcu() overhead */
        int live;
};

struct ib_udata {
        const void __user *inbuf;
        void __user *outbuf;
        size_t inlen;
        size_t outlen;
};

struct ib_pd {
        u32 local_dma_lkey;
        u32 flags;
        struct ib_device *device;
        struct ib_uobject *uobject;
        atomic_t usecnt;        /* count all resources */

        u32 unsafe_global_rkey;

        /*
         * Implementation details of the RDMA core, don't use in drivers:
         */
        struct ib_mr *__internal_mr;
};

struct ib_xrcd {
        struct ib_device *device;
        atomic_t usecnt;        /* count all exposed resources */
        struct inode *inode;

        struct mutex tgt_qp_mutex;
        struct list_head tgt_qp_list;
};

struct ib_ah {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
        IB_POLL_DIRECT,         /* caller context, no hw completions */
        IB_POLL_SOFTIRQ,        /* poll from softirq context */
        IB_POLL_WORKQUEUE,      /* poll from workqueue */
};

struct ib_cq {
        struct ib_device *device;
        struct ib_uobject *uobject;
        ib_comp_handler comp_handler;
        void (*event_handler)(struct ib_event *, void *);
        void *cq_context;
        int cqe;
        atomic_t usecnt;        /* count number of work queues */
        enum ib_poll_context poll_ctx;
        struct work_struct work;
};

struct ib_srq {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        void (*event_handler)(struct ib_event *, void *);
        void *srq_context;
        enum ib_srq_type srq_type;
        atomic_t usecnt;

        union {
                struct {
                        struct ib_xrcd *xrcd;
                        struct ib_cq *cq;
                        u32 srq_num;
                } xrc;
        } ext;
};
enum ib_wq_type {
        IB_WQT_RQ
};

enum ib_wq_state {
        IB_WQS_RESET,
        IB_WQS_RDY,
        IB_WQS_ERR
};

struct ib_wq {
        struct ib_device *device;
        struct ib_uobject *uobject;
        void *wq_context;
        void (*event_handler)(struct ib_event *, void *);
        struct ib_pd *pd;
        struct ib_cq *cq;
        u32 wq_num;
        enum ib_wq_state state;
        enum ib_wq_type wq_type;
        atomic_t usecnt;
};

struct ib_wq_init_attr {
        void *wq_context;
        enum ib_wq_type wq_type;
        u32 max_wr;
        u32 max_sge;
        struct ib_cq *cq;
        void (*event_handler)(struct ib_event *, void *);
};

enum ib_wq_attr_mask {
        IB_WQ_STATE = 1 << 0,
        IB_WQ_CUR_STATE = 1 << 1,
};

struct ib_wq_attr {
        enum ib_wq_state wq_state;
        enum ib_wq_state curr_wq_state;
};

struct ib_rwq_ind_table {
        struct ib_device *device;
        struct ib_uobject *uobject;
        atomic_t usecnt;
        u32 ind_tbl_num;
        u32 log_ind_tbl_size;
        struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
        u32 log_ind_tbl_size;
        /* Each entry is a pointer to a Receive Work Queue */
        struct ib_wq **ind_tbl;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_cq *send_cq;
        struct ib_cq *recv_cq;
        spinlock_t mr_lock;
        struct ib_srq *srq;
        struct ib_xrcd *xrcd;   /* XRC TGT QPs only */
        struct list_head xrcd_list;

        /* count times opened, mcast attaches, flow attaches */
        atomic_t usecnt;
        struct list_head open_list;
        struct ib_qp *real_qp;
        struct ib_uobject *uobject;
        void (*event_handler)(struct ib_event *, void *);
        void *qp_context;
        u32 qp_num;
        u32 max_write_sge;
        u32 max_read_sge;
        enum ib_qp_type qp_type;
        struct ib_rwq_ind_table *rwq_ind_tbl;
};

struct ib_mr {
        struct ib_device *device;
        struct ib_pd *pd;
        u32 lkey;
        u32 rkey;
        u64 iova;
        u32 length;
        unsigned int page_size;
        bool need_inval;
        union {
                struct ib_uobject *uobject;     /* user */
                struct list_head qp_entry;      /* FR */
        };
};

struct ib_mw {
        struct ib_device *device;
        struct ib_pd *pd;
        struct ib_uobject *uobject;
        u32 rkey;
        enum ib_mw_type type;
};

struct ib_fmr {
        struct ib_device *device;
        struct ib_pd *pd;
        struct list_head list;
        u32 lkey;
        u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
        /* steering according to rule specifications */
        IB_FLOW_ATTR_NORMAL = 0x0,
        /* default unicast and multicast rule -
         * receive all Eth traffic which isn't steered to any QP
         */
        IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
        /* default multicast rule -
         * receive all Eth multicast traffic which isn't steered to any QP
         */
        IB_FLOW_ATTR_MC_DEFAULT = 0x2,
        /* sniffer rule - receive all port traffic */
        IB_FLOW_ATTR_SNIFFER = 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
        /* L2 headers */
        IB_FLOW_SPEC_ETH = 0x20,
        IB_FLOW_SPEC_IB = 0x22,
        /* L3 header */
        IB_FLOW_SPEC_IPV4 = 0x30,
        IB_FLOW_SPEC_IPV6 = 0x31,
        /* L4 headers */
        IB_FLOW_SPEC_TCP = 0x40,
        IB_FLOW_SPEC_UDP = 0x41
};
#define IB_FLOW_SPEC_LAYER_MASK         0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS     4

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
        IB_FLOW_DOMAIN_USER,
        IB_FLOW_DOMAIN_ETHTOOL,
        IB_FLOW_DOMAIN_RFS,
        IB_FLOW_DOMAIN_NIC,
        IB_FLOW_DOMAIN_NUM      /* Must be last */
};

enum ib_flow_flags {
        IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1,        /* Continue match, no steal */
        IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 2          /* Must be last */
};

struct ib_flow_eth_filter {
        u8 dst_mac[6];
        u8 src_mac[6];
        __be16 ether_type;
        __be16 vlan_tag;
        /* Must be last */
        u8 real_sz[0];
};

struct ib_flow_spec_eth {
        enum ib_flow_spec_type type;
        u16 size;
        struct ib_flow_eth_filter val;
        struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
        __be16 dlid;
        __u8 sl;
        /* Must be last */
        u8 real_sz[0];
};

struct ib_flow_spec_ib {
        enum ib_flow_spec_type type;
        u16 size;
        struct ib_flow_ib_filter val;
        struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
        IB_IPV4_DONT_FRAG = 0x2,        /* Don't enable packet fragmentation */
        IB_IPV4_MORE_FRAG = 0x4         /* All fragmented packets except the
                                           last have this flag set */
};

struct ib_flow_ipv4_filter {
        __be32 src_ip;
        __be32 dst_ip;
        u8 proto;
        u8 tos;
        u8 ttl;
        u8 flags;
        /* Must be last */
        u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
        enum ib_flow_spec_type type;
        u16 size;
        struct ib_flow_ipv4_filter val;
        struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
        u8 src_ip[16];
        u8 dst_ip[16];
        __be32 flow_label;
        u8 next_hdr;
        u8 traffic_class;
        u8 hop_limit;
        /* Must be last */
        u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
        enum ib_flow_spec_type type;
        u16 size;
        struct ib_flow_ipv6_filter val;
        struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
        __be16 dst_port;
        __be16 src_port;
        /* Must be last */
        u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
        enum ib_flow_spec_type type;
        u16 size;
        struct ib_flow_tcp_udp_filter val;
        struct ib_flow_tcp_udp_filter mask;
};

union ib_flow_spec {
        struct {
                enum ib_flow_spec_type type;
                u16 size;
        };
        struct ib_flow_spec_eth eth;
        struct ib_flow_spec_ib ib;
        struct ib_flow_spec_ipv4 ipv4;
        struct ib_flow_spec_tcp_udp tcp_udp;
        struct ib_flow_spec_ipv6 ipv6;
};

struct ib_flow_attr {
        enum ib_flow_attr_type type;
        u16 size;
        u16 priority;
        u32 flags;
        u8 num_of_specs;
        u8 port;
        /* Following are the optional layers according to user request
         * struct ib_flow_spec_xxx
         * struct ib_flow_spec_yyy
         */
};

struct ib_flow {
        struct ib_qp *qp;
        struct ib_uobject *uobject;
};
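/*
 * Example (illustrative sketch only): struct ib_flow_attr is followed in
 * memory by num_of_specs variable-sized ib_flow_spec_xxx entries, so a
 * caller lays out the header and the specs back to back in one buffer.
 * "my_flow_rule" is a hypothetical wrapper used only for illustration.
 *
 *      struct my_flow_rule {
 *              struct ib_flow_attr      attr;
 *              struct ib_flow_spec_eth  eth;
 *              struct ib_flow_spec_ipv4 ip;
 *      } rule = {
 *              .attr = {
 *                      .type         = IB_FLOW_ATTR_NORMAL,
 *                      .size         = sizeof(rule),
 *                      .num_of_specs = 2,
 *                      .port         = 1,
 *              },
 *              .eth = {
 *                      .type = IB_FLOW_SPEC_ETH,
 *                      .size = sizeof(struct ib_flow_spec_eth),
 *              },
 *              .ip = {
 *                      .type = IB_FLOW_SPEC_IPV4,
 *                      .size = sizeof(struct ib_flow_spec_ipv4),
 *              },
 *      };
 *
 * The filled attribute block is handed to the device through the
 * create_flow method in struct ib_device (or ib_create_flow()).
 */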
struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
        IB_MAD_IGNORE_MKEY = 1,
        IB_MAD_IGNORE_BKEY = 2,
        IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
        IB_MAD_RESULT_FAILURE = 0,       /* (!SUCCESS is the important flag) */
        IB_MAD_RESULT_SUCCESS = 1 << 0,  /* MAD was successfully processed   */
        IB_MAD_RESULT_REPLY = 1 << 1,    /* Reply packet needs to be sent    */
        IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
        rwlock_t lock;
        struct ib_event_handler event_handler;
        struct ib_pkey_cache **pkey_cache;
        struct ib_gid_table **gid_cache;
        u8 *lmc_cache;
};

struct ib_dma_mapping_ops {
        int (*mapping_error)(struct ib_device *dev, u64 dma_addr);
        u64 (*map_single)(struct ib_device *dev, void *ptr, size_t size,
                          enum dma_data_direction direction);
        void (*unmap_single)(struct ib_device *dev, u64 addr, size_t size,
                             enum dma_data_direction direction);
        u64 (*map_page)(struct ib_device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction direction);
        void (*unmap_page)(struct ib_device *dev, u64 addr, size_t size,
                           enum dma_data_direction direction);
        int (*map_sg)(struct ib_device *dev, struct scatterlist *sg,
                      int nents, enum dma_data_direction direction);
        void (*unmap_sg)(struct ib_device *dev, struct scatterlist *sg,
                         int nents, enum dma_data_direction direction);
        int (*map_sg_attrs)(struct ib_device *dev, struct scatterlist *sg,
                            int nents, enum dma_data_direction direction,
                            struct dma_attrs *attrs);
        void (*unmap_sg_attrs)(struct ib_device *dev, struct scatterlist *sg,
                               int nents, enum dma_data_direction direction,
                               struct dma_attrs *attrs);
        void (*sync_single_for_cpu)(struct ib_device *dev, u64 dma_handle,
                                    size_t size, enum dma_data_direction dir);
        void (*sync_single_for_device)(struct ib_device *dev, u64 dma_handle,
                                       size_t size, enum dma_data_direction dir);
        void *(*alloc_coherent)(struct ib_device *dev, size_t size,
                                u64 *dma_handle, gfp_t flag);
        void (*free_coherent)(struct ib_device *dev, size_t size,
                              void *cpu_addr, u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_port_immutable {
        int pkey_tbl_len;
        int gid_tbl_len;
        u32 core_cap_flags;
        u32 max_mad_size;
};

struct ib_device {
        struct device *dma_device;

        char name[IB_DEVICE_NAME_MAX];

        struct list_head event_handler_list;
        spinlock_t event_handler_lock;

        spinlock_t client_data_lock;
        struct list_head core_list;
        /* Access to the client_data_list is protected by the client_data_lock
         * spinlock and the lists_rwsem read-write semaphore
         */
        struct list_head client_data_list;

        struct ib_cache cache;
        /**
         * port_immutable is indexed by port number
         */
        struct ib_port_immutable *port_immutable;

        int num_comp_vectors;

        struct iw_cm_verbs *iwcm;

        /**
         * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
         *   driver initialized data.  The struct is kfree()'ed by the sysfs
         *   core when the device is removed.  A lifespan of -1 in the return
         *   struct tells the core to set a default lifespan.
         */
        struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
                                                u8 port_num);
        /**
         * get_hw_stats - Fill in the counter value(s) in the stats struct.
         * @index - The index in the value array we wish to have updated, or
         *   num_counters if we want all stats updated
         * Return codes -
         *   < 0 - Error, no counters updated
         *   index - Updated the single counter pointed to by index
         *   num_counters - Updated all counters (will reset the timestamp
         *     and prevent further calls for lifespan milliseconds)
         * Drivers are allowed to update all counters in lieu of just the
         *   one given in index at their option
         */
        int (*get_hw_stats)(struct ib_device *device,
                            struct rdma_hw_stats *stats,
                            u8 port, int index);
        int (*query_device)(struct ib_device *device,
                            struct ib_device_attr *device_attr,
                            struct ib_udata *udata);
        int (*query_port)(struct ib_device *device,
                          u8 port_num,
                          struct ib_port_attr *port_attr);
        enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
                                               u8 port_num);
        /* When calling get_netdev, the HW vendor's driver should return the
         * net device of device @device at port @port_num or NULL if such
         * a net device doesn't exist.  The vendor driver should call dev_hold
         * on this net device.  The HW vendor's device driver must guarantee
         * that this function returns NULL before the net device reaches
         * NETDEV_UNREGISTER_FINAL state.
         */
        struct net_device *(*get_netdev)(struct ib_device *device,
                                         u8 port_num);
        int (*query_gid)(struct ib_device *device,
                         u8 port_num, int index,
                         union ib_gid *gid);
        /* When calling add_gid, the HW vendor's driver should
         * add the gid of device @device at gid index @index of
         * port @port_num to be @gid.  Meta-info of that gid (for example,
         * the network device related to this gid) is available
         * at @attr.  @context allows the HW vendor driver to store extra
         * information together with a GID entry.  The HW vendor may allocate
         * memory to contain this information and store it in @context when a
         * new GID entry is written to.  Params are consistent until the next
         * call of add_gid or delete_gid.  The function should return 0 on
         * success or error otherwise.  The function could be called
         * concurrently for different ports.  This function is only called
         * when roce_gid_table is used.
         */
        int (*add_gid)(struct ib_device *device,
                       u8 port_num,
                       unsigned int index,
                       const union ib_gid *gid,
                       const struct ib_gid_attr *attr,
                       void **context);
        /* When calling del_gid, the HW vendor's driver should delete the
         * gid of device @device at gid index @index of port @port_num.
         * Upon the deletion of a GID entry, the HW vendor must free any
         * allocated memory.  The caller will clear @context afterwards.
         * This function is only called when roce_gid_table is used.
         */
        int (*del_gid)(struct ib_device *device,
                       u8 port_num,
                       unsigned int index,
                       void **context);
        int (*query_pkey)(struct ib_device *device,
                          u8 port_num, u16 index, u16 *pkey);
        int (*modify_device)(struct ib_device *device,
                             int device_modify_mask,
                             struct ib_device_modify *device_modify);
        int (*modify_port)(struct ib_device *device,
                           u8 port_num, int port_modify_mask,
                           struct ib_port_modify *port_modify);
        struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
                                              struct ib_udata *udata);
        int (*dealloc_ucontext)(struct ib_ucontext *context);
        int (*mmap)(struct ib_ucontext *context,
                    struct vm_area_struct *vma);
        struct ib_pd *(*alloc_pd)(struct ib_device *device,
                                  struct ib_ucontext *context,
                                  struct ib_udata *udata);
        int (*dealloc_pd)(struct ib_pd *pd);
        struct ib_ah *(*create_ah)(struct ib_pd *pd,
                                   struct ib_ah_attr *ah_attr,
                                   struct ib_udata *udata);
        int (*modify_ah)(struct ib_ah *ah,
                         struct ib_ah_attr *ah_attr);
        int (*query_ah)(struct ib_ah *ah,
                        struct ib_ah_attr *ah_attr);
        int (*destroy_ah)(struct ib_ah *ah);
        struct ib_srq *(*create_srq)(struct ib_pd *pd,
                                     struct ib_srq_init_attr *srq_init_attr,
                                     struct ib_udata *udata);
        int (*modify_srq)(struct ib_srq *srq,
                          struct ib_srq_attr *srq_attr,
                          enum ib_srq_attr_mask srq_attr_mask,
                          struct ib_udata *udata);
        int (*query_srq)(struct ib_srq *srq,
                         struct ib_srq_attr *srq_attr);
        int (*destroy_srq)(struct ib_srq *srq);
        int (*post_srq_recv)(struct ib_srq *srq,
                             struct ib_recv_wr *recv_wr,
                             struct ib_recv_wr **bad_recv_wr);
        struct ib_qp *(*create_qp)(struct ib_pd *pd,
                                   struct ib_qp_init_attr *qp_init_attr,
                                   struct ib_udata *udata);
        int (*modify_qp)(struct ib_qp *qp,
                         struct ib_qp_attr *qp_attr,
                         int qp_attr_mask,
                         struct ib_udata *udata);
        int (*query_qp)(struct ib_qp *qp,
                        struct ib_qp_attr *qp_attr,
                        int qp_attr_mask,
                        struct ib_qp_init_attr *qp_init_attr);
        int (*destroy_qp)(struct ib_qp *qp);
        int (*post_send)(struct ib_qp *qp,
                         struct ib_send_wr *send_wr,
                         struct ib_send_wr **bad_send_wr);
        int (*post_recv)(struct ib_qp *qp,
                         struct ib_recv_wr *recv_wr,
                         struct ib_recv_wr **bad_recv_wr);
        struct ib_cq *(*create_cq)(struct ib_device *device,
                                   const struct ib_cq_init_attr *attr,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata);
        int (*modify_cq)(struct ib_cq *cq, u16 cq_count,
                         u16 cq_period);
        int (*destroy_cq)(struct ib_cq *cq);
        int (*resize_cq)(struct ib_cq *cq, int cqe,
                         struct ib_udata *udata);
        int (*poll_cq)(struct ib_cq *cq, int num_entries,
                       struct ib_wc *wc);
        int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
        int (*req_notify_cq)(struct ib_cq *cq,
                             enum ib_cq_notify_flags flags);
        int (*req_ncomp_notif)(struct ib_cq *cq,
                               int wc_cnt);
        struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
                                    int mr_access_flags);
        struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
                                     u64 start, u64 length,
                                     u64 virt_addr,
                                     int mr_access_flags,
                                     struct ib_udata *udata);
        int (*rereg_user_mr)(struct ib_mr *mr,
                             int flags,
                             u64 start, u64 length,
                             u64 virt_addr,
                             int mr_access_flags,
                             struct ib_pd *pd,
                             struct ib_udata *udata);
        int (*dereg_mr)(struct ib_mr *mr);
        struct ib_mr *(*alloc_mr)(struct ib_pd *pd,
                                  enum ib_mr_type mr_type,
                                  u32 max_num_sg);
        int (*map_mr_sg)(struct ib_mr *mr,
                         struct scatterlist *sg,
                         int sg_nents,
                         unsigned int *sg_offset);
        struct ib_mw *(*alloc_mw)(struct ib_pd *pd,
                                  enum ib_mw_type type,
                                  struct ib_udata *udata);
        int (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
                                    int mr_access_flags,
                                    struct ib_fmr_attr *fmr_attr);
        int (*map_phys_fmr)(struct ib_fmr *fmr,
                            u64 *page_list, int list_len,
                            u64 iova);
        int (*unmap_fmr)(struct list_head *fmr_list);
        int (*dealloc_fmr)(struct ib_fmr *fmr);
        int (*attach_mcast)(struct ib_qp *qp,
                            union ib_gid *gid,
                            u16 lid);
        int (*detach_mcast)(struct ib_qp *qp,
                            union ib_gid *gid,
                            u16 lid);
        int (*process_mad)(struct ib_device *device,
                           int process_mad_flags,
                           u8 port_num,
                           const struct ib_wc *in_wc,
                           const struct ib_grh *in_grh,
                           const struct ib_mad_hdr *in_mad,
                           size_t in_mad_size,
                           struct ib_mad_hdr *out_mad,
                           size_t *out_mad_size,
                           u16 *out_mad_pkey_index);
        struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
                                      struct ib_ucontext *ucontext,
                                      struct ib_udata *udata);
        int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
        struct ib_flow *(*create_flow)(struct ib_qp *qp,
                                       struct ib_flow_attr *flow_attr,
                                       int domain);
        int (*destroy_flow)(struct ib_flow *flow_id);
        int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
                               struct ib_mr_status *mr_status);
        void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
        void (*drain_rq)(struct ib_qp *qp);
        void (*drain_sq)(struct ib_qp *qp);
        int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
                                 int state);
        int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
                             struct ifla_vf_info *ivf);
        int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
                            struct ifla_vf_stats *stats);
        int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
                           int type);
        struct ib_wq *(*create_wq)(struct ib_pd *pd,
                                   struct ib_wq_init_attr *init_attr,
                                   struct ib_udata *udata);
        int (*destroy_wq)(struct ib_wq *wq);
        int (*modify_wq)(struct ib_wq *wq,
                         struct ib_wq_attr *attr,
                         u32 wq_attr_mask,
                         struct ib_udata *udata);
        struct ib_rwq_ind_table *(*create_rwq_ind_table)(struct ib_device *device,
                                                         struct ib_rwq_ind_table_init_attr *init_attr,
                                                         struct ib_udata *udata);
        int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);

        struct ib_dma_mapping_ops *dma_ops;

        struct module *owner;
        struct device dev;
        struct kobject *ports_parent;
        struct list_head port_list;

        enum {
                IB_DEV_UNINITIALIZED,
                IB_DEV_REGISTERED,
                IB_DEV_UNREGISTERED
        } reg_state;

        int uverbs_abi_ver;
        u64 uverbs_cmd_mask;
        u64 uverbs_ex_cmd_mask;

        char node_desc[IB_DEVICE_NODE_DESC_MAX];
        __be64 node_guid;
        u32 local_dma_lkey;
        u16 is_switch:1;
        u8 node_type;
        u8 phys_port_cnt;
        struct ib_device_attr attrs;
        struct attribute_group *hw_stats_ag;
        struct rdma_hw_stats *hw_stats;

        /**
         * The following mandatory functions are used only at device
         * registration.  Keep functions such as these at the end of this
         * structure to avoid cache line misses when accessing struct ib_device
         * in fast paths.
2110 */ 2111 int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); 2112 void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len); 2113 }; 2114 2115 struct ib_client { 2116 char *name; 2117 void (*add) (struct ib_device *); 2118 void (*remove)(struct ib_device *, void *client_data); 2119 2120 /* Returns the net_dev belonging to this ib_client and matching the 2121 * given parameters. 2122 * @dev: An RDMA device that the net_dev use for communication. 2123 * @port: A physical port number on the RDMA device. 2124 * @pkey: P_Key that the net_dev uses if applicable. 2125 * @gid: A GID that the net_dev uses to communicate. 2126 * @addr: An IP address the net_dev is configured with. 2127 * @client_data: The device's client data set by ib_set_client_data(). 2128 * 2129 * An ib_client that implements a net_dev on top of RDMA devices 2130 * (such as IP over IB) should implement this callback, allowing the 2131 * rdma_cm module to find the right net_dev for a given request. 2132 * 2133 * The caller is responsible for calling dev_put on the returned 2134 * netdev. */ 2135 struct net_device *(*get_net_dev_by_params)( 2136 struct ib_device *dev, 2137 u8 port, 2138 u16 pkey, 2139 const union ib_gid *gid, 2140 const struct sockaddr *addr, 2141 void *client_data); 2142 struct list_head list; 2143 }; 2144 2145 struct ib_device *ib_alloc_device(size_t size); 2146 void ib_dealloc_device(struct ib_device *device); 2147 2148 void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len); 2149 2150 int ib_register_device(struct ib_device *device, 2151 int (*port_callback)(struct ib_device *, 2152 u8, struct kobject *)); 2153 void ib_unregister_device(struct ib_device *device); 2154 2155 int ib_register_client (struct ib_client *client); 2156 void ib_unregister_client(struct ib_client *client); 2157 2158 void *ib_get_client_data(struct ib_device *device, struct ib_client *client); 2159 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2160 void *data); 2161 2162 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2163 { 2164 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2165 } 2166 2167 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2168 { 2169 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2170 } 2171 2172 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 2173 size_t offset, 2174 size_t len) 2175 { 2176 const void __user *p = (const char __user *)udata->inbuf + offset; 2177 bool ret; 2178 u8 *buf; 2179 2180 if (len > USHRT_MAX) 2181 return false; 2182 2183 buf = memdup_user(p, len); 2184 if (IS_ERR(buf)) 2185 return false; 2186 2187 ret = !memchr_inv(buf, 0, len); 2188 kfree(buf); 2189 return ret; 2190 } 2191 2192 /** 2193 * ib_modify_qp_is_ok - Check that the supplied attribute mask 2194 * contains all required attributes and no attributes not allowed for 2195 * the given QP state transition. 2196 * @cur_state: Current QP state 2197 * @next_state: Next QP state 2198 * @type: QP type 2199 * @mask: Mask of supplied QP attributes 2200 * @ll : link layer of port 2201 * 2202 * This function is a helper function that a low-level driver's 2203 * modify_qp method can use to validate the consumer's input. It 2204 * checks that cur_state and next_state are valid QP states, that a 2205 * transition from cur_state to next_state is allowed by the IB spec, 2206 * and that the attribute mask supplied is allowed for the transition. 
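 *
 * A minimal sketch of the intended call site inside a driver's modify_qp
 * method; how cur_state, new_state, ll and port_num are derived here is an
 * illustrative assumption, not required code:
 *
 *	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
 *						      qp->state;
 *	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
 *	ll = rdma_port_get_link_layer(qp->device, port_num);
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
 *				qp_attr_mask, ll))
 *		return -EINVAL;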
2207 */ 2208 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 2209 enum ib_qp_type type, enum ib_qp_attr_mask mask, 2210 enum rdma_link_layer ll); 2211 2212 int ib_register_event_handler (struct ib_event_handler *event_handler); 2213 int ib_unregister_event_handler(struct ib_event_handler *event_handler); 2214 void ib_dispatch_event(struct ib_event *event); 2215 2216 int ib_query_port(struct ib_device *device, 2217 u8 port_num, struct ib_port_attr *port_attr); 2218 2219 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 2220 u8 port_num); 2221 2222 /** 2223 * rdma_cap_ib_switch - Check if the device is IB switch 2224 * @device: Device to check 2225 * 2226 * Device driver is responsible for setting the is_switch bit 2227 * in the ib_device structure at init time. 2228 * 2229 * Return: true if the device is IB switch. 2230 */ 2231 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 2232 { 2233 return device->is_switch; 2234 } 2235 2236 /** 2237 * rdma_start_port - Return the first valid port number for the device 2238 * specified 2239 * 2240 * @device: Device to be checked 2241 * 2242 * Return start port number 2243 */ 2244 static inline u8 rdma_start_port(const struct ib_device *device) 2245 { 2246 return rdma_cap_ib_switch(device) ? 0 : 1; 2247 } 2248 2249 /** 2250 * rdma_end_port - Return the last valid port number for the device 2251 * specified 2252 * 2253 * @device: Device to be checked 2254 * 2255 * Return last port number 2256 */ 2257 static inline u8 rdma_end_port(const struct ib_device *device) 2258 { 2259 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt; 2260 } 2261 2262 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 2263 { 2264 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; 2265 } 2266 2267 static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num) 2268 { 2269 return device->port_immutable[port_num].core_cap_flags & 2270 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 2271 } 2272 2273 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num) 2274 { 2275 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 2276 } 2277 2278 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num) 2279 { 2280 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; 2281 } 2282 2283 static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num) 2284 { 2285 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; 2286 } 2287 2288 static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num) 2289 { 2290 return rdma_protocol_ib(device, port_num) || 2291 rdma_protocol_roce(device, port_num); 2292 } 2293 2294 /** 2295 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 2296 * Management Datagrams. 2297 * @device: Device to check 2298 * @port_num: Port number to check 2299 * 2300 * Management Datagrams (MAD) are a required part of the InfiniBand 2301 * specification and are supported on all InfiniBand devices. A slightly 2302 * extended version is also supported on OPA interfaces. 2303 * 2304 * Return: true if the port supports sending/receiving of MAD packets.
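 *
 * A minimal sketch of how a consumer might walk the ports of a device and
 * act only on MAD-capable ones; setup_mad_agent() is a hypothetical helper
 * used purely for illustration:
 *
 *	u8 port;
 *
 *	for (port = rdma_start_port(device);
 *	     port <= rdma_end_port(device); port++)
 *		if (rdma_cap_ib_mad(device, port))
 *			setup_mad_agent(device, port);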
2305 */ 2306 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num) 2307 { 2308 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; 2309 } 2310 2311 /** 2312 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 2313 * Management Datagrams. 2314 * @device: Device to check 2315 * @port_num: Port number to check 2316 * 2317 * Intel OmniPath devices extend and/or replace the InfiniBand Management 2318 * datagrams with their own versions. These OPA MADs share many but not all of 2319 * the characteristics of InfiniBand MADs. 2320 * 2321 * OPA MADs differ in the following ways: 2322 * 2323 * 1) MADs are variable size up to 2K 2324 * IBTA defined MADs remain fixed at 256 bytes 2325 * 2) OPA SMPs must carry valid PKeys 2326 * 3) OPA SMP packets are a different format 2327 * 2328 * Return: true if the port supports OPA MAD packet formats. 2329 */ 2330 static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num) 2331 { 2332 return (device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_OPA_MAD) 2333 == RDMA_CORE_CAP_OPA_MAD; 2334 } 2335 2336 /** 2337 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 2338 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 2339 * @device: Device to check 2340 * @port_num: Port number to check 2341 * 2342 * Each InfiniBand node is required to provide a Subnet Management Agent 2343 * that the subnet manager can access. Prior to the fabric being fully 2344 * configured by the subnet manager, the SMA is accessed via a well known 2345 * interface called the Subnet Management Interface (SMI). This interface 2346 * uses directed route packets to communicate with the SM to get around the 2347 * chicken and egg problem of the SM needing to know what's on the fabric 2348 * in order to configure the fabric, and needing to configure the fabric in 2349 * order to send packets to the devices on the fabric. These directed 2350 * route packets do not need the fabric fully configured in order to reach 2351 * their destination. The SMI is the only method allowed to send 2352 * directed route packets on an InfiniBand fabric. 2353 * 2354 * Return: true if the port provides an SMI. 2355 */ 2356 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num) 2357 { 2358 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; 2359 } 2360 2361 /** 2362 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 2363 * Communication Manager. 2364 * @device: Device to check 2365 * @port_num: Port number to check 2366 * 2367 * The InfiniBand Communication Manager is one of many pre-defined General 2368 * Service Agents (GSA) that are accessed via the General Service 2369 * Interface (GSI). Its role is to facilitate establishment of connections 2370 * between nodes as well as other management related tasks for established 2371 * connections. 2372 * 2373 * Return: true if the port supports an IB CM (this does not guarantee that 2374 * a CM is actually running however). 2375 */ 2376 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num) 2377 { 2378 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_CM; 2379 } 2380 2381 /** 2382 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 2383 * Communication Manager.
* @device: Device to check 2385 * @port_num: Port number to check 2386 * 2387 * Similar to above, but specific to iWARP connections which have a different 2388 * management protocol than InfiniBand. 2389 * 2390 * Return: true if the port supports an iWARP CM (this does not guarantee that 2391 * a CM is actually running however). 2392 */ 2393 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num) 2394 { 2395 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; 2396 } 2397 2398 /** 2399 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 2400 * Subnet Administration. 2401 * @device: Device to check 2402 * @port_num: Port number to check 2403 * 2404 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 2405 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 2406 * fabrics, devices should resolve routes to other hosts by contacting the 2407 * SA to query the proper route. 2408 * 2409 * Return: true if the port should act as a client to the fabric Subnet 2410 * Administration interface. This does not imply that the SA service is 2411 * running locally. 2412 */ 2413 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num) 2414 { 2415 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; 2416 } 2417 2418 /** 2419 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 2420 * Multicast. 2421 * @device: Device to check 2422 * @port_num: Port number to check 2423 * 2424 * InfiniBand multicast registration is more complex than normal IPv4 or 2425 * IPv6 multicast registration. Each Host Channel Adapter must register 2426 * with the Subnet Manager when it wishes to join a multicast group. It 2427 * should do so only once regardless of how many queue pairs it subscribes 2428 * to this group. And it should leave the group only after all queue pairs 2429 * attached to the group have been detached. 2430 * 2431 * Return: true if the port must undertake the additional administrative 2432 * overhead of registering/unregistering with the SM and tracking of the 2433 * total number of queue pairs attached to the multicast group. 2434 */ 2435 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num) 2436 { 2437 return rdma_cap_ib_sa(device, port_num); 2438 } 2439 2440 /** 2441 * rdma_cap_af_ib - Check if the port of device has the capability 2442 * Native Infiniband Address. 2443 * @device: Device to check 2444 * @port_num: Port number to check 2445 * 2446 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 2447 * GID. RoCE uses a different mechanism, but still generates a GID via 2448 * a prescribed mechanism and port specific data. 2449 * 2450 * Return: true if the port uses a GID address to identify devices on the 2451 * network. 2452 */ 2453 static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num) 2454 { 2455 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; 2456 } 2457 2458 /** 2459 * rdma_cap_eth_ah - Check if the port of device has the capability 2460 * Ethernet Address Handle. 2461 * @device: Device to check 2462 * @port_num: Port number to check 2463 * 2464 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 2465 * to fabricate GIDs over Ethernet/IP specific addresses native to the 2466 * port.
Normally, packet headers are generated by the sending host 2467 * adapter, but when sending connectionless datagrams, we must manually 2468 * inject the proper headers for the fabric we are communicating over. 2469 * 2470 * Return: true if we are running as a RoCE port and must force the 2471 * addition of a Global Route Header built from our Ethernet Address 2472 * Handle into our header list for connectionless packets. 2473 */ 2474 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num) 2475 { 2476 return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; 2477 } 2478 2479 /** 2480 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 2481 * 2482 * @device: Device 2483 * @port_num: Port number 2484 * 2485 * This MAD size includes the MAD headers and MAD payload. No other headers 2486 * are included. 2487 * 2488 * Return the max MAD size required by the Port. Will return 0 if the port 2489 * does not support MADs 2490 */ 2491 static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num) 2492 { 2493 return device->port_immutable[port_num].max_mad_size; 2494 } 2495 2496 /** 2497 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 2498 * @device: Device to check 2499 * @port_num: Port number to check 2500 * 2501 * RoCE GID table mechanism manages the various GIDs for a device. 2502 * 2503 * NOTE: if allocating the port's GID table has failed, this call will still 2504 * return true, but any RoCE GID table API will fail. 2505 * 2506 * Return: true if the port uses RoCE GID table mechanism in order to manage 2507 * its GIDs. 2508 */ 2509 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 2510 u8 port_num) 2511 { 2512 return rdma_protocol_roce(device, port_num) && 2513 device->add_gid && device->del_gid; 2514 } 2515 2516 /* 2517 * Check if the device supports READ W/ INVALIDATE. 2518 */ 2519 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 2520 { 2521 /* 2522 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 2523 * has support for it yet. 2524 */ 2525 return rdma_protocol_iwarp(dev, port_num); 2526 } 2527 2528 int ib_query_gid(struct ib_device *device, 2529 u8 port_num, int index, union ib_gid *gid, 2530 struct ib_gid_attr *attr); 2531 2532 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, 2533 int state); 2534 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, 2535 struct ifla_vf_info *info); 2536 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, 2537 struct ifla_vf_stats *stats); 2538 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, 2539 int type); 2540 2541 int ib_query_pkey(struct ib_device *device, 2542 u8 port_num, u16 index, u16 *pkey); 2543 2544 int ib_modify_device(struct ib_device *device, 2545 int device_modify_mask, 2546 struct ib_device_modify *device_modify); 2547 2548 int ib_modify_port(struct ib_device *device, 2549 u8 port_num, int port_modify_mask, 2550 struct ib_port_modify *port_modify); 2551 2552 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 2553 enum ib_gid_type gid_type, struct net_device *ndev, 2554 u8 *port_num, u16 *index); 2555 2556 int ib_find_pkey(struct ib_device *device, 2557 u8 port_num, u16 pkey, u16 *index); 2558 2559 enum ib_pd_flags { 2560 /* 2561 * Create a memory registration for all memory in the system and place 2562 * the rkey for it into pd->unsafe_global_rkey. 
This can be used by 2563 * ULPs to avoid the overhead of dynamic MRs. 2564 * 2565 * This flag is generally considered unsafe and must only be used in 2566 * extremely trusted environments. Every use of it will log a warning 2567 * in the kernel log. 2568 */ 2569 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 2570 }; 2571 2572 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 2573 const char *caller); 2574 #define ib_alloc_pd(device, flags) \ 2575 __ib_alloc_pd((device), (flags), __func__) 2576 void ib_dealloc_pd(struct ib_pd *pd); 2577 2578 /** 2579 * ib_create_ah - Creates an address handle for the given address vector. 2580 * @pd: The protection domain associated with the address handle. 2581 * @ah_attr: The attributes of the address vector. 2582 * 2583 * The address handle is used to reference a local or global destination 2584 * in all UD QP post sends. 2585 */ 2586 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 2587 2588 /** 2589 * ib_init_ah_from_wc - Initializes address handle attributes from a 2590 * work completion. 2591 * @device: Device on which the received message arrived. 2592 * @port_num: Port on which the received message arrived. 2593 * @wc: Work completion associated with the received message. 2594 * @grh: References the received global route header. This parameter is 2595 * ignored unless the work completion indicates that the GRH is valid. 2596 * @ah_attr: Returned attributes that can be used when creating an address 2597 * handle for replying to the message. 2598 */ 2599 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 2600 const struct ib_wc *wc, const struct ib_grh *grh, 2601 struct ib_ah_attr *ah_attr); 2602 2603 /** 2604 * ib_create_ah_from_wc - Creates an address handle associated with the 2605 * sender of the specified work completion. 2606 * @pd: The protection domain associated with the address handle. 2607 * @wc: Work completion information associated with a received message. 2608 * @grh: References the received global route header. This parameter is 2609 * ignored unless the work completion indicates that the GRH is valid. 2610 * @port_num: The outbound port number to associate with the address. 2611 * 2612 * The address handle is used to reference a local or global destination 2613 * in all UD QP post sends. 2614 */ 2615 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 2616 const struct ib_grh *grh, u8 port_num); 2617 2618 /** 2619 * ib_modify_ah - Modifies the address vector associated with an address 2620 * handle. 2621 * @ah: The address handle to modify. 2622 * @ah_attr: The new address vector attributes to associate with the 2623 * address handle. 2624 */ 2625 int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2626 2627 /** 2628 * ib_query_ah - Queries the address vector associated with an address 2629 * handle. 2630 * @ah: The address handle to query. 2631 * @ah_attr: The address vector attributes associated with the address 2632 * handle. 2633 */ 2634 int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); 2635 2636 /** 2637 * ib_destroy_ah - Destroys an address handle. 2638 * @ah: The address handle to destroy. 2639 */ 2640 int ib_destroy_ah(struct ib_ah *ah); 2641 2642 /** 2643 * ib_create_srq - Creates a SRQ associated with the specified protection 2644 * domain. 2645 * @pd: The protection domain associated with the SRQ. 2646 * @srq_init_attr: A list of initial attributes required to create the 2647 * SRQ.
If SRQ creation succeeds, then the attributes are updated to 2648 * the actual capabilities of the created SRQ. 2649 * 2650 * srq_attr->max_wr and srq_attr->max_sge are read to determine the 2651 * requested size of the SRQ, and set to the actual values allocated 2652 * on return. If ib_create_srq() succeeds, then max_wr and max_sge 2653 * will always be at least as large as the requested values. 2654 */ 2655 struct ib_srq *ib_create_srq(struct ib_pd *pd, 2656 struct ib_srq_init_attr *srq_init_attr); 2657 2658 /** 2659 * ib_modify_srq - Modifies the attributes for the specified SRQ. 2660 * @srq: The SRQ to modify. 2661 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 2662 * the current values of selected SRQ attributes are returned. 2663 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 2664 * are being modified. 2665 * 2666 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 2667 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 2668 * the number of receives queued drops below the limit. 2669 */ 2670 int ib_modify_srq(struct ib_srq *srq, 2671 struct ib_srq_attr *srq_attr, 2672 enum ib_srq_attr_mask srq_attr_mask); 2673 2674 /** 2675 * ib_query_srq - Returns the attribute list and current values for the 2676 * specified SRQ. 2677 * @srq: The SRQ to query. 2678 * @srq_attr: The attributes of the specified SRQ. 2679 */ 2680 int ib_query_srq(struct ib_srq *srq, 2681 struct ib_srq_attr *srq_attr); 2682 2683 /** 2684 * ib_destroy_srq - Destroys the specified SRQ. 2685 * @srq: The SRQ to destroy. 2686 */ 2687 int ib_destroy_srq(struct ib_srq *srq); 2688 2689 /** 2690 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 2691 * @srq: The SRQ to post the work request on. 2692 * @recv_wr: A list of work requests to post on the receive queue. 2693 * @bad_recv_wr: On an immediate failure, this parameter will reference 2694 * the work request that failed to be posted on the QP. 2695 */ 2696 static inline int ib_post_srq_recv(struct ib_srq *srq, 2697 struct ib_recv_wr *recv_wr, 2698 struct ib_recv_wr **bad_recv_wr) 2699 { 2700 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr); 2701 } 2702 2703 /** 2704 * ib_create_qp - Creates a QP associated with the specified protection 2705 * domain. 2706 * @pd: The protection domain associated with the QP. 2707 * @qp_init_attr: A list of initial attributes required to create the 2708 * QP. If QP creation succeeds, then the attributes are updated to 2709 * the actual capabilities of the created QP. 2710 */ 2711 struct ib_qp *ib_create_qp(struct ib_pd *pd, 2712 struct ib_qp_init_attr *qp_init_attr); 2713 2714 /** 2715 * ib_modify_qp - Modifies the attributes for the specified QP and then 2716 * transitions the QP to the given state. 2717 * @qp: The QP to modify. 2718 * @qp_attr: On input, specifies the QP attributes to modify. On output, 2719 * the current values of selected QP attributes are returned. 2720 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 2721 * are being modified. 2722 */ 2723 int ib_modify_qp(struct ib_qp *qp, 2724 struct ib_qp_attr *qp_attr, 2725 int qp_attr_mask); 2726 2727 /** 2728 * ib_query_qp - Returns the attribute list and current values for the 2729 * specified QP. 2730 * @qp: The QP to query. 2731 * @qp_attr: The attributes of the specified QP. 2732 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 2733 * @qp_init_attr: Additional attributes of the selected QP.
2734 * 2735 * The qp_attr_mask may be used to limit the query to gathering only the 2736 * selected attributes. 2737 */ 2738 int ib_query_qp(struct ib_qp *qp, 2739 struct ib_qp_attr *qp_attr, 2740 int qp_attr_mask, 2741 struct ib_qp_init_attr *qp_init_attr); 2742 2743 /** 2744 * ib_destroy_qp - Destroys the specified QP. 2745 * @qp: The QP to destroy. 2746 */ 2747 int ib_destroy_qp(struct ib_qp *qp); 2748 2749 /** 2750 * ib_open_qp - Obtain a reference to an existing sharable QP. 2751 * @xrcd - XRC domain 2752 * @qp_open_attr: Attributes identifying the QP to open. 2753 * 2754 * Returns a reference to a sharable QP. 2755 */ 2756 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 2757 struct ib_qp_open_attr *qp_open_attr); 2758 2759 /** 2760 * ib_close_qp - Release an external reference to a QP. 2761 * @qp: The QP handle to release 2762 * 2763 * The opened QP handle is released by the caller. The underlying 2764 * shared QP is not destroyed until all internal references are released. 2765 */ 2766 int ib_close_qp(struct ib_qp *qp); 2767 2768 /** 2769 * ib_post_send - Posts a list of work requests to the send queue of 2770 * the specified QP. 2771 * @qp: The QP to post the work request on. 2772 * @send_wr: A list of work requests to post on the send queue. 2773 * @bad_send_wr: On an immediate failure, this parameter will reference 2774 * the work request that failed to be posted on the QP. 2775 * 2776 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 2777 * error is returned, the QP state shall not be affected, 2778 * ib_post_send() will return an immediate error after queueing any 2779 * earlier work requests in the list. 2780 */ 2781 static inline int ib_post_send(struct ib_qp *qp, 2782 struct ib_send_wr *send_wr, 2783 struct ib_send_wr **bad_send_wr) 2784 { 2785 return qp->device->post_send(qp, send_wr, bad_send_wr); 2786 } 2787 2788 /** 2789 * ib_post_recv - Posts a list of work requests to the receive queue of 2790 * the specified QP. 2791 * @qp: The QP to post the work request on. 2792 * @recv_wr: A list of work requests to post on the receive queue. 2793 * @bad_recv_wr: On an immediate failure, this parameter will reference 2794 * the work request that failed to be posted on the QP. 2795 */ 2796 static inline int ib_post_recv(struct ib_qp *qp, 2797 struct ib_recv_wr *recv_wr, 2798 struct ib_recv_wr **bad_recv_wr) 2799 { 2800 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); 2801 } 2802 2803 struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 2804 int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx); 2805 void ib_free_cq(struct ib_cq *cq); 2806 2807 /** 2808 * ib_create_cq - Creates a CQ on the specified device. 2809 * @device: The device on which to create the CQ. 2810 * @comp_handler: A user-specified callback that is invoked when a 2811 * completion event occurs on the CQ. 2812 * @event_handler: A user-specified callback that is invoked when an 2813 * asynchronous event not associated with a completion occurs on the CQ. 2814 * @cq_context: Context associated with the CQ returned to the user via 2815 * the associated completion and event handlers. 2816 * @cq_attr: The attributes the CQ should be created upon. 2817 * 2818 * Users can examine the cq structure to determine the actual CQ size. 
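 *
 * A minimal sketch, assuming a caller-defined completion handler
 * my_cq_handler() and context pointer my_ctx (both names are
 * illustrative only):
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 128, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_cq_handler, NULL, my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);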
2819 */ 2820 struct ib_cq *ib_create_cq(struct ib_device *device, 2821 ib_comp_handler comp_handler, 2822 void (*event_handler)(struct ib_event *, void *), 2823 void *cq_context, 2824 const struct ib_cq_init_attr *cq_attr); 2825 2826 /** 2827 * ib_resize_cq - Modifies the capacity of the CQ. 2828 * @cq: The CQ to resize. 2829 * @cqe: The minimum size of the CQ. 2830 * 2831 * Users can examine the cq structure to determine the actual CQ size. 2832 */ 2833 int ib_resize_cq(struct ib_cq *cq, int cqe); 2834 2835 /** 2836 * ib_modify_cq - Modifies moderation params of the CQ 2837 * @cq: The CQ to modify. 2838 * @cq_count: number of CQEs that will trigger an event 2839 * @cq_period: max period of time in usec before triggering an event 2840 * 2841 */ 2842 int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 2843 2844 /** 2845 * ib_destroy_cq - Destroys the specified CQ. 2846 * @cq: The CQ to destroy. 2847 */ 2848 int ib_destroy_cq(struct ib_cq *cq); 2849 2850 /** 2851 * ib_poll_cq - poll a CQ for completion(s) 2852 * @cq:the CQ being polled 2853 * @num_entries:maximum number of completions to return 2854 * @wc:array of at least @num_entries &struct ib_wc where completions 2855 * will be returned 2856 * 2857 * Poll a CQ for (possibly multiple) completions. If the return value 2858 * is < 0, an error occurred. If the return value is >= 0, it is the 2859 * number of completions returned. If the return value is 2860 * non-negative and < num_entries, then the CQ was emptied. 2861 */ 2862 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 2863 struct ib_wc *wc) 2864 { 2865 return cq->device->poll_cq(cq, num_entries, wc); 2866 } 2867 2868 /** 2869 * ib_peek_cq - Returns the number of unreaped completions currently 2870 * on the specified CQ. 2871 * @cq: The CQ to peek. 2872 * @wc_cnt: A minimum number of unreaped completions to check for. 2873 * 2874 * If the number of unreaped completions is greater than or equal to wc_cnt, 2875 * this function returns wc_cnt, otherwise, it returns the actual number of 2876 * unreaped completions. 2877 */ 2878 int ib_peek_cq(struct ib_cq *cq, int wc_cnt); 2879 2880 /** 2881 * ib_req_notify_cq - Request completion notification on a CQ. 2882 * @cq: The CQ to generate an event for. 2883 * @flags: 2884 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 2885 * to request an event on the next solicited event or next work 2886 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 2887 * may also be |ed in to request a hint about missed events, as 2888 * described below. 2889 * 2890 * Return Value: 2891 * < 0 means an error occurred while requesting notification 2892 * == 0 means notification was requested successfully, and if 2893 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 2894 * were missed and it is safe to wait for another event. In 2895 * this case it is guaranteed that any work completions added 2896 * to the CQ since the last CQ poll will trigger a completion 2897 * notification event. 2898 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 2899 * in. It means that the consumer must poll the CQ again to 2900 * make sure it is empty to avoid missing an event because of a 2901 * race between requesting notification and an entry being 2902 * added to the CQ. This return value means it is possible 2903 * (but not guaranteed) that a work completion has been added 2904 * to the CQ since the last poll without triggering a 2905 * completion notification event.
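 *
 * A minimal sketch of the usual poll/re-arm loop built on this return
 * value; the wc[] array and the handle_completion() helper are
 * assumptions made for illustration:
 *
 *	int i, n;
 *
 *	do {
 *		while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *			for (i = 0; i < n; i++)
 *				handle_completion(&wc[i]);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);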
2906 */ 2907 static inline int ib_req_notify_cq(struct ib_cq *cq, 2908 enum ib_cq_notify_flags flags) 2909 { 2910 return cq->device->req_notify_cq(cq, flags); 2911 } 2912 2913 /** 2914 * ib_req_ncomp_notif - Request completion notification when there are 2915 * at least the specified number of unreaped completions on the CQ. 2916 * @cq: The CQ to generate an event for. 2917 * @wc_cnt: The number of unreaped completions that should be on the 2918 * CQ before an event is generated. 2919 */ 2920 static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) 2921 { 2922 return cq->device->req_ncomp_notif ? 2923 cq->device->req_ncomp_notif(cq, wc_cnt) : 2924 -ENOSYS; 2925 } 2926 2927 /** 2928 * ib_dma_mapping_error - check a DMA addr for error 2929 * @dev: The device for which the dma_addr was created 2930 * @dma_addr: The DMA address to check 2931 */ 2932 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 2933 { 2934 if (dev->dma_ops) 2935 return dev->dma_ops->mapping_error(dev, dma_addr); 2936 return dma_mapping_error(dev->dma_device, dma_addr); 2937 } 2938 2939 /** 2940 * ib_dma_map_single - Map a kernel virtual address to DMA address 2941 * @dev: The device for which the dma_addr is to be created 2942 * @cpu_addr: The kernel virtual address 2943 * @size: The size of the region in bytes 2944 * @direction: The direction of the DMA 2945 */ 2946 static inline u64 ib_dma_map_single(struct ib_device *dev, 2947 void *cpu_addr, size_t size, 2948 enum dma_data_direction direction) 2949 { 2950 if (dev->dma_ops) 2951 return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 2952 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 2953 } 2954 2955 /** 2956 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 2957 * @dev: The device for which the DMA address was created 2958 * @addr: The DMA address 2959 * @size: The size of the region in bytes 2960 * @direction: The direction of the DMA 2961 */ 2962 static inline void ib_dma_unmap_single(struct ib_device *dev, 2963 u64 addr, size_t size, 2964 enum dma_data_direction direction) 2965 { 2966 if (dev->dma_ops) 2967 dev->dma_ops->unmap_single(dev, addr, size, direction); 2968 else 2969 dma_unmap_single(dev->dma_device, addr, size, direction); 2970 } 2971 2972 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 2973 void *cpu_addr, size_t size, 2974 enum dma_data_direction direction, 2975 struct dma_attrs *dma_attrs) 2976 { 2977 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 2978 direction, dma_attrs); 2979 } 2980 2981 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 2982 u64 addr, size_t size, 2983 enum dma_data_direction direction, 2984 struct dma_attrs *dma_attrs) 2985 { 2986 return dma_unmap_single_attrs(dev->dma_device, addr, size, 2987 direction, dma_attrs); 2988 } 2989 2990 /** 2991 * ib_dma_map_page - Map a physical page to DMA address 2992 * @dev: The device for which the dma_addr is to be created 2993 * @page: The page to be mapped 2994 * @offset: The offset within the page 2995 * @size: The size of the region in bytes 2996 * @direction: The direction of the DMA 2997 */ 2998 static inline u64 ib_dma_map_page(struct ib_device *dev, 2999 struct page *page, 3000 unsigned long offset, 3001 size_t size, 3002 enum dma_data_direction direction) 3003 { 3004 if (dev->dma_ops) 3005 return dev->dma_ops->map_page(dev, page, offset, size, direction); 3006 return dma_map_page(dev->dma_device, page, offset, size, direction); 3007 } 3008 3009 /** 
3010 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 3011 * @dev: The device for which the DMA address was created 3012 * @addr: The DMA address 3013 * @size: The size of the region in bytes 3014 * @direction: The direction of the DMA 3015 */ 3016 static inline void ib_dma_unmap_page(struct ib_device *dev, 3017 u64 addr, size_t size, 3018 enum dma_data_direction direction) 3019 { 3020 if (dev->dma_ops) 3021 dev->dma_ops->unmap_page(dev, addr, size, direction); 3022 else 3023 dma_unmap_page(dev->dma_device, addr, size, direction); 3024 } 3025 3026 /** 3027 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 3028 * @dev: The device for which the DMA addresses are to be created 3029 * @sg: The array of scatter/gather entries 3030 * @nents: The number of scatter/gather entries 3031 * @direction: The direction of the DMA 3032 */ 3033 static inline int ib_dma_map_sg(struct ib_device *dev, 3034 struct scatterlist *sg, int nents, 3035 enum dma_data_direction direction) 3036 { 3037 if (dev->dma_ops) 3038 return dev->dma_ops->map_sg(dev, sg, nents, direction); 3039 return dma_map_sg(dev->dma_device, sg, nents, direction); 3040 } 3041 3042 /** 3043 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 3044 * @dev: The device for which the DMA addresses were created 3045 * @sg: The array of scatter/gather entries 3046 * @nents: The number of scatter/gather entries 3047 * @direction: The direction of the DMA 3048 */ 3049 static inline void ib_dma_unmap_sg(struct ib_device *dev, 3050 struct scatterlist *sg, int nents, 3051 enum dma_data_direction direction) 3052 { 3053 if (dev->dma_ops) 3054 dev->dma_ops->unmap_sg(dev, sg, nents, direction); 3055 else 3056 dma_unmap_sg(dev->dma_device, sg, nents, direction); 3057 } 3058 3059 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 3060 struct scatterlist *sg, int nents, 3061 enum dma_data_direction direction, 3062 struct dma_attrs *dma_attrs) 3063 { 3064 if (dev->dma_ops) 3065 return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, 3066 dma_attrs); 3067 else 3068 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3069 dma_attrs); 3070 } 3071 3072 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3073 struct scatterlist *sg, int nents, 3074 enum dma_data_direction direction, 3075 struct dma_attrs *dma_attrs) 3076 { 3077 if (dev->dma_ops) 3078 return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, 3079 dma_attrs); 3080 else 3081 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, 3082 dma_attrs); 3083 } 3084 /** 3085 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3086 * @dev: The device for which the DMA addresses were created 3087 * @sg: The scatter/gather entry 3088 * 3089 * Note: this function is obsolete. To do: change all occurrences of 3090 * ib_sg_dma_address() into sg_dma_address(). 3091 */ 3092 static inline u64 ib_sg_dma_address(struct ib_device *dev, 3093 struct scatterlist *sg) 3094 { 3095 return sg_dma_address(sg); 3096 } 3097 3098 /** 3099 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 3100 * @dev: The device for which the DMA addresses were created 3101 * @sg: The scatter/gather entry 3102 * 3103 * Note: this function is obsolete. To do: change all occurrences of 3104 * ib_sg_dma_len() into sg_dma_len(). 
3105 */ 3106 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 3107 struct scatterlist *sg) 3108 { 3109 return sg_dma_len(sg); 3110 } 3111 3112 /** 3113 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 3114 * @dev: The device for which the DMA address was created 3115 * @addr: The DMA address 3116 * @size: The size of the region in bytes 3117 * @dir: The direction of the DMA 3118 */ 3119 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 3120 u64 addr, 3121 size_t size, 3122 enum dma_data_direction dir) 3123 { 3124 if (dev->dma_ops) 3125 dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 3126 else 3127 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3128 } 3129 3130 /** 3131 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 3132 * @dev: The device for which the DMA address was created 3133 * @addr: The DMA address 3134 * @size: The size of the region in bytes 3135 * @dir: The direction of the DMA 3136 */ 3137 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 3138 u64 addr, 3139 size_t size, 3140 enum dma_data_direction dir) 3141 { 3142 if (dev->dma_ops) 3143 dev->dma_ops->sync_single_for_device(dev, addr, size, dir); 3144 else 3145 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3146 } 3147 3148 /** 3149 * ib_dma_alloc_coherent - Allocate memory and map it for DMA 3150 * @dev: The device for which the DMA address is requested 3151 * @size: The size of the region to allocate in bytes 3152 * @dma_handle: A pointer for returning the DMA address of the region 3153 * @flag: memory allocator flags 3154 */ 3155 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3156 size_t size, 3157 u64 *dma_handle, 3158 gfp_t flag) 3159 { 3160 if (dev->dma_ops) 3161 return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); 3162 else { 3163 dma_addr_t handle; 3164 void *ret; 3165 3166 ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); 3167 *dma_handle = handle; 3168 return ret; 3169 } 3170 } 3171 3172 /** 3173 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 3174 * @dev: The device for which the DMA addresses were allocated 3175 * @size: The size of the region 3176 * @cpu_addr: the address returned by ib_dma_alloc_coherent() 3177 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 3178 */ 3179 static inline void ib_dma_free_coherent(struct ib_device *dev, 3180 size_t size, void *cpu_addr, 3181 u64 dma_handle) 3182 { 3183 if (dev->dma_ops) 3184 dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 3185 else 3186 dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 3187 } 3188 3189 /** 3190 * ib_dereg_mr - Deregisters a memory region and removes it from the 3191 * HCA translation table. 3192 * @mr: The memory region to deregister. 3193 * 3194 * This function can fail, if the memory region has memory windows bound to it. 3195 */ 3196 int ib_dereg_mr(struct ib_mr *mr); 3197 3198 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, 3199 enum ib_mr_type mr_type, 3200 u32 max_num_sg); 3201 3202 /** 3203 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 3204 * R_Key and L_Key. 3205 * @mr - struct ib_mr pointer to be updated. 3206 * @newkey - new key to be used. 
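 *
 * A minimal sketch of how a ULP might bump the key before reusing a
 * fast-registration MR (simply incrementing the low byte of the current
 * rkey is an illustrative choice, not a requirement):
 *
 *	u8 key = mr->rkey & 0x000000ff;
 *
 *	ib_update_fast_reg_key(mr, key + 1);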
3207 */ 3208 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 3209 { 3210 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 3211 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 3212 } 3213 3214 /** 3215 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 3216 * for calculating a new rkey for type 2 memory windows. 3217 * @rkey - the rkey to increment. 3218 */ 3219 static inline u32 ib_inc_rkey(u32 rkey) 3220 { 3221 const u32 mask = 0x000000ff; 3222 return ((rkey + 1) & mask) | (rkey & ~mask); 3223 } 3224 3225 /** 3226 * ib_alloc_fmr - Allocates an unmapped fast memory region. 3227 * @pd: The protection domain associated with the unmapped region. 3228 * @mr_access_flags: Specifies the memory access rights. 3229 * @fmr_attr: Attributes of the unmapped region. 3230 * 3231 * A fast memory region must be mapped before it can be used as part of 3232 * a work request. 3233 */ 3234 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, 3235 int mr_access_flags, 3236 struct ib_fmr_attr *fmr_attr); 3237 3238 /** 3239 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 3240 * @fmr: The fast memory region to associate with the pages. 3241 * @page_list: An array of physical pages to map to the fast memory region. 3242 * @list_len: The number of pages in page_list. 3243 * @iova: The I/O virtual address to use with the mapped region. 3244 */ 3245 static inline int ib_map_phys_fmr(struct ib_fmr *fmr, 3246 u64 *page_list, int list_len, 3247 u64 iova) 3248 { 3249 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); 3250 } 3251 3252 /** 3253 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. 3254 * @fmr_list: A linked list of fast memory regions to unmap. 3255 */ 3256 int ib_unmap_fmr(struct list_head *fmr_list); 3257 3258 /** 3259 * ib_dealloc_fmr - Deallocates a fast memory region. 3260 * @fmr: The fast memory region to deallocate. 3261 */ 3262 int ib_dealloc_fmr(struct ib_fmr *fmr); 3263 3264 /** 3265 * ib_attach_mcast - Attaches the specified QP to a multicast group. 3266 * @qp: QP to attach to the multicast group. The QP must be type 3267 * IB_QPT_UD. 3268 * @gid: Multicast group GID. 3269 * @lid: Multicast group LID in host byte order. 3270 * 3271 * In order to send and receive multicast packets, subnet 3272 * administration must have created the multicast group and configured 3273 * the fabric appropriately. The port associated with the specified 3274 * QP must also be a member of the multicast group. 3275 */ 3276 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3277 3278 /** 3279 * ib_detach_mcast - Detaches the specified QP from a multicast group. 3280 * @qp: QP to detach from the multicast group. 3281 * @gid: Multicast group GID. 3282 * @lid: Multicast group LID in host byte order. 3283 */ 3284 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 3285 3286 /** 3287 * ib_alloc_xrcd - Allocates an XRC domain. 3288 * @device: The device on which to allocate the XRC domain. 3289 */ 3290 struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device); 3291 3292 /** 3293 * ib_dealloc_xrcd - Deallocates an XRC domain. 3294 * @xrcd: The XRC domain to deallocate.
3295 */ 3296 int ib_dealloc_xrcd(struct ib_xrcd *xrcd); 3297 3298 struct ib_flow *ib_create_flow(struct ib_qp *qp, 3299 struct ib_flow_attr *flow_attr, int domain); 3300 int ib_destroy_flow(struct ib_flow *flow_id); 3301 3302 static inline int ib_check_mr_access(int flags) 3303 { 3304 /* 3305 * Local write permission is required if remote write or 3306 * remote atomic permission is also requested. 3307 */ 3308 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 3309 !(flags & IB_ACCESS_LOCAL_WRITE)) 3310 return -EINVAL; 3311 3312 return 0; 3313 } 3314 3315 /** 3316 * ib_check_mr_status: lightweight check of MR status. 3317 * This routine may provide status checks on a selected 3318 * ib_mr. first use is for signature status check. 3319 * 3320 * @mr: A memory region. 3321 * @check_mask: Bitmask of which checks to perform from 3322 * ib_mr_status_check enumeration. 3323 * @mr_status: The container of relevant status checks. 3324 * failed checks will be indicated in the status bitmask 3325 * and the relevant info shall be in the error item. 3326 */ 3327 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 3328 struct ib_mr_status *mr_status); 3329 3330 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, 3331 u16 pkey, const union ib_gid *gid, 3332 const struct sockaddr *addr); 3333 struct ib_wq *ib_create_wq(struct ib_pd *pd, 3334 struct ib_wq_init_attr *init_attr); 3335 int ib_destroy_wq(struct ib_wq *wq); 3336 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, 3337 u32 wq_attr_mask); 3338 struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device, 3339 struct ib_rwq_ind_table_init_attr* 3340 wq_ind_table_init_attr); 3341 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); 3342 3343 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3344 unsigned int *sg_offset, unsigned int page_size); 3345 3346 static inline int 3347 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 3348 unsigned int *sg_offset, unsigned int page_size) 3349 { 3350 int n; 3351 3352 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 3353 mr->iova = 0; 3354 3355 return n; 3356 } 3357 3358 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 3359 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 3360 3361 void ib_drain_rq(struct ib_qp *qp); 3362 void ib_drain_sq(struct ib_qp *qp); 3363 void ib_drain_qp(struct ib_qp *qp); 3364 3365 int ib_resolve_eth_dmac(struct ib_device *device, 3366 struct ib_ah_attr *ah_attr); 3367 #endif /* IB_VERBS_H */ 3368