/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...);
__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG)
#define ibdev_dbg(__dev, format, args...) \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
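
/*
 * Usage sketch (illustrative only; the helper name and message are
 * hypothetical, not part of this header): the _ratelimited variants are
 * intended for paths that can fire per-packet or per-completion, where an
 * unthrottled ibdev_warn() could flood the log.
 */
static inline void example_report_cq_overrun(const struct ib_device *ibdev,
					     u32 cqn)
{
	/* emits at most DEFAULT_RATELIMIT_BURST messages per interval */
	ibdev_warn_ratelimited(ibdev, "CQ 0x%x overrun\n", cqn);
}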

#if defined(CONFIG_DYNAMIC_DEBUG)
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	/* If link layer is Ethernet, this is RoCE V1 */
	IB_GID_TYPE_IB = 0,
	IB_GID_TYPE_ROCE = 0,
	IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	struct net_device __rcu *ndev;
	struct ib_device *device;
	union ib_gid gid;
	enum ib_gid_type gid_type;
	u16 index;
	u8 port_num;
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;

	/* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */
	return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = (1 << 0),
	IB_DEVICE_BAD_PKEY_CNTR = (1 << 1),
	IB_DEVICE_BAD_QKEY_CNTR = (1 << 2),
	IB_DEVICE_RAW_MULTI = (1 << 3),
	IB_DEVICE_AUTO_PATH_MIG = (1 << 4),
	IB_DEVICE_CHANGE_PHY_PORT = (1 << 5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1 << 6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1 << 7),
	IB_DEVICE_SHUTDOWN_PORT = (1 << 8),
	/* Not in use, former INIT_TYPE = (1 << 9),*/
	IB_DEVICE_PORT_ACTIVE_EVENT = (1 << 10),
	IB_DEVICE_SYS_IMAGE_GUID = (1 << 11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1 << 12),
	IB_DEVICE_SRQ_RESIZE = (1 << 13),
	IB_DEVICE_N_NOTIFY_CQ = (1 << 14),

	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory. Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IB_DEVICE_LOCAL_DMA_LKEY = (1 << 15),
	/* Reserved, old SEND_W_INV = (1 << 16),*/
	IB_DEVICE_MEM_WINDOW = (1 << 17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_SUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages. Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = (1 << 18),
	IB_DEVICE_UD_TSO = (1 << 19),
	IB_DEVICE_XRC = (1 << 20),

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec. iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = (1 << 21),
	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1 << 22),
	IB_DEVICE_MEM_WINDOW_TYPE_2A = (1 << 23),
	IB_DEVICE_MEM_WINDOW_TYPE_2B = (1 << 24),
	IB_DEVICE_RC_IP_CSUM = (1 << 25),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = (1 << 26),
	/*
	 * Devices should set IB_DEVICE_CROSS_CHANNEL if they
	 * support execution of WQEs that involve synchronization
	 * of I/O operations with single completion queue managed
	 * by hardware.
	 */
	IB_DEVICE_CROSS_CHANNEL = (1 << 27),
	IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
	IB_DEVICE_INTEGRITY_HANDOVER = (1 << 30),
	IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
	IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
	IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
	IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
	IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND = 1 << 0,
	IB_ODP_SUPPORT_RECV = 1 << 1,
	IB_ODP_SUPPORT_WRITE = 1 << 2,
	IB_ODP_SUPPORT_READ = 1 << 3,
	IB_ODP_SUPPORT_ATOMIC = 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;
};
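
/*
 * Checking on-demand paging support is a two-step test: the general bit in
 * general_caps plus the per-transport bit. A minimal sketch (the helper
 * name is illustrative, not part of this header):
 */
static inline bool example_odp_rc_write_supported(const struct ib_odp_caps *caps)
{
	return (caps->general_caps & IB_ODP_SUPPORT) &&
	       (caps->per_transport_caps.rc_odp_caps & IB_ODP_SUPPORT_WRITE);
}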

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int cqe;
	int comp_vector;
	u32 flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16 max_cq_moderation_count;
	u16 max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64 length;
	u64 offset;
	u32 access_flags;
};

struct ib_dm_alloc_attr {
	u64 length;
	u32 alignment;
	u32 flags;
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	u64 device_cap_flags;
	int max_send_sge;
	int max_recv_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	enum ib_atomic_cap masked_atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	unsigned int max_fast_reg_page_list_len;
	unsigned int max_pi_fast_reg_page_list_len;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
	int sig_prot_cap;
	int sig_guard_cap;
	struct ib_odp_caps odp_caps;
	uint64_t timestamp_mask;
	uint64_t hca_core_clock; /* in kHz */
	struct ib_rss_caps rss_caps;
	u32 max_wq_type_rq;
	u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps tm_caps;
	struct ib_cq_caps cq_caps;
	u64 max_dm_size;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}
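
/*
 * Worked example: ib_mtu_int_to_enum() rounds down to the nearest IB MTU,
 * so (e.g. for a RoCE driver deriving active_mtu from its netdev MTU) a
 * standard 1500-byte Ethernet MTU maps to IB_MTU_1024 and a 9000-byte
 * jumbo MTU maps to IB_MTU_4096:
 *
 *	ib_mtu_int_to_enum(1500) == IB_MTU_1024
 *	ib_mtu_int_to_enum(9000) == IB_MTU_4096
 *	ib_mtu_enum_to_int(IB_MTU_2048) == 2048
 */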

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_2X = 16,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return 1;
	case IB_WIDTH_2X:  return 2;
	case IB_WIDTH_4X:  return 4;
	case IB_WIDTH_8X:  return 8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR = 1,
	IB_SPEED_DDR = 2,
	IB_SPEED_QDR = 4,
	IB_SPEED_FDR10 = 8,
	IB_SPEED_FDR = 16,
	IB_SPEED_EDR = 32,
	IB_SPEED_HDR = 64
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64-bit and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again. Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @names - Array of pointers to static names used for the counters in
 *   the sysfs directory.
 * @num_counters - How many hardware counters there are. If names is
 *   shorter than this number, a kernel oops will result. Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex lock; /* Protect lifespan and values[] */
	unsigned long timestamp;
	unsigned long lifespan;
	const char * const *names;
	int num_counters;
	u64 value[];
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
/**
 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
 *   for drivers.
 * @names - Array of static const char *
 * @num_counters - How many elements in array
 * @lifespan - How many milliseconds between updates
 */
static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const char * const *names, int num_counters,
		unsigned long lifespan)
{
	struct rdma_hw_stats *stats;

	stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
			GFP_KERNEL);
	if (!stats)
		return NULL;
	stats->names = names;
	stats->num_counters = num_counters;
	stats->lifespan = msecs_to_jiffies(lifespan);

	return stats;
}
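
/*
 * Allocation sketch for a driver (the counter set and names here are
 * illustrative only). The BUILD_BUG_ON mirrors the advice in the
 * struct rdma_hw_stats kernel-doc above; this assumes ARRAY_SIZE and
 * BUILD_BUG_ON are visible, as they normally are in kernel code.
 */
static const char * const example_counter_names[] = {
	"example_rx_pkts",
	"example_tx_pkts",
};

static inline struct rdma_hw_stats *example_alloc_port_stats(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(example_counter_names) < 2);
	return rdma_alloc_hw_stats_struct(example_counter_names,
					  ARRAY_SIZE(example_counter_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}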

/* Define bits for the various functionality this port needs to be
 * supported by the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED	0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB	(RDMA_CORE_CAP_PROT_IB \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_IB_SA \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE	(RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_AF_IB \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_CM \
					| RDMA_CORE_CAP_AF_IB \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP	(RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA	(RDMA_CORE_PORT_IBA_IB \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64 subnet_prefix;
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	unsigned int ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u32 sm_lid;
	u32 lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
	u16 port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3),
	IB_PORT_OPA_MASK_CHG = (1<<4)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		struct ib_wq *wq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)	\
	do {						\
		(_ptr)->device = _device;		\
		(_ptr)->handler = _handler;		\
		INIT_LIST_HEAD(&(_ptr)->list);		\
	} while (0)

struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the header
		 * is located in the last 20 bytes of the header.
		 */
		u8 reserved[20];
		struct iphdr roce4grh;
	};
};

#define IB_QPN_MASK	0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS = 11,
	IB_RATE_56_GBPS = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS = 19,
	IB_RATE_50_GBPS = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);

/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG: memory region that is used for
 *   normal registration
 * @IB_MR_TYPE_SG_GAPS: memory region that is capable of
 *   registering any arbitrary sg lists (without
 *   the normal mr constraints - see
 *   ib_map_mr_sg)
 * @IB_MR_TYPE_DM: memory region that is used for device
 *   memory registration
 * @IB_MR_TYPE_USER: memory region that is used for the user-space
 *   application
 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations
 *   without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY: memory region that is used for
 *   data integrity operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *   failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *   failure.
 */
struct ib_mr_status {
	u32 fail_status;
	struct ib_sig_err sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16 dlid;
	u8 src_path_bits;
};

struct roce_ah_attr {
	u8 dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32 dlid;
	u8 src_path_bits;
	bool make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route grh;
	u8 sl;
	u8 static_rate;
	u8 port_num;
	u8 ah_flags;
	enum rdma_ah_attr_type type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_LSO,
	IB_WC_LOCAL_INV,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1),
	IB_WC_WITH_INVALIDATE = (1<<2),
	IB_WC_IP_CSUM_OK = (1<<3),
	IB_WC_WITH_SMAC = (1<<4),
	IB_WC_WITH_VLAN = (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
};

struct ib_wc {
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
	u32 src_qp;
	u32 slid;
	int wc_flags;
	u16 pkey_index;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num; /* valid only for DR SMPs on switches */
	u8 smac[ETH_ALEN];
	u16 vlan_id;
	u8 network_hdr_type;
};
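
/*
 * A minimal sketch of the IB_WC_RECV trick described above (the helper name
 * is illustrative, not part of this header): both IB_WC_RECV and
 * IB_WC_RECV_RDMA_WITH_IMM have bit 7 set, so a single mask test classifies
 * a completion as a receive.
 */
static inline bool example_wc_is_recv(const struct ib_wc *wc)
{
	return !!(wc->opcode & IB_WC_RECV);
}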

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC,
	IB_SRQT_XRC,
	IB_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
	enum ib_srq_type srq_type;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32 max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32 max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = 8,
	IB_QPT_XRC_INI = 9,
	IB_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = 0xFF,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
	IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
	IB_QP_CREATE_MANAGED_SEND = 1 << 3,
	IB_QP_CREATE_MANAGED_RECV = 1 << 4,
	IB_QP_CREATE_NETIF_QP = 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
	/* FREE = 1 << 7, */
	IB_QP_CREATE_SCATTER_FCS = 1 << 8,
	IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
	IB_QP_CREATE_SOURCE_QPN = 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START = 1 << 26,
	IB_QP_CREATE_RESERVED_END = 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	/* Consumer's event_handler callback must not block */
	void (*event_handler)(struct ib_event *, void *);

	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	u32 create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u8 port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32 source_qpn;
};

struct ib_qp_open_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20),
	IB_QP_RESERVED1 = (1<<21),
	IB_QP_RESERVED2 = (1<<22),
	IB_QP_RESERVED3 = (1<<23),
	IB_QP_RESERVED4 = (1<<24),
	IB_QP_RATE_LIMIT = (1<<25),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct rdma_ah_attr ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
	u32 rate_limit;
};

enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3),
	IB_SEND_IP_CSUM = (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START = (1 << 26),
	IB_SEND_RESERVED_END = (1 << 31),
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	union {
		__be32 imm_data;
		u32 invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u32 rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr wr;
	u64 remote_addr;
	u64 compare_add;
	u64 swap;
	u64 compare_add_mask;
	u64 swap_mask;
	u32 rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr wr;
	struct ib_ah *ah;
	void *header;
	int hlen;
	int mss;
	u32 remote_qpn;
	u32 remote_qkey;
	u16 pkey_index; /* valid for GSI only */
	u8 port_num; /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr wr;
	struct ib_mr *mr;
	u32 key;
	int access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr *next;
	union {
		u64 wr_id;
		struct ib_cqe *wr_cqe;
	};
	struct ib_sge *sg_list;
	int num_sge;
};
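
/*
 * Construction sketch: the extended WR types above embed struct ib_send_wr
 * as their first member, and drivers recover the container via rdma_wr(),
 * atomic_wr(), etc. The helper below is illustrative only (a one-SGE,
 * signaled RDMA WRITE); a real consumer would chain it as needed and hand
 * &wr->wr to ib_post_send(), which the full header declares further down.
 */
static inline void example_build_rdma_write(struct ib_rdma_wr *wr,
					    struct ib_sge *sge,
					    u64 remote_addr, u32 rkey)
{
	wr->wr.next = NULL;
	wr->wr.sg_list = sge;
	wr->wr.num_sge = 1;
	wr->wr.opcode = IB_WR_RDMA_WRITE;
	wr->wr.send_flags = IB_SEND_SIGNALED;
	wr->remote_addr = remote_addr;
	wr->rkey = rkey;
}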

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,

	IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS = 1,
	IB_MR_REREG_PD = (1<<1),
	IB_MR_REREG_ACCESS = (1<<2),
	IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup *cg; /* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device *device;
	struct ib_uverbs_file *ufile;
	/*
	 * 'closing' can be read by the driver only during a destroy callback,
	 * it is set when we are closing the file descriptor and indicates
	 * that mm_sem may be locked.
	 */
	bool closing;

	bool cleanup_retryable;

	struct mutex per_mm_list_lock;
	struct list_head per_mm_list;

	struct ib_rdmacg_object cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_uobject {
	u64 user_handle; /* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext *context; /* associated user context */
	void *object; /* containing object */
	struct list_head list; /* link to context's list */
	struct ib_rdmacg_object cg_obj; /* rdmacg object */
	int id; /* index into kernel idr */
	struct kref ref;
	atomic_t usecnt; /* protects exclusive access */
	struct rcu_head rcu; /* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	u32 local_dma_lkey;
	u32 flags;
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt; /* count all resources */

	u32 unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr *__internal_mr;
	struct rdma_restrack_entry res;
};
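
/*
 * Sketch tying back to the IB_DEVICE_LOCAL_DMA_LKEY comment earlier: ULPs
 * fill SGEs for already-DMA-mapped kernel memory with the PD's
 * local_dma_lkey instead of registering an MR. The helper name and
 * parameters are illustrative only.
 */
static inline void example_fill_dma_sge(struct ib_sge *sge, struct ib_pd *pd,
					u64 dma_addr, u32 len)
{
	sge->addr = dma_addr; /* a DMA address, e.g. from ib_dma_map_single() */
	sge->length = len;
	sge->lkey = pd->local_dma_lkey; /* always usable, per the cap comment */
}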

struct ib_xrcd {
	struct ib_device *device;
	atomic_t usecnt; /* count all exposed resources */
	struct inode *inode;

	struct mutex tgt_qp_mutex;
	struct list_head tgt_qp_list;
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_DIRECT,		   /* caller context, no hw completions */
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};

struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt; /* count number of work queues */
	enum ib_poll_context poll_ctx;
	struct ib_wc *wc;
	union {
		struct irq_poll iop;
		struct work_struct work;
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	enum ib_srq_type srq_type;
	atomic_t usecnt;

	struct {
		struct ib_cq *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32 srq_num;
			} xrc;
		};
	} ext;
};

enum ib_raw_packet_caps {
	/* Stripping the cvlan from an incoming packet and reporting it in the
	 * matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING = (1 << 0),
	/* Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS = (1 << 1),
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM = (1 << 2),
	/* When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP = (1 << 3),
};

enum ib_wq_type {
	IB_WQT_RQ
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	void *wq_context;
	void (*event_handler)(struct ib_event *, void *);
	struct ib_pd *pd;
	struct ib_cq *cq;
	u32 wq_num;
	enum ib_wq_state state;
	enum ib_wq_type wq_type;
	atomic_t usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
	IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
	IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};

struct ib_wq_init_attr {
	void *wq_context;
	enum ib_wq_type wq_type;
	u32 max_wr;
	u32 max_sge;
	struct ib_cq *cq;
	void (*event_handler)(struct ib_event *, void *);
	u32 create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE = 1 << 0,
	IB_WQ_CUR_STATE = 1 << 1,
	IB_WQ_FLAGS = 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state wq_state;
	enum ib_wq_state curr_wq_state;
	u32 flags; /* Use enum ib_wq_flags */
	u32 flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;
	u32 ind_tbl_num;
	u32 log_ind_tbl_size;
	struct ib_wq **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32 log_ind_tbl_size;
	/* Each entry is a pointer to a Receive Work Queue */
	struct ib_wq **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state state;
	u16 pkey_index;
	u8 port_num;
	struct list_head qp_list;
	struct list_head to_error_list;
	struct ib_qp_security *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey main;
	struct ib_port_pkey alt;
};

struct ib_qp_security {
	struct ib_qp *qp;
	struct ib_device *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex mutex;
	struct ib_ports_pkeys *ports_pkeys;
	/* A list of all open shared QP handles. Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head shared_qp_list;
	void *security;
	bool destroying;
	atomic_t error_list_count;
	struct completion error_complete;
	int error_comps_pending;
};

/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge: Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	spinlock_t mr_lock;
	int mrs_used;
	struct list_head rdma_mrs;
	struct list_head sig_mrs;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd; /* XRC TGT QPs only */
	struct list_head xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t usecnt;
	struct list_head open_list;
	struct ib_qp *real_qp;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32 qp_num;
	u32 max_write_sge;
	u32 max_read_sge;
	enum ib_qp_type qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security *qp_sec;
	u8 port;

	bool integrity_en;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;

	/* The counter the qp is bound to */
	struct rdma_counter *counter;
};

struct ib_dm {
	struct ib_device *device;
	u32 length;
	u32 flags;
	struct ib_uobject *uobject;
	atomic_t usecnt;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	u32 lkey;
	u32 rkey;
	u64 iova;
	u64 length;
	unsigned int page_size;
	enum ib_mr_type type;
	bool need_inval;
	union {
		struct ib_uobject *uobject; /* user */
		struct list_head qp_entry; /* FR */
	};

	struct ib_dm *dm;
	struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
	enum ib_mw_type type;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL = 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT = 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER = 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH = 0x20,
	IB_FLOW_SPEC_IB = 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4 = 0x30,
	IB_FLOW_SPEC_IPV6 = 0x31,
	IB_FLOW_SPEC_ESP = 0x34,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP = 0x40,
	IB_FLOW_SPEC_UDP = 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
	IB_FLOW_SPEC_GRE = 0x51,
	IB_FLOW_SPEC_MPLS = 0x60,
	IB_FLOW_SPEC_INNER = 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG = 0x1000,
	IB_FLOW_SPEC_ACTION_DROP = 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK		0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS	10

/* Flow steering rule priority is set according to its domain.
 * Lower domain value means higher priority.
 */
enum ib_flow_domain {
	IB_FLOW_DOMAIN_USER,
	IB_FLOW_DOMAIN_ETHTOOL,
	IB_FLOW_DOMAIN_RFS,
	IB_FLOW_DOMAIN_NIC,
	IB_FLOW_DOMAIN_NUM /* Must be last */
};

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
};

struct ib_flow_eth_filter {
	u8 dst_mac[6];
	u8 src_mac[6];
	__be16 ether_type;
	__be16 vlan_tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_eth {
	u32 type;
	u16 size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16 dlid;
	__u8 sl;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ib {
	u32 type;
	u16 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
				  * last have this flag set
				  */
};

struct ib_flow_ipv4_filter {
	__be32 src_ip;
	__be32 dst_ip;
	u8 proto;
	u8 tos;
	u8 ttl;
	u8 flags;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv4 {
	u32 type;
	u16 size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8 src_ip[16];
	u8 dst_ip[16];
	__be32 flow_label;
	u8 next_hdr;
	u8 traffic_class;
	u8 hop_limit;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_ipv6 {
	u32 type;
	u16 size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16 dst_port;
	__be16 src_port;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_tcp_udp {
	u32 type;
	u16 size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32 tunnel_id;
	u8 real_sz[0];
};

/* ib_flow_spec_tunnel describes the VXLAN tunnel;
 * the tunnel_id in val carries the VNI value.
 */
struct ib_flow_spec_tunnel {
	u32 type;
	u16 size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	__be32 spi;
	__be32 seq;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_esp {
	u32 type;
	u16 size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16 c_ks_res0_ver;
	__be16 protocol;
	__be32 key;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_gre {
	u32 type;
	u16 size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	__be32 tag;
	/* Must be last */
	u8 real_sz[0];
};

struct ib_flow_spec_mpls {
	u32 type;
	u16 size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};
struct ib_mad_hdr;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

struct ib_port_cache {
	u64		      subnet_prefix;
	struct ib_pkey_cache  *pkey;
	struct ib_gid_table   *gid;
	u8		      lmc;
	enum ib_port_state    port_state;
};

struct ib_cache {
	rwlock_t		lock;
	struct ib_event_handler event_handler;
};

struct ib_port_immutable {
	int			pkey_tbl_len;
	int			gid_tbl_len;
	u32			core_cap_flags;
	u32			max_mad_size;
};

struct ib_port_data {
	struct ib_device *ib_dev;

	struct ib_port_immutable immutable;

	spinlock_t pkey_list_lock;
	struct list_head pkey_list;

	struct ib_port_cache cache;

	spinlock_t netdev_lock;
	struct net_device __rcu *netdev;
	struct hlist_node ndev_hash_link;
	struct rdma_port_counter port_counter;
	struct rdma_hw_stats *hw_stats;
};

/* rdma netdev type - specifies protocol type */
enum rdma_netdev_t {
	RDMA_NETDEV_OPA_VNIC,
	RDMA_NETDEV_IPOIB,
};

/**
 * struct rdma_netdev - rdma netdev
 * For cases where netstack interfacing is required.
 */
struct rdma_netdev {
	void              *clnt_priv;
	struct ib_device  *hca;
	u8                 port_num;

	/*
	 * cleanup function must be specified.
	 * FIXME: This is only used for OPA_VNIC and that usage should be
	 * removed too.
	 */
	void (*free_rdma_netdev)(struct net_device *netdev);

	/* control functions */
	void (*set_id)(struct net_device *netdev, int id);
	/* send packet */
	int (*send)(struct net_device *dev, struct sk_buff *skb,
		    struct ib_ah *address, u32 dqpn);
	/* multicast */
	int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid,
			    int set_qkey, u32 qkey);
	int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
			    union ib_gid *gid, u16 mlid);
};

struct rdma_netdev_alloc_params {
	size_t sizeof_priv;
	unsigned int txqs;
	unsigned int rxqs;
	void *param;

	int (*initialize_rdma_netdev)(struct ib_device *device, u8 port_num,
				      struct net_device *netdev, void *param);
};

struct ib_counters {
	struct ib_device	*device;
	struct ib_uobject	*uobject;
	/* num of objects attached */
	atomic_t	usecnt;
};

struct ib_counters_read_attr {
	u64	*counters_buff;
	u32	ncounters;
	u32	flags; /* use enum ib_read_counters_flags */
};

struct uverbs_attr_bundle;
struct iw_cm_id;
struct iw_cm_conn_param;

#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member)                      \
	.size_##ib_struct =                                                    \
		(sizeof(struct drv_struct) +                                   \
		 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) +      \
		 BUILD_BUG_ON_ZERO(                                            \
			 !__same_type(((struct drv_struct *)NULL)->member,     \
				      struct ib_struct)))

#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                          \
	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))

#define rdma_zalloc_drv_obj(ib_dev, ib_type)                                   \
	rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)

#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
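/*
 * Example (an illustrative sketch): how a provider driver cooperates with
 * the INIT_RDMA_OBJ_SIZE/rdma_zalloc_drv_obj machinery above. The driver
 * embeds the core object at offset 0 of its own structure and advertises
 * the full size in its struct ib_device_ops initializer (defined just
 * below); the core then allocates the driver structure on the driver's
 * behalf. "my_pd" and "my_dev_ops" are hypothetical names.
 *
 *	struct my_pd {
 *		struct ib_pd ibpd;   (must be the first member)
 *		u32 pdn;
 *	};
 *
 *	static const struct ib_device_ops my_dev_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
 *	};
 *
 * The core allocates with, roughly:
 *
 *	struct ib_pd *pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
 *
 * and the driver's alloc_pd callback later recovers its own structure with
 * container_of(pd, struct my_pd, ibpd).
 */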
/**
 * struct ib_device_ops - InfiniBand device operations
 * This structure defines all the InfiniBand device operations. Providers
 * will need to define the supported operations; otherwise they will be set
 * to null.
 */
struct ib_device_ops {
	struct module *owner;
	enum rdma_driver_id driver_id;
	u32 uverbs_abi_ver;
	unsigned int uverbs_no_driver_id_binding:1;

	int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
			 const struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	void (*drain_rq)(struct ib_qp *qp);
	void (*drain_sq)(struct ib_qp *qp);
	int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
	int (*post_srq_recv)(struct ib_srq *srq,
			     const struct ib_recv_wr *recv_wr,
			     const struct ib_recv_wr **bad_recv_wr);
	int (*process_mad)(struct ib_device *device, int process_mad_flags,
			   u8 port_num, const struct ib_wc *in_wc,
			   const struct ib_grh *in_grh,
			   const struct ib_mad_hdr *in_mad, size_t in_mad_size,
			   struct ib_mad_hdr *out_mad, size_t *out_mad_size,
			   u16 *out_mad_pkey_index);
	int (*query_device)(struct ib_device *device,
			    struct ib_device_attr *device_attr,
			    struct ib_udata *udata);
	int (*modify_device)(struct ib_device *device, int device_modify_mask,
			     struct ib_device_modify *device_modify);
	void (*get_dev_fw_str)(struct ib_device *device, char *str);
	const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
						     int comp_vector);
	int (*query_port)(struct ib_device *device, u8 port_num,
			  struct ib_port_attr *port_attr);
	int (*modify_port)(struct ib_device *device, u8 port_num,
			   int port_modify_mask,
			   struct ib_port_modify *port_modify);
	/**
	 * The following mandatory functions are used only at device
	 * registration. Keep functions such as these at the end of this
	 * structure to avoid cache line misses when accessing struct ib_device
	 * in fast paths.
	 */
	int (*get_port_immutable)(struct ib_device *device, u8 port_num,
				  struct ib_port_immutable *immutable);
	enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
					       u8 port_num);
	/**
	 * When calling get_netdev, the HW vendor's driver should return the
	 * net device of device @device at port @port_num or NULL if such
	 * a net device doesn't exist. The vendor driver should call dev_hold
	 * on this net device. The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device has finished
	 * NETDEV_UNREGISTER state.
	 */
	struct net_device *(*get_netdev)(struct ib_device *device, u8 port_num);
	/**
	 * rdma netdev operation
	 *
	 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
	 * must return -EOPNOTSUPP if it doesn't support the specified type.
	 */
	struct net_device *(*alloc_rdma_netdev)(
		struct ib_device *device, u8 port_num, enum rdma_netdev_t type,
		const char *name, unsigned char name_assign_type,
		void (*setup)(struct net_device *));

	int (*rdma_netdev_get_params)(struct ib_device *device, u8 port_num,
				      enum rdma_netdev_t type,
				      struct rdma_netdev_alloc_params *params);
	/**
	 * query_gid should return the GID value for @device when the
	 * @port_num link layer is either IB or iWARP. It is a no-op if the
	 * @port_num port uses the RoCE link layer.
	 */
	int (*query_gid)(struct ib_device *device, u8 port_num, int index,
			 union ib_gid *gid);
	/**
	 * When calling add_gid, the HW vendor's driver should add the gid
	 * of device of port at gid index available at @attr. Meta-info of
	 * that gid (for example, the network device related to this gid) is
	 * available at @attr. @context allows the HW vendor driver to store
	 * extra information together with a GID entry. The HW vendor driver may
	 * allocate memory to contain this information and store it in @context
	 * when a new GID entry is written to. Params are consistent until the
	 * next call of add_gid or delete_gid. The function should return 0 on
	 * success or error otherwise. The function could be called
	 * concurrently for different ports. This function is only called when
	 * roce_gid_table is used.
	 */
	int (*add_gid)(const struct ib_gid_attr *attr, void **context);
	/**
	 * When calling del_gid, the HW vendor's driver should delete the
	 * gid of device @device at gid index gid_index of port port_num
	 * available in @attr.
	 * Upon the deletion of a GID entry, the HW vendor must free any
	 * allocated memory. The caller will clear @context afterwards.
	 * This function is only called when roce_gid_table is used.
	 */
	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
			  u16 *pkey);
	int (*alloc_ucontext)(struct ib_ucontext *context,
			      struct ib_udata *udata);
	void (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
	int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
	int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
			 u32 flags, struct ib_udata *udata);
	int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
	void (*destroy_ah)(struct ib_ah *ah, u32 flags);
	int (*create_srq)(struct ib_srq *srq,
			  struct ib_srq_init_attr *srq_init_attr,
			  struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
	void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask, struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
	int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
				     u64 virt_addr, int mr_access_flags,
				     struct ib_udata *udata);
	int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
			     u64 virt_addr, int mr_access_flags,
			     struct ib_pd *pd, struct ib_udata *udata);
	int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
				  u32 max_num_sg, struct ib_udata *udata);
	struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
					    u32 max_num_data_sg,
					    u32 max_num_meta_sg);
	int (*advise_mr)(struct ib_pd *pd,
			 enum ib_uverbs_advise_mr_advice advice, u32 flags,
			 struct ib_sge *sg_list, u32 num_sge,
			 struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
			 unsigned int *sg_offset);
	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
			       struct ib_mr_status *mr_status);
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
				  struct ib_udata *udata);
	int (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
				    struct ib_fmr_attr *fmr_attr);
	int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
			    u64 iova);
	int (*unmap_fmr)(struct list_head *fmr_list);
	int (*dealloc_fmr)(struct ib_fmr *fmr);
	void (*invalidate_range)(struct ib_umem_odp *umem_odp,
				 unsigned long start, unsigned long end);
	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
				      struct ib_udata *udata);
	int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
	struct ib_flow *(*create_flow)(struct ib_qp *qp,
				       struct ib_flow_attr *flow_attr,
				       int domain, struct ib_udata *udata);
	int (*destroy_flow)(struct ib_flow *flow_id);
	struct ib_flow_action *(*create_flow_action_esp)(
		struct ib_device *device,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*destroy_flow_action)(struct ib_flow_action *action);
	int (*modify_flow_action_esp)(
		struct ib_flow_action *action,
		const struct ib_flow_action_attrs_esp *attr,
		struct uverbs_attr_bundle *attrs);
	int (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
				 int state);
	int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
			     struct ifla_vf_info *ivf);
	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
			    struct ifla_vf_stats *stats);
	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
			   int type);
	struct ib_wq *(*create_wq)(struct ib_pd *pd,
				   struct ib_wq_init_attr *init_attr,
				   struct ib_udata *udata);
	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
			 u32 wq_attr_mask, struct ib_udata *udata);
	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
		struct ib_device *device,
		struct ib_rwq_ind_table_init_attr *init_attr,
		struct ib_udata *udata);
	int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
	struct ib_dm *(*alloc_dm)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_dm_alloc_attr *attr,
				  struct uverbs_attr_bundle *attrs);
	int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
	struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
				   struct ib_dm_mr_attr *attr,
				   struct uverbs_attr_bundle *attrs);
	struct ib_counters *(*create_counters)(
		struct ib_device *device, struct uverbs_attr_bundle *attrs);
	int (*destroy_counters)(struct ib_counters *counters);
	int (*read_counters)(struct ib_counters *counters,
			     struct ib_counters_read_attr *counters_read_attr,
			     struct uverbs_attr_bundle *attrs);
	int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
			    int data_sg_nents, unsigned int *data_sg_offset,
			    struct scatterlist *meta_sg, int meta_sg_nents,
			    unsigned int *meta_sg_offset);
	/**
	 * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
	 * driver initialized data. The struct is kfree()'ed by the sysfs
	 * core when the device is removed. A lifespan of -1 in the return
	 * struct tells the core to set a default lifespan.
	 */
	struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
						u8 port_num);
	/**
	 * get_hw_stats - Fill in the counter value(s) in the stats struct.
	 * @index - The index in the value array we wish to have updated, or
	 *   num_counters if we want all stats updated
	 * Return codes -
	 *   < 0 - Error, no counters updated
	 *   index - Updated the single counter pointed to by index
	 *   num_counters - Updated all counters (will reset the timestamp
	 *     and prevent further calls for lifespan milliseconds)
	 * Drivers are allowed to update all counters in lieu of just the
	 * one given in index at their option
	 */
	int (*get_hw_stats)(struct ib_device *device,
			    struct rdma_hw_stats *stats, u8 port, int index);
	/*
	 * This function is called once for each port when an ib device is
	 * registered.
	 */
	int (*init_port)(struct ib_device *device, u8 port_num,
			 struct kobject *port_sysfs);
	/**
	 * Allows rdma drivers to add their own restrack attributes.
	 */
	int (*fill_res_entry)(struct sk_buff *msg,
			      struct rdma_restrack_entry *entry);

	/* Device lifecycle callbacks */
	/*
	 * Called after the device becomes registered, before clients are
	 * attached
	 */
	int (*enable_driver)(struct ib_device *dev);
	/*
	 * This is called as part of ib_dealloc_device().
	 */
	void (*dealloc_driver)(struct ib_device *dev);

	/* iWARP CM callbacks */
	void (*iw_add_ref)(struct ib_qp *qp);
	void (*iw_rem_ref)(struct ib_qp *qp);
	struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
	int (*iw_connect)(struct iw_cm_id *cm_id,
			  struct iw_cm_conn_param *conn_param);
	int (*iw_accept)(struct iw_cm_id *cm_id,
			 struct iw_cm_conn_param *conn_param);
	int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
			 u8 pdata_len);
	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
	int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
	/**
	 * counter_bind_qp - Bind a QP to a counter.
	 * @counter - The counter to be bound. If counter->id is zero then
	 *   the driver needs to allocate a new counter and set counter->id
	 */
	int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
	/**
	 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
	 *   counter and bind it onto the default one
	 */
	int (*counter_unbind_qp)(struct ib_qp *qp);
	/**
	 * counter_dealloc - De-allocate the hw counter
	 */
	int (*counter_dealloc)(struct rdma_counter *counter);
	/**
	 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
	 *   the driver initialized data.
	 */
	struct rdma_hw_stats *(*counter_alloc_stats)(
		struct rdma_counter *counter);
	/**
	 * counter_update_stats - Query the stats value of this counter
	 */
	int (*counter_update_stats)(struct rdma_counter *counter);
	DECLARE_RDMA_OBJ_SIZE(ib_ah);
	DECLARE_RDMA_OBJ_SIZE(ib_cq);
	DECLARE_RDMA_OBJ_SIZE(ib_pd);
	DECLARE_RDMA_OBJ_SIZE(ib_srq);
	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
};
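/*
 * Example (an illustrative sketch): minimal HW-stats callbacks for a
 * hypothetical driver, matching the alloc_hw_stats/get_hw_stats contract
 * documented above. rdma_alloc_hw_stats_struct() and
 * RDMA_HW_STATS_DEFAULT_LIFESPAN are assumed from earlier in this header;
 * the counter names and my_read_counter() are placeholders.
 *
 *	static const char * const my_cnt_names[] = {
 *		"rx_packets", "tx_packets",
 *	};
 *
 *	static struct rdma_hw_stats *my_alloc_hw_stats(struct ib_device *dev,
 *						       u8 port_num)
 *	{
 *		return rdma_alloc_hw_stats_struct(my_cnt_names,
 *					ARRAY_SIZE(my_cnt_names),
 *					RDMA_HW_STATS_DEFAULT_LIFESPAN);
 *	}
 *
 *	static int my_get_hw_stats(struct ib_device *dev,
 *				   struct rdma_hw_stats *stats,
 *				   u8 port_num, int index)
 *	{
 *		int i;
 *
 *		for (i = 0; i < stats->num_counters; i++)
 *			stats->value[i] = my_read_counter(dev, port_num, i);
 *
 *		return stats->num_counters;   (all counters were updated)
 *	}
 */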
struct ib_core_device {
	/* device must be the first element in the structure, so long as the
	 * union of ib_core_device and device exists in ib_device.
	 */
	struct device dev;
	possible_net_t rdma_net;
	struct kobject *ports_kobj;
	struct list_head port_list;
	struct ib_device *owner; /* reach back to owner ib_device */
};

struct rdma_restrack_root;
struct ib_device {
	/* Do not access @dma_device directly from ULP nor from HW drivers. */
	struct device                *dma_device;
	struct ib_device_ops	     ops;
	char                          name[IB_DEVICE_NAME_MAX];
	struct rcu_head rcu_head;

	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	struct rw_semaphore	      client_data_rwsem;
	struct xarray                 client_data;
	struct mutex                  unregistration_lock;

	struct ib_cache               cache;
	/**
	 * port_data is indexed by port number
	 */
	struct ib_port_data *port_data;

	int			      num_comp_vectors;

	union {
		struct device		dev;
		struct ib_core_device	coredev;
	};

	/* First group for device attributes,
	 * Second group for driver provided attributes (optional).
	 * It is a NULL terminated array.
	 */
	const struct attribute_group	*groups[3];

	u64			     uverbs_cmd_mask;
	u64			     uverbs_ex_cmd_mask;

	char			     node_desc[IB_DEVICE_NODE_DESC_MAX];
	__be64			     node_guid;
	u32			     local_dma_lkey;
	u16                          is_switch:1;
	/* Indicates kernel verbs support, should not be used in drivers */
	u16                          kverbs_provider:1;
	/* CQ adaptive moderation (RDMA DIM) */
	u16                          use_cq_dim:1;
	u8                           node_type;
	u8                           phys_port_cnt;
	struct ib_device_attr        attrs;
	struct attribute_group	     *hw_stats_ag;
	struct rdma_hw_stats         *hw_stats;

#ifdef CONFIG_CGROUP_RDMA
	struct rdmacg_device         cg_device;
#endif

	u32                          index;
	struct rdma_restrack_root *res;

	const struct uapi_definition   *driver_def;

	/*
	 * Positive refcount indicates that the device is currently
	 * registered and cannot be unregistered.
	 */
	refcount_t refcount;
	struct completion unreg_completion;
	struct work_struct unregistration_work;

	const struct rdma_link_ops *link_ops;

	/* Protects compat_devs xarray modifications */
	struct mutex compat_devs_mutex;
	/* Maintains compat devices for each net namespace */
	struct xarray compat_devs;

	/* Used by iWARP CM */
	char iw_ifname[IFNAMSIZ];
	u32 iw_driver_flags;
};

struct ib_client_nl_info;
struct ib_client {
	const char *name;
	void (*add)(struct ib_device *);
	void (*remove)(struct ib_device *, void *client_data);
	void (*rename)(struct ib_device *dev, void *client_data);
	int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
			   struct ib_client_nl_info *res);
	int (*get_global_nl_info)(struct ib_client_nl_info *res);

	/* Returns the net_dev belonging to this ib_client and matching the
	 * given parameters.
	 * @dev:	 An RDMA device that the net_dev uses for communication.
	 * @port:	 A physical port number on the RDMA device.
	 * @pkey:	 P_Key that the net_dev uses if applicable.
	 * @gid:	 A GID that the net_dev uses to communicate.
	 * @addr:	 An IP address the net_dev is configured with.
	 * @client_data: The device's client data set by ib_set_client_data().
	 *
	 * An ib_client that implements a net_dev on top of RDMA devices
	 * (such as IP over IB) should implement this callback, allowing the
	 * rdma_cm module to find the right net_dev for a given request.
	 *
	 * The caller is responsible for calling dev_put on the returned
	 * netdev. */
	struct net_device *(*get_net_dev_by_params)(
			struct ib_device *dev,
			u8 port,
			u16 pkey,
			const union ib_gid *gid,
			const struct sockaddr *addr,
			void *client_data);

	refcount_t uses;
	struct completion uses_zero;
	u32 client_id;

	/* kverbs are not required by the client */
	u8 no_kverbs_req:1;
};
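/*
 * Example (an illustrative sketch): a minimal ib_client. After
 * ib_register_client() (declared below) the core invokes add() for every
 * existing and future ib_device. "struct my_state" and the my_*() names
 * are placeholders.
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return;
 *		ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	static void my_remove(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 */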
/*
 * IB block DMA iterator
 *
 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
 * to a HW supported page size.
 */
struct ib_block_iter {
	/* internal states */
	struct scatterlist *__sg;	/* sg holding the current aligned block */
	dma_addr_t __dma_addr;		/* unaligned DMA address of this block */
	unsigned int __sg_nents;	/* number of SG entries */
	unsigned int __sg_advance;	/* number of bytes to advance in sg in next step */
	unsigned int __pg_bit;		/* alignment of current block */
};

struct ib_device *_ib_alloc_device(size_t size);
#define ib_alloc_device(drv_struct, member)                                    \
	container_of(_ib_alloc_device(sizeof(struct drv_struct) +              \
				      BUILD_BUG_ON_ZERO(offsetof(              \
					      struct drv_struct, member))),    \
		     struct drv_struct, member)

void ib_dealloc_device(struct ib_device *device);

void ib_get_device_fw_str(struct ib_device *device, char *str);

int ib_register_device(struct ib_device *device, const char *name);
void ib_unregister_device(struct ib_device *device);
void ib_unregister_driver(enum rdma_driver_id driver_id);
void ib_unregister_device_and_put(struct ib_device *device);
void ib_unregister_device_queued(struct ib_device *ib_dev);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents,
			     unsigned long pgsz);
bool __rdma_block_iter_next(struct ib_block_iter *biter);

/**
 * rdma_block_iter_dma_address - get the aligned dma address of the current
 * block held by the block iterator.
 * @biter: block iterator holding the memory block
 */
static inline dma_addr_t
rdma_block_iter_dma_address(struct ib_block_iter *biter)
{
	return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
}

/**
 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
 * @sglist: sglist to iterate over
 * @biter: block iterator holding the memory block
 * @nents: maximum number of sg entries to iterate over
 * @pgsz: best HW supported page size to use
 *
 * Callers may use rdma_block_iter_dma_address() to get each block's aligned
 * DMA address.
 */
#define rdma_for_each_block(sglist, biter, nents, pgsz)		\
	for (__rdma_block_iter_start(biter, sglist, nents,	\
				     pgsz);			\
	     __rdma_block_iter_next(biter);)
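/*
 * Example (an illustrative sketch): walking a DMA-mapped scatterlist in
 * blocks aligned to a HW page size. "pg_bit" would typically come from
 * rdma_find_pg_bit() (defined later in this header) and program_hw_page()
 * is a placeholder for driver-specific page-table programming.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_for_each_block(sglist, &biter, nents, 1UL << pg_bit)
 *		program_hw_page(mr, rdma_block_iter_dma_address(&biter));
 */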
/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns the client context data set with
 * ib_set_client_data(). This can only be called while the client is
 * registered to the device; once the ib_client remove() callback returns,
 * this cannot be called.
 */
static inline void *ib_get_client_data(struct ib_device *device,
				       struct ib_client *client)
{
	return xa_load(&device->client_data, client->client_id);
}
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);
void ib_set_device_ops(struct ib_device *device,
		       const struct ib_device_ops *ops);

#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot);
#else
static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
				    struct vm_area_struct *vma,
				    unsigned long pfn, unsigned long size,
				    pgprot_t prot)
{
	return -EINVAL;
}
#endif

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

static inline bool ib_is_buffer_cleared(const void __user *p,
					size_t len)
{
	bool ret;
	u8 *buf;

	if (len > USHRT_MAX)
		return false;

	buf = memdup_user(p, len);
	if (IS_ERR(buf))
		return false;

	ret = !memchr_inv(buf, 0, len);
	kfree(buf);
	return ret;
}

static inline bool ib_is_udata_cleared(struct ib_udata *udata,
				       size_t offset,
				       size_t len)
{
	return ib_is_buffer_cleared(udata->inbuf + offset, len);
}
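/*
 * Example (an illustrative sketch): a driver verb parsing its user request.
 * The known part of the request is copied with ib_copy_from_udata(); to
 * stay forward compatible, a larger request is accepted only when the
 * unknown tail is zeroed. "struct my_create_req" is a placeholder for a
 * driver ABI structure.
 *
 *	struct my_create_req req = {};
 *	int ret;
 *
 *	ret = ib_copy_from_udata(&req, udata,
 *				 min(udata->inlen, sizeof(req)));
 *	if (ret)
 *		return ret;
 *	if (udata->inlen > sizeof(req) &&
 *	    !ib_is_udata_cleared(udata, sizeof(req),
 *				 udata->inlen - sizeof(req)))
 *		return -EOPNOTSUPP;
 */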
/**
 * ib_is_destroy_retryable - Check whether the uobject destruction
 * is retryable.
 * @ret: The initial destruction return code
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * This function is a helper function that IB layer and low-level drivers
 * can use to consider whether the destruction of the given uobject is
 * retryable.
 * It checks the original return code; if it was not success, the
 * destruction is retryable according to the ucontext state
 * (i.e. cleanup_retryable) and the remove reason (i.e. why).
 * Must be called with the object locked for destroy.
 */
static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
					   struct ib_uobject *uobj)
{
	return ret && (why == RDMA_REMOVE_DESTROY ||
		       uobj->context->cleanup_retryable);
}

/**
 * ib_destroy_usecnt - Called during destruction to check the usecnt
 * @usecnt: The usecnt atomic
 * @why: remove reason
 * @uobj: The uobject that is destroyed
 *
 * Non-zero usecnts will block destruction unless destruction was triggered by
 * a ucontext cleanup.
 */
static inline int ib_destroy_usecnt(atomic_t *usecnt,
				    enum rdma_remove_reason why,
				    struct ib_uobject *uobj)
{
	if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
		return -EBUSY;
	return 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input. It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
			enum ib_qp_type type, enum ib_qp_attr_mask mask);

void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					       u8 port_num);

/**
 * rdma_cap_ib_switch - Check if the device is IB switch
 * @device: Device to check
 *
 * Device driver is responsible for setting the is_switch bit in
 * the ib_device structure at init time.
 *
 * Return: true if the device is IB switch.
 */
static inline bool rdma_cap_ib_switch(const struct ib_device *device)
{
	return device->is_switch;
}

/**
 * rdma_start_port - Return the first valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return start port number
 */
static inline u8 rdma_start_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : 1;
}

/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device: The struct ib_device * to iterate over
 * @iter: The unsigned int to store the port number
 */
#define rdma_for_each_port(device, iter)                                       \
	for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type(  \
		     unsigned int, iter)));                                    \
	     iter <= rdma_end_port(device); (iter)++)

/**
 * rdma_end_port - Return the last valid port number for the device
 * specified
 *
 * @device: Device to be checked
 *
 * Return last port number
 */
static inline u8 rdma_end_port(const struct ib_device *device)
{
	return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
}

static inline int rdma_is_port_valid(const struct ib_device *device,
				     unsigned int port)
{
	return (port >= rdma_start_port(device) &&
		port <= rdma_end_port(device));
}
static inline bool rdma_is_grh_required(const struct ib_device *device,
					u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_PORT_IB_GRH_REQUIRED;
}

static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IB;
}

static inline bool rdma_protocol_roce(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
}

static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
}

static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_ROCE;
}

static inline bool rdma_protocol_iwarp(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_IWARP;
}

static inline bool rdma_ib_or_roce(const struct ib_device *device, u8 port_num)
{
	return rdma_protocol_ib(device, port_num) ||
	       rdma_protocol_roce(device, port_num);
}

static inline bool rdma_protocol_raw_packet(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_RAW_PACKET;
}

static inline bool rdma_protocol_usnic(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_PROT_USNIC;
}
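/*
 * Example (an illustrative sketch): visiting every valid port of a device
 * and acting only on the RoCE-capable ones; setup_roce_port() is a
 * placeholder for caller-specific work.
 *
 *	unsigned int port;
 *
 *	rdma_for_each_port(device, port) {
 *		if (!rdma_protocol_roce(device, port))
 *			continue;
 *		setup_roce_port(device, port);
 *	}
 */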
/**
 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Management Datagrams (MAD) are a required part of the InfiniBand
 * specification and are supported on all InfiniBand devices. A slightly
 * extended version is also supported on OPA interfaces.
 *
 * Return: true if the port supports sending/receiving of MAD packets.
 */
static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_MAD;
}

/**
 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
 * Management Datagrams.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Intel OmniPath devices extend and/or replace the InfiniBand Management
 * datagrams with their own versions. These OPA MADs share many but not all of
 * the characteristics of InfiniBand MADs.
 *
 * OPA MADs differ in the following ways:
 *
 *    1) MADs are variable size up to 2K
 *       IBTA defined MADs remain fixed at 256 bytes
 *    2) OPA SMPs must carry valid PKeys
 *    3) OPA SMP packets are a different format
 *
 * Return: true if the port supports OPA MAD packet formats.
 */
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_OPA_MAD;
}

/**
 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband
 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Each InfiniBand node is required to provide a Subnet Management Agent
 * that the subnet manager can access. Prior to the fabric being fully
 * configured by the subnet manager, the SMA is accessed via a well known
 * interface called the Subnet Management Interface (SMI). This interface
 * uses directed route packets to communicate with the SM to get around the
 * chicken and egg problem of the SM needing to know what's on the fabric
 * in order to configure the fabric, and needing to configure the fabric in
 * order to send packets to the devices on the fabric. These directed
 * route packets do not need the fabric fully configured in order to reach
 * their destination. The SMI is the only method allowed to send
 * directed route packets on an InfiniBand fabric.
 *
 * Return: true if the port provides an SMI.
 */
static inline bool rdma_cap_ib_smi(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SMI;
}

/**
 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * The InfiniBand Communication Manager is one of many pre-defined General
 * Service Agents (GSA) that are accessed via the General Service
 * Interface (GSI). Its role is to facilitate establishment of connections
 * between nodes as well as other management related tasks for established
 * connections.
 *
 * Return: true if the port supports an IB CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_ib_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_CM;
}

/**
 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP
 * Communication Manager.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Similar to above, but specific to iWARP connections which have a different
 * management protocol than InfiniBand.
 *
 * Return: true if the port supports an iWARP CM (this does not guarantee that
 * a CM is actually running however).
 */
static inline bool rdma_cap_iw_cm(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IW_CM;
}
/**
 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband
 * Subnet Administration.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
 * fabrics, devices should resolve routes to other hosts by contacting the
 * SA to query the proper route.
 *
 * Return: true if the port should act as a client to the fabric Subnet
 * Administration interface. This does not imply that the SA service is
 * running locally.
 */
static inline bool rdma_cap_ib_sa(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_IB_SA;
}

/**
 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband
 * Multicast.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand multicast registration is more complex than normal IPv4 or
 * IPv6 multicast registration. Each Host Channel Adapter must register
 * with the Subnet Manager when it wishes to join a multicast group. It
 * should do so only once regardless of how many queue pairs it subscribes
 * to this group. And it should leave the group only after all queue pairs
 * attached to the group have been detached.
 *
 * Return: true if the port must undertake the additional administrative
 * overhead of registering/unregistering with the SM and tracking of the
 * total number of queue pairs attached to the multicast group.
 */
static inline bool rdma_cap_ib_mcast(const struct ib_device *device, u8 port_num)
{
	return rdma_cap_ib_sa(device, port_num);
}

/**
 * rdma_cap_af_ib - Check if the port of device has the capability
 * Native Infiniband Address.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
 * GID. RoCE uses a different mechanism, but still generates a GID via
 * a prescribed mechanism and port specific data.
 *
 * Return: true if the port uses a GID address to identify devices on the
 * network.
 */
static inline bool rdma_cap_af_ib(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_AF_IB;
}
/**
 * rdma_cap_eth_ah - Check if the port of device has the capability
 * Ethernet Address Handle.
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
 * to fabricate GIDs over Ethernet/IP specific addresses native to the
 * port. Normally, packet headers are generated by the sending host
 * adapter, but when sending connectionless datagrams, we must manually
 * inject the proper headers for the fabric we are communicating over.
 *
 * Return: true if we are running as a RoCE port and must force the
 * addition of a Global Route Header built from our Ethernet Address
 * Handle into our header list for connectionless packets.
 */
static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.core_cap_flags &
	       RDMA_CORE_CAP_ETH_AH;
}

/**
 * rdma_cap_opa_ah - Check if the port of device supports
 * OPA Address handles
 * @device: Device to check
 * @port_num: Port number to check
 *
 * Return: true if we are running on an OPA device which supports
 * the extended OPA addressing.
 */
static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
{
	return (device->port_data[port_num].immutable.core_cap_flags &
		RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
}

/**
 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
 *
 * @device: Device
 * @port_num: Port number
 *
 * This MAD size includes the MAD headers and MAD payload. No other headers
 * are included.
 *
 * Return the max MAD size required by the Port. Will return 0 if the port
 * does not support MADs
 */
static inline size_t rdma_max_mad_size(const struct ib_device *device, u8 port_num)
{
	return device->port_data[port_num].immutable.max_mad_size;
}

/**
 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
 * @device: Device to check
 * @port_num: Port number to check
 *
 * RoCE GID table mechanism manages the various GIDs for a device.
 *
 * NOTE: if allocating the port's GID table has failed, this call will still
 * return true, but any RoCE GID table API will fail.
 *
 * Return: true if the port uses RoCE GID table mechanism in order to manage
 * its GIDs.
 */
static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
					   u8 port_num)
{
	return rdma_protocol_roce(device, port_num) &&
		device->ops.add_gid && device->ops.del_gid;
}

/*
 * Check if the device supports READ W/ INVALIDATE.
 */
static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
{
	/*
	 * iWARP drivers must support READ W/ INVALIDATE. No other protocol
	 * has support for it yet.
	 */
	return rdma_protocol_iwarp(dev, port_num);
}

/**
 * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
 *
 * @addr: address
 * @pgsz_bitmap: bitmap of HW supported page sizes
 */
static inline unsigned int rdma_find_pg_bit(unsigned long addr,
					    unsigned long pgsz_bitmap)
{
	unsigned long align;
	unsigned long pgsz;

	align = addr & -addr;

	/* Find page bit such that addr is aligned to the highest supported
	 * HW page size
	 */
	pgsz = pgsz_bitmap & ~(-align << 1);
	if (!pgsz)
		return __ffs(pgsz_bitmap);

	return __fls(pgsz);
}
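/*
 * Worked example for rdma_find_pg_bit(): with addr = 0x2200000 (2 MB
 * aligned) and pgsz_bitmap = SZ_4K | SZ_2M | SZ_1G, align is 0x200000, the
 * surviving page sizes are 4K and 2M, and __fls() picks the largest, so
 * the function returns 21 (the 2 MB page bit). If addr were only 4 KB
 * aligned, it would return 12.
 */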
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

enum ib_pd_flags {
	/*
	 * Create a memory registration for all memory in the system and place
	 * the rkey for it into pd->unsafe_global_rkey. This can be used by
	 * ULPs to avoid the overhead of dynamic MRs.
	 *
	 * This flag is generally considered unsafe and must only be used in
	 * extremely trusted environments. Every use of it will log a warning
	 * in the kernel log.
	 */
	IB_PD_UNSAFE_GLOBAL_RKEY	= 0x01,
};

struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller);

#define ib_alloc_pd(device, flags) \
	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)

/**
 * ib_dealloc_pd_user - Deallocate kernel/user PD
 * @pd: The protection domain
 * @udata: Valid user data or NULL for kernel objects
 */
void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);

/**
 * ib_dealloc_pd - Deallocate kernel PD
 * @pd: The protection domain
 *
 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
 */
static inline void ib_dealloc_pd(struct ib_pd *pd)
{
	ib_dealloc_pd_user(pd, NULL);
}
enum rdma_create_ah_flags {
	/* In a sleepable context */
	RDMA_CREATE_AH_SLEEPABLE = BIT(0),
};

/**
 * rdma_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
			     u32 flags);

/**
 * rdma_create_user_ah - Creates an address handle for the given address vector.
 * It resolves the destination mac address for ah attributes of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 *   the provider driver.
 *
 * It returns the created address handle on success and an appropriate
 * error pointer on failure.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata);
/**
 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
 * work completion.
 * @hdr: the L3 header to parse
 * @net_type: type of header to parse
 * @sgid: place to store source gid
 * @dgid: place to store destination gid
 */
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid);

/**
 * ib_get_rdma_header_version - Get the header version
 * @hdr: the L3 header to parse
 */
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);

/**
 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
 * work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 * When ib_init_ah_attr_from_wc() returns success,
 * (a) for IB link layer it optionally contains a reference to SGID attribute
 * when GRH is present for IB link layer.
 * (b) for RoCE link layer it contains a reference to SGID attribute.
 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
 * attributes which are initialized using ib_init_ah_attr_from_wc().
 */
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
			    const struct ib_wc *wc, const struct ib_grh *grh,
			    struct rdma_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 * sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num);
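/*
 * Example (an illustrative sketch): creating an AH to reply to a received
 * UD message. "recv_buf" is the receive buffer the message landed in; for
 * completions that carry a GRH it starts with the 40-byte GRH, and the grh
 * argument is ignored otherwise (see ib_create_ah_from_wc() above).
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *)recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *
 * The reply would then be posted on a UD QP using this AH together with the
 * remote QPN/QKey from the completion, and the AH released afterwards with
 * rdma_destroy_ah().
 */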
/**
 * rdma_modify_ah - Modifies the address vector associated with an address
 * handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

/**
 * rdma_query_ah - Queries the address vector associated with an address
 * handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);

enum rdma_destroy_ah_flags {
	/* In a sleepable context */
	RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
};

/**
 * rdma_destroy_ah_user - Destroys an address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 * @udata: Valid user data or NULL for kernel objects
 */
int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);

/**
 * rdma_destroy_ah - Destroys a kernel address handle.
 * @ah: The address handle to destroy.
 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
 *
 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
 */
static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return rdma_destroy_ah_user(ah, flags, NULL);
}

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ. If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 * specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq_user - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 * @udata: Valid user data or NULL for kernel objects
 */
int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);

/**
 * ib_destroy_srq - Destroys the specified kernel SRQ.
 * @srq: The SRQ to destroy.
 *
 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
 */
static inline int ib_destroy_srq(struct ib_srq *srq)
{
	return ib_destroy_srq_user(srq, NULL);
}

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   const struct ib_recv_wr *recv_wr,
				   const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return srq->device->ops.post_srq_recv(srq, recv_wr,
					      bad_recv_wr ? : &dummy);
}
/**
 * ib_create_qp_user - Creates a QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP. If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 * @udata: Valid user data or NULL for kernel objects
 */
struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata);

/**
 * ib_create_qp - Creates a kernel QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP. If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 *
 * NOTE: for user qp use ib_create_qp_user with valid udata!
 */
static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
					 struct ib_qp_init_attr *qp_init_attr)
{
	return ib_create_qp_user(pd, qp_init_attr, NULL);
}

/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to the user's input/output buffer information
 *
 * It returns 0 on success and returns appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp,
			    struct ib_qp_attr *attr,
			    int attr_mask,
			    struct ib_udata *udata);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
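/*
 * Example (an illustrative sketch): transitioning a newly created kernel QP
 * to the INIT state. Only the attributes selected by the mask are read; the
 * mask bits required for each transition come from the IB spec and can be
 * validated with ib_modify_qp_is_ok() (declared above). The values shown
 * are placeholders.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			   IB_QP_ACCESS_FLAGS);
 */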

/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp_user - Destroys the specified QP.
 * @qp: The QP to destroy.
 * @udata: Valid udata or NULL for kernel objects
 */
int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);

/**
 * ib_destroy_qp - Destroys the specified kernel QP.
 * @qp: The QP to destroy.
 *
 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
 */
static inline int ib_destroy_qp(struct ib_qp *qp)
{
	return ib_destroy_qp_user(qp, NULL);
}

/**
 * ib_open_qp - Obtain a reference to an existing sharable QP.
 * @xrcd: XRC domain
 * @qp_open_attr: Attributes identifying the QP to open.
 *
 * Returns a reference to a sharable QP.
 */
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr);

/**
 * ib_close_qp - Release an external reference to a QP.
 * @qp: The QP handle to release
 *
 * The opened QP handle is released by the caller. The underlying
 * shared QP is not destroyed until all internal references are released.
 */
int ib_close_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 *
 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
 * error is returned, the QP state shall not be affected,
 * ib_post_send() will return an immediate error after queueing any
 * earlier work requests in the list.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       const struct ib_send_wr *send_wr,
			       const struct ib_send_wr **bad_send_wr)
{
	const struct ib_send_wr *dummy;

	return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
}

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 * the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       const struct ib_recv_wr *recv_wr,
			       const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *dummy;

	return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
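
/*
 * Example (illustrative sketch only): posting a single signaled SEND work
 * request. dma_addr, len, ctx, pd and qp are assumed to exist in the
 * caller.
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr,
 *		.length = len,
 *		.lkey = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id = (uintptr_t)ctx,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int err;
 *
 *	err = ib_post_send(qp, &wr, &bad_wr);
 *	if (err)
 *		return err;
 */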

struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
				 int nr_cqe, int comp_vector,
				 enum ib_poll_context poll_ctx,
				 const char *caller, struct ib_udata *udata);

/**
 * ib_alloc_cq_user: Allocate kernel/user CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @comp_vector: Completion vector used for the IRQs
 * @poll_ctx: Context used for polling the CQ
 * @udata: Valid user data or NULL for kernel objects
 */
static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
					     void *private, int nr_cqe,
					     int comp_vector,
					     enum ib_poll_context poll_ctx,
					     struct ib_udata *udata)
{
	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
				  KBUILD_MODNAME, udata);
}

/**
 * ib_alloc_cq: Allocate kernel CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @comp_vector: Completion vector used for the IRQs
 * @poll_ctx: Context used for polling the CQ
 *
 * NOTE: for user cq use ib_alloc_cq_user with valid udata!
 */
static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
					int nr_cqe, int comp_vector,
					enum ib_poll_context poll_ctx)
{
	return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
				NULL);
}

struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller);

/**
 * ib_alloc_cq_any: Allocate kernel CQ
 * @dev: The IB device
 * @private: Private data attached to the CQE
 * @nr_cqe: Number of CQEs in the CQ
 * @poll_ctx: Context used for polling the CQ
 */
static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
					    void *private, int nr_cqe,
					    enum ib_poll_context poll_ctx)
{
	return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
				 KBUILD_MODNAME);
}

/**
 * ib_free_cq_user - Free kernel/user CQ
 * @cq: The CQ to free
 * @udata: Valid user data or NULL for kernel objects
 */
void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);

/**
 * ib_free_cq - Free kernel CQ
 * @cq: The CQ to free
 *
 * NOTE: for user cq use ib_free_cq_user with valid udata!
 */
static inline void ib_free_cq(struct ib_cq *cq)
{
	ib_free_cq_user(cq, NULL);
}

int ib_process_cq_direct(struct ib_cq *cq, int budget);
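
/*
 * Example (illustrative sketch only): allocating a kernel CQ polled from
 * softirq context and freeing it again. The CQE count and completion
 * vector are placeholder values; dev and ctx come from the caller.
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq(dev, ctx, 256, 0, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	// ... attach the CQ to a QP and run traffic ...
 *	ib_free_cq(cq);
 */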

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 * completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 * asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 * the associated completion and event handlers.
 * @cq_attr: The attributes the CQ should be created upon.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *__ib_create_cq(struct ib_device *device,
			     ib_comp_handler comp_handler,
			     void (*event_handler)(struct ib_event *, void *),
			     void *cq_context,
			     const struct ib_cq_init_attr *cq_attr,
			     const char *caller);
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
	__ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * rdma_set_cq_moderation - Modifies moderation params of the CQ
 * @cq: The CQ to modify.
 * @cq_count: number of CQEs that will trigger an event
 * @cq_period: max period of time in usec before triggering an event
 */
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);

/**
 * ib_destroy_cq_user - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 * @udata: Valid user data or NULL for kernel objects
 */
int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);

/**
 * ib_destroy_cq - Destroys the specified kernel CQ.
 * @cq: The CQ to destroy.
 *
 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
 */
static inline void ib_destroy_cq(struct ib_cq *cq)
{
	ib_destroy_cq_user(cq, NULL);
}

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 * will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->ops.poll_cq(cq, num_entries, wc);
}
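
/*
 * Example (illustrative sketch only): draining all currently queued
 * completions from a CQ in small batches. handle_error() is a
 * hypothetical caller-provided helper.
 *
 *	struct ib_wc wc[16];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				handle_error(&wc[i]);
 *		}
 *	}
 */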

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event. In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in. It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ. This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->ops.req_notify_cq(cq, flags);
}

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 * at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 * CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->ops.req_ncomp_notif ?
		cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_mapping_error(dev->dma_device, dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	dma_unmap_single(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	dma_unmap_page(dev->dma_device, addr, size, direction);
}
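
/*
 * Example (illustrative sketch only): mapping a kernel buffer for a
 * device-bound transfer and checking the result before use. buf and len
 * are assumed to exist in the caller.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	// ... post work requests referencing dma_addr ...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */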

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
					 struct scatterlist *sg, int nents,
					 enum dma_data_direction direction,
					 unsigned long dma_attrs)
{
	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}

/**
 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
 * @dev: The device to query
 *
 * The returned value represents a size in bytes.
 */
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
	struct device_dma_parameters *p = dev->dma_device->dma_parms;

	return p ? p->max_segment_size : UINT_MAX;
}

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  dma_addr_t *dma_handle,
					  gfp_t flag)
{
	return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					dma_addr_t dma_handle)
{
	dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
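
/*
 * Example (illustrative sketch only): allocating a coherent DMA buffer
 * and releasing it when done.
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = ib_dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	// ... both the CPU and the device may access the buffer ...
 *	ib_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 */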

/**
 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
 * HCA translation table.
 * @mr: The memory region to deregister.
 * @udata: Valid user data or NULL for kernel object
 *
 * This function can fail, if the memory region has memory windows bound to it.
 */
int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);

/**
 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
 * HCA translation table.
 * @mr: The memory region to deregister.
 *
 * This function can fail, if the memory region has memory windows bound to it.
 *
 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
 */
static inline int ib_dereg_mr(struct ib_mr *mr)
{
	return ib_dereg_mr_user(mr, NULL);
}

struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata);

static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg)
{
	return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
}

struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
				    u32 max_num_data_sg,
				    u32 max_num_meta_sg);

/**
 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
 * R_Key and L_Key.
 * @mr: struct ib_mr pointer to be updated.
 * @newkey: new key to be used.
 */
static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
{
	mr->lkey = (mr->lkey & 0xffffff00) | newkey;
	mr->rkey = (mr->rkey & 0xffffff00) | newkey;
}

/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey: the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
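
/*
 * Example (illustrative sketch only): a fast registration flow typically
 * bumps the key portion before reusing an MR, so that stale remote
 * accesses carrying the old rkey are rejected:
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey) & 0xff);
 *
 * Similarly, ib_inc_rkey() alone yields the next rkey value when
 * rebinding a type 2 memory window.
 */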

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group. The QP must be type
 * IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately. The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_alloc_xrcd - Allocates an XRC domain.
 * @device: The device on which to allocate the XRC domain.
 * @caller: Module name for kernel consumers
 */
struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
#define ib_alloc_xrcd(device) \
	__ib_alloc_xrcd((device), KBUILD_MODNAME)

/**
 * ib_dealloc_xrcd - Deallocates an XRC domain.
 * @xrcd: The XRC domain to deallocate.
 * @udata: Valid user data or NULL for kernel object
 */
int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);

static inline int ib_check_mr_access(int flags)
{
	/*
	 * Local write permission is required if remote write or
	 * remote atomic permission is also requested.
	 */
	if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
	    !(flags & IB_ACCESS_LOCAL_WRITE))
		return -EINVAL;

	return 0;
}

static inline bool ib_access_writable(int access_flags)
{
	/*
	 * We have writable memory backing the MR if any of the following
	 * access flags are set. "Local write" and "remote write" obviously
	 * require write access. "Remote atomic" can do things like fetch and
	 * add, which will modify memory, and "MW bind" can change permissions
	 * by binding a window.
	 */
	return access_flags &
		(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
}

/**
 * ib_check_mr_status: lightweight check of MR status.
 * This routine may provide status checks on a selected
 * ib_mr. First use is for signature status check.
 *
 * @mr: A memory region.
 * @check_mask: Bitmask of which checks to perform from
 * ib_mr_status_check enumeration.
 * @mr_status: The container of relevant status checks.
 * Failed checks will be indicated in the status bitmask
 * and the relevant info shall be in the error item.
 */
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status);

/**
 * ib_device_try_get: Hold a registration lock
 * @dev: The device to lock
 *
 * A device under an active registration lock cannot become unregistered. It
 * is only possible to obtain a registration lock on a device that is fully
 * registered, otherwise this function returns false.
 *
 * The registration lock is only necessary for actions which require the
 * device to still be registered. Uses that only require the device pointer to
 * be valid should use get_device(&ibdev->dev) to hold the memory.
 */
static inline bool ib_device_try_get(struct ib_device *dev)
{
	return refcount_inc_not_zero(&dev->refcount);
}
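
/*
 * Example (illustrative sketch only): the usual pattern for acting on a
 * device only while it remains registered.
 *
 *	if (!ib_device_try_get(ibdev))
 *		return -ENODEV;
 *	// ... work that requires a registered device ...
 *	ib_device_put(ibdev);
 */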

void ib_device_put(struct ib_device *device);
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
					  enum rdma_driver_id driver_id);
struct ib_device *ib_device_get_by_name(const char *name,
					enum rdma_driver_id driver_id);
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
					    u16 pkey, const union ib_gid *gid,
					    const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
			 unsigned int port);
struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);

struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *init_attr);
int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
		 u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *
						 wq_ind_table_init_attr);
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size);
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
		    int data_sg_nents, unsigned int *data_sg_offset,
		    struct scatterlist *meta_sg, int meta_sg_nents,
		    unsigned int *meta_sg_offset, unsigned int page_size);

static inline int
ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		  unsigned int *sg_offset, unsigned int page_size)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
	mr->iova = 0;

	return n;
}

int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		   unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));

void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);

int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);

static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
		return attr->roce.dmac;
	return NULL;
}

static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.dlid = (u16)dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.dlid = dlid;
}

static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.dlid;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.dlid;
	return 0;
}

static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
{
	attr->sl = sl;
}

static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
{
	return attr->sl;
}
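
/*
 * Example (illustrative sketch only): filling an address vector through
 * the accessors rather than touching the per-type union members
 * directly. dlid and port_num are assumed to come from the caller;
 * rdma_ah_find_type() is defined further below.
 *
 *	struct rdma_ah_attr attr = {};
 *
 *	attr.type = rdma_ah_find_type(dev, port_num);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_port_num(&attr, port_num);
 */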

static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
					 u8 src_path_bits)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		attr->ib.src_path_bits = src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.src_path_bits = src_path_bits;
}

static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_IB)
		return attr->ib.src_path_bits;
	else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.src_path_bits;
	return 0;
}

static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
					bool make_grd)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		attr->opa.make_grd = make_grd;
}

static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
{
	if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
		return attr->opa.make_grd;
	return false;
}

static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
{
	attr->port_num = port_num;
}

static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
{
	return attr->port_num;
}

static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
					   u8 static_rate)
{
	attr->static_rate = static_rate;
}

static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
{
	return attr->static_rate;
}

static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
					enum ib_ah_flags flag)
{
	attr->ah_flags = flag;
}

static inline enum ib_ah_flags
rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
{
	return attr->ah_flags;
}

static inline const struct ib_global_route
*rdma_ah_read_grh(const struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

/* To retrieve and modify the GRH */
static inline struct ib_global_route
*rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
{
	return &attr->grh;
}

static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
}

static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
					     __be64 prefix)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.subnet_prefix = prefix;
}

static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
					    __be64 if_id)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	grh->dgid.global.interface_id = if_id;
}

static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
				   union ib_gid *dgid, u32 flow_label,
				   u8 sgid_index, u8 hop_limit,
				   u8 traffic_class)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);

	attr->ah_flags = IB_AH_GRH;
	if (dgid)
		grh->dgid = *dgid;
	grh->flow_label = flow_label;
	grh->sgid_index = sgid_index;
	grh->hop_limit = hop_limit;
	grh->traffic_class = traffic_class;
	grh->sgid_attr = NULL;
}
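
/*
 * Example (illustrative sketch only): RoCE address vectors always carry
 * a GRH, which can be programmed in a single call; the flow label, SGID
 * index, hop limit and traffic class would normally come from path
 * resolution.
 *
 *	rdma_ah_set_grh(&attr, &dgid, flow_label, sgid_index,
 *			hop_limit, traffic_class);
 */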

void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);

void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
			     u32 flow_label, u8 hop_limit, u8 traffic_class,
			     const struct ib_gid_attr *sgid_attr);
void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
		       const struct rdma_ah_attr *src);
void rdma_replace_ah_attr(struct rdma_ah_attr *old,
			  const struct rdma_ah_attr *new);
void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);

/**
 * rdma_ah_find_type - Return address handle type.
 *
 * @dev: Device to be checked
 * @port_num: Port number
 */
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
						       u8 port_num)
{
	if (rdma_protocol_roce(dev, port_num))
		return RDMA_AH_ATTR_TYPE_ROCE;
	if (rdma_protocol_ib(dev, port_num)) {
		if (rdma_cap_opa_ah(dev, port_num))
			return RDMA_AH_ATTR_TYPE_OPA;
		return RDMA_AH_ATTR_TYPE_IB;
	}

	return RDMA_AH_ATTR_TYPE_UNDEFINED;
}

/**
 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
 * In the current implementation the only way to
 * get the 32bit lid is from other sources for OPA.
 * For IB, lids will always be 16bits so cast the
 * value accordingly.
 *
 * @lid: A 32bit LID
 */
static inline u16 ib_lid_cpu16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return (u16)lid;
}

/**
 * ib_lid_be16 - Return lid in 16bit BE encoding.
 *
 * @lid: A 32bit LID
 */
static inline __be16 ib_lid_be16(u32 lid)
{
	WARN_ON_ONCE(lid & 0xFFFF0000);
	return cpu_to_be16((u16)lid);
}

/**
 * ib_get_vector_affinity - Get the affinity mappings of a given completion
 * vector
 * @device: the rdma device
 * @comp_vector: index of completion vector
 *
 * Returns NULL on failure, otherwise a corresponding cpu map of the
 * completion vector (returns all-cpus map if the device driver doesn't
 * implement get_vector_affinity).
 */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);
}
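
/*
 * Example (illustrative sketch only): choosing a CPU for per-CQ work
 * based on the driver-reported affinity of the completion vector.
 *
 *	const struct cpumask *mask;
 *	int cpu;
 *
 *	mask = ib_get_vector_affinity(device, comp_vector);
 *	cpu = mask ? cpumask_first(mask) : raw_smp_processor_id();
 */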

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ibdev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ibdev);

struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);

int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);

struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *));

int rdma_init_netdev(struct ib_device *device, u8 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev);

/**
 * rdma_set_device_sysfs_group - Set device attributes group to have
 *				 driver specific sysfs entries for the
 *				 infiniband class.
 *
 * @dev: device pointer for which the attributes are to be created
 * @group: Pointer to a group which should be added when the device
 * is registered with sysfs.
 * rdma_set_device_sysfs_group() allows existing drivers to expose one
 * group per device to have sysfs attributes.
 *
 * NOTE: New drivers should not make use of this API; instead, new device
 * parameters should be exposed via the netlink command. This API and
 * mechanism exist only for existing drivers.
 */
static inline void
rdma_set_device_sysfs_group(struct ib_device *dev,
			    const struct attribute_group *group)
{
	dev->groups[1] = group;
}

/**
 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
 *
 * @device: device pointer from which the ib_device pointer is to be retrieved
 *
 * rdma_device_to_ibdev() retrieves the ib_device pointer from a device.
 */
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
	struct ib_core_device *coredev =
		container_of(device, struct ib_core_device, dev);

	return coredev->owner;
}

/**
 * rdma_device_to_drv_device - Helper macro to reach back to driver's
 *			       ib_device holder structure from device pointer.
 *
 * NOTE: New drivers should not make use of this API; This API is only for
 * existing drivers who have exposed sysfs entries using
 * rdma_set_device_sysfs_group().
 */
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member)	\
	container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)

bool rdma_dev_access_netns(const struct ib_device *device,
			   const struct net *net);
#endif /* IB_VERBS_H */