/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004, 2020 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 */

#ifndef IB_VERBS_H
#define IB_VERBS_H

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/irq_poll.h>
#include <uapi/linux/if_ether.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/refcount.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/dim.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/rdma_counter.h>
#include <rdma/restrack.h>
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>

#define IB_FW_VERSION_NAME_MAX	ETHTOOL_FWVERS_LEN

struct ib_umem_odp;
struct ib_uqp_object;
struct ib_usrq_object;
struct ib_uwq_object;
struct rdma_cm_id;
struct ib_port;
struct hw_stats_device_data;

extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;

struct ib_ucq_object;

__printf(2, 3) __cold
void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
__printf(2, 3) __cold
void ibdev_info(const struct ib_device *ibdev, const char *format, ...);

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define ibdev_dbg(__dev, format, args...) \
	dynamic_ibdev_dbg(__dev, format, ##args)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
#endif

#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...)		\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	if (__ratelimit(&_rs))						\
		ibdev_level(ibdev, fmt, ##__VA_ARGS__);			\
} while (0)

#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_err_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
#define ibdev_info_ratelimited(ibdev, fmt, ...) \
	ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define ibdev_dbg_ratelimited(ibdev, fmt, ...)				\
do {									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs))	\
		__dynamic_ibdev_dbg(&descriptor, ibdev, fmt,		\
				    ##__VA_ARGS__);			\
} while (0)
#else
__printf(2, 3) __cold
static inline
void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
#endif

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

extern union ib_gid zgid;

enum ib_gid_type {
	IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
	IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
	IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
	IB_GID_TYPE_SIZE
};

#define ROCE_V2_UDP_DPORT	4791
struct ib_gid_attr {
	struct net_device __rcu	*ndev;
	struct ib_device	*device;
	union ib_gid		gid;
	enum ib_gid_type	gid_type;
	u16			index;
	u32			port_num;
};

enum {
	/* set the local administered indication */
	IB_SA_WELL_KNOWN_GUID	= BIT_ULL(57) | 2,
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP,
	RDMA_TRANSPORT_USNIC,
	RDMA_TRANSPORT_USNIC_UDP,
	RDMA_TRANSPORT_UNSPECIFIED,
};

enum rdma_protocol_type {
	RDMA_PROTOCOL_IB,
	RDMA_PROTOCOL_IBOE,
	RDMA_PROTOCOL_IWARP,
	RDMA_PROTOCOL_USNIC_UDP
};

__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(unsigned int node_type);

enum rdma_network_type {
	RDMA_NETWORK_IB,
	RDMA_NETWORK_ROCE_V1,
	RDMA_NETWORK_IPV4,
	RDMA_NETWORK_IPV6
};

static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
{
	if (network_type == RDMA_NETWORK_IPV4 ||
	    network_type == RDMA_NETWORK_IPV6)
		return IB_GID_TYPE_ROCE_UDP_ENCAP;
	else if (network_type == RDMA_NETWORK_ROCE_V1)
		return IB_GID_TYPE_ROCE;
	else
		return IB_GID_TYPE_IB;
}

static inline enum rdma_network_type
rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
	if (attr->gid_type == IB_GID_TYPE_IB)
		return RDMA_NETWORK_IB;

	if (attr->gid_type == IB_GID_TYPE_ROCE)
		return RDMA_NETWORK_ROCE_V1;

	if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
		return RDMA_NETWORK_IPV4;
	else
		return RDMA_NETWORK_IPV6;
}

enum rdma_link_layer {
	IB_LINK_LAYER_UNSPECIFIED,
	IB_LINK_LAYER_INFINIBAND,
	IB_LINK_LAYER_ETHERNET,
};
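/*
 * Example (illustrative sketch, not part of the original header): a driver
 * could combine the rate-limited logging helpers above with
 * rdma_gid_attr_network_type() when inspecting a GID table entry.  The helper
 * name below is hypothetical.
 */
static inline void example_log_gid_network_type(const struct ib_gid_attr *attr)
{
	/* Emits at most one burst of messages per ratelimit interval. */
	ibdev_dbg_ratelimited(attr->device,
			      "gid index %u maps to network type %d\n",
			      attr->index, rdma_gid_attr_network_type(attr));
}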
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
	IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
	IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
	IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
	IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
	IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
	IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
	IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
	IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
	/* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
	IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
	IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
	IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
	IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
	IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,

	/* Reserved, old SEND_W_INV = 1 << 16,*/
	IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages.  Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
	IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,

	/*
	 * This device supports the IB "base memory management extension",
	 * which includes support for fast registrations (IB_WR_REG_MR,
	 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs).  This flag should
	 * also be set by any iWarp device which must support FRs to comply
	 * with the iWarp verbs spec.  iWarp devices also support the
	 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
	 * stag.
	 */
	IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
	IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
	IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
	IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
	IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
	IB_DEVICE_MANAGED_FLOW_STEERING =
		IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
	/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
	IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
	/* The device supports padding incoming writes to cacheline. */
	IB_DEVICE_PCI_WRITE_END_PADDING =
		IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
	/* Placement type attributes */
	IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
	IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
	IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
};

enum ib_kernel_cap_flags {
	/*
	 * This device supports a per-device lkey or stag that can be
	 * used without performing a memory registration for the local
	 * memory.  Note that ULPs should never check this flag, but
	 * instead use the local_dma_lkey flag in the ib_pd structure,
	 * which will always contain a usable lkey.
	 */
	IBK_LOCAL_DMA_LKEY = 1 << 0,
	/* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
	IBK_INTEGRITY_HANDOVER = 1 << 1,
	/* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
	IBK_ON_DEMAND_PAGING = 1 << 2,
	/* IB_MR_TYPE_SG_GAPS is supported */
	IBK_SG_GAPS_REG = 1 << 3,
	/* Driver supports RDMA_NLDEV_CMD_DELLINK */
	IBK_ALLOW_USER_UNREG = 1 << 4,

	/* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
	IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
	/* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
	IBK_UD_TSO = 1 << 6,
	/* ipoib will use the device ops:
	 *   get_vf_config
	 *   get_vf_guid
	 *   get_vf_stats
	 *   set_vf_guid
	 *   set_vf_link_state
	 */
	IBK_VIRTUAL_FUNCTION = 1 << 7,
	/* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
	IBK_RDMA_NETDEV_OPA = 1 << 8,
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

enum ib_odp_general_cap_bits {
	IB_ODP_SUPPORT = 1 << 0,
	IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
};

enum ib_odp_transport_cap_bits {
	IB_ODP_SUPPORT_SEND	= 1 << 0,
	IB_ODP_SUPPORT_RECV	= 1 << 1,
	IB_ODP_SUPPORT_WRITE	= 1 << 2,
	IB_ODP_SUPPORT_READ	= 1 << 3,
	IB_ODP_SUPPORT_ATOMIC	= 1 << 4,
	IB_ODP_SUPPORT_SRQ_RECV	= 1 << 5,
};

struct ib_odp_caps {
	uint64_t general_caps;
	struct {
		uint32_t rc_odp_caps;
		uint32_t uc_odp_caps;
		uint32_t ud_odp_caps;
		uint32_t xrc_odp_caps;
	} per_transport_caps;
};

struct ib_rss_caps {
	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	u32 supported_qpts;
	u32 max_rwq_indirection_tables;
	u32 max_rwq_indirection_table_size;
};

enum ib_tm_cap_flags {
	/* Support tag matching with rendezvous offload for RC transport */
	IB_TM_CAP_RNDV_RC = 1 << 0,
};

struct ib_tm_caps {
	/* Max size of RNDV header */
	u32 max_rndv_hdr_size;
	/* Max number of entries in tag matching list */
	u32 max_num_tags;
	/* From enum ib_tm_cap_flags */
	u32 flags;
	/* Max number of outstanding list operations */
	u32 max_ops;
	/* Max number of SGE in tag matching entry */
	u32 max_sge;
};

struct ib_cq_init_attr {
	unsigned int	cqe;
	u32		comp_vector;
	u32		flags;
};

enum ib_cq_attr_mask {
	IB_CQ_MODERATE = 1 << 0,
};

struct ib_cq_caps {
	u16	max_cq_moderation_count;
	u16	max_cq_moderation_period;
};

struct ib_dm_mr_attr {
	u64	length;
	u64	offset;
	u32	access_flags;
};

struct ib_dm_alloc_attr {
	u64	length;
	u32	alignment;
	u32	flags;
};
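/*
 * Example (illustrative sketch, not part of the original header): the ODP
 * capability words above are plain bitmasks, so a consumer that needs
 * on-demand paging for RC RDMA WRITE could test them like this.  The helper
 * name is hypothetical.
 */
static inline bool example_rc_odp_write_supported(const struct ib_odp_caps *caps)
{
	return (caps->general_caps & IB_ODP_SUPPORT) &&
	       (caps->per_transport_caps.rc_odp_caps & IB_ODP_SUPPORT_WRITE);
}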
struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	u64			device_cap_flags;
	u64			kernel_cap_flags;
	int			max_send_sge;
	int			max_recv_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	enum ib_atomic_cap	masked_atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	unsigned int		max_fast_reg_page_list_len;
	unsigned int		max_pi_fast_reg_page_list_len;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
	int			sig_prot_cap;
	int			sig_guard_cap;
	struct ib_odp_caps	odp_caps;
	uint64_t		timestamp_mask;
	uint64_t		hca_core_clock; /* in kHz */
	struct ib_rss_caps	rss_caps;
	u32			max_wq_type_rq;
	u32			raw_packet_caps; /* Use ib_raw_packet_caps enum */
	struct ib_tm_caps	tm_caps;
	struct ib_cq_caps	cq_caps;
	u64			max_dm_size;
	/* Max entries for sgl for optimized performance per READ */
	u32			max_sgl_rd;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

enum opa_mtu {
	OPA_MTU_8192 = 6,
	OPA_MTU_10240 = 7
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:
		return 8192;
	case OPA_MTU_10240:
		return 10240;
	default:
		return (ib_mtu_enum_to_int((enum ib_mtu)mtu));
	}
}

static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
{
	if (mtu >= 10240)
		return OPA_MTU_10240;
	else if (mtu >= 8192)
		return OPA_MTU_8192;
	else
		return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_phys_state {
	IB_PORT_PHYS_STATE_SLEEP = 1,
	IB_PORT_PHYS_STATE_POLLING = 2,
	IB_PORT_PHYS_STATE_DISABLED = 3,
	IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
	IB_PORT_PHYS_STATE_LINK_UP = 5,
	IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
	IB_PORT_PHYS_STATE_PHY_TEST = 7,
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_2X	= 16,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_2X:  return  2;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

enum ib_port_speed {
	IB_SPEED_SDR	= 1,
	IB_SPEED_DDR	= 2,
	IB_SPEED_QDR	= 4,
	IB_SPEED_FDR10	= 8,
	IB_SPEED_FDR	= 16,
	IB_SPEED_EDR	= 32,
	IB_SPEED_HDR	= 64,
	IB_SPEED_NDR	= 128,
	IB_SPEED_XDR	= 256,
};

enum ib_stat_flag {
	IB_STAT_FLAG_OPTIONAL = 1 << 0,
};

/**
 * struct rdma_stat_desc
 * @name - The name of the counter
 * @flags - Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
 * @priv - Driver private information; Core code should not use
 */
struct rdma_stat_desc {
	const char *name;
	unsigned int flags;
	const void *priv;
};

/**
 * struct rdma_hw_stats
 * @lock - Mutex to protect parallel write access to lifespan and values
 *   of counters, which are 64 bits and not guaranteed to be written
 *   atomically on 32-bit systems.
 * @timestamp - Used by the core code to track when the last update was
 * @lifespan - Used by the core code to determine how old the counters
 *   should be before being updated again.  Stored in jiffies, defaults
 *   to 10 milliseconds, drivers can override the default by specifying
 *   their own value during their allocation routine.
 * @descs - Array of pointers to static descriptors used for the counters
 *   in directory.
 * @is_disabled - A bitmap to indicate each counter is currently disabled
 *   or not.
 * @num_counters - How many hardware counters there are.  If descs is
 *   shorter than this number, a kernel oops will result.  Driver authors
 *   are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(descs) < num_counters)
 *   in their code to prevent this.
 * @value - Array of u64 counters that are accessed by the sysfs code and
 *   filled in by the drivers get_stats routine
 */
struct rdma_hw_stats {
	struct mutex	lock; /* Protect lifespan and values[] */
	unsigned long	timestamp;
	unsigned long	lifespan;
	const struct rdma_stat_desc *descs;
	unsigned long	*is_disabled;
	int		num_counters;
	u64		value[] __counted_by(num_counters);
};

#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10

struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
		const struct rdma_stat_desc *descs, int num_counters,
		unsigned long lifespan);

void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
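/*
 * Example (illustrative sketch, not part of the original header): following
 * the documentation above, a driver would keep a descriptor table, guard the
 * counter count with BUILD_BUG_ON(), and hand both to
 * rdma_alloc_hw_stats_struct().  All names below are hypothetical.
 */
static inline struct rdma_hw_stats *example_alloc_hw_stats(void)
{
	static const struct rdma_stat_desc example_descs[] = {
		{ .name = "example_rx_packets" },
		{ .name = "example_tx_packets" },
		{ .name = "example_rx_drops", .flags = IB_STAT_FLAG_OPTIONAL },
	};

	/* Catch descriptor/counter count mismatches at build time. */
	BUILD_BUG_ON(ARRAY_SIZE(example_descs) != 3);

	return rdma_alloc_hw_stats_struct(example_descs,
					  ARRAY_SIZE(example_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}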
/* Define bits for the various functionality this port needs to be supported by
 * the core.
 */
/* Management                           0x00000FFF */
#define RDMA_CORE_CAP_IB_MAD		0x00000001
#define RDMA_CORE_CAP_IB_SMI		0x00000002
#define RDMA_CORE_CAP_IB_CM		0x00000004
#define RDMA_CORE_CAP_IW_CM		0x00000008
#define RDMA_CORE_CAP_IB_SA		0x00000010
#define RDMA_CORE_CAP_OPA_MAD		0x00000020

/* Address format                       0x000FF000 */
#define RDMA_CORE_CAP_AF_IB		0x00001000
#define RDMA_CORE_CAP_ETH_AH		0x00002000
#define RDMA_CORE_CAP_OPA_AH		0x00004000
#define RDMA_CORE_CAP_IB_GRH_REQUIRED	0x00008000

/* Protocol                             0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB		0x00100000
#define RDMA_CORE_CAP_PROT_ROCE		0x00200000
#define RDMA_CORE_CAP_PROT_IWARP	0x00400000
#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
#define RDMA_CORE_CAP_PROT_RAW_PACKET	0x01000000
#define RDMA_CORE_CAP_PROT_USNIC	0x02000000

#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
					| RDMA_CORE_CAP_PROT_ROCE     \
					| RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)

#define RDMA_CORE_PORT_IBA_IB          (RDMA_CORE_CAP_PROT_IB  \
					| RDMA_CORE_CAP_IB_MAD \
					| RDMA_CORE_CAP_IB_SMI \
					| RDMA_CORE_CAP_IB_CM  \
					| RDMA_CORE_CAP_IB_SA  \
					| RDMA_CORE_CAP_AF_IB)
#define RDMA_CORE_PORT_IBA_ROCE        (RDMA_CORE_CAP_PROT_ROCE \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP			\
					(RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
					| RDMA_CORE_CAP_IB_MAD  \
					| RDMA_CORE_CAP_IB_CM   \
					| RDMA_CORE_CAP_AF_IB   \
					| RDMA_CORE_CAP_ETH_AH)
#define RDMA_CORE_PORT_IWARP           (RDMA_CORE_CAP_PROT_IWARP \
					| RDMA_CORE_CAP_IW_CM)
#define RDMA_CORE_PORT_INTEL_OPA       (RDMA_CORE_PORT_IBA_IB  \
					| RDMA_CORE_CAP_OPA_MAD)

#define RDMA_CORE_PORT_RAW_PACKET	(RDMA_CORE_CAP_PROT_RAW_PACKET)

#define RDMA_CORE_PORT_USNIC		(RDMA_CORE_CAP_PROT_USNIC)

struct ib_port_attr {
	u64			subnet_prefix;
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	u32			phys_mtu;
	int			gid_tbl_len;
	unsigned int		ip_gids:1;
	/* This is the value from PortInfo CapabilityMask, defined by IBA */
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u32			sm_lid;
	u32			lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u16			active_speed;
	u8			phys_state;
	u16			port_cap_flags2;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

#define IB_DEVICE_NODE_DESC_MAX 64

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[IB_DEVICE_NODE_DESC_MAX];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3),
	IB_PORT_OPA_MASK_CHG	= (1<<4)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER,
	IB_EVENT_GID_CHANGE,
	IB_EVENT_WQ_FATAL,
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event);

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		struct ib_wq	*wq;
		u32		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void		(*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)

struct ib_global_route {
	const struct ib_gid_attr *sgid_attr;
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

union rdma_network_hdr {
	struct ib_grh ibgrh;
	struct {
		/* The IB spec states that if it's IPv4, the IPv4 header
		 * is located in the last 20 bytes of the GRH.
		 */
		u8		reserved[20];
		struct iphdr	roce4grh;
	};
};

#define IB_QPN_MASK		0xFFFFFF

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	cpu_to_be16(0xFFFF)
#define IB_MULTICAST_LID_BASE	cpu_to_be16(0xC000)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10,
	IB_RATE_14_GBPS  = 11,
	IB_RATE_56_GBPS  = 12,
	IB_RATE_112_GBPS = 13,
	IB_RATE_168_GBPS = 14,
	IB_RATE_25_GBPS  = 15,
	IB_RATE_100_GBPS = 16,
	IB_RATE_200_GBPS = 17,
	IB_RATE_300_GBPS = 18,
	IB_RATE_28_GBPS  = 19,
	IB_RATE_50_GBPS  = 20,
	IB_RATE_400_GBPS = 21,
	IB_RATE_600_GBPS = 22,
	IB_RATE_800_GBPS = 23,
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);

/**
 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
 * @rate: rate to convert.
 */
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
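/*
 * Example (illustrative sketch, not part of the original header): combining
 * the documented conversions above, a rate's throughput in Mbit/sec can also
 * be derived from its multiple of the 2.5 Gbit/sec base rate.  The helper
 * name is hypothetical.
 */
static inline int example_rate_mult_to_mbps(enum ib_rate rate)
{
	/* Each multiple of the base rate is 2.5 Gbit/sec == 2500 Mbit/sec. */
	return ib_rate_to_mult(rate) * 2500;
}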
/**
 * enum ib_mr_type - memory region type
 * @IB_MR_TYPE_MEM_REG:   memory region that is used for
 *                        normal registration
 * @IB_MR_TYPE_SG_GAPS:   memory region that is capable of registering
 *                        arbitrary SG lists (without the normal MR
 *                        constraints - see ib_map_mr_sg)
 * @IB_MR_TYPE_DM:        memory region that is used for device
 *                        memory registration
 * @IB_MR_TYPE_USER:      memory region that is used for the user-space
 *                        application
 * @IB_MR_TYPE_DMA:       memory region that is used for DMA operations
 *                        without address translations (VA=PA)
 * @IB_MR_TYPE_INTEGRITY: memory region that is used for
 *                        data integrity operations
 */
enum ib_mr_type {
	IB_MR_TYPE_MEM_REG,
	IB_MR_TYPE_SG_GAPS,
	IB_MR_TYPE_DM,
	IB_MR_TYPE_USER,
	IB_MR_TYPE_DMA,
	IB_MR_TYPE_INTEGRITY,
};

enum ib_mr_status_check {
	IB_MR_CHECK_SIG_STATUS = 1,
};

/**
 * struct ib_mr_status - Memory region status container
 *
 * @fail_status: Bitmask of MR checks status. For each
 *     failed check a corresponding status bit is set.
 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
 *     failure.
 */
struct ib_mr_status {
	u32		    fail_status;
	struct ib_sig_err   sig_err;
};

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);

struct rdma_ah_init_attr {
	struct rdma_ah_attr *ah_attr;
	u32 flags;
	struct net_device *xmit_slave;
};

enum rdma_ah_attr_type {
	RDMA_AH_ATTR_TYPE_UNDEFINED,
	RDMA_AH_ATTR_TYPE_IB,
	RDMA_AH_ATTR_TYPE_ROCE,
	RDMA_AH_ATTR_TYPE_OPA,
};

struct ib_ah_attr {
	u16	dlid;
	u8	src_path_bits;
};

struct roce_ah_attr {
	u8	dmac[ETH_ALEN];
};

struct opa_ah_attr {
	u32	dlid;
	u8	src_path_bits;
	bool	make_grd;
};

struct rdma_ah_attr {
	struct ib_global_route	grh;
	u8			sl;
	u8			static_rate;
	u32			port_num;
	u8			ah_flags;
	enum rdma_ah_attr_type	type;
	union {
		struct ib_ah_attr ib;
		struct roce_ah_attr roce;
		struct opa_ah_attr opa;
	};
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);

enum ib_wc_opcode {
	IB_WC_SEND = IB_UVERBS_WC_SEND,
	IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
	IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
	IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
	IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
	IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
	IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
	IB_WC_LSO = IB_UVERBS_WC_TSO,
	IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
	IB_WC_REG_MR,
	IB_WC_MASKED_COMP_SWAP,
	IB_WC_MASKED_FETCH_ADD,
	IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV	= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1),
	IB_WC_WITH_INVALIDATE	= (1<<2),
	IB_WC_IP_CSUM_OK	= (1<<3),
	IB_WC_WITH_SMAC		= (1<<4),
	IB_WC_WITH_VLAN		= (1<<5),
	IB_WC_WITH_NETWORK_HDR_TYPE	= (1<<6),
};

struct ib_wc {
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
	u32			src_qp;
	u32			slid;
	int			wc_flags;
	u16			pkey_index;
	u8			sl;
	u8			dlid_path_bits;
	u32			port_num; /* valid only for DR SMPs on switches */
	u8			smac[ETH_ALEN];
	u16			vlan_id;
	u8			network_hdr_type;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_type {
	IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
	IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
	IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};

static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
{
	return srq_type == IB_SRQT_XRC ||
	       srq_type == IB_SRQT_TM;
}

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
	enum ib_srq_type	srq_type;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
			} xrc;

			struct {
				u32	max_num_tags;
			} tag_matching;
		};
	} ext;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;

	/*
	 * Maximum number of rdma_rw_ctx structures in flight at a time.
	 * ib_create_qp() will calculate the right amount of needed WRs
	 * and MRs based on this.
	 */
	u32	max_rdma_ctxs;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};
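/*
 * Example (illustrative sketch, not part of the original header): per the
 * comment on IB_WC_RECV above, a completion consumer can distinguish receive
 * completions with a simple bit test.  The helper name is hypothetical.
 */
static inline bool example_wc_is_recv(const struct ib_wc *wc)
{
	/* IB_WC_RECV and IB_WC_RECV_RDMA_WITH_IMM both have bit 7 set. */
	return wc->opcode & IB_WC_RECV;
}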
enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC = IB_UVERBS_QPT_RC,
	IB_QPT_UC = IB_UVERBS_QPT_UC,
	IB_QPT_UD = IB_UVERBS_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETHERTYPE,
	IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
	IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
	IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
	IB_QPT_MAX,
	IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
	/* Reserve a range for qp types internal to the low level driver.
	 * These qp types will not be visible at the IB core layer, so the
	 * IB_QPT_MAX usages should not be affected in the core layer
	 */
	IB_QPT_RESERVED1 = 0x1000,
	IB_QPT_RESERVED2,
	IB_QPT_RESERVED3,
	IB_QPT_RESERVED4,
	IB_QPT_RESERVED5,
	IB_QPT_RESERVED6,
	IB_QPT_RESERVED7,
	IB_QPT_RESERVED8,
	IB_QPT_RESERVED9,
	IB_QPT_RESERVED10,
};

enum ib_qp_create_flags {
	IB_QP_CREATE_IPOIB_UD_LSO		= 1 << 0,
	IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	=
		IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	IB_QP_CREATE_CROSS_CHANNEL		= 1 << 2,
	IB_QP_CREATE_MANAGED_SEND		= 1 << 3,
	IB_QP_CREATE_MANAGED_RECV		= 1 << 4,
	IB_QP_CREATE_NETIF_QP			= 1 << 5,
	IB_QP_CREATE_INTEGRITY_EN		= 1 << 6,
	IB_QP_CREATE_NETDEV_USE			= 1 << 7,
	IB_QP_CREATE_SCATTER_FCS		=
		IB_UVERBS_QP_CREATE_SCATTER_FCS,
	IB_QP_CREATE_CVLAN_STRIPPING		=
		IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
	IB_QP_CREATE_SOURCE_QPN			= 1 << 10,
	IB_QP_CREATE_PCI_WRITE_END_PADDING	=
		IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
	/* reserve bits 26-31 for low level drivers' internal use */
	IB_QP_CREATE_RESERVED_START		= 1 << 26,
	IB_QP_CREATE_RESERVED_END		= 1 << 31,
};

/*
 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
 * callback to destroy the passed in QP.
 */

struct ib_qp_init_attr {
	/* This callback occurs in workqueue context */
	void		      (*event_handler)(struct ib_event *, void *);

	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u32			create_flags;

	/*
	 * Only needed for special QP types, or when using the RW API.
	 */
	u32			port_num;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	u32			source_qpn;
};

struct ib_qp_open_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20),
	IB_QP_RESERVED1			= (1<<21),
	IB_QP_RESERVED2			= (1<<22),
	IB_QP_RESERVED3			= (1<<23),
	IB_QP_RESERVED4			= (1<<24),
	IB_QP_RATE_LIMIT		= (1<<25),

	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

enum ib_mw_type {
	IB_MW_TYPE_1 = 1,
	IB_MW_TYPE_2 = 2
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct rdma_ah_attr	ah_attr;
	struct rdma_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u32			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u32			alt_port_num;
	u8			alt_timeout;
	u32			rate_limit;
	struct net_device	*xmit_slave;
};
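/*
 * Example (illustrative sketch, not part of the original header): the
 * attribute mask above selects which ib_qp_attr fields a modify operation
 * consumes.  Moving a QP to the error state only needs qp_state plus
 * IB_QP_STATE; the prepared pair would then be passed to ib_modify_qp(),
 * which is declared later in this header.  The helper name is hypothetical.
 */
static inline void example_prepare_error_transition(struct ib_qp_attr *attr,
						    int *attr_mask)
{
	/* Only qp_state is consumed when the mask is IB_QP_STATE. */
	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_ERR;
	*attr_mask = IB_QP_STATE;
}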
enum ib_wr_opcode {
	/* These are shared with userspace */
	IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND = IB_UVERBS_WR_SEND,
	IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
	IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
	IB_WR_LSO = IB_UVERBS_WR_TSO,
	IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
	IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
	IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
	IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
		IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
	IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
		IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
	IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,

	/* These are kernel only and can not be issued by userspace */
	IB_WR_REG_MR = 0x20,
	IB_WR_REG_MR_INTEGRITY,

	/* reserve values for low level drivers' internal use.
	 * These values will not be used at all in the ib core layer.
	 */
	IB_WR_RESERVED1 = 0xf0,
	IB_WR_RESERVED2,
	IB_WR_RESERVED3,
	IB_WR_RESERVED4,
	IB_WR_RESERVED5,
	IB_WR_RESERVED6,
	IB_WR_RESERVED7,
	IB_WR_RESERVED8,
	IB_WR_RESERVED9,
	IB_WR_RESERVED10,
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4),

	/* reserve bits 26-31 for low level drivers' internal use */
	IB_SEND_RESERVED_START	= (1 << 26),
	IB_SEND_RESERVED_END	= (1 << 31),
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_cqe {
	void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	union {
		__be32		imm_data;
		u32		invalidate_rkey;
	} ex;
};

struct ib_rdma_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u32			rkey;
};

static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;
	u64			swap;
	u64			compare_add_mask;
	u64			swap_mask;
	u32			rkey;
};

static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}

struct ib_ud_wr {
	struct ib_send_wr	wr;
	struct ib_ah		*ah;
	void			*header;
	int			hlen;
	int			mss;
	u32			remote_qpn;
	u32			remote_qkey;
	u16			pkey_index; /* valid for GSI only */
	u32			port_num; /* valid for DR SMPs on switch only */
};

static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_ud_wr, wr);
}

struct ib_reg_wr {
	struct ib_send_wr	wr;
	struct ib_mr		*mr;
	u32			key;
	int			access;
};

static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_reg_wr, wr);
}

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	union {
		u64		wr_id;
		struct ib_cqe	*wr_cqe;
	};
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
	IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
	IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
	IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
	IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
	IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
	IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
	IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
	IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
	IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
	IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,

	IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
	IB_ACCESS_SUPPORTED =
		((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
};

/*
 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
 * are hidden here instead of a uapi header!
 */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2),
	IB_MR_REREG_SUPPORTED	= ((IB_MR_REREG_ACCESS << 1) - 1)
};

struct ib_umem;

enum rdma_remove_reason {
	/*
	 * Userspace requested uobject deletion or initial try
	 * to remove uobject via cleanup. Call could fail
	 */
	RDMA_REMOVE_DESTROY,
	/* Context deletion. This call should delete the actual object itself */
	RDMA_REMOVE_CLOSE,
	/* Driver is being hot-unplugged. This call should delete the actual object itself */
	RDMA_REMOVE_DRIVER_REMOVE,
	/* uobj is being cleaned-up before being committed */
	RDMA_REMOVE_ABORT,
	/* The driver failed to destroy the uobject and is being disconnected */
	RDMA_REMOVE_DRIVER_FAILURE,
};

struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
	struct rdma_cgroup	*cg;		/* owner rdma cgroup */
#endif
};

struct ib_ucontext {
	struct ib_device       *device;
	struct ib_uverbs_file  *ufile;

	struct ib_rdmacg_object	cg_obj;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
	struct xarray mmap_xa;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	/* ufile & ucontext owning this object */
	struct ib_uverbs_file  *ufile;
	/* FIXME, save memory: ufile->context == context */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	struct ib_rdmacg_object	cg_obj;		/* rdmacg object */
	int			id;		/* index into kernel idr */
	struct kref		ref;
	atomic_t		usecnt;		/* protects exclusive access */
	struct rcu_head		rcu;		/* kfree_rcu() overhead */

	const struct uverbs_api_object *uapi_object;
};

struct ib_udata {
	const void __user *inbuf;
	void __user *outbuf;
	size_t	inlen;
	size_t	outlen;
};

struct ib_pd {
	u32			local_dma_lkey;
	u32			flags;
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt; /* count all resources */

	u32			unsafe_global_rkey;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct ib_mr	       *__internal_mr;
	struct rdma_restrack_entry res;
};
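/*
 * Example (illustrative sketch, not part of the original header): a single
 * RDMA WRITE work request is an ib_rdma_wr wrapping the common ib_send_wr,
 * pointing at one local ib_sge and one remote buffer; it would then be posted
 * with ib_post_send(), declared later in this header.  All names and values
 * below are hypothetical.
 */
static inline void example_build_rdma_write(struct ib_rdma_wr *wr,
					    struct ib_sge *sge,
					    u64 local_addr, u32 local_lkey,
					    u64 remote_addr, u32 remote_rkey,
					    u32 len)
{
	sge->addr = local_addr;
	sge->length = len;
	sge->lkey = local_lkey;

	memset(wr, 0, sizeof(*wr));
	wr->wr.opcode = IB_WR_RDMA_WRITE;
	wr->wr.sg_list = sge;
	wr->wr.num_sge = 1;
	wr->wr.send_flags = IB_SEND_SIGNALED;	/* request a completion */
	wr->remote_addr = remote_addr;
	wr->rkey = remote_rkey;
}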
struct ib_xrcd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all exposed resources */
	struct inode	       *inode;
	struct rw_semaphore	tgt_qps_rwsem;
	struct xarray		tgt_qps;
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	const struct ib_gid_attr *sgid_attr;
	enum rdma_ah_attr_type	type;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

enum ib_poll_context {
	IB_POLL_SOFTIRQ,	   /* poll from softirq context */
	IB_POLL_WORKQUEUE,	   /* poll from workqueue */
	IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
	IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,

	IB_POLL_DIRECT,		   /* caller context, no hw completions */
};

struct ib_cq {
	struct ib_device       *device;
	struct ib_ucq_object   *uobject;
	ib_comp_handler		comp_handler;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	unsigned int		cqe_used;
	atomic_t		usecnt; /* count number of work queues */
	enum ib_poll_context	poll_ctx;
	struct ib_wc	       *wc;
	struct list_head	pool_entry;
	union {
		struct irq_poll		iop;
		struct work_struct	work;
	};
	struct workqueue_struct *comp_wq;
	struct dim *dim;

	/* updated only by trace points */
	ktime_t timestamp;
	u8 interrupt:1;
	u8 shared:1;
	unsigned int comp_vector;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_usrq_object  *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	enum ib_srq_type	srq_type;
	atomic_t		usecnt;

	struct {
		struct ib_cq   *cq;
		union {
			struct {
				struct ib_xrcd *xrcd;
				u32		srq_num;
			} xrc;
		};
	} ext;

	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

enum ib_raw_packet_caps {
	/*
	 * Stripping the cvlan from an incoming packet and reporting it in the
	 * matching work completion is supported.
	 */
	IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
		IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
	/*
	 * Scattering the FCS field of an incoming packet to host memory is
	 * supported.
	 */
	IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
	/* Checksum offloads are supported (for both send and receive). */
	IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
	/*
	 * When a packet is received for an RQ with no receive WQEs, the
	 * packet processing is delayed.
	 */
	IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
};

enum ib_wq_type {
	IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};

enum ib_wq_state {
	IB_WQS_RESET,
	IB_WQS_RDY,
	IB_WQS_ERR
};

struct ib_wq {
	struct ib_device       *device;
	struct ib_uwq_object   *uobject;
	void		       *wq_context;
	void		      (*event_handler)(struct ib_event *, void *);
	struct ib_pd	       *pd;
	struct ib_cq	       *cq;
	u32			wq_num;
	enum ib_wq_state	state;
	enum ib_wq_type		wq_type;
	atomic_t		usecnt;
};

enum ib_wq_flags {
	IB_WQ_FLAGS_CVLAN_STRIPPING	= IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
	IB_WQ_FLAGS_SCATTER_FCS		= IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
	IB_WQ_FLAGS_DELAY_DROP		= IB_UVERBS_WQ_FLAGS_DELAY_DROP,
	IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
		IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};

struct ib_wq_init_attr {
	void		       *wq_context;
	enum ib_wq_type		wq_type;
	u32			max_wr;
	u32			max_sge;
	struct ib_cq	       *cq;
	void		      (*event_handler)(struct ib_event *, void *);
	u32			create_flags; /* Use enum ib_wq_flags */
};

enum ib_wq_attr_mask {
	IB_WQ_STATE		= 1 << 0,
	IB_WQ_CUR_STATE		= 1 << 1,
	IB_WQ_FLAGS		= 1 << 2,
};

struct ib_wq_attr {
	enum ib_wq_state	wq_state;
	enum ib_wq_state	curr_wq_state;
	u32			flags; /* Use enum ib_wq_flags */
	u32			flags_mask; /* Use enum ib_wq_flags */
};

struct ib_rwq_ind_table {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
	u32			ind_tbl_num;
	u32			log_ind_tbl_size;
	struct ib_wq	      **ind_tbl;
};

struct ib_rwq_ind_table_init_attr {
	u32		log_ind_tbl_size;
	/* Each entry is a pointer to Receive Work Queue */
	struct ib_wq  **ind_tbl;
};

enum port_pkey_state {
	IB_PORT_PKEY_NOT_VALID = 0,
	IB_PORT_PKEY_VALID = 1,
	IB_PORT_PKEY_LISTED = 2,
};

struct ib_qp_security;

struct ib_port_pkey {
	enum port_pkey_state	state;
	u16			pkey_index;
	u32			port_num;
	struct list_head	qp_list;
	struct list_head	to_error_list;
	struct ib_qp_security  *sec;
};

struct ib_ports_pkeys {
	struct ib_port_pkey	main;
	struct ib_port_pkey	alt;
};

struct ib_qp_security {
	struct ib_qp	       *qp;
	struct ib_device       *dev;
	/* Hold this mutex when changing port and pkey settings. */
	struct mutex		mutex;
	struct ib_ports_pkeys  *ports_pkeys;
	/* A list of all open shared QP handles.  Required to enforce security
	 * properly for all users of a shared QP.
	 */
	struct list_head	shared_qp_list;
	void		       *security;
	bool			destroying;
	atomic_t		error_list_count;
	struct completion	error_complete;
	int			error_comps_pending;
};
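/*
 * Example (illustrative sketch, not part of the original header): kernel
 * consumers typically embed a struct ib_cqe next to their own request state
 * and recover it from the work completion with container_of(); the CQ itself
 * is allocated with one of the ib_poll_context modes above.  The structure
 * and helper names here are hypothetical.
 */
struct example_request {
	struct ib_cqe	cqe;	/* cqe.done points at the completion callback */
	void		*payload;
};

static inline struct example_request *example_req_from_wc(struct ib_wc *wc)
{
	/* wr_cqe was set to &req->cqe when the work request was posted. */
	return container_of(wc->wr_cqe, struct example_request, cqe);
}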
/*
 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
 * @max_read_sge:  Maximum SGE elements per RDMA READ request.
 */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	spinlock_t		mr_lock;
	int			mrs_used;
	struct list_head	rdma_mrs;
	struct list_head	sig_mrs;
	struct ib_srq	       *srq;
	struct completion	srq_completion;
	struct ib_xrcd	       *xrcd; /* XRC TGT QPs only */
	struct list_head	xrcd_list;

	/* count times opened, mcast attaches, flow attaches */
	atomic_t		usecnt;
	struct list_head	open_list;
	struct ib_qp	       *real_qp;
	struct ib_uqp_object   *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		      (*registered_event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	/* sgid_attrs associated with the AV's */
	const struct ib_gid_attr *av_sgid_attr;
	const struct ib_gid_attr *alt_path_sgid_attr;
	u32			qp_num;
	u32			max_write_sge;
	u32			max_read_sge;
	enum ib_qp_type		qp_type;
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_qp_security  *qp_sec;
	u32			port;

	bool			integrity_en;
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;

	/* The counter the qp is bound to */
	struct rdma_counter    *counter;
};

struct ib_dm {
	struct ib_device       *device;
	u32			length;
	u32			flags;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;
};

struct ib_mr {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	u32			lkey;
	u32			rkey;
	u64			iova;
	u64			length;
	unsigned int		page_size;
	enum ib_mr_type		type;
	bool			need_inval;
	union {
		struct ib_uobject	*uobject;	/* user */
		struct list_head	qp_entry;	/* FR */
	};

	struct ib_dm	       *dm;
	struct ib_sig_attrs    *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
	/*
	 * Implementation details of the RDMA core, don't use in drivers:
	 */
	struct rdma_restrack_entry res;
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
	enum ib_mw_type		type;
};

/* Supported steering options */
enum ib_flow_attr_type {
	/* steering according to rule specifications */
	IB_FLOW_ATTR_NORMAL		= 0x0,
	/* default unicast and multicast rule -
	 * receive all Eth traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_ALL_DEFAULT	= 0x1,
	/* default multicast rule -
	 * receive all Eth multicast traffic which isn't steered to any QP
	 */
	IB_FLOW_ATTR_MC_DEFAULT		= 0x2,
	/* sniffer rule - receive all port traffic */
	IB_FLOW_ATTR_SNIFFER		= 0x3
};

/* Supported steering header types */
enum ib_flow_spec_type {
	/* L2 headers*/
	IB_FLOW_SPEC_ETH		= 0x20,
	IB_FLOW_SPEC_IB			= 0x22,
	/* L3 header*/
	IB_FLOW_SPEC_IPV4		= 0x30,
	IB_FLOW_SPEC_IPV6		= 0x31,
	IB_FLOW_SPEC_ESP		= 0x34,
	/* L4 headers*/
	IB_FLOW_SPEC_TCP		= 0x40,
	IB_FLOW_SPEC_UDP		= 0x41,
	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
	IB_FLOW_SPEC_GRE		= 0x51,
	IB_FLOW_SPEC_MPLS		= 0x60,
	IB_FLOW_SPEC_INNER		= 0x100,
	/* Actions */
	IB_FLOW_SPEC_ACTION_TAG		= 0x1000,
	IB_FLOW_SPEC_ACTION_DROP	= 0x1001,
	IB_FLOW_SPEC_ACTION_HANDLE	= 0x1002,
	IB_FLOW_SPEC_ACTION_COUNT	= 0x1003,
};
#define IB_FLOW_SPEC_LAYER_MASK	0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 10

enum ib_flow_flags {
	IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1,	/* Continue match, no steal */
	IB_FLOW_ATTR_FLAGS_EGRESS    = 1UL << 2,	/* Egress flow */
	IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 3		/* Must be last */
};

struct ib_flow_eth_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
};

struct ib_flow_spec_eth {
	u32			  type;
	u16			  size;
	struct ib_flow_eth_filter val;
	struct ib_flow_eth_filter mask;
};

struct ib_flow_ib_filter {
	__be16	dlid;
	__u8	sl;
};

struct ib_flow_spec_ib {
	u32			 type;
	u16			 size;
	struct ib_flow_ib_filter val;
	struct ib_flow_ib_filter mask;
};

/* IPv4 header flags */
enum ib_ipv4_flags {
	IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
	IB_IPV4_MORE_FRAG = 0x4  /* All fragmented packets except the
				    last have this flag set */
};

struct ib_flow_ipv4_filter {
	__be32	src_ip;
	__be32	dst_ip;
	u8	proto;
	u8	tos;
	u8	ttl;
	u8	flags;
};

struct ib_flow_spec_ipv4 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv4_filter val;
	struct ib_flow_ipv4_filter mask;
};

struct ib_flow_ipv6_filter {
	u8	src_ip[16];
	u8	dst_ip[16];
	__be32	flow_label;
	u8	next_hdr;
	u8	traffic_class;
	u8	hop_limit;
} __packed;

struct ib_flow_spec_ipv6 {
	u32			   type;
	u16			   size;
	struct ib_flow_ipv6_filter val;
	struct ib_flow_ipv6_filter mask;
};

struct ib_flow_tcp_udp_filter {
	__be16	dst_port;
	__be16	src_port;
};

struct ib_flow_spec_tcp_udp {
	u32			      type;
	u16			      size;
	struct ib_flow_tcp_udp_filter val;
	struct ib_flow_tcp_udp_filter mask;
};

struct ib_flow_tunnel_filter {
	__be32	tunnel_id;
};

/* ib_flow_spec_tunnel describes the VXLAN tunnel;
 * the tunnel_id in val carries the VNI value.
 */
struct ib_flow_spec_tunnel {
	u32			     type;
	u16			     size;
	struct ib_flow_tunnel_filter val;
	struct ib_flow_tunnel_filter mask;
};

struct ib_flow_esp_filter {
	__be32	spi;
	__be32	seq;
};

struct ib_flow_spec_esp {
	u32			  type;
	u16			  size;
	struct ib_flow_esp_filter val;
	struct ib_flow_esp_filter mask;
};

struct ib_flow_gre_filter {
	__be16	c_ks_res0_ver;
	__be16	protocol;
	__be32	key;
};

struct ib_flow_spec_gre {
	u32			  type;
	u16			  size;
	struct ib_flow_gre_filter val;
	struct ib_flow_gre_filter mask;
};

struct ib_flow_mpls_filter {
	__be32	tag;
};

struct ib_flow_spec_mpls {
	u32			   type;
	u16			   size;
	struct ib_flow_mpls_filter val;
	struct ib_flow_mpls_filter mask;
};

struct ib_flow_spec_action_tag {
	enum ib_flow_spec_type	type;
	u16			size;
	u32			tag_id;
};

struct ib_flow_spec_action_drop {
	enum ib_flow_spec_type	type;
	u16			size;
};

struct ib_flow_spec_action_handle {
	enum ib_flow_spec_type	type;
	u16			size;
	struct ib_flow_action  *act;
};

enum ib_counters_description {
	IB_COUNTER_PACKETS,
	IB_COUNTER_BYTES,
};

struct ib_flow_spec_action_count {
	enum ib_flow_spec_type	type;
	u16			size;
	struct ib_counters     *counters;
};
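/*
 * Example (illustrative sketch, not part of the original header): a steering
 * rule is described by typed spec structures such as ib_flow_spec_eth above;
 * each carries its type, its own size, a value and a mask.  The helper below
 * matches one destination MAC exactly and is hypothetical.
 */
static inline void example_fill_eth_spec(struct ib_flow_spec_eth *spec,
					 const u8 *dst_mac)
{
	memset(spec, 0, sizeof(*spec));
	spec->type = IB_FLOW_SPEC_ETH;
	spec->size = sizeof(*spec);
	memcpy(spec->val.dst_mac, dst_mac, ETH_ALEN);
	memset(spec->mask.dst_mac, 0xff, ETH_ALEN);	/* match all 6 bytes */
}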
2069 struct ib_flow_spec_ipv4 ipv4; 2070 struct ib_flow_spec_tcp_udp tcp_udp; 2071 struct ib_flow_spec_ipv6 ipv6; 2072 struct ib_flow_spec_tunnel tunnel; 2073 struct ib_flow_spec_esp esp; 2074 struct ib_flow_spec_gre gre; 2075 struct ib_flow_spec_mpls mpls; 2076 struct ib_flow_spec_action_tag flow_tag; 2077 struct ib_flow_spec_action_drop drop; 2078 struct ib_flow_spec_action_handle action; 2079 struct ib_flow_spec_action_count flow_count; 2080 }; 2081 2082 struct ib_flow_attr { 2083 enum ib_flow_attr_type type; 2084 u16 size; 2085 u16 priority; 2086 u32 flags; 2087 u8 num_of_specs; 2088 u32 port; 2089 union ib_flow_spec flows[]; 2090 }; 2091 2092 struct ib_flow { 2093 struct ib_qp *qp; 2094 struct ib_device *device; 2095 struct ib_uobject *uobject; 2096 }; 2097 2098 enum ib_flow_action_type { 2099 IB_FLOW_ACTION_UNSPECIFIED, 2100 IB_FLOW_ACTION_ESP = 1, 2101 }; 2102 2103 struct ib_flow_action_attrs_esp_keymats { 2104 enum ib_uverbs_flow_action_esp_keymat protocol; 2105 union { 2106 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm; 2107 } keymat; 2108 }; 2109 2110 struct ib_flow_action_attrs_esp_replays { 2111 enum ib_uverbs_flow_action_esp_replay protocol; 2112 union { 2113 struct ib_uverbs_flow_action_esp_replay_bmp bmp; 2114 } replay; 2115 }; 2116 2117 enum ib_flow_action_attrs_esp_flags { 2118 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags 2119 * This is done in order to share the same flags between user-space and 2120 * kernel and spare an unnecessary translation. 2121 */ 2122 2123 /* Kernel flags */ 2124 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32, 2125 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33, 2126 }; 2127 2128 struct ib_flow_spec_list { 2129 struct ib_flow_spec_list *next; 2130 union ib_flow_spec spec; 2131 }; 2132 2133 struct ib_flow_action_attrs_esp { 2134 struct ib_flow_action_attrs_esp_keymats *keymat; 2135 struct ib_flow_action_attrs_esp_replays *replay; 2136 struct ib_flow_spec_list *encap; 2137 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled. 2138 * Value of 0 is a valid value. 
2139 */ 2140 u32 esn; 2141 u32 spi; 2142 u32 seq; 2143 u32 tfc_pad; 2144 /* Use enum ib_flow_action_attrs_esp_flags */ 2145 u64 flags; 2146 u64 hard_limit_pkts; 2147 }; 2148 2149 struct ib_flow_action { 2150 struct ib_device *device; 2151 struct ib_uobject *uobject; 2152 enum ib_flow_action_type type; 2153 atomic_t usecnt; 2154 }; 2155 2156 struct ib_mad; 2157 2158 enum ib_process_mad_flags { 2159 IB_MAD_IGNORE_MKEY = 1, 2160 IB_MAD_IGNORE_BKEY = 2, 2161 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY 2162 }; 2163 2164 enum ib_mad_result { 2165 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ 2166 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ 2167 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ 2168 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ 2169 }; 2170 2171 struct ib_port_cache { 2172 u64 subnet_prefix; 2173 struct ib_pkey_cache *pkey; 2174 struct ib_gid_table *gid; 2175 u8 lmc; 2176 enum ib_port_state port_state; 2177 enum ib_port_state last_port_state; 2178 }; 2179 2180 struct ib_port_immutable { 2181 int pkey_tbl_len; 2182 int gid_tbl_len; 2183 u32 core_cap_flags; 2184 u32 max_mad_size; 2185 }; 2186 2187 struct ib_port_data { 2188 struct ib_device *ib_dev; 2189 2190 struct ib_port_immutable immutable; 2191 2192 spinlock_t pkey_list_lock; 2193 2194 spinlock_t netdev_lock; 2195 2196 struct list_head pkey_list; 2197 2198 struct ib_port_cache cache; 2199 2200 struct net_device __rcu *netdev; 2201 netdevice_tracker netdev_tracker; 2202 struct hlist_node ndev_hash_link; 2203 struct rdma_port_counter port_counter; 2204 struct ib_port *sysfs; 2205 }; 2206 2207 /* rdma netdev type - specifies protocol type */ 2208 enum rdma_netdev_t { 2209 RDMA_NETDEV_OPA_VNIC, 2210 RDMA_NETDEV_IPOIB, 2211 }; 2212 2213 /** 2214 * struct rdma_netdev - rdma netdev 2215 * For cases where netstack interfacing is required. 2216 */ 2217 struct rdma_netdev { 2218 void *clnt_priv; 2219 struct ib_device *hca; 2220 u32 port_num; 2221 int mtu; 2222 2223 /* 2224 * cleanup function must be specified. 2225 * FIXME: This is only used for OPA_VNIC and that usage should be 2226 * removed too. 
2227 */ 2228 void (*free_rdma_netdev)(struct net_device *netdev); 2229 2230 /* control functions */ 2231 void (*set_id)(struct net_device *netdev, int id); 2232 /* send packet */ 2233 int (*send)(struct net_device *dev, struct sk_buff *skb, 2234 struct ib_ah *address, u32 dqpn); 2235 /* multicast */ 2236 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca, 2237 union ib_gid *gid, u16 mlid, 2238 int set_qkey, u32 qkey); 2239 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca, 2240 union ib_gid *gid, u16 mlid); 2241 /* timeout */ 2242 void (*tx_timeout)(struct net_device *dev, unsigned int txqueue); 2243 }; 2244 2245 struct rdma_netdev_alloc_params { 2246 size_t sizeof_priv; 2247 unsigned int txqs; 2248 unsigned int rxqs; 2249 void *param; 2250 2251 int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num, 2252 struct net_device *netdev, void *param); 2253 }; 2254 2255 struct ib_odp_counters { 2256 atomic64_t faults; 2257 atomic64_t faults_handled; 2258 atomic64_t invalidations; 2259 atomic64_t invalidations_handled; 2260 atomic64_t prefetch; 2261 }; 2262 2263 struct ib_counters { 2264 struct ib_device *device; 2265 struct ib_uobject *uobject; 2266 /* num of objects attached */ 2267 atomic_t usecnt; 2268 }; 2269 2270 struct ib_counters_read_attr { 2271 u64 *counters_buff; 2272 u32 ncounters; 2273 u32 flags; /* use enum ib_read_counters_flags */ 2274 }; 2275 2276 struct uverbs_attr_bundle; 2277 struct iw_cm_id; 2278 struct iw_cm_conn_param; 2279 2280 #define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \ 2281 .size_##ib_struct = \ 2282 (sizeof(struct drv_struct) + \ 2283 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \ 2284 BUILD_BUG_ON_ZERO( \ 2285 !__same_type(((struct drv_struct *)NULL)->member, \ 2286 struct ib_struct))) 2287 2288 #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \ 2289 ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \ 2290 gfp, false)) 2291 2292 #define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \ 2293 ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \ 2294 GFP_KERNEL, true)) 2295 2296 #define rdma_zalloc_drv_obj(ib_dev, ib_type) \ 2297 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL) 2298 2299 #define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct 2300 2301 struct rdma_user_mmap_entry { 2302 struct kref ref; 2303 struct ib_ucontext *ucontext; 2304 unsigned long start_pgoff; 2305 size_t npages; 2306 bool driver_removed; 2307 }; 2308 2309 /* Return the offset (in bytes) the user should pass to libc's mmap() */ 2310 static inline u64 2311 rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry) 2312 { 2313 return (u64)entry->start_pgoff << PAGE_SHIFT; 2314 } 2315 2316 /** 2317 * struct ib_device_ops - InfiniBand device operations 2318 * This structure defines all the InfiniBand device operations, providers will 2319 * need to define the supported operations, otherwise they will be set to null. 2320 */ 2321 struct ib_device_ops { 2322 struct module *owner; 2323 enum rdma_driver_id driver_id; 2324 u32 uverbs_abi_ver; 2325 unsigned int uverbs_no_driver_id_binding:1; 2326 2327 /* 2328 * NOTE: New drivers should not make use of device_group; instead new 2329 * device parameter should be exposed via netlink command. This 2330 * mechanism exists only for existing drivers. 
2331 */ 2332 const struct attribute_group *device_group; 2333 const struct attribute_group **port_groups; 2334 2335 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr, 2336 const struct ib_send_wr **bad_send_wr); 2337 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, 2338 const struct ib_recv_wr **bad_recv_wr); 2339 void (*drain_rq)(struct ib_qp *qp); 2340 void (*drain_sq)(struct ib_qp *qp); 2341 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc); 2342 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 2343 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags); 2344 int (*post_srq_recv)(struct ib_srq *srq, 2345 const struct ib_recv_wr *recv_wr, 2346 const struct ib_recv_wr **bad_recv_wr); 2347 int (*process_mad)(struct ib_device *device, int process_mad_flags, 2348 u32 port_num, const struct ib_wc *in_wc, 2349 const struct ib_grh *in_grh, 2350 const struct ib_mad *in_mad, struct ib_mad *out_mad, 2351 size_t *out_mad_size, u16 *out_mad_pkey_index); 2352 int (*query_device)(struct ib_device *device, 2353 struct ib_device_attr *device_attr, 2354 struct ib_udata *udata); 2355 int (*modify_device)(struct ib_device *device, int device_modify_mask, 2356 struct ib_device_modify *device_modify); 2357 void (*get_dev_fw_str)(struct ib_device *device, char *str); 2358 const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev, 2359 int comp_vector); 2360 int (*query_port)(struct ib_device *device, u32 port_num, 2361 struct ib_port_attr *port_attr); 2362 int (*modify_port)(struct ib_device *device, u32 port_num, 2363 int port_modify_mask, 2364 struct ib_port_modify *port_modify); 2365 /** 2366 * The following mandatory functions are used only at device 2367 * registration. Keep functions such as these at the end of this 2368 * structure to avoid cache line misses when accessing struct ib_device 2369 * in fast paths. 2370 */ 2371 int (*get_port_immutable)(struct ib_device *device, u32 port_num, 2372 struct ib_port_immutable *immutable); 2373 enum rdma_link_layer (*get_link_layer)(struct ib_device *device, 2374 u32 port_num); 2375 /** 2376 * When calling get_netdev, the HW vendor's driver should return the 2377 * net device of device @device at port @port_num or NULL if such 2378 * a net device doesn't exist. The vendor driver should call dev_hold 2379 * on this net device. The HW vendor's device driver must guarantee 2380 * that this function returns NULL before the net device has finished 2381 * NETDEV_UNREGISTER state. 2382 */ 2383 struct net_device *(*get_netdev)(struct ib_device *device, 2384 u32 port_num); 2385 /** 2386 * rdma netdev operation 2387 * 2388 * Drivers implementing alloc_rdma_netdev or rdma_netdev_get_params 2389 * must return -EOPNOTSUPP if they don't support the specified type. 2390 */ 2391 struct net_device *(*alloc_rdma_netdev)( 2392 struct ib_device *device, u32 port_num, enum rdma_netdev_t type, 2393 const char *name, unsigned char name_assign_type, 2394 void (*setup)(struct net_device *)); 2395 2396 int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num, 2397 enum rdma_netdev_t type, 2398 struct rdma_netdev_alloc_params *params); 2399 /** 2400 * query_gid should return the GID value for @device when the @port_num 2401 * link layer is either IB or iWARP. It is a no-op if the @port_num port 2402 * uses the RoCE link layer.
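 *
 * A minimal driver-side sketch (the foo_ names and FOO_GID_TABLE_LEN are
 * hypothetical, not part of this API) that serves query_gid from a GID
 * table the driver maintains per port:
 *
 *	static int foo_query_gid(struct ib_device *ibdev, u32 port_num,
 *				 int index, union ib_gid *gid)
 *	{
 *		struct foo_dev *fdev = container_of(ibdev, struct foo_dev,
 *						    ibdev);
 *
 *		if (index < 0 || index >= FOO_GID_TABLE_LEN)
 *			return -EINVAL;
 *		*gid = fdev->gid_table[port_num][index];
 *		return 0;
 *	}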
2403 */ 2404 int (*query_gid)(struct ib_device *device, u32 port_num, int index, 2405 union ib_gid *gid); 2406 /** 2407 * When calling add_gid, the HW vendor's driver should add the gid 2408 * of device of port at gid index available at @attr. Meta-info of 2409 * that gid (for example, the network device related to this gid) is 2410 * available at @attr. @context allows the HW vendor driver to store 2411 * extra information together with a GID entry. The HW vendor driver may 2412 * allocate memory to contain this information and store it in @context 2413 * when a new GID entry is written to. Params are consistent until the 2414 * next call of add_gid or delete_gid. The function should return 0 on 2415 * success or error otherwise. The function could be called 2416 * concurrently for different ports. This function is only called when 2417 * roce_gid_table is used. 2418 */ 2419 int (*add_gid)(const struct ib_gid_attr *attr, void **context); 2420 /** 2421 * When calling del_gid, the HW vendor's driver should delete the 2422 * gid of device @device at gid index gid_index of port port_num 2423 * available in @attr. 2424 * Upon the deletion of a GID entry, the HW vendor must free any 2425 * allocated memory. The caller will clear @context afterwards. 2426 * This function is only called when roce_gid_table is used. 2427 */ 2428 int (*del_gid)(const struct ib_gid_attr *attr, void **context); 2429 int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index, 2430 u16 *pkey); 2431 int (*alloc_ucontext)(struct ib_ucontext *context, 2432 struct ib_udata *udata); 2433 void (*dealloc_ucontext)(struct ib_ucontext *context); 2434 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma); 2435 /** 2436 * This will be called once refcount of an entry in mmap_xa reaches 2437 * zero. The type of the memory that was mapped may differ between 2438 * entries and is opaque to the rdma_user_mmap interface. 2439 * Therefore needs to be implemented by the driver in mmap_free. 
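 *
 * A minimal sketch (the foo_ names are hypothetical) of a driver that embeds
 * struct rdma_user_mmap_entry in its own per-mapping structure and releases
 * the driver memory here once the last reference to the entry is gone:
 *
 *	struct foo_user_mmap_entry {
 *		struct rdma_user_mmap_entry rdma_entry;
 *		void *cpu_addr;
 *	};
 *
 *	static void foo_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
 *	{
 *		struct foo_user_mmap_entry *entry =
 *			container_of(rdma_entry, struct foo_user_mmap_entry,
 *				     rdma_entry);
 *
 *		kfree(entry);
 *	}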
2440 */ 2441 void (*mmap_free)(struct rdma_user_mmap_entry *entry); 2442 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); 2443 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); 2444 int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); 2445 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr, 2446 struct ib_udata *udata); 2447 int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr, 2448 struct ib_udata *udata); 2449 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2450 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 2451 int (*destroy_ah)(struct ib_ah *ah, u32 flags); 2452 int (*create_srq)(struct ib_srq *srq, 2453 struct ib_srq_init_attr *srq_init_attr, 2454 struct ib_udata *udata); 2455 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr, 2456 enum ib_srq_attr_mask srq_attr_mask, 2457 struct ib_udata *udata); 2458 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); 2459 int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); 2460 int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr, 2461 struct ib_udata *udata); 2462 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2463 int qp_attr_mask, struct ib_udata *udata); 2464 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 2465 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); 2466 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); 2467 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, 2468 struct uverbs_attr_bundle *attrs); 2469 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); 2470 int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); 2471 int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); 2472 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); 2473 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, 2474 u64 virt_addr, int mr_access_flags, 2475 struct ib_udata *udata); 2476 struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset, 2477 u64 length, u64 virt_addr, int fd, 2478 int mr_access_flags, 2479 struct uverbs_attr_bundle *attrs); 2480 struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, 2481 u64 length, u64 virt_addr, 2482 int mr_access_flags, struct ib_pd *pd, 2483 struct ib_udata *udata); 2484 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata); 2485 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type, 2486 u32 max_num_sg); 2487 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd, 2488 u32 max_num_data_sg, 2489 u32 max_num_meta_sg); 2490 int (*advise_mr)(struct ib_pd *pd, 2491 enum ib_uverbs_advise_mr_advice advice, u32 flags, 2492 struct ib_sge *sg_list, u32 num_sge, 2493 struct uverbs_attr_bundle *attrs); 2494 2495 /* 2496 * Kernel users should universally support relaxed ordering (RO), as 2497 * they are designed to read data only after observing the CQE and use 2498 * the DMA API correctly. 2499 * 2500 * Some drivers implicitly enable RO if platform supports it. 
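 *
 * As a hedged sketch of what this means for a ULP (assuming the
 * IB_ACCESS_RELAXED_ORDERING flag is available in this tree), a
 * fast-registration work request built on an MR obtained with ib_alloc_mr()
 * and mapped with ib_map_mr_sg() can simply OR the flag into the access
 * rights it already requests:
 *
 *	struct ib_reg_wr reg_wr = {
 *		.wr.opcode = IB_WR_REG_MR,
 *		.mr = mr,
 *		.key = mr->rkey,
 *		.access = IB_ACCESS_LOCAL_WRITE |
 *			  IB_ACCESS_REMOTE_READ |
 *			  IB_ACCESS_RELAXED_ORDERING,
 *	};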
2501 */ 2502 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 2503 unsigned int *sg_offset); 2504 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, 2505 struct ib_mr_status *mr_status); 2506 int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata); 2507 int (*dealloc_mw)(struct ib_mw *mw); 2508 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2509 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2510 int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); 2511 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); 2512 struct ib_flow *(*create_flow)(struct ib_qp *qp, 2513 struct ib_flow_attr *flow_attr, 2514 struct ib_udata *udata); 2515 int (*destroy_flow)(struct ib_flow *flow_id); 2516 int (*destroy_flow_action)(struct ib_flow_action *action); 2517 int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port, 2518 int state); 2519 int (*get_vf_config)(struct ib_device *device, int vf, u32 port, 2520 struct ifla_vf_info *ivf); 2521 int (*get_vf_stats)(struct ib_device *device, int vf, u32 port, 2522 struct ifla_vf_stats *stats); 2523 int (*get_vf_guid)(struct ib_device *device, int vf, u32 port, 2524 struct ifla_vf_guid *node_guid, 2525 struct ifla_vf_guid *port_guid); 2526 int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid, 2527 int type); 2528 struct ib_wq *(*create_wq)(struct ib_pd *pd, 2529 struct ib_wq_init_attr *init_attr, 2530 struct ib_udata *udata); 2531 int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); 2532 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, 2533 u32 wq_attr_mask, struct ib_udata *udata); 2534 int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table, 2535 struct ib_rwq_ind_table_init_attr *init_attr, 2536 struct ib_udata *udata); 2537 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2538 struct ib_dm *(*alloc_dm)(struct ib_device *device, 2539 struct ib_ucontext *context, 2540 struct ib_dm_alloc_attr *attr, 2541 struct uverbs_attr_bundle *attrs); 2542 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs); 2543 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, 2544 struct ib_dm_mr_attr *attr, 2545 struct uverbs_attr_bundle *attrs); 2546 int (*create_counters)(struct ib_counters *counters, 2547 struct uverbs_attr_bundle *attrs); 2548 int (*destroy_counters)(struct ib_counters *counters); 2549 int (*read_counters)(struct ib_counters *counters, 2550 struct ib_counters_read_attr *counters_read_attr, 2551 struct uverbs_attr_bundle *attrs); 2552 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg, 2553 int data_sg_nents, unsigned int *data_sg_offset, 2554 struct scatterlist *meta_sg, int meta_sg_nents, 2555 unsigned int *meta_sg_offset); 2556 2557 /** 2558 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and 2559 * fill in the driver initialized data. The struct is kfree()'ed by 2560 * the sysfs core when the device is removed. A lifespan of -1 in the 2561 * return struct tells the core to set a default lifespan. 2562 */ 2563 struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device); 2564 struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device, 2565 u32 port_num); 2566 /** 2567 * get_hw_stats - Fill in the counter value(s) in the stats struct. 
2568 * @index - The index in the value array we wish to have updated, or 2569 * num_counters if we want all stats updated 2570 * Return codes - 2571 * < 0 - Error, no counters updated 2572 * index - Updated the single counter pointed to by index 2573 * num_counters - Updated all counters (will reset the timestamp 2574 * and prevent further calls for lifespan milliseconds) 2575 * Drivers are allowed to update all counters in lieu of just the 2576 * one given in index at their option 2577 */ 2578 int (*get_hw_stats)(struct ib_device *device, 2579 struct rdma_hw_stats *stats, u32 port, int index); 2580 2581 /** 2582 * modify_hw_stat - Modify the counter configuration 2583 * @enable: true to enable and false to disable the counter 2584 * Return codes - 0 on success or error code otherwise. 2585 */ 2586 int (*modify_hw_stat)(struct ib_device *device, u32 port, 2587 unsigned int counter_index, bool enable); 2588 /** 2589 * Allows rdma drivers to add their own restrack attributes. 2590 */ 2591 int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); 2592 int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr); 2593 int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq); 2594 int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq); 2595 int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp); 2596 int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp); 2597 int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id); 2598 int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq); 2599 int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq); 2600 2601 /* Device lifecycle callbacks */ 2602 /* 2603 * Called after the device becomes registered, before clients are 2604 * attached 2605 */ 2606 int (*enable_driver)(struct ib_device *dev); 2607 /* 2608 * This is called as part of ib_dealloc_device(). 2609 */ 2610 void (*dealloc_driver)(struct ib_device *dev); 2611 2612 /* iWarp CM callbacks */ 2613 void (*iw_add_ref)(struct ib_qp *qp); 2614 void (*iw_rem_ref)(struct ib_qp *qp); 2615 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn); 2616 int (*iw_connect)(struct iw_cm_id *cm_id, 2617 struct iw_cm_conn_param *conn_param); 2618 int (*iw_accept)(struct iw_cm_id *cm_id, 2619 struct iw_cm_conn_param *conn_param); 2620 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata, 2621 u8 pdata_len); 2622 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog); 2623 int (*iw_destroy_listen)(struct iw_cm_id *cm_id); 2624 /** 2625 * counter_bind_qp - Bind a QP to a counter. 2626 * @counter - The counter to be bound. If counter->id is zero then 2627 * the driver needs to allocate a new counter and set counter->id. 2628 */ 2629 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp); 2630 /** 2631 * counter_unbind_qp - Unbind the QP from the dynamically-allocated 2632 * counter and bind it onto the default one. 2633 */ 2634 int (*counter_unbind_qp)(struct ib_qp *qp); 2635 /** 2636 * counter_dealloc - De-allocate the hw counter. 2637 */ 2638 int (*counter_dealloc)(struct rdma_counter *counter); 2639 /** 2640 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in 2641 * the driver initialized data.
2642 */ 2643 struct rdma_hw_stats *(*counter_alloc_stats)( 2644 struct rdma_counter *counter); 2645 /** 2646 * counter_update_stats - Query the stats value of this counter 2647 */ 2648 int (*counter_update_stats)(struct rdma_counter *counter); 2649 2650 /** 2651 * Allows rdma drivers to add their own restrack attributes 2652 * dumped via 'rdma stat' iproute2 command. 2653 */ 2654 int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr); 2655 2656 /* query driver for its ucontext properties */ 2657 int (*query_ucontext)(struct ib_ucontext *context, 2658 struct uverbs_attr_bundle *attrs); 2659 2660 /* 2661 * Provide NUMA node. This API exists for rdmavt/hfi1 only. 2662 * Everyone else relies on the Linux memory management model. 2663 */ 2664 int (*get_numa_node)(struct ib_device *dev); 2665 2666 /** 2667 * add_sub_dev - Add a sub IB device 2668 */ 2669 struct ib_device *(*add_sub_dev)(struct ib_device *parent, 2670 enum rdma_nl_dev_type type, 2671 const char *name); 2672 2673 /** 2674 * del_sub_dev - Delete a sub IB device 2675 */ 2676 void (*del_sub_dev)(struct ib_device *sub_dev); 2677 2678 /** 2679 * ufile_hw_cleanup - Attempt to clean up the uobjects' HW resources 2680 * inside the ufile. 2681 */ 2682 void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile); 2683 2684 /** 2685 * report_port_event - Drivers need to implement this if they have 2686 * private handling to perform when the link status changes. 2687 */ 2688 void (*report_port_event)(struct ib_device *ibdev, 2689 struct net_device *ndev, unsigned long event); 2690 2691 DECLARE_RDMA_OBJ_SIZE(ib_ah); 2692 DECLARE_RDMA_OBJ_SIZE(ib_counters); 2693 DECLARE_RDMA_OBJ_SIZE(ib_cq); 2694 DECLARE_RDMA_OBJ_SIZE(ib_mw); 2695 DECLARE_RDMA_OBJ_SIZE(ib_pd); 2696 DECLARE_RDMA_OBJ_SIZE(ib_qp); 2697 DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table); 2698 DECLARE_RDMA_OBJ_SIZE(ib_srq); 2699 DECLARE_RDMA_OBJ_SIZE(ib_ucontext); 2700 DECLARE_RDMA_OBJ_SIZE(ib_xrcd); 2701 }; 2702 2703 struct ib_core_device { 2704 /* device must be the first element in the structure until the 2705 * union of ib_core_device and device exists in ib_device. 2706 */ 2707 struct device dev; 2708 possible_net_t rdma_net; 2709 struct kobject *ports_kobj; 2710 struct list_head port_list; 2711 struct ib_device *owner; /* reach back to owner ib_device */ 2712 }; 2713 2714 struct rdma_restrack_root; 2715 struct ib_device { 2716 /* Do not access @dma_device directly from ULP nor from HW drivers. */ 2717 struct device *dma_device; 2718 struct ib_device_ops ops; 2719 char name[IB_DEVICE_NAME_MAX]; 2720 struct rcu_head rcu_head; 2721 2722 struct list_head event_handler_list; 2723 /* Protects event_handler_list */ 2724 struct rw_semaphore event_handler_rwsem; 2725 2726 /* Protects QP's event_handler calls and open_qp list */ 2727 spinlock_t qp_open_list_lock; 2728 2729 struct rw_semaphore client_data_rwsem; 2730 struct xarray client_data; 2731 struct mutex unregistration_lock; 2732 2733 /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */ 2734 rwlock_t cache_lock; 2735 /** 2736 * port_data is indexed by port number 2737 */ 2738 struct ib_port_data *port_data; 2739 2740 int num_comp_vectors; 2741 2742 union { 2743 struct device dev; 2744 struct ib_core_device coredev; 2745 }; 2746 2747 /* First group is for device attributes, 2748 * Second group is for driver provided attributes (optional). 2749 * Third group is for the hw_stats. 2750 * It is a NULL terminated array.
2751 */ 2752 const struct attribute_group *groups[4]; 2753 2754 u64 uverbs_cmd_mask; 2755 2756 char node_desc[IB_DEVICE_NODE_DESC_MAX]; 2757 __be64 node_guid; 2758 u32 local_dma_lkey; 2759 u16 is_switch:1; 2760 /* Indicates kernel verbs support, should not be used in drivers */ 2761 u16 kverbs_provider:1; 2762 /* CQ adaptive moderation (RDMA DIM) */ 2763 u16 use_cq_dim:1; 2764 u8 node_type; 2765 u32 phys_port_cnt; 2766 struct ib_device_attr attrs; 2767 struct hw_stats_device_data *hw_stats_data; 2768 2769 #ifdef CONFIG_CGROUP_RDMA 2770 struct rdmacg_device cg_device; 2771 #endif 2772 2773 u32 index; 2774 2775 spinlock_t cq_pools_lock; 2776 struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1]; 2777 2778 struct rdma_restrack_root *res; 2779 2780 const struct uapi_definition *driver_def; 2781 2782 /* 2783 * Positive refcount indicates that the device is currently 2784 * registered and cannot be unregistered. 2785 */ 2786 refcount_t refcount; 2787 struct completion unreg_completion; 2788 struct work_struct unregistration_work; 2789 2790 const struct rdma_link_ops *link_ops; 2791 2792 /* Protects compat_devs xarray modifications */ 2793 struct mutex compat_devs_mutex; 2794 /* Maintains compat devices for each net namespace */ 2795 struct xarray compat_devs; 2796 2797 /* Used by iWarp CM */ 2798 char iw_ifname[IFNAMSIZ]; 2799 u32 iw_driver_flags; 2800 u32 lag_flags; 2801 2802 /* A parent device has a list of sub-devices */ 2803 struct mutex subdev_lock; 2804 struct list_head subdev_list_head; 2805 2806 /* A sub device has a type and a parent */ 2807 enum rdma_nl_dev_type type; 2808 struct ib_device *parent; 2809 struct list_head subdev_list; 2810 2811 enum rdma_nl_name_assign_type name_assign_type; 2812 }; 2813 2814 static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size, 2815 gfp_t gfp, bool is_numa_aware) 2816 { 2817 if (is_numa_aware && dev->ops.get_numa_node) 2818 return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev)); 2819 2820 return kzalloc(size, gfp); 2821 } 2822 2823 struct ib_client_nl_info; 2824 struct ib_client { 2825 const char *name; 2826 int (*add)(struct ib_device *ibdev); 2827 void (*remove)(struct ib_device *, void *client_data); 2828 void (*rename)(struct ib_device *dev, void *client_data); 2829 int (*get_nl_info)(struct ib_device *ibdev, void *client_data, 2830 struct ib_client_nl_info *res); 2831 int (*get_global_nl_info)(struct ib_client_nl_info *res); 2832 2833 /* Returns the net_dev belonging to this ib_client and matching the 2834 * given parameters. 2835 * @dev: An RDMA device that the net_dev use for communication. 2836 * @port: A physical port number on the RDMA device. 2837 * @pkey: P_Key that the net_dev uses if applicable. 2838 * @gid: A GID that the net_dev uses to communicate. 2839 * @addr: An IP address the net_dev is configured with. 2840 * @client_data: The device's client data set by ib_set_client_data(). 2841 * 2842 * An ib_client that implements a net_dev on top of RDMA devices 2843 * (such as IP over IB) should implement this callback, allowing the 2844 * rdma_cm module to find the right net_dev for a given request. 2845 * 2846 * The caller is responsible for calling dev_put on the returned 2847 * netdev. 
*/ 2848 struct net_device *(*get_net_dev_by_params)( 2849 struct ib_device *dev, 2850 u32 port, 2851 u16 pkey, 2852 const union ib_gid *gid, 2853 const struct sockaddr *addr, 2854 void *client_data); 2855 2856 refcount_t uses; 2857 struct completion uses_zero; 2858 u32 client_id; 2859 2860 /* kverbs are not required by the client */ 2861 u8 no_kverbs_req:1; 2862 }; 2863 2864 /* 2865 * IB block DMA iterator 2866 * 2867 * Iterates the DMA-mapped SGL in contiguous memory blocks aligned 2868 * to a HW supported page size. 2869 */ 2870 struct ib_block_iter { 2871 /* internal states */ 2872 struct scatterlist *__sg; /* sg holding the current aligned block */ 2873 dma_addr_t __dma_addr; /* unaligned DMA address of this block */ 2874 size_t __sg_numblocks; /* ib_umem_num_dma_blocks() */ 2875 unsigned int __sg_nents; /* number of SG entries */ 2876 unsigned int __sg_advance; /* number of bytes to advance in sg in next step */ 2877 unsigned int __pg_bit; /* alignment of current block */ 2878 }; 2879 2880 struct ib_device *_ib_alloc_device(size_t size); 2881 #define ib_alloc_device(drv_struct, member) \ 2882 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \ 2883 BUILD_BUG_ON_ZERO(offsetof( \ 2884 struct drv_struct, member))), \ 2885 struct drv_struct, member) 2886 2887 void ib_dealloc_device(struct ib_device *device); 2888 2889 void ib_get_device_fw_str(struct ib_device *device, char *str); 2890 2891 int ib_register_device(struct ib_device *device, const char *name, 2892 struct device *dma_device); 2893 void ib_unregister_device(struct ib_device *device); 2894 void ib_unregister_driver(enum rdma_driver_id driver_id); 2895 void ib_unregister_device_and_put(struct ib_device *device); 2896 void ib_unregister_device_queued(struct ib_device *ib_dev); 2897 2898 int ib_register_client (struct ib_client *client); 2899 void ib_unregister_client(struct ib_client *client); 2900 2901 void __rdma_block_iter_start(struct ib_block_iter *biter, 2902 struct scatterlist *sglist, 2903 unsigned int nents, 2904 unsigned long pgsz); 2905 bool __rdma_block_iter_next(struct ib_block_iter *biter); 2906 2907 /** 2908 * rdma_block_iter_dma_address - get the aligned dma address of the current 2909 * block held by the block iterator. 2910 * @biter: block iterator holding the memory block 2911 */ 2912 static inline dma_addr_t 2913 rdma_block_iter_dma_address(struct ib_block_iter *biter) 2914 { 2915 return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1); 2916 } 2917 2918 /** 2919 * rdma_for_each_block - iterate over contiguous memory blocks of the sg list 2920 * @sglist: sglist to iterate over 2921 * @biter: block iterator holding the memory block 2922 * @nents: maximum number of sg entries to iterate over 2923 * @pgsz: best HW supported page size to use 2924 * 2925 * Callers may use rdma_block_iter_dma_address() to get each 2926 * blocks aligned DMA address. 2927 */ 2928 #define rdma_for_each_block(sglist, biter, nents, pgsz) \ 2929 for (__rdma_block_iter_start(biter, sglist, nents, \ 2930 pgsz); \ 2931 __rdma_block_iter_next(biter);) 2932 2933 /** 2934 * ib_get_client_data - Get IB client context 2935 * @device:Device to get context for 2936 * @client:Client to get context for 2937 * 2938 * ib_get_client_data() returns the client context data set with 2939 * ib_set_client_data(). This can only be called while the client is 2940 * registered to the device, once the ib_client remove() callback returns this 2941 * cannot be called. 
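 *
 * A minimal client-side sketch (the foo_ names are hypothetical, not part of
 * this API): the client's add() callback allocates per-device state and
 * publishes it with ib_set_client_data():
 *
 *	static int foo_add_one(struct ib_device *ibdev)
 *	{
 *		struct foo_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &foo_client, data);
 *		return 0;
 *	}
 *
 * Anywhere between add() and remove(), the same state can then be looked up
 * with ib_get_client_data(ibdev, &foo_client).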
2942 */ 2943 static inline void *ib_get_client_data(struct ib_device *device, 2944 struct ib_client *client) 2945 { 2946 return xa_load(&device->client_data, client->client_id); 2947 } 2948 void ib_set_client_data(struct ib_device *device, struct ib_client *client, 2949 void *data); 2950 void ib_set_device_ops(struct ib_device *device, 2951 const struct ib_device_ops *ops); 2952 2953 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma, 2954 unsigned long pfn, unsigned long size, pgprot_t prot, 2955 struct rdma_user_mmap_entry *entry); 2956 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext, 2957 struct rdma_user_mmap_entry *entry, 2958 size_t length); 2959 int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext, 2960 struct rdma_user_mmap_entry *entry, 2961 size_t length, u32 min_pgoff, 2962 u32 max_pgoff); 2963 2964 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) 2965 void rdma_user_mmap_disassociate(struct ib_device *device); 2966 #else 2967 static inline void rdma_user_mmap_disassociate(struct ib_device *device) 2968 { 2969 } 2970 #endif 2971 2972 static inline int 2973 rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext, 2974 struct rdma_user_mmap_entry *entry, 2975 size_t length, u32 pgoff) 2976 { 2977 return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff, 2978 pgoff); 2979 } 2980 2981 struct rdma_user_mmap_entry * 2982 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext, 2983 unsigned long pgoff); 2984 struct rdma_user_mmap_entry * 2985 rdma_user_mmap_entry_get(struct ib_ucontext *ucontext, 2986 struct vm_area_struct *vma); 2987 void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry); 2988 2989 void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry); 2990 2991 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len) 2992 { 2993 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0; 2994 } 2995 2996 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len) 2997 { 2998 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0; 2999 } 3000 3001 static inline bool ib_is_buffer_cleared(const void __user *p, 3002 size_t len) 3003 { 3004 bool ret; 3005 u8 *buf; 3006 3007 if (len > USHRT_MAX) 3008 return false; 3009 3010 buf = memdup_user(p, len); 3011 if (IS_ERR(buf)) 3012 return false; 3013 3014 ret = !memchr_inv(buf, 0, len); 3015 kfree(buf); 3016 return ret; 3017 } 3018 3019 static inline bool ib_is_udata_cleared(struct ib_udata *udata, 3020 size_t offset, 3021 size_t len) 3022 { 3023 return ib_is_buffer_cleared(udata->inbuf + offset, len); 3024 } 3025 3026 /** 3027 * ib_modify_qp_is_ok - Check that the supplied attribute mask 3028 * contains all required attributes and no attributes not allowed for 3029 * the given QP state transition. 3030 * @cur_state: Current QP state 3031 * @next_state: Next QP state 3032 * @type: QP type 3033 * @mask: Mask of supplied QP attributes 3034 * 3035 * This function is a helper function that a low-level driver's 3036 * modify_qp method can use to validate the consumer's input. It 3037 * checks that cur_state and next_state are valid QP states, that a 3038 * transition from cur_state to next_state is allowed by the IB spec, 3039 * and that the attribute mask supplied is allowed for the transition. 
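 *
 * A minimal sketch of how a driver's modify_qp handler might use this helper
 * (the foo_ names and the way the current state is tracked are hypothetical):
 *
 *	static int foo_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 *				 int attr_mask, struct ib_udata *udata)
 *	{
 *		struct foo_qp *qp = to_foo_qp(ibqp);
 *		enum ib_qp_state cur_state = qp->state;
 *		enum ib_qp_state new_state =
 *			(attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
 *
 *		if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *					attr_mask))
 *			return -EINVAL;
 *
 *		return foo_apply_qp_attrs(qp, attr, attr_mask);
 *	}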
3040 */ 3041 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state, 3042 enum ib_qp_type type, enum ib_qp_attr_mask mask); 3043 3044 void ib_register_event_handler(struct ib_event_handler *event_handler); 3045 void ib_unregister_event_handler(struct ib_event_handler *event_handler); 3046 void ib_dispatch_event(const struct ib_event *event); 3047 3048 int ib_query_port(struct ib_device *device, 3049 u32 port_num, struct ib_port_attr *port_attr); 3050 3051 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, 3052 u32 port_num); 3053 3054 /** 3055 * rdma_cap_ib_switch - Check if the device is IB switch 3056 * @device: Device to check 3057 * 3058 * Device driver is responsible for setting is_switch bit on 3059 * in ib_device structure at init time. 3060 * 3061 * Return: true if the device is IB switch. 3062 */ 3063 static inline bool rdma_cap_ib_switch(const struct ib_device *device) 3064 { 3065 return device->is_switch; 3066 } 3067 3068 /** 3069 * rdma_start_port - Return the first valid port number for the device 3070 * specified 3071 * 3072 * @device: Device to be checked 3073 * 3074 * Return start port number 3075 */ 3076 static inline u32 rdma_start_port(const struct ib_device *device) 3077 { 3078 return rdma_cap_ib_switch(device) ? 0 : 1; 3079 } 3080 3081 /** 3082 * rdma_for_each_port - Iterate over all valid port numbers of the IB device 3083 * @device - The struct ib_device * to iterate over 3084 * @iter - The unsigned int to store the port number 3085 */ 3086 #define rdma_for_each_port(device, iter) \ 3087 for (iter = rdma_start_port(device + \ 3088 BUILD_BUG_ON_ZERO(!__same_type(u32, \ 3089 iter))); \ 3090 iter <= rdma_end_port(device); iter++) 3091 3092 /** 3093 * rdma_end_port - Return the last valid port number for the device 3094 * specified 3095 * 3096 * @device: Device to be checked 3097 * 3098 * Return last port number 3099 */ 3100 static inline u32 rdma_end_port(const struct ib_device *device) 3101 { 3102 return rdma_cap_ib_switch(device) ? 
0 : device->phys_port_cnt; 3103 } 3104 3105 static inline int rdma_is_port_valid(const struct ib_device *device, 3106 unsigned int port) 3107 { 3108 return (port >= rdma_start_port(device) && 3109 port <= rdma_end_port(device)); 3110 } 3111 3112 static inline bool rdma_is_grh_required(const struct ib_device *device, 3113 u32 port_num) 3114 { 3115 return device->port_data[port_num].immutable.core_cap_flags & 3116 RDMA_CORE_PORT_IB_GRH_REQUIRED; 3117 } 3118 3119 static inline bool rdma_protocol_ib(const struct ib_device *device, 3120 u32 port_num) 3121 { 3122 return device->port_data[port_num].immutable.core_cap_flags & 3123 RDMA_CORE_CAP_PROT_IB; 3124 } 3125 3126 static inline bool rdma_protocol_roce(const struct ib_device *device, 3127 u32 port_num) 3128 { 3129 return device->port_data[port_num].immutable.core_cap_flags & 3130 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP); 3131 } 3132 3133 static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device, 3134 u32 port_num) 3135 { 3136 return device->port_data[port_num].immutable.core_cap_flags & 3137 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; 3138 } 3139 3140 static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device, 3141 u32 port_num) 3142 { 3143 return device->port_data[port_num].immutable.core_cap_flags & 3144 RDMA_CORE_CAP_PROT_ROCE; 3145 } 3146 3147 static inline bool rdma_protocol_iwarp(const struct ib_device *device, 3148 u32 port_num) 3149 { 3150 return device->port_data[port_num].immutable.core_cap_flags & 3151 RDMA_CORE_CAP_PROT_IWARP; 3152 } 3153 3154 static inline bool rdma_ib_or_roce(const struct ib_device *device, 3155 u32 port_num) 3156 { 3157 return rdma_protocol_ib(device, port_num) || 3158 rdma_protocol_roce(device, port_num); 3159 } 3160 3161 static inline bool rdma_protocol_raw_packet(const struct ib_device *device, 3162 u32 port_num) 3163 { 3164 return device->port_data[port_num].immutable.core_cap_flags & 3165 RDMA_CORE_CAP_PROT_RAW_PACKET; 3166 } 3167 3168 static inline bool rdma_protocol_usnic(const struct ib_device *device, 3169 u32 port_num) 3170 { 3171 return device->port_data[port_num].immutable.core_cap_flags & 3172 RDMA_CORE_CAP_PROT_USNIC; 3173 } 3174 3175 /** 3176 * rdma_cap_ib_mad - Check if the port of a device supports Infiniband 3177 * Management Datagrams. 3178 * @device: Device to check 3179 * @port_num: Port number to check 3180 * 3181 * Management Datagrams (MAD) are a required part of the InfiniBand 3182 * specification and are supported on all InfiniBand devices. A slightly 3183 * extended version are also supported on OPA interfaces. 3184 * 3185 * Return: true if the port supports sending/receiving of MAD packets. 3186 */ 3187 static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num) 3188 { 3189 return device->port_data[port_num].immutable.core_cap_flags & 3190 RDMA_CORE_CAP_IB_MAD; 3191 } 3192 3193 /** 3194 * rdma_cap_opa_mad - Check if the port of device provides support for OPA 3195 * Management Datagrams. 3196 * @device: Device to check 3197 * @port_num: Port number to check 3198 * 3199 * Intel OmniPath devices extend and/or replace the InfiniBand Management 3200 * datagrams with their own versions. These OPA MADs share many but not all of 3201 * the characteristics of InfiniBand MADs. 
3202 * 3203 * OPA MADs differ in the following ways: 3204 * 3205 * 1) MADs are variable size up to 2K 3206 * IBTA defined MADs remain fixed at 256 bytes 3207 * 2) OPA SMPs must carry valid PKeys 3208 * 3) OPA SMP packets are a different format 3209 * 3210 * Return: true if the port supports OPA MAD packet formats. 3211 */ 3212 static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num) 3213 { 3214 return device->port_data[port_num].immutable.core_cap_flags & 3215 RDMA_CORE_CAP_OPA_MAD; 3216 } 3217 3218 /** 3219 * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband 3220 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). 3221 * @device: Device to check 3222 * @port_num: Port number to check 3223 * 3224 * Each InfiniBand node is required to provide a Subnet Management Agent 3225 * that the subnet manager can access. Prior to the fabric being fully 3226 * configured by the subnet manager, the SMA is accessed via a well known 3227 * interface called the Subnet Management Interface (SMI). This interface 3228 * uses directed route packets to communicate with the SM to get around the 3229 * chicken and egg problem of the SM needing to know what's on the fabric 3230 * in order to configure the fabric, and needing to configure the fabric in 3231 * order to send packets to the devices on the fabric. These directed 3232 * route packets do not need the fabric fully configured in order to reach 3233 * their destination. The SMI is the only method allowed to send 3234 * directed route packets on an InfiniBand fabric. 3235 * 3236 * Return: true if the port provides an SMI. 3237 */ 3238 static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num) 3239 { 3240 return device->port_data[port_num].immutable.core_cap_flags & 3241 RDMA_CORE_CAP_IB_SMI; 3242 } 3243 3244 /** 3245 * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband 3246 * Communication Manager. 3247 * @device: Device to check 3248 * @port_num: Port number to check 3249 * 3250 * The InfiniBand Communication Manager is one of many pre-defined General 3251 * Service Agents (GSA) that are accessed via the General Service 3252 * Interface (GSI). Its role is to facilitate establishment of connections 3253 * between nodes as well as other management related tasks for established 3254 * connections. 3255 * 3256 * Return: true if the port supports an IB CM (this does not guarantee that 3257 * a CM is actually running however). 3258 */ 3259 static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num) 3260 { 3261 return device->port_data[port_num].immutable.core_cap_flags & 3262 RDMA_CORE_CAP_IB_CM; 3263 } 3264 3265 /** 3266 * rdma_cap_iw_cm - Check if the port of device has the capability IWARP 3267 * Communication Manager. 3268 * @device: Device to check 3269 * @port_num: Port number to check 3270 * 3271 * Similar to above, but specific to iWARP connections which have a different 3272 * management protocol than InfiniBand. 3273 * 3274 * Return: true if the port supports an iWARP CM (this does not guarantee that 3275 * a CM is actually running however). 3276 */ 3277 static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num) 3278 { 3279 return device->port_data[port_num].immutable.core_cap_flags & 3280 RDMA_CORE_CAP_IW_CM; 3281 } 3282 3283 /** 3284 * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband 3285 * Subnet Administration.
3286 * @device: Device to check 3287 * @port_num: Port number to check 3288 * 3289 * An InfiniBand Subnet Administration (SA) service is a pre-defined General 3290 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand 3291 * fabrics, devices should resolve routes to other hosts by contacting the 3292 * SA to query the proper route. 3293 * 3294 * Return: true if the port should act as a client to the fabric Subnet 3295 * Administration interface. This does not imply that the SA service is 3296 * running locally. 3297 */ 3298 static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num) 3299 { 3300 return device->port_data[port_num].immutable.core_cap_flags & 3301 RDMA_CORE_CAP_IB_SA; 3302 } 3303 3304 /** 3305 * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband 3306 * Multicast. 3307 * @device: Device to check 3308 * @port_num: Port number to check 3309 * 3310 * InfiniBand multicast registration is more complex than normal IPv4 or 3311 * IPv6 multicast registration. Each Host Channel Adapter must register 3312 * with the Subnet Manager when it wishes to join a multicast group. It 3313 * should do so only once regardless of how many queue pairs it subscribes 3314 * to this group. And it should leave the group only after all queue pairs 3315 * attached to the group have been detached. 3316 * 3317 * Return: true if the port must undertake the additional adminstrative 3318 * overhead of registering/unregistering with the SM and tracking of the 3319 * total number of queue pairs attached to the multicast group. 3320 */ 3321 static inline bool rdma_cap_ib_mcast(const struct ib_device *device, 3322 u32 port_num) 3323 { 3324 return rdma_cap_ib_sa(device, port_num); 3325 } 3326 3327 /** 3328 * rdma_cap_af_ib - Check if the port of device has the capability 3329 * Native Infiniband Address. 3330 * @device: Device to check 3331 * @port_num: Port number to check 3332 * 3333 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default 3334 * GID. RoCE uses a different mechanism, but still generates a GID via 3335 * a prescribed mechanism and port specific data. 3336 * 3337 * Return: true if the port uses a GID address to identify devices on the 3338 * network. 3339 */ 3340 static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num) 3341 { 3342 return device->port_data[port_num].immutable.core_cap_flags & 3343 RDMA_CORE_CAP_AF_IB; 3344 } 3345 3346 /** 3347 * rdma_cap_eth_ah - Check if the port of device has the capability 3348 * Ethernet Address Handle. 3349 * @device: Device to check 3350 * @port_num: Port number to check 3351 * 3352 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique 3353 * to fabricate GIDs over Ethernet/IP specific addresses native to the 3354 * port. Normally, packet headers are generated by the sending host 3355 * adapter, but when sending connectionless datagrams, we must manually 3356 * inject the proper headers for the fabric we are communicating over. 3357 * 3358 * Return: true if we are running as a RoCE port and must force the 3359 * addition of a Global Route Header built from our Ethernet Address 3360 * Handle into our header list for connectionless packets. 
3361 */ 3362 static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num) 3363 { 3364 return device->port_data[port_num].immutable.core_cap_flags & 3365 RDMA_CORE_CAP_ETH_AH; 3366 } 3367 3368 /** 3369 * rdma_cap_opa_ah - Check if the port of device supports 3370 * OPA Address handles 3371 * @device: Device to check 3372 * @port_num: Port number to check 3373 * 3374 * Return: true if we are running on an OPA device which supports 3375 * the extended OPA addressing. 3376 */ 3377 static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num) 3378 { 3379 return (device->port_data[port_num].immutable.core_cap_flags & 3380 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH; 3381 } 3382 3383 /** 3384 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port. 3385 * 3386 * @device: Device 3387 * @port_num: Port number 3388 * 3389 * This MAD size includes the MAD headers and MAD payload. No other headers 3390 * are included. 3391 * 3392 * Return the max MAD size required by the Port. Will return 0 if the port 3393 * does not support MADs 3394 */ 3395 static inline size_t rdma_max_mad_size(const struct ib_device *device, 3396 u32 port_num) 3397 { 3398 return device->port_data[port_num].immutable.max_mad_size; 3399 } 3400 3401 /** 3402 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table 3403 * @device: Device to check 3404 * @port_num: Port number to check 3405 * 3406 * RoCE GID table mechanism manages the various GIDs for a device. 3407 * 3408 * NOTE: if allocating the port's GID table has failed, this call will still 3409 * return true, but any RoCE GID table API will fail. 3410 * 3411 * Return: true if the port uses RoCE GID table mechanism in order to manage 3412 * its GIDs. 3413 */ 3414 static inline bool rdma_cap_roce_gid_table(const struct ib_device *device, 3415 u32 port_num) 3416 { 3417 return rdma_protocol_roce(device, port_num) && 3418 device->ops.add_gid && device->ops.del_gid; 3419 } 3420 3421 /* 3422 * Check if the device supports READ W/ INVALIDATE. 3423 */ 3424 static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) 3425 { 3426 /* 3427 * iWarp drivers must support READ W/ INVALIDATE. No other protocol 3428 * has support for it yet. 3429 */ 3430 return rdma_protocol_iwarp(dev, port_num); 3431 } 3432 3433 /** 3434 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not. 3435 * @device: Device 3436 * @port_num: 1 based Port number 3437 * 3438 * Return true if port is an Intel OPA port , false if not 3439 */ 3440 static inline bool rdma_core_cap_opa_port(struct ib_device *device, 3441 u32 port_num) 3442 { 3443 return (device->port_data[port_num].immutable.core_cap_flags & 3444 RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA; 3445 } 3446 3447 /** 3448 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value. 3449 * @device: Device 3450 * @port_num: Port number 3451 * @mtu: enum value of MTU 3452 * 3453 * Return the MTU size supported by the port as an integer value. Will return 3454 * -1 if enum value of mtu is not supported. 3455 */ 3456 static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port, 3457 int mtu) 3458 { 3459 if (rdma_core_cap_opa_port(device, port)) 3460 return opa_mtu_enum_to_int((enum opa_mtu)mtu); 3461 else 3462 return ib_mtu_enum_to_int((enum ib_mtu)mtu); 3463 } 3464 3465 /** 3466 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute. 
3467 * @device: Device 3468 * @port_num: Port number 3469 * @attr: port attribute 3470 * 3471 * Return the MTU size supported by the port as an integer value. 3472 */ 3473 static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port, 3474 struct ib_port_attr *attr) 3475 { 3476 if (rdma_core_cap_opa_port(device, port)) 3477 return attr->phys_mtu; 3478 else 3479 return ib_mtu_enum_to_int(attr->max_mtu); 3480 } 3481 3482 int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port, 3483 int state); 3484 int ib_get_vf_config(struct ib_device *device, int vf, u32 port, 3485 struct ifla_vf_info *info); 3486 int ib_get_vf_stats(struct ib_device *device, int vf, u32 port, 3487 struct ifla_vf_stats *stats); 3488 int ib_get_vf_guid(struct ib_device *device, int vf, u32 port, 3489 struct ifla_vf_guid *node_guid, 3490 struct ifla_vf_guid *port_guid); 3491 int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid, 3492 int type); 3493 3494 int ib_query_pkey(struct ib_device *device, 3495 u32 port_num, u16 index, u16 *pkey); 3496 3497 int ib_modify_device(struct ib_device *device, 3498 int device_modify_mask, 3499 struct ib_device_modify *device_modify); 3500 3501 int ib_modify_port(struct ib_device *device, 3502 u32 port_num, int port_modify_mask, 3503 struct ib_port_modify *port_modify); 3504 3505 int ib_find_gid(struct ib_device *device, union ib_gid *gid, 3506 u32 *port_num, u16 *index); 3507 3508 int ib_find_pkey(struct ib_device *device, 3509 u32 port_num, u16 pkey, u16 *index); 3510 3511 enum ib_pd_flags { 3512 /* 3513 * Create a memory registration for all memory in the system and place 3514 * the rkey for it into pd->unsafe_global_rkey. This can be used by 3515 * ULPs to avoid the overhead of dynamic MRs. 3516 * 3517 * This flag is generally considered unsafe and must only be used in 3518 * extremly trusted environments. Every use of it will log a warning 3519 * in the kernel log. 3520 */ 3521 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01, 3522 }; 3523 3524 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, 3525 const char *caller); 3526 3527 /** 3528 * ib_alloc_pd - Allocates an unused protection domain. 3529 * @device: The device on which to allocate the protection domain. 3530 * @flags: protection domain flags 3531 * 3532 * A protection domain object provides an association between QPs, shared 3533 * receive queues, address handles, memory regions, and memory windows. 3534 * 3535 * Every PD has a local_dma_lkey which can be used as the lkey value for local 3536 * memory operations. 3537 */ 3538 #define ib_alloc_pd(device, flags) \ 3539 __ib_alloc_pd((device), (flags), KBUILD_MODNAME) 3540 3541 int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); 3542 3543 /** 3544 * ib_dealloc_pd - Deallocate kernel PD 3545 * @pd: The protection domain 3546 * 3547 * NOTE: for user PD use ib_dealloc_pd_user with valid udata! 3548 */ 3549 static inline void ib_dealloc_pd(struct ib_pd *pd) 3550 { 3551 int ret = ib_dealloc_pd_user(pd, NULL); 3552 3553 WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail"); 3554 } 3555 3556 enum rdma_create_ah_flags { 3557 /* In a sleepable context */ 3558 RDMA_CREATE_AH_SLEEPABLE = BIT(0), 3559 }; 3560 3561 /** 3562 * rdma_create_ah - Creates an address handle for the given address vector. 3563 * @pd: The protection domain associated with the address handle. 3564 * @ah_attr: The attributes of the address vector. 3565 * @flags: Create address handle flags (see enum rdma_create_ah_flags). 
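 *
 * A minimal kernel usage sketch for an IB UD destination (pd, remote_lid and
 * port_num are placeholders supplied by the caller):
 *
 *	struct rdma_ah_attr ah_attr = { .type = RDMA_AH_ATTR_TYPE_IB };
 *	struct ib_ah *ah;
 *
 *	rdma_ah_set_dlid(&ah_attr, remote_lid);
 *	rdma_ah_set_sl(&ah_attr, 0);
 *	rdma_ah_set_port_num(&ah_attr, port_num);
 *	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);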
3566 * 3567 * The address handle is used to reference a local or global destination 3568 * in all UD QP post sends. 3569 */ 3570 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, 3571 u32 flags); 3572 3573 /** 3574 * rdma_create_user_ah - Creates an address handle for the given address vector. 3575 * It resolves the destination MAC address for an ah attribute of RoCE type. 3576 * @pd: The protection domain associated with the address handle. 3577 * @ah_attr: The attributes of the address vector. 3578 * @udata: pointer to the user's input/output buffer information needed by 3579 * the provider driver. 3580 * 3581 * It returns a valid address handle on success and an appropriate error 3582 * pointer on error. The address handle is used to reference a local or 3583 * global destination in all UD QP post sends. 3584 */ 3585 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd, 3586 struct rdma_ah_attr *ah_attr, 3587 struct ib_udata *udata); 3588 /** 3589 * ib_get_gids_from_rdma_hdr - Get the sgid and dgid from the GRH or IPv4 3590 * header of a work completion. 3591 * @hdr: the L3 header to parse 3592 * @net_type: type of header to parse 3593 * @sgid: place to store source gid 3594 * @dgid: place to store destination gid 3595 */ 3596 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, 3597 enum rdma_network_type net_type, 3598 union ib_gid *sgid, union ib_gid *dgid); 3599 3600 /** 3601 * ib_get_rdma_header_version - Get the header version 3602 * @hdr: the L3 header to parse 3603 */ 3604 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr); 3605 3606 /** 3607 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a 3608 * work completion. 3609 * @device: Device on which the received message arrived. 3610 * @port_num: Port on which the received message arrived. 3611 * @wc: Work completion associated with the received message. 3612 * @grh: References the received global route header. This parameter is 3613 * ignored unless the work completion indicates that the GRH is valid. 3614 * @ah_attr: Returned attributes that can be used when creating an address 3615 * handle for replying to the message. 3616 * When ib_init_ah_attr_from_wc() returns success, 3617 * (a) for the IB link layer it optionally contains a reference to the SGID 3618 * attribute when a GRH is present, and 3619 * (b) for the RoCE link layer it contains a reference to the SGID attribute. 3620 * The user must invoke rdma_cleanup_ah_attr_gid_attr() to release the reference 3621 * to SGID attributes which are initialized using ib_init_ah_attr_from_wc(). 3622 * 3623 */ 3624 int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num, 3625 const struct ib_wc *wc, const struct ib_grh *grh, 3626 struct rdma_ah_attr *ah_attr); 3627 3628 /** 3629 * ib_create_ah_from_wc - Creates an address handle associated with the 3630 * sender of the specified work completion. 3631 * @pd: The protection domain associated with the address handle. 3632 * @wc: Work completion information associated with a received message. 3633 * @grh: References the received global route header. This parameter is 3634 * ignored unless the work completion indicates that the GRH is valid. 3635 * @port_num: The outbound port number to associate with the address. 3636 * 3637 * The address handle is used to reference a local or global destination 3638 * in all UD QP post sends.
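 *
 * A minimal sketch of replying to a received UD message, assuming ud_wr is a
 * struct ib_ud_wr being prepared for the reply, qkey is the remote Q_Key and
 * pd, wc, grh and port_num come from the receive path (posting of the send
 * itself is omitted):
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	ud_wr.ah = ah;
 *	ud_wr.remote_qpn = wc->src_qp;
 *	ud_wr.remote_qkey = qkey;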
3639 */ 3640 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc, 3641 const struct ib_grh *grh, u32 port_num); 3642 3643 /** 3644 * rdma_modify_ah - Modifies the address vector associated with an address 3645 * handle. 3646 * @ah: The address handle to modify. 3647 * @ah_attr: The new address vector attributes to associate with the 3648 * address handle. 3649 */ 3650 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 3651 3652 /** 3653 * rdma_query_ah - Queries the address vector associated with an address 3654 * handle. 3655 * @ah: The address handle to query. 3656 * @ah_attr: The address vector attributes associated with the address 3657 * handle. 3658 */ 3659 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); 3660 3661 enum rdma_destroy_ah_flags { 3662 /* In a sleepable context */ 3663 RDMA_DESTROY_AH_SLEEPABLE = BIT(0), 3664 }; 3665 3666 /** 3667 * rdma_destroy_ah_user - Destroys an address handle. 3668 * @ah: The address handle to destroy. 3669 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). 3670 * @udata: Valid user data or NULL for kernel objects 3671 */ 3672 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata); 3673 3674 /** 3675 * rdma_destroy_ah - Destroys an kernel address handle. 3676 * @ah: The address handle to destroy. 3677 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags). 3678 * 3679 * NOTE: for user ah use rdma_destroy_ah_user with valid udata! 3680 */ 3681 static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags) 3682 { 3683 int ret = rdma_destroy_ah_user(ah, flags, NULL); 3684 3685 WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail"); 3686 } 3687 3688 struct ib_srq *ib_create_srq_user(struct ib_pd *pd, 3689 struct ib_srq_init_attr *srq_init_attr, 3690 struct ib_usrq_object *uobject, 3691 struct ib_udata *udata); 3692 static inline struct ib_srq * 3693 ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr) 3694 { 3695 if (!pd->device->ops.create_srq) 3696 return ERR_PTR(-EOPNOTSUPP); 3697 3698 return ib_create_srq_user(pd, srq_init_attr, NULL, NULL); 3699 } 3700 3701 /** 3702 * ib_modify_srq - Modifies the attributes for the specified SRQ. 3703 * @srq: The SRQ to modify. 3704 * @srq_attr: On input, specifies the SRQ attributes to modify. On output, 3705 * the current values of selected SRQ attributes are returned. 3706 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ 3707 * are being modified. 3708 * 3709 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or 3710 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when 3711 * the number of receives queued drops below the limit. 3712 */ 3713 int ib_modify_srq(struct ib_srq *srq, 3714 struct ib_srq_attr *srq_attr, 3715 enum ib_srq_attr_mask srq_attr_mask); 3716 3717 /** 3718 * ib_query_srq - Returns the attribute list and current values for the 3719 * specified SRQ. 3720 * @srq: The SRQ to query. 3721 * @srq_attr: The attributes of the specified SRQ. 3722 */ 3723 int ib_query_srq(struct ib_srq *srq, 3724 struct ib_srq_attr *srq_attr); 3725 3726 /** 3727 * ib_destroy_srq_user - Destroys the specified SRQ. 3728 * @srq: The SRQ to destroy. 3729 * @udata: Valid user data or NULL for kernel objects 3730 */ 3731 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata); 3732 3733 /** 3734 * ib_destroy_srq - Destroys the specified kernel SRQ. 3735 * @srq: The SRQ to destroy. 
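 *
 * Illustrative sketch (not part of this header, error handling trimmed): a
 * typical kernel SRQ lifecycle pairs this with ib_create_srq(), assuming pd
 * and srq_init_attr are the caller's protection domain and init attributes:
 *
 *	struct ib_srq *srq;
 *
 *	srq = ib_create_srq(pd, &srq_init_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 *	... keep the SRQ stocked with buffers via ib_post_srq_recv() ...
 *	ib_destroy_srq(srq);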
3736 * 3737 * NOTE: for user srq use ib_destroy_srq_user with valid udata! 3738 */ 3739 static inline void ib_destroy_srq(struct ib_srq *srq) 3740 { 3741 int ret = ib_destroy_srq_user(srq, NULL); 3742 3743 WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail"); 3744 } 3745 3746 /** 3747 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ. 3748 * @srq: The SRQ to post the work request on. 3749 * @recv_wr: A list of work requests to post on the receive queue. 3750 * @bad_recv_wr: On an immediate failure, this parameter will reference 3751 * the work request that failed to be posted on the SRQ. 3752 */ 3753 static inline int ib_post_srq_recv(struct ib_srq *srq, 3754 const struct ib_recv_wr *recv_wr, 3755 const struct ib_recv_wr **bad_recv_wr) 3756 { 3757 const struct ib_recv_wr *dummy; 3758 3759 return srq->device->ops.post_srq_recv(srq, recv_wr, 3760 bad_recv_wr ? : &dummy); 3761 } 3762 3763 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd, 3764 struct ib_qp_init_attr *qp_init_attr, 3765 const char *caller); 3766 /** 3767 * ib_create_qp - Creates a kernel QP associated with the specified protection 3768 * domain. 3769 * @pd: The protection domain associated with the QP. 3770 * @init_attr: A list of initial attributes required to create the 3771 * QP. If QP creation succeeds, then the attributes are updated to 3772 * the actual capabilities of the created QP. 3773 */ 3774 static inline struct ib_qp *ib_create_qp(struct ib_pd *pd, 3775 struct ib_qp_init_attr *init_attr) 3776 { 3777 return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME); 3778 } 3779 3780 /** 3781 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. 3782 * @qp: The QP to modify. 3783 * @attr: On input, specifies the QP attributes to modify. On output, 3784 * the current values of selected QP attributes are returned. 3785 * @attr_mask: A bit-mask used to specify which attributes of the QP 3786 * are being modified. 3787 * @udata: pointer to the user's input/output buffer information. 3788 * 3789 * It returns 0 on success and an appropriate error code on error. 3790 */ 3791 int ib_modify_qp_with_udata(struct ib_qp *qp, 3792 struct ib_qp_attr *attr, 3793 int attr_mask, 3794 struct ib_udata *udata); 3795 3796 /** 3797 * ib_modify_qp - Modifies the attributes for the specified QP and then 3798 * transitions the QP to the given state. 3799 * @qp: The QP to modify. 3800 * @qp_attr: On input, specifies the QP attributes to modify. On output, 3801 * the current values of selected QP attributes are returned. 3802 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP 3803 * are being modified. 3804 */ 3805 int ib_modify_qp(struct ib_qp *qp, 3806 struct ib_qp_attr *qp_attr, 3807 int qp_attr_mask); 3808 3809 /** 3810 * ib_query_qp - Returns the attribute list and current values for the 3811 * specified QP. 3812 * @qp: The QP to query. 3813 * @qp_attr: The attributes of the specified QP. 3814 * @qp_attr_mask: A bit-mask used to select specific attributes to query. 3815 * @qp_init_attr: Additional attributes of the selected QP. 3816 * 3817 * The qp_attr_mask may be used to limit the query to gathering only the 3818 * selected attributes. 3819 */ 3820 int ib_query_qp(struct ib_qp *qp, 3821 struct ib_qp_attr *qp_attr, 3822 int qp_attr_mask, 3823 struct ib_qp_init_attr *qp_init_attr); 3824 3825 /** 3826 * ib_destroy_qp_user - Destroys the specified QP. 3827 * @qp: The QP to destroy.
3828 * @udata: Valid udata or NULL for kernel objects 3829 */ 3830 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata); 3831 3832 /** 3833 * ib_destroy_qp - Destroys the specified kernel QP. 3834 * @qp: The QP to destroy. 3835 * 3836 * NOTE: for user qp use ib_destroy_qp_user with valid udata! 3837 */ 3838 static inline int ib_destroy_qp(struct ib_qp *qp) 3839 { 3840 return ib_destroy_qp_user(qp, NULL); 3841 } 3842 3843 /** 3844 * ib_open_qp - Obtain a reference to an existing sharable QP. 3845 * @xrcd - XRC domain 3846 * @qp_open_attr: Attributes identifying the QP to open. 3847 * 3848 * Returns a reference to a sharable QP. 3849 */ 3850 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, 3851 struct ib_qp_open_attr *qp_open_attr); 3852 3853 /** 3854 * ib_close_qp - Release an external reference to a QP. 3855 * @qp: The QP handle to release 3856 * 3857 * The opened QP handle is released by the caller. The underlying 3858 * shared QP is not destroyed until all internal references are released. 3859 */ 3860 int ib_close_qp(struct ib_qp *qp); 3861 3862 /** 3863 * ib_post_send - Posts a list of work requests to the send queue of 3864 * the specified QP. 3865 * @qp: The QP to post the work request on. 3866 * @send_wr: A list of work requests to post on the send queue. 3867 * @bad_send_wr: On an immediate failure, this parameter will reference 3868 * the work request that failed to be posted on the QP. 3869 * 3870 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate 3871 * error is returned, the QP state shall not be affected, 3872 * ib_post_send() will return an immediate error after queueing any 3873 * earlier work requests in the list. 3874 */ 3875 static inline int ib_post_send(struct ib_qp *qp, 3876 const struct ib_send_wr *send_wr, 3877 const struct ib_send_wr **bad_send_wr) 3878 { 3879 const struct ib_send_wr *dummy; 3880 3881 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy); 3882 } 3883 3884 /** 3885 * ib_post_recv - Posts a list of work requests to the receive queue of 3886 * the specified QP. 3887 * @qp: The QP to post the work request on. 3888 * @recv_wr: A list of work requests to post on the receive queue. 3889 * @bad_recv_wr: On an immediate failure, this parameter will reference 3890 * the work request that failed to be posted on the QP. 3891 */ 3892 static inline int ib_post_recv(struct ib_qp *qp, 3893 const struct ib_recv_wr *recv_wr, 3894 const struct ib_recv_wr **bad_recv_wr) 3895 { 3896 const struct ib_recv_wr *dummy; 3897 3898 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? 
: &dummy); 3899 } 3900 3901 struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, 3902 int comp_vector, enum ib_poll_context poll_ctx, 3903 const char *caller); 3904 static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, 3905 int nr_cqe, int comp_vector, 3906 enum ib_poll_context poll_ctx) 3907 { 3908 return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx, 3909 KBUILD_MODNAME); 3910 } 3911 3912 struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, 3913 int nr_cqe, enum ib_poll_context poll_ctx, 3914 const char *caller); 3915 3916 /** 3917 * ib_alloc_cq_any: Allocate kernel CQ 3918 * @dev: The IB device 3919 * @private: Private data attached to the CQE 3920 * @nr_cqe: Number of CQEs in the CQ 3921 * @poll_ctx: Context used for polling the CQ 3922 */ 3923 static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, 3924 void *private, int nr_cqe, 3925 enum ib_poll_context poll_ctx) 3926 { 3927 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx, 3928 KBUILD_MODNAME); 3929 } 3930 3931 void ib_free_cq(struct ib_cq *cq); 3932 int ib_process_cq_direct(struct ib_cq *cq, int budget); 3933 3934 /** 3935 * ib_create_cq - Creates a CQ on the specified device. 3936 * @device: The device on which to create the CQ. 3937 * @comp_handler: A user-specified callback that is invoked when a 3938 * completion event occurs on the CQ. 3939 * @event_handler: A user-specified callback that is invoked when an 3940 * asynchronous event not associated with a completion occurs on the CQ. 3941 * @cq_context: Context associated with the CQ returned to the user via 3942 * the associated completion and event handlers. 3943 * @cq_attr: The attributes the CQ should be created upon. 3944 * 3945 * Users can examine the cq structure to determine the actual CQ size. 3946 */ 3947 struct ib_cq *__ib_create_cq(struct ib_device *device, 3948 ib_comp_handler comp_handler, 3949 void (*event_handler)(struct ib_event *, void *), 3950 void *cq_context, 3951 const struct ib_cq_init_attr *cq_attr, 3952 const char *caller); 3953 #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \ 3954 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME) 3955 3956 /** 3957 * ib_resize_cq - Modifies the capacity of the CQ. 3958 * @cq: The CQ to resize. 3959 * @cqe: The minimum size of the CQ. 3960 * 3961 * Users can examine the cq structure to determine the actual CQ size. 3962 */ 3963 int ib_resize_cq(struct ib_cq *cq, int cqe); 3964 3965 /** 3966 * rdma_set_cq_moderation - Modifies moderation params of the CQ 3967 * @cq: The CQ to modify. 3968 * @cq_count: number of CQEs that will trigger an event 3969 * @cq_period: max period of time in usec before triggering an event 3970 * 3971 */ 3972 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period); 3973 3974 /** 3975 * ib_destroy_cq_user - Destroys the specified CQ. 3976 * @cq: The CQ to destroy. 3977 * @udata: Valid user data or NULL for kernel objects 3978 */ 3979 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); 3980 3981 /** 3982 * ib_destroy_cq - Destroys the specified kernel CQ. 3983 * @cq: The CQ to destroy. 3984 * 3985 * NOTE: for user cq use ib_destroy_cq_user with valid udata! 
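 *
 * Illustrative sketch (not part of this header, error handling trimmed):
 * most kernel ULPs use the ib_alloc_cq()/ib_free_cq() wrappers above rather
 * than pairing ib_create_cq() with ib_destroy_cq() directly. Here "ulp_ctx"
 * is assumed to be the caller's private context and IB_POLL_SOFTIRQ one of
 * the enum ib_poll_context values:
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_alloc_cq_any(device, ulp_ctx, 128, IB_POLL_SOFTIRQ);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	... attach the CQ to QPs via struct ib_qp_init_attr ...
 *	ib_free_cq(cq);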
3986 */ 3987 static inline void ib_destroy_cq(struct ib_cq *cq) 3988 { 3989 int ret = ib_destroy_cq_user(cq, NULL); 3990 3991 WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); 3992 } 3993 3994 /** 3995 * ib_poll_cq - poll a CQ for completion(s) 3996 * @cq: the CQ being polled 3997 * @num_entries: maximum number of completions to return 3998 * @wc: array of at least @num_entries &struct ib_wc where completions 3999 * will be returned 4000 * 4001 * Poll a CQ for (possibly multiple) completions. If the return value 4002 * is < 0, an error occurred. If the return value is >= 0, it is the 4003 * number of completions returned. If the return value is 4004 * non-negative and < num_entries, then the CQ was emptied. 4005 */ 4006 static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, 4007 struct ib_wc *wc) 4008 { 4009 return cq->device->ops.poll_cq(cq, num_entries, wc); 4010 } 4011 4012 /** 4013 * ib_req_notify_cq - Request completion notification on a CQ. 4014 * @cq: The CQ to generate an event for. 4015 * @flags: 4016 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP 4017 * to request an event on the next solicited event or next work 4018 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS 4019 * may also be |ed in to request a hint about missed events, as 4020 * described below. 4021 * 4022 * Return Value: 4023 * < 0 means an error occurred while requesting notification 4024 * == 0 means notification was requested successfully, and if 4025 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events 4026 * were missed and it is safe to wait for another event. In 4027 * this case it is guaranteed that any work completions added 4028 * to the CQ since the last CQ poll will trigger a completion 4029 * notification event. 4030 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed 4031 * in. It means that the consumer must poll the CQ again to 4032 * make sure it is empty to avoid missing an event because of a 4033 * race between requesting notification and an entry being 4034 * added to the CQ. This return value means it is possible 4035 * (but not guaranteed) that a work completion has been added 4036 * to the CQ since the last poll without triggering a 4037 * completion notification event. 4038 */ 4039 static inline int ib_req_notify_cq(struct ib_cq *cq, 4040 enum ib_cq_notify_flags flags) 4041 { 4042 return cq->device->ops.req_notify_cq(cq, flags); 4043 } 4044 4045 struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe, 4046 int comp_vector_hint, 4047 enum ib_poll_context poll_ctx); 4048 4049 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe); 4050 4051 /* 4052 * Drivers that don't need a DMA mapping at the RDMA layer set dma_device to 4053 * NULL. This causes the ib_dma* helpers to just stash the kernel virtual 4054 * address into the dma address. 4055 */ 4056 static inline bool ib_uses_virt_dma(struct ib_device *dev) 4057 { 4058 return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device; 4059 } 4060 4061 /* 4062 * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
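 *
 * Illustrative sketch (not part of this header): a ULP would typically gate
 * any PCI P2PDMA path on this helper, e.g.:
 *
 *	if (!ib_dma_pci_p2p_dma_supported(dev))
 *		return -EOPNOTSUPP;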
4063 */ 4064 static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev) 4065 { 4066 if (ib_uses_virt_dma(dev)) 4067 return false; 4068 4069 return dma_pci_p2pdma_supported(dev->dma_device); 4070 } 4071 4072 /** 4073 * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer 4074 * @dma_addr: The DMA address 4075 * 4076 * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after 4077 * going through the dma_addr marshalling. 4078 */ 4079 static inline void *ib_virt_dma_to_ptr(u64 dma_addr) 4080 { 4081 /* virt_dma mode maps the kvs's directly into the dma addr */ 4082 return (void *)(uintptr_t)dma_addr; 4083 } 4084 4085 /** 4086 * ib_virt_dma_to_page - Convert a dma_addr to a struct page 4087 * @dma_addr: The DMA address 4088 * 4089 * Used by ib_uses_virt_dma() device to get back to the struct page after going 4090 * through the dma_addr marshalling. 4091 */ 4092 static inline struct page *ib_virt_dma_to_page(u64 dma_addr) 4093 { 4094 return virt_to_page(ib_virt_dma_to_ptr(dma_addr)); 4095 } 4096 4097 /** 4098 * ib_dma_mapping_error - check a DMA addr for error 4099 * @dev: The device for which the dma_addr was created 4100 * @dma_addr: The DMA address to check 4101 */ 4102 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 4103 { 4104 if (ib_uses_virt_dma(dev)) 4105 return 0; 4106 return dma_mapping_error(dev->dma_device, dma_addr); 4107 } 4108 4109 /** 4110 * ib_dma_map_single - Map a kernel virtual address to DMA address 4111 * @dev: The device for which the dma_addr is to be created 4112 * @cpu_addr: The kernel virtual address 4113 * @size: The size of the region in bytes 4114 * @direction: The direction of the DMA 4115 */ 4116 static inline u64 ib_dma_map_single(struct ib_device *dev, 4117 void *cpu_addr, size_t size, 4118 enum dma_data_direction direction) 4119 { 4120 if (ib_uses_virt_dma(dev)) 4121 return (uintptr_t)cpu_addr; 4122 return dma_map_single(dev->dma_device, cpu_addr, size, direction); 4123 } 4124 4125 /** 4126 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 4127 * @dev: The device for which the DMA address was created 4128 * @addr: The DMA address 4129 * @size: The size of the region in bytes 4130 * @direction: The direction of the DMA 4131 */ 4132 static inline void ib_dma_unmap_single(struct ib_device *dev, 4133 u64 addr, size_t size, 4134 enum dma_data_direction direction) 4135 { 4136 if (!ib_uses_virt_dma(dev)) 4137 dma_unmap_single(dev->dma_device, addr, size, direction); 4138 } 4139 4140 /** 4141 * ib_dma_map_page - Map a physical page to DMA address 4142 * @dev: The device for which the dma_addr is to be created 4143 * @page: The page to be mapped 4144 * @offset: The offset within the page 4145 * @size: The size of the region in bytes 4146 * @direction: The direction of the DMA 4147 */ 4148 static inline u64 ib_dma_map_page(struct ib_device *dev, 4149 struct page *page, 4150 unsigned long offset, 4151 size_t size, 4152 enum dma_data_direction direction) 4153 { 4154 if (ib_uses_virt_dma(dev)) 4155 return (uintptr_t)(page_address(page) + offset); 4156 return dma_map_page(dev->dma_device, page, offset, size, direction); 4157 } 4158 4159 /** 4160 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 4161 * @dev: The device for which the DMA address was created 4162 * @addr: The DMA address 4163 * @size: The size of the region in bytes 4164 * @direction: The direction of the DMA 4165 */ 4166 static inline void ib_dma_unmap_page(struct ib_device *dev, 4167 u64 addr, size_t 
size, 4168 enum dma_data_direction direction) 4169 { 4170 if (!ib_uses_virt_dma(dev)) 4171 dma_unmap_page(dev->dma_device, addr, size, direction); 4172 } 4173 4174 int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents); 4175 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 4176 struct scatterlist *sg, int nents, 4177 enum dma_data_direction direction, 4178 unsigned long dma_attrs) 4179 { 4180 if (ib_uses_virt_dma(dev)) 4181 return ib_dma_virt_map_sg(dev, sg, nents); 4182 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 4183 dma_attrs); 4184 } 4185 4186 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 4187 struct scatterlist *sg, int nents, 4188 enum dma_data_direction direction, 4189 unsigned long dma_attrs) 4190 { 4191 if (!ib_uses_virt_dma(dev)) 4192 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, 4193 dma_attrs); 4194 } 4195 4196 /** 4197 * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses 4198 * @dev: The device for which the DMA addresses are to be created 4199 * @sg: The sg_table object describing the buffer 4200 * @direction: The direction of the DMA 4201 * @attrs: Optional DMA attributes for the map operation 4202 */ 4203 static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev, 4204 struct sg_table *sgt, 4205 enum dma_data_direction direction, 4206 unsigned long dma_attrs) 4207 { 4208 int nents; 4209 4210 if (ib_uses_virt_dma(dev)) { 4211 nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents); 4212 if (!nents) 4213 return -EIO; 4214 sgt->nents = nents; 4215 return 0; 4216 } 4217 return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs); 4218 } 4219 4220 static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev, 4221 struct sg_table *sgt, 4222 enum dma_data_direction direction, 4223 unsigned long dma_attrs) 4224 { 4225 if (!ib_uses_virt_dma(dev)) 4226 dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs); 4227 } 4228 4229 /** 4230 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 4231 * @dev: The device for which the DMA addresses are to be created 4232 * @sg: The array of scatter/gather entries 4233 * @nents: The number of scatter/gather entries 4234 * @direction: The direction of the DMA 4235 */ 4236 static inline int ib_dma_map_sg(struct ib_device *dev, 4237 struct scatterlist *sg, int nents, 4238 enum dma_data_direction direction) 4239 { 4240 return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0); 4241 } 4242 4243 /** 4244 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 4245 * @dev: The device for which the DMA addresses were created 4246 * @sg: The array of scatter/gather entries 4247 * @nents: The number of scatter/gather entries 4248 * @direction: The direction of the DMA 4249 */ 4250 static inline void ib_dma_unmap_sg(struct ib_device *dev, 4251 struct scatterlist *sg, int nents, 4252 enum dma_data_direction direction) 4253 { 4254 ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0); 4255 } 4256 4257 /** 4258 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer 4259 * @dev: The device to query 4260 * 4261 * The returned value represents a size in bytes. 
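 *
 * Illustrative sketch (not part of this header, error handling trimmed):
 * ULPs typically honour this limit when building a scatterlist and then use
 * the ib_dma_map_sg()/ib_dma_unmap_sg() wrappers above. Here "sg" and
 * "nents" are assumed to describe the caller's buffer:
 *
 *	unsigned int max_seg = ib_dma_max_seg_size(dev);
 *	int mapped;
 *
 *	... build sg with no segment larger than max_seg ...
 *	mapped = ib_dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	... post work requests that reference the mapped addresses ...
 *	ib_dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);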
4262 */ 4263 static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev) 4264 { 4265 if (ib_uses_virt_dma(dev)) 4266 return UINT_MAX; 4267 return dma_get_max_seg_size(dev->dma_device); 4268 } 4269 4270 /** 4271 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 4272 * @dev: The device for which the DMA address was created 4273 * @addr: The DMA address 4274 * @size: The size of the region in bytes 4275 * @dir: The direction of the DMA 4276 */ 4277 static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 4278 u64 addr, 4279 size_t size, 4280 enum dma_data_direction dir) 4281 { 4282 if (!ib_uses_virt_dma(dev)) 4283 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 4284 } 4285 4286 /** 4287 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 4288 * @dev: The device for which the DMA address was created 4289 * @addr: The DMA address 4290 * @size: The size of the region in bytes 4291 * @dir: The direction of the DMA 4292 */ 4293 static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 4294 u64 addr, 4295 size_t size, 4296 enum dma_data_direction dir) 4297 { 4298 if (!ib_uses_virt_dma(dev)) 4299 dma_sync_single_for_device(dev->dma_device, addr, size, dir); 4300 } 4301 4302 /* ib_reg_user_mr - register a memory region for virtual addresses from kernel 4303 * space. This function should be called when 'current' is the owning MM. 4304 */ 4305 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 4306 u64 virt_addr, int mr_access_flags); 4307 4308 /* ib_advise_mr - give an advice about an address range in a memory region */ 4309 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, 4310 u32 flags, struct ib_sge *sg_list, u32 num_sge); 4311 /** 4312 * ib_dereg_mr_user - Deregisters a memory region and removes it from the 4313 * HCA translation table. 4314 * @mr: The memory region to deregister. 4315 * @udata: Valid user data or NULL for kernel object 4316 * 4317 * This function can fail, if the memory region has memory windows bound to it. 4318 */ 4319 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata); 4320 4321 /** 4322 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the 4323 * HCA translation table. 4324 * @mr: The memory region to deregister. 4325 * 4326 * This function can fail, if the memory region has memory windows bound to it. 4327 * 4328 * NOTE: for user mr use ib_dereg_mr_user with valid udata! 4329 */ 4330 static inline int ib_dereg_mr(struct ib_mr *mr) 4331 { 4332 return ib_dereg_mr_user(mr, NULL); 4333 } 4334 4335 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, 4336 u32 max_num_sg); 4337 4338 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd, 4339 u32 max_num_data_sg, 4340 u32 max_num_meta_sg); 4341 4342 /** 4343 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR 4344 * R_Key and L_Key. 4345 * @mr - struct ib_mr pointer to be updated. 4346 * @newkey - new key to be used. 4347 */ 4348 static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey) 4349 { 4350 mr->lkey = (mr->lkey & 0xffffff00) | newkey; 4351 mr->rkey = (mr->rkey & 0xffffff00) | newkey; 4352 } 4353 4354 /** 4355 * ib_inc_rkey - increments the key portion of the given rkey. Can be used 4356 * for calculating a new rkey for type 2 memory windows. 4357 * @rkey - the rkey to increment. 
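 *
 * Illustrative sketch (not part of this header): ULPs that recycle a
 * fast-registration MR commonly bump the key before reusing the MR, so a
 * stale remote reference to the previous registration cannot match the new
 * one:
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));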
4358 */ 4359 static inline u32 ib_inc_rkey(u32 rkey) 4360 { 4361 const u32 mask = 0x000000ff; 4362 return ((rkey + 1) & mask) | (rkey & ~mask); 4363 } 4364 4365 /** 4366 * ib_attach_mcast - Attaches the specified QP to a multicast group. 4367 * @qp: QP to attach to the multicast group. The QP must be type 4368 * IB_QPT_UD. 4369 * @gid: Multicast group GID. 4370 * @lid: Multicast group LID in host byte order. 4371 * 4372 * In order to send and receive multicast packets, subnet 4373 * administration must have created the multicast group and configured 4374 * the fabric appropriately. The port associated with the specified 4375 * QP must also be a member of the multicast group. 4376 */ 4377 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 4378 4379 /** 4380 * ib_detach_mcast - Detaches the specified QP from a multicast group. 4381 * @qp: QP to detach from the multicast group. 4382 * @gid: Multicast group GID. 4383 * @lid: Multicast group LID in host byte order. 4384 */ 4385 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 4386 4387 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device, 4388 struct inode *inode, struct ib_udata *udata); 4389 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata); 4390 4391 static inline int ib_check_mr_access(struct ib_device *ib_dev, 4392 unsigned int flags) 4393 { 4394 u64 device_cap = ib_dev->attrs.device_cap_flags; 4395 4396 /* 4397 * Local write permission is required if remote write or 4398 * remote atomic permission is also requested. 4399 */ 4400 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) && 4401 !(flags & IB_ACCESS_LOCAL_WRITE)) 4402 return -EINVAL; 4403 4404 if (flags & ~IB_ACCESS_SUPPORTED) 4405 return -EINVAL; 4406 4407 if (flags & IB_ACCESS_ON_DEMAND && 4408 !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING)) 4409 return -EOPNOTSUPP; 4410 4411 if ((flags & IB_ACCESS_FLUSH_GLOBAL && 4412 !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) || 4413 (flags & IB_ACCESS_FLUSH_PERSISTENT && 4414 !(device_cap & IB_DEVICE_FLUSH_PERSISTENT))) 4415 return -EOPNOTSUPP; 4416 4417 return 0; 4418 } 4419 4420 static inline bool ib_access_writable(int access_flags) 4421 { 4422 /* 4423 * We have writable memory backing the MR if any of the following 4424 * access flags are set. "Local write" and "remote write" obviously 4425 * require write access. "Remote atomic" can do things like fetch and 4426 * add, which will modify memory, and "MW bind" can change permissions 4427 * by binding a window. 4428 */ 4429 return access_flags & 4430 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | 4431 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND); 4432 } 4433 4434 /** 4435 * ib_check_mr_status: lightweight check of MR status. 4436 * This routine may provide status checks on a selected 4437 * ib_mr. first use is for signature status check. 4438 * 4439 * @mr: A memory region. 4440 * @check_mask: Bitmask of which checks to perform from 4441 * ib_mr_status_check enumeration. 4442 * @mr_status: The container of relevant status checks. 4443 * failed checks will be indicated in the status bitmask 4444 * and the relevant info shall be in the error item. 4445 */ 4446 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, 4447 struct ib_mr_status *mr_status); 4448 4449 /** 4450 * ib_device_try_get: Hold a registration lock 4451 * device: The device to lock 4452 * 4453 * A device under an active registration lock cannot become unregistered. 
It 4454 * is only possible to obtain a registration lock on a device that is fully 4455 * registered, otherwise this function returns false. 4456 * 4457 * The registration lock is only necessary for actions which require the 4458 * device to still be registered. Uses that only require the device pointer to 4459 * be valid should use get_device(&ibdev->dev) to hold the memory. 4460 * 4461 */ 4462 static inline bool ib_device_try_get(struct ib_device *dev) 4463 { 4464 return refcount_inc_not_zero(&dev->refcount); 4465 } 4466 4467 void ib_device_put(struct ib_device *device); 4468 struct ib_device *ib_device_get_by_netdev(struct net_device *ndev, 4469 enum rdma_driver_id driver_id); 4470 struct ib_device *ib_device_get_by_name(const char *name, 4471 enum rdma_driver_id driver_id); 4472 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port, 4473 u16 pkey, const union ib_gid *gid, 4474 const struct sockaddr *addr); 4475 int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev, 4476 unsigned int port); 4477 struct net_device *ib_device_get_netdev(struct ib_device *ib_dev, 4478 u32 port); 4479 int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev, 4480 u32 *port); 4481 4482 static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev) 4483 { 4484 return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ? 4485 IB_PORT_ACTIVE : IB_PORT_DOWN; 4486 } 4487 4488 void ib_dispatch_port_state_event(struct ib_device *ibdev, 4489 struct net_device *ndev); 4490 struct ib_wq *ib_create_wq(struct ib_pd *pd, 4491 struct ib_wq_init_attr *init_attr); 4492 int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata); 4493 4494 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 4495 unsigned int *sg_offset, unsigned int page_size); 4496 int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg, 4497 int data_sg_nents, unsigned int *data_sg_offset, 4498 struct scatterlist *meta_sg, int meta_sg_nents, 4499 unsigned int *meta_sg_offset, unsigned int page_size); 4500 4501 static inline int 4502 ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, 4503 unsigned int *sg_offset, unsigned int page_size) 4504 { 4505 int n; 4506 4507 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size); 4508 mr->iova = 0; 4509 4510 return n; 4511 } 4512 4513 int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents, 4514 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64)); 4515 4516 void ib_drain_rq(struct ib_qp *qp); 4517 void ib_drain_sq(struct ib_qp *qp); 4518 void ib_drain_qp(struct ib_qp *qp); 4519 4520 int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, 4521 u8 *width); 4522 4523 static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) 4524 { 4525 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE) 4526 return attr->roce.dmac; 4527 return NULL; 4528 } 4529 4530 static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid) 4531 { 4532 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4533 attr->ib.dlid = (u16)dlid; 4534 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4535 attr->opa.dlid = dlid; 4536 } 4537 4538 static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr) 4539 { 4540 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4541 return attr->ib.dlid; 4542 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4543 return attr->opa.dlid; 4544 return 0; 4545 } 4546 4547 static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl) 4548 { 4549 
attr->sl = sl; 4550 } 4551 4552 static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr) 4553 { 4554 return attr->sl; 4555 } 4556 4557 static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr, 4558 u8 src_path_bits) 4559 { 4560 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4561 attr->ib.src_path_bits = src_path_bits; 4562 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4563 attr->opa.src_path_bits = src_path_bits; 4564 } 4565 4566 static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr) 4567 { 4568 if (attr->type == RDMA_AH_ATTR_TYPE_IB) 4569 return attr->ib.src_path_bits; 4570 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4571 return attr->opa.src_path_bits; 4572 return 0; 4573 } 4574 4575 static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr, 4576 bool make_grd) 4577 { 4578 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4579 attr->opa.make_grd = make_grd; 4580 } 4581 4582 static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr) 4583 { 4584 if (attr->type == RDMA_AH_ATTR_TYPE_OPA) 4585 return attr->opa.make_grd; 4586 return false; 4587 } 4588 4589 static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num) 4590 { 4591 attr->port_num = port_num; 4592 } 4593 4594 static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr) 4595 { 4596 return attr->port_num; 4597 } 4598 4599 static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr, 4600 u8 static_rate) 4601 { 4602 attr->static_rate = static_rate; 4603 } 4604 4605 static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr) 4606 { 4607 return attr->static_rate; 4608 } 4609 4610 static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr, 4611 enum ib_ah_flags flag) 4612 { 4613 attr->ah_flags = flag; 4614 } 4615 4616 static inline enum ib_ah_flags 4617 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr) 4618 { 4619 return attr->ah_flags; 4620 } 4621 4622 static inline const struct ib_global_route 4623 *rdma_ah_read_grh(const struct rdma_ah_attr *attr) 4624 { 4625 return &attr->grh; 4626 } 4627 4628 /*To retrieve and modify the grh */ 4629 static inline struct ib_global_route 4630 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr) 4631 { 4632 return &attr->grh; 4633 } 4634 4635 static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid) 4636 { 4637 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4638 4639 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid)); 4640 } 4641 4642 static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr, 4643 __be64 prefix) 4644 { 4645 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4646 4647 grh->dgid.global.subnet_prefix = prefix; 4648 } 4649 4650 static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr, 4651 __be64 if_id) 4652 { 4653 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4654 4655 grh->dgid.global.interface_id = if_id; 4656 } 4657 4658 static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr, 4659 union ib_gid *dgid, u32 flow_label, 4660 u8 sgid_index, u8 hop_limit, 4661 u8 traffic_class) 4662 { 4663 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr); 4664 4665 attr->ah_flags = IB_AH_GRH; 4666 if (dgid) 4667 grh->dgid = *dgid; 4668 grh->flow_label = flow_label; 4669 grh->sgid_index = sgid_index; 4670 grh->hop_limit = hop_limit; 4671 grh->traffic_class = traffic_class; 4672 grh->sgid_attr = NULL; 4673 } 4674 4675 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr); 4676 void rdma_move_grh_sgid_attr(struct 
rdma_ah_attr *attr, union ib_gid *dgid, 4677 u32 flow_label, u8 hop_limit, u8 traffic_class, 4678 const struct ib_gid_attr *sgid_attr); 4679 void rdma_copy_ah_attr(struct rdma_ah_attr *dest, 4680 const struct rdma_ah_attr *src); 4681 void rdma_replace_ah_attr(struct rdma_ah_attr *old, 4682 const struct rdma_ah_attr *new); 4683 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src); 4684 4685 /** 4686 * rdma_ah_find_type - Return address handle type. 4687 * 4688 * @dev: Device to be checked 4689 * @port_num: Port number 4690 */ 4691 static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, 4692 u32 port_num) 4693 { 4694 if (rdma_protocol_roce(dev, port_num)) 4695 return RDMA_AH_ATTR_TYPE_ROCE; 4696 if (rdma_protocol_ib(dev, port_num)) { 4697 if (rdma_cap_opa_ah(dev, port_num)) 4698 return RDMA_AH_ATTR_TYPE_OPA; 4699 return RDMA_AH_ATTR_TYPE_IB; 4700 } 4701 if (dev->type == RDMA_DEVICE_TYPE_SMI) 4702 return RDMA_AH_ATTR_TYPE_IB; 4703 4704 return RDMA_AH_ATTR_TYPE_UNDEFINED; 4705 } 4706 4707 /** 4708 * ib_lid_cpu16 - Return lid in 16bit CPU encoding. 4709 * In the current implementation the only way to 4710 * get the 32bit lid is from other sources for OPA. 4711 * For IB, lids will always be 16bits so cast the 4712 * value accordingly. 4713 * 4714 * @lid: A 32bit LID 4715 */ 4716 static inline u16 ib_lid_cpu16(u32 lid) 4717 { 4718 WARN_ON_ONCE(lid & 0xFFFF0000); 4719 return (u16)lid; 4720 } 4721 4722 /** 4723 * ib_lid_be16 - Return lid in 16bit BE encoding. 4724 * 4725 * @lid: A 32bit LID 4726 */ 4727 static inline __be16 ib_lid_be16(u32 lid) 4728 { 4729 WARN_ON_ONCE(lid & 0xFFFF0000); 4730 return cpu_to_be16((u16)lid); 4731 } 4732 4733 /** 4734 * ib_get_vector_affinity - Get the affinity mappings of a given completion 4735 * vector 4736 * @device: the rdma device 4737 * @comp_vector: index of completion vector 4738 * 4739 * Returns NULL on failure, otherwise a corresponding cpu map of the 4740 * completion vector (returns all-cpus map if the device driver doesn't 4741 * implement get_vector_affinity). 4742 */ 4743 static inline const struct cpumask * 4744 ib_get_vector_affinity(struct ib_device *device, int comp_vector) 4745 { 4746 if (comp_vector < 0 || comp_vector >= device->num_comp_vectors || 4747 !device->ops.get_vector_affinity) 4748 return NULL; 4749 4750 return device->ops.get_vector_affinity(device, comp_vector); 4751 4752 } 4753 4754 /** 4755 * rdma_roce_rescan_device - Rescan all of the network devices in the system 4756 * and add their gids, as needed, to the relevant RoCE devices. 
4757 * 4758 * @device: the rdma device 4759 */ 4760 void rdma_roce_rescan_device(struct ib_device *ibdev); 4761 void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port); 4762 void roce_del_all_netdev_gids(struct ib_device *ib_dev, 4763 u32 port, struct net_device *ndev); 4764 4765 struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile); 4766 4767 int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs); 4768 4769 struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, 4770 enum rdma_netdev_t type, const char *name, 4771 unsigned char name_assign_type, 4772 void (*setup)(struct net_device *)); 4773 4774 int rdma_init_netdev(struct ib_device *device, u32 port_num, 4775 enum rdma_netdev_t type, const char *name, 4776 unsigned char name_assign_type, 4777 void (*setup)(struct net_device *), 4778 struct net_device *netdev); 4779 4780 /** 4781 * rdma_device_to_ibdev - Get ib_device pointer from device pointer 4782 * 4783 * @device: device pointer from which to retrieve the ib_device pointer 4784 * 4785 * rdma_device_to_ibdev() retrieves the ib_device pointer from a device pointer. 4786 * 4787 */ 4788 static inline struct ib_device *rdma_device_to_ibdev(struct device *device) 4789 { 4790 struct ib_core_device *coredev = 4791 container_of(device, struct ib_core_device, dev); 4792 4793 return coredev->owner; 4794 } 4795 4796 /** 4797 * ibdev_to_node - return the NUMA node for a given ib_device 4798 * @dev: device to get the NUMA node for. 4799 */ 4800 static inline int ibdev_to_node(struct ib_device *ibdev) 4801 { 4802 struct device *parent = ibdev->dev.parent; 4803 4804 if (!parent) 4805 return NUMA_NO_NODE; 4806 return dev_to_node(parent); 4807 } 4808 4809 /** 4810 * rdma_device_to_drv_device - Helper macro to reach back to driver's 4811 * ib_device holder structure from device pointer. 4812 * 4813 * NOTE: New drivers should not make use of this API; this API is only for 4814 * existing drivers that have exposed sysfs entries using 4815 * ops->device_group. 4816 */ 4817 #define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \ 4818 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member) 4819 4820 bool rdma_dev_access_netns(const struct ib_device *device, 4821 const struct net *net); 4822 4823 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) 4824 #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF) 4825 #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF) 4826 4827 /** 4828 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based 4829 * on the flow_label 4830 * 4831 * This function will convert the 20-bit flow_label input to a valid 14-bit 4832 * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same 4833 * convention. 4834 */ 4835 static inline u16 rdma_flow_label_to_udp_sport(u32 fl) 4836 { 4837 u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000; 4838 4839 fl_low ^= fl_high >> 14; 4840 return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN); 4841 } 4842 4843 /** 4844 * rdma_calc_flow_label - generate an RDMA symmetric flow label value based on 4845 * the local and remote qpn values 4846 * 4847 * This function folds the result of multiplying the two 24-bit qpn fields 4848 * and converts it to a 20-bit result. 4849 * 4850 * This function will create a symmetric flow_label value based on the local 4851 * and remote qpn values. This will allow both the requester and responder 4852 * to calculate the same flow_label for a given connection.
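 *
 * Illustrative sketch (not part of this header): a RoCE v2 driver normally
 * reaches this helper through rdma_get_udp_sport() below. Here "grh" is
 * assumed to point at the connection's struct ib_global_route:
 *
 *	u16 sport = rdma_get_udp_sport(grh->flow_label, lqpn, rqpn);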
4853 * 4854 * This helper function should be used by drivers in case the upper layer 4855 * provides a zero flow_label value. This is to improve the entropy of RDMA 4856 * traffic in the network. 4857 */ 4858 static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn) 4859 { 4860 u64 v = (u64)lqpn * rqpn; 4861 4862 v ^= v >> 20; 4863 v ^= v >> 40; 4864 4865 return (u32)(v & IB_GRH_FLOWLABEL_MASK); 4866 } 4867 4868 /** 4869 * rdma_get_udp_sport - Calculate the UDP source port based on the flow 4870 * label. If the flow label is not defined in the GRH then 4871 * calculate it based on lqpn/rqpn. 4872 * 4873 * @fl: flow label from GRH 4874 * @lqpn: local qp number 4875 * @rqpn: remote qp number 4876 */ 4877 static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn) 4878 { 4879 if (!fl) 4880 fl = rdma_calc_flow_label(lqpn, rqpn); 4881 4882 return rdma_flow_label_to_udp_sport(fl); 4883 } 4884 4885 const struct ib_port_immutable* 4886 ib_port_immutable_read(struct ib_device *dev, unsigned int port); 4887 4888 /** ib_add_sub_device - Add a sub IB device on an existing one 4889 * 4890 * @parent: The IB device that needs to add a sub device 4891 * @type: The type of the new sub device 4892 * @name: The name of the new sub device 4893 * 4894 * 4895 * Return 0 on success, an error code otherwise 4896 */ 4897 int ib_add_sub_device(struct ib_device *parent, 4898 enum rdma_nl_dev_type type, 4899 const char *name); 4900 4901 4902 /** ib_del_sub_device_and_put - Delete an IB sub device while holding a 'get' 4903 * 4904 * @sub: The sub device that is going to be deleted 4905 * 4906 * Return 0 on success, an error code otherwise 4907 */ 4908 int ib_del_sub_device_and_put(struct ib_device *sub); 4909 4910 static inline void ib_mark_name_assigned_by_user(struct ib_device *ibdev) 4911 { 4912 ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER; 4913 } 4914 4915 #endif /* IB_VERBS_H */ 4916