1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * Copyright (c) 2022 Microsoft Corporation. All rights reserved. 4 */ 5 6 #ifndef _MANA_IB_H_ 7 #define _MANA_IB_H_ 8 9 #include <rdma/ib_verbs.h> 10 #include <rdma/ib_mad.h> 11 #include <rdma/ib_umem.h> 12 #include <rdma/mana-abi.h> 13 #include <rdma/uverbs_ioctl.h> 14 #include <linux/dmapool.h> 15 16 #include <net/mana/mana.h> 17 #include "shadow_queue.h" 18 #include "counters.h" 19 20 #define PAGE_SZ_BM \ 21 (SZ_4K | SZ_8K | SZ_16K | SZ_32K | SZ_64K | SZ_128K | SZ_256K | \ 22 SZ_512K | SZ_1M | SZ_2M) 23 24 /* MANA doesn't have any limit for MR size */ 25 #define MANA_IB_MAX_MR_SIZE U64_MAX 26 27 /* Send queue ID mask */ 28 #define MANA_SENDQ_MASK BIT(31) 29 30 /* 31 * The hardware limit of number of MRs is greater than maximum number of MRs 32 * that can possibly represent in 24 bits 33 */ 34 #define MANA_IB_MAX_MR 0xFFFFFFu 35 36 /* 37 * The CA timeout is approx. 260ms (4us * 2^(DELAY)) 38 */ 39 #define MANA_CA_ACK_DELAY 16 40 41 /* 42 * The buffer used for writing AV 43 */ 44 #define MANA_AV_BUFFER_SIZE 64 45 46 struct mana_ib_adapter_caps { 47 u32 max_sq_id; 48 u32 max_rq_id; 49 u32 max_cq_id; 50 u32 max_qp_count; 51 u32 max_cq_count; 52 u32 max_mr_count; 53 u32 max_pd_count; 54 u32 max_inbound_read_limit; 55 u32 max_outbound_read_limit; 56 u32 mw_count; 57 u32 max_srq_count; 58 u32 max_qp_wr; 59 u32 max_send_sge_count; 60 u32 max_recv_sge_count; 61 u32 max_inline_data_size; 62 u64 feature_flags; 63 u64 page_size_cap; 64 }; 65 66 struct mana_ib_queue { 67 struct ib_umem *umem; 68 struct gdma_queue *kmem; 69 u64 gdma_region; 70 u64 id; 71 }; 72 73 struct mana_ib_dev { 74 struct ib_device ib_dev; 75 struct gdma_dev *gdma_dev; 76 mana_handle_t adapter_handle; 77 struct gdma_queue *fatal_err_eq; 78 struct gdma_queue **eqs; 79 struct xarray qp_table_wq; 80 struct mana_ib_adapter_caps adapter_caps; 81 struct dma_pool *av_pool; 82 netdevice_tracker dev_tracker; 83 struct notifier_block nb; 84 }; 85 86 struct mana_ib_wq { 87 struct ib_wq ibwq; 88 struct mana_ib_queue queue; 89 int wqe; 90 u32 wq_buf_size; 91 mana_handle_t rx_object; 92 }; 93 94 struct mana_ib_pd { 95 struct ib_pd ibpd; 96 u32 pdn; 97 mana_handle_t pd_handle; 98 99 /* Mutex for sharing access to vport_use_count */ 100 struct mutex vport_mutex; 101 int vport_use_count; 102 103 bool tx_shortform_allowed; 104 u32 tx_vp_offset; 105 }; 106 107 struct mana_ib_av { 108 u8 dest_ip[16]; 109 u8 dest_mac[ETH_ALEN]; 110 u16 udp_src_port; 111 u8 src_ip[16]; 112 u32 hop_limit : 8; 113 u32 reserved1 : 12; 114 u32 dscp : 6; 115 u32 reserved2 : 5; 116 u32 is_ipv6 : 1; 117 u32 reserved3 : 32; 118 }; 119 120 struct mana_ib_ah { 121 struct ib_ah ibah; 122 struct mana_ib_av *av; 123 dma_addr_t dma_handle; 124 }; 125 126 struct mana_ib_mr { 127 struct ib_mr ibmr; 128 struct ib_umem *umem; 129 mana_handle_t mr_handle; 130 }; 131 132 struct mana_ib_cq { 133 struct ib_cq ibcq; 134 struct mana_ib_queue queue; 135 /* protects CQ polling */ 136 spinlock_t cq_lock; 137 struct list_head list_send_qp; 138 struct list_head list_recv_qp; 139 int cqe; 140 u32 comp_vector; 141 mana_handle_t cq_handle; 142 }; 143 144 enum mana_rc_queue_type { 145 MANA_RC_SEND_QUEUE_REQUESTER = 0, 146 MANA_RC_SEND_QUEUE_RESPONDER, 147 MANA_RC_SEND_QUEUE_FMR, 148 MANA_RC_RECV_QUEUE_REQUESTER, 149 MANA_RC_RECV_QUEUE_RESPONDER, 150 MANA_RC_QUEUE_TYPE_MAX, 151 }; 152 153 struct mana_ib_rc_qp { 154 struct mana_ib_queue queues[MANA_RC_QUEUE_TYPE_MAX]; 155 }; 156 157 enum mana_ud_queue_type { 158 MANA_UD_SEND_QUEUE = 0, 159 MANA_UD_RECV_QUEUE, 160 MANA_UD_QUEUE_TYPE_MAX, 161 }; 162 163 struct mana_ib_ud_qp { 164 struct mana_ib_queue queues[MANA_UD_QUEUE_TYPE_MAX]; 165 u32 sq_psn; 166 }; 167 168 struct mana_ib_qp { 169 struct ib_qp ibqp; 170 171 mana_handle_t qp_handle; 172 union { 173 struct mana_ib_queue raw_sq; 174 struct mana_ib_rc_qp rc_qp; 175 struct mana_ib_ud_qp ud_qp; 176 }; 177 178 /* The port on the IB device, starting with 1 */ 179 u32 port; 180 181 struct list_head cq_send_list; 182 struct list_head cq_recv_list; 183 struct shadow_queue shadow_rq; 184 struct shadow_queue shadow_sq; 185 186 refcount_t refcount; 187 struct completion free; 188 }; 189 190 struct mana_ib_ucontext { 191 struct ib_ucontext ibucontext; 192 u32 doorbell; 193 }; 194 195 struct mana_ib_rwq_ind_table { 196 struct ib_rwq_ind_table ib_ind_table; 197 }; 198 199 enum mana_ib_command_code { 200 MANA_IB_GET_ADAPTER_CAP = 0x30001, 201 MANA_IB_CREATE_ADAPTER = 0x30002, 202 MANA_IB_DESTROY_ADAPTER = 0x30003, 203 MANA_IB_CONFIG_IP_ADDR = 0x30004, 204 MANA_IB_CONFIG_MAC_ADDR = 0x30005, 205 MANA_IB_CREATE_UD_QP = 0x30006, 206 MANA_IB_DESTROY_UD_QP = 0x30007, 207 MANA_IB_CREATE_CQ = 0x30008, 208 MANA_IB_DESTROY_CQ = 0x30009, 209 MANA_IB_CREATE_RC_QP = 0x3000a, 210 MANA_IB_DESTROY_RC_QP = 0x3000b, 211 MANA_IB_SET_QP_STATE = 0x3000d, 212 MANA_IB_QUERY_VF_COUNTERS = 0x30022, 213 MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023, 214 }; 215 216 struct mana_ib_query_adapter_caps_req { 217 struct gdma_req_hdr hdr; 218 }; /*HW Data */ 219 220 enum mana_ib_adapter_features { 221 MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4), 222 MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5), 223 }; 224 225 struct mana_ib_query_adapter_caps_resp { 226 struct gdma_resp_hdr hdr; 227 u32 max_sq_id; 228 u32 max_rq_id; 229 u32 max_cq_id; 230 u32 max_qp_count; 231 u32 max_cq_count; 232 u32 max_mr_count; 233 u32 max_pd_count; 234 u32 max_inbound_read_limit; 235 u32 max_outbound_read_limit; 236 u32 mw_count; 237 u32 max_srq_count; 238 u32 max_requester_sq_size; 239 u32 max_responder_sq_size; 240 u32 max_requester_rq_size; 241 u32 max_responder_rq_size; 242 u32 max_send_sge_count; 243 u32 max_recv_sge_count; 244 u32 max_inline_data_size; 245 u64 feature_flags; 246 }; /* HW Data */ 247 248 enum mana_ib_adapter_features_request { 249 MANA_IB_FEATURE_CLIENT_ERROR_CQE_REQUEST = BIT(1), 250 }; /*HW Data */ 251 252 struct mana_rnic_create_adapter_req { 253 struct gdma_req_hdr hdr; 254 u32 notify_eq_id; 255 u32 reserved; 256 u64 feature_flags; 257 }; /*HW Data */ 258 259 struct mana_rnic_create_adapter_resp { 260 struct gdma_resp_hdr hdr; 261 mana_handle_t adapter; 262 }; /* HW Data */ 263 264 struct mana_rnic_destroy_adapter_req { 265 struct gdma_req_hdr hdr; 266 mana_handle_t adapter; 267 }; /*HW Data */ 268 269 struct mana_rnic_destroy_adapter_resp { 270 struct gdma_resp_hdr hdr; 271 }; /* HW Data */ 272 273 enum mana_ib_addr_op { 274 ADDR_OP_ADD = 1, 275 ADDR_OP_REMOVE = 2, 276 }; 277 278 enum sgid_entry_type { 279 SGID_TYPE_IPV4 = 1, 280 SGID_TYPE_IPV6 = 2, 281 }; 282 283 struct mana_rnic_config_addr_req { 284 struct gdma_req_hdr hdr; 285 mana_handle_t adapter; 286 enum mana_ib_addr_op op; 287 enum sgid_entry_type sgid_type; 288 u8 ip_addr[16]; 289 }; /* HW Data */ 290 291 struct mana_rnic_config_addr_resp { 292 struct gdma_resp_hdr hdr; 293 }; /* HW Data */ 294 295 struct mana_rnic_config_mac_addr_req { 296 struct gdma_req_hdr hdr; 297 mana_handle_t adapter; 298 enum mana_ib_addr_op op; 299 u8 mac_addr[ETH_ALEN]; 300 u8 reserved[6]; 301 }; /* HW Data */ 302 303 struct mana_rnic_config_mac_addr_resp { 304 struct gdma_resp_hdr hdr; 305 }; /* HW Data */ 306 307 struct mana_rnic_create_cq_req { 308 struct gdma_req_hdr hdr; 309 mana_handle_t adapter; 310 u64 gdma_region; 311 u32 eq_id; 312 u32 doorbell_page; 313 }; /* HW Data */ 314 315 struct mana_rnic_create_cq_resp { 316 struct gdma_resp_hdr hdr; 317 mana_handle_t cq_handle; 318 u32 cq_id; 319 u32 reserved; 320 }; /* HW Data */ 321 322 struct mana_rnic_destroy_cq_req { 323 struct gdma_req_hdr hdr; 324 mana_handle_t adapter; 325 mana_handle_t cq_handle; 326 }; /* HW Data */ 327 328 struct mana_rnic_destroy_cq_resp { 329 struct gdma_resp_hdr hdr; 330 }; /* HW Data */ 331 332 enum mana_rnic_create_rc_flags { 333 MANA_RC_FLAG_NO_FMR = 2, 334 }; 335 336 struct mana_rnic_create_qp_req { 337 struct gdma_req_hdr hdr; 338 mana_handle_t adapter; 339 mana_handle_t pd_handle; 340 mana_handle_t send_cq_handle; 341 mana_handle_t recv_cq_handle; 342 u64 dma_region[MANA_RC_QUEUE_TYPE_MAX]; 343 u64 deprecated[2]; 344 u64 flags; 345 u32 doorbell_page; 346 u32 max_send_wr; 347 u32 max_recv_wr; 348 u32 max_send_sge; 349 u32 max_recv_sge; 350 u32 reserved; 351 }; /* HW Data */ 352 353 struct mana_rnic_create_qp_resp { 354 struct gdma_resp_hdr hdr; 355 mana_handle_t rc_qp_handle; 356 u32 queue_ids[MANA_RC_QUEUE_TYPE_MAX]; 357 u32 reserved; 358 }; /* HW Data*/ 359 360 struct mana_rnic_destroy_rc_qp_req { 361 struct gdma_req_hdr hdr; 362 mana_handle_t adapter; 363 mana_handle_t rc_qp_handle; 364 }; /* HW Data */ 365 366 struct mana_rnic_destroy_rc_qp_resp { 367 struct gdma_resp_hdr hdr; 368 }; /* HW Data */ 369 370 struct mana_rnic_create_udqp_req { 371 struct gdma_req_hdr hdr; 372 mana_handle_t adapter; 373 mana_handle_t pd_handle; 374 mana_handle_t send_cq_handle; 375 mana_handle_t recv_cq_handle; 376 u64 dma_region[MANA_UD_QUEUE_TYPE_MAX]; 377 u32 qp_type; 378 u32 doorbell_page; 379 u32 max_send_wr; 380 u32 max_recv_wr; 381 u32 max_send_sge; 382 u32 max_recv_sge; 383 }; /* HW Data */ 384 385 struct mana_rnic_create_udqp_resp { 386 struct gdma_resp_hdr hdr; 387 mana_handle_t qp_handle; 388 u32 queue_ids[MANA_UD_QUEUE_TYPE_MAX]; 389 }; /* HW Data*/ 390 391 struct mana_rnic_destroy_udqp_req { 392 struct gdma_req_hdr hdr; 393 mana_handle_t adapter; 394 mana_handle_t qp_handle; 395 }; /* HW Data */ 396 397 struct mana_rnic_destroy_udqp_resp { 398 struct gdma_resp_hdr hdr; 399 }; /* HW Data */ 400 401 struct mana_ib_ah_attr { 402 u8 src_addr[16]; 403 u8 dest_addr[16]; 404 u8 src_mac[ETH_ALEN]; 405 u8 dest_mac[ETH_ALEN]; 406 u8 src_addr_type; 407 u8 dest_addr_type; 408 u8 hop_limit; 409 u8 traffic_class; 410 u16 src_port; 411 u16 dest_port; 412 u32 reserved; 413 }; 414 415 struct mana_rnic_set_qp_state_req { 416 struct gdma_req_hdr hdr; 417 mana_handle_t adapter; 418 mana_handle_t qp_handle; 419 u64 attr_mask; 420 u32 qp_state; 421 u32 path_mtu; 422 u32 rq_psn; 423 u32 sq_psn; 424 u32 dest_qpn; 425 u32 max_dest_rd_atomic; 426 u32 retry_cnt; 427 u32 rnr_retry; 428 u32 min_rnr_timer; 429 u32 reserved; 430 struct mana_ib_ah_attr ah_attr; 431 }; /* HW Data */ 432 433 struct mana_rnic_set_qp_state_resp { 434 struct gdma_resp_hdr hdr; 435 }; /* HW Data */ 436 437 enum WQE_OPCODE_TYPES { 438 WQE_TYPE_UD_SEND = 0, 439 WQE_TYPE_UD_RECV = 8, 440 }; /* HW DATA */ 441 442 struct rdma_send_oob { 443 u32 wqe_type : 5; 444 u32 fence : 1; 445 u32 signaled : 1; 446 u32 solicited : 1; 447 u32 psn : 24; 448 449 u32 ssn_or_rqpn : 24; 450 u32 reserved1 : 8; 451 union { 452 struct { 453 u32 remote_qkey; 454 u32 immediate; 455 u32 reserved1; 456 u32 reserved2; 457 } ud_send; 458 }; 459 }; /* HW DATA */ 460 461 struct mana_rdma_cqe { 462 union { 463 struct { 464 u8 cqe_type; 465 u8 data[GDMA_COMP_DATA_SIZE - 1]; 466 }; 467 struct { 468 u32 cqe_type : 8; 469 u32 vendor_error : 9; 470 u32 reserved1 : 15; 471 u32 sge_offset : 5; 472 u32 tx_wqe_offset : 27; 473 } ud_send; 474 struct { 475 u32 cqe_type : 8; 476 u32 reserved1 : 24; 477 u32 msg_len; 478 u32 src_qpn : 24; 479 u32 reserved2 : 8; 480 u32 imm_data; 481 u32 rx_wqe_offset; 482 } ud_recv; 483 }; 484 }; /* HW DATA */ 485 486 struct mana_rnic_query_vf_cntrs_req { 487 struct gdma_req_hdr hdr; 488 mana_handle_t adapter; 489 }; /* HW Data */ 490 491 struct mana_rnic_query_vf_cntrs_resp { 492 struct gdma_resp_hdr hdr; 493 u64 requester_timeout; 494 u64 requester_oos_nak; 495 u64 requester_rnr_nak; 496 u64 responder_rnr_nak; 497 u64 responder_oos; 498 u64 responder_dup_request; 499 u64 requester_implicit_nak; 500 u64 requester_readresp_psn_mismatch; 501 u64 nak_inv_req; 502 u64 nak_access_err; 503 u64 nak_opp_err; 504 u64 nak_inv_read; 505 u64 responder_local_len_err; 506 u64 requestor_local_prot_err; 507 u64 responder_rem_access_err; 508 u64 responder_local_qp_err; 509 u64 responder_malformed_wqe; 510 u64 general_hw_err; 511 u64 requester_rnr_nak_retries_exceeded; 512 u64 requester_retries_exceeded; 513 u64 total_fatal_err; 514 u64 received_cnps; 515 u64 num_qps_congested; 516 u64 rate_inc_events; 517 u64 num_qps_recovered; 518 u64 current_rate; 519 u64 dup_rx_req; 520 u64 tx_bytes; 521 u64 rx_bytes; 522 u64 rx_send_req; 523 u64 rx_write_req; 524 u64 rx_read_req; 525 u64 tx_pkt; 526 u64 rx_pkt; 527 }; /* HW Data */ 528 529 struct mana_rnic_query_device_cntrs_req { 530 struct gdma_req_hdr hdr; 531 mana_handle_t adapter; 532 }; /* HW Data */ 533 534 struct mana_rnic_query_device_cntrs_resp { 535 struct gdma_resp_hdr hdr; 536 u32 sent_cnps; 537 u32 received_ecns; 538 u32 reserved1; 539 u32 received_cnp_count; 540 u32 qp_congested_events; 541 u32 qp_recovered_events; 542 u32 rate_inc_events; 543 u32 reserved2; 544 }; /* HW Data */ 545 546 static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev) 547 { 548 return mdev->gdma_dev->gdma_context; 549 } 550 551 static inline struct mana_ib_qp *mana_get_qp_ref(struct mana_ib_dev *mdev, 552 u32 qid, bool is_sq) 553 { 554 struct mana_ib_qp *qp; 555 unsigned long flag; 556 557 if (is_sq) 558 qid |= MANA_SENDQ_MASK; 559 560 xa_lock_irqsave(&mdev->qp_table_wq, flag); 561 qp = xa_load(&mdev->qp_table_wq, qid); 562 if (qp) 563 refcount_inc(&qp->refcount); 564 xa_unlock_irqrestore(&mdev->qp_table_wq, flag); 565 return qp; 566 } 567 568 static inline void mana_put_qp_ref(struct mana_ib_qp *qp) 569 { 570 if (refcount_dec_and_test(&qp->refcount)) 571 complete(&qp->free); 572 } 573 574 static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev) 575 { 576 return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB; 577 } 578 579 static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port) 580 { 581 struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); 582 struct gdma_context *gc = mdev_to_gc(mdev); 583 struct mana_context *mc = gc->mana.driver_data; 584 585 if (port < 1 || port > mc->num_ports) 586 return NULL; 587 return mc->ports[port - 1]; 588 } 589 590 static inline void copy_in_reverse(u8 *dst, const u8 *src, u32 size) 591 { 592 u32 i; 593 594 for (i = 0; i < size; i++) 595 dst[size - 1 - i] = src[i]; 596 } 597 598 int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq); 599 void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq); 600 601 int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, 602 mana_handle_t *gdma_region); 603 604 int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, 605 mana_handle_t *gdma_region, u64 virt); 606 607 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, 608 mana_handle_t gdma_region); 609 610 int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type, 611 struct mana_ib_queue *queue); 612 int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size, 613 struct mana_ib_queue *queue); 614 void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue); 615 616 struct ib_wq *mana_ib_create_wq(struct ib_pd *pd, 617 struct ib_wq_init_attr *init_attr, 618 struct ib_udata *udata); 619 620 int mana_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 621 u32 wq_attr_mask, struct ib_udata *udata); 622 623 int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata); 624 625 int mana_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table, 626 struct ib_rwq_ind_table_init_attr *init_attr, 627 struct ib_udata *udata); 628 629 int mana_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl); 630 631 struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags); 632 633 struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 634 u64 iova, int access_flags, 635 struct ib_dmah *dmah, 636 struct ib_udata *udata); 637 638 int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); 639 640 int mana_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr, 641 struct ib_udata *udata); 642 643 int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 644 int attr_mask, struct ib_udata *udata); 645 646 int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata); 647 648 int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port_id, 649 struct mana_ib_pd *pd, u32 doorbell_id); 650 void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd, 651 u32 port); 652 653 int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, 654 struct uverbs_attr_bundle *attrs); 655 656 int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); 657 658 int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); 659 int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); 660 661 int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext, 662 struct ib_udata *udata); 663 void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); 664 665 int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma); 666 667 int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num, 668 struct ib_port_immutable *immutable); 669 int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, 670 struct ib_udata *uhw); 671 int mana_ib_query_port(struct ib_device *ibdev, u32 port, 672 struct ib_port_attr *props); 673 int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index, 674 union ib_gid *gid); 675 676 void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext); 677 678 int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev); 679 int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev); 680 681 int mana_ib_create_eqs(struct mana_ib_dev *mdev); 682 683 void mana_ib_destroy_eqs(struct mana_ib_dev *mdev); 684 685 int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev); 686 687 int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev); 688 689 int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); 690 691 enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num); 692 693 int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context); 694 695 int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context); 696 697 int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac); 698 699 int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell); 700 701 int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq); 702 703 int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp, 704 struct ib_qp_init_attr *attr, u32 doorbell, u64 flags); 705 int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp); 706 707 int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp, 708 struct ib_qp_init_attr *attr, u32 doorbell, u32 type); 709 int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp); 710 711 int mana_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, 712 struct ib_udata *udata); 713 int mana_ib_destroy_ah(struct ib_ah *ah, u32 flags); 714 715 int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 716 const struct ib_recv_wr **bad_wr); 717 int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 718 const struct ib_send_wr **bad_wr); 719 720 int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); 721 int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); 722 723 struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length, 724 u64 iova, int fd, int mr_access_flags, 725 struct ib_dmah *dmah, 726 struct uverbs_attr_bundle *attrs); 727 #endif 728