/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef _GDMA_H
#define _GDMA_H

#include <sys/bus.h>
#include <sys/bus_dma.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/sx.h>

#include "gdma_util.h"
#include "shm_channel.h"

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

#define GDMA_BAR0		0

#define GDMA_IRQNAME_SZ		40

struct gdma_bus {
	bus_space_handle_t bar0_h;
	bus_space_tag_t bar0_t;
};

struct gdma_msix_entry {
	int entry;
	int vector;
};

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
};

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	struct mtx lock_spin;

	/* The bitmap size in bits. */
	uint32_t size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	uint64_t as_uint64;

	struct {
		uint64_t id : 24;
		uint64_t reserved : 8;
		uint64_t tail_ptr : 31;
		uint64_t arm : 1;
	} cq;

	struct {
		uint64_t id : 24;
		uint64_t wqe_cnt : 8;
		uint64_t tail_ptr : 32;
	} rq;

	struct {
		uint64_t id : 24;
		uint64_t reserved : 8;
		uint64_t tail_ptr : 32;
	} sq;

	struct {
		uint64_t id : 16;
		uint64_t reserved : 16;
		uint64_t tail_ptr : 31;
		uint64_t arm : 1;
	} eq;
}; /* HW DATA */

struct gdma_msg_hdr {
	uint32_t hdr_type;
	uint32_t msg_type;
	uint16_t msg_version;
	uint16_t hwc_msg_id;
	uint32_t msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			uint16_t type;
			uint16_t instance;
		};

		uint32_t as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	uint32_t activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	uint32_t activity_id;
	uint32_t status;
	uint32_t reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1		1

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE	0

static inline void
mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, uint32_t code,
    uint32_t req_size, uint32_t resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
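
/*
 * Usage sketch (illustrative only; 'gc' and 'err' stand for the caller's
 * struct gdma_context pointer and an int): a request embeds struct
 * gdma_req_hdr as its first member, both headers are sized from the
 * concrete request/response structures, and the message is posted through
 * the HWC with mana_gd_send_request(), declared near the end of this
 * header:
 *
 *	struct gdma_general_req req = {};
 *	struct gdma_query_max_resources_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *	    sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *	    sizeof(resp), &resp);
 *	if (err != 0 || resp.hdr.status != 0)
 *		... handle the failure ...
 *
 * A failure can show up either in the return value (transport problems) or
 * as a non-zero status in the response header.
 */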

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	uint64_t address;
	uint32_t mem_key;
	uint32_t size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	uint32_t num_sge;

	uint32_t inline_oob_size;
	const void *inline_oob_data;

	uint32_t flags;
	uint32_t client_data_unit;
};
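
/*
 * Sketch of how a caller might describe a single-buffer work request
 * (illustrative only; 'dma_addr', 'len', 'mkey', 'oob' and 'wq' stand for
 * caller-provided values).  The SGL and the optional inline OOB data are
 * copied into the WQE by mana_gd_post_work_request() and
 * mana_gd_post_and_ring(), declared near the end of this header:
 *
 *	struct gdma_sge sge = {
 *		.address = dma_addr,
 *		.mem_key = mkey,
 *		.size = len,
 *	};
 *	struct gdma_wqe_request wqe_req = {
 *		.sgl = &sge,
 *		.num_sge = 1,
 *		.inline_oob_size = sizeof(oob),
 *		.inline_oob_data = &oob,
 *		.flags = GDMA_WR_NONE,
 *	};
 *	struct gdma_posted_wqe_info wqe_info;
 *
 *	err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */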

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION	0

struct gdma_mem_info {
	device_t dev;

	bus_dma_tag_t dma_tag;
	bus_dmamap_t dma_map;
	bus_addr_t dma_handle;	/* Physical address */
	void *virt_addr;	/* Virtual address */
	uint64_t length;

	/* Allocated by the PF driver */
	uint64_t gdma_region;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE	8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	uint32_t pdid;
	uint32_t doorbell;
	uint32_t gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE	PAGE_SIZE

#define GDMA_CQE_SIZE		64
#define GDMA_EQE_SIZE		16
#define GDMA_MAX_SQE_SIZE	512
#define GDMA_MAX_RQE_SIZE	256

#define GDMA_COMP_DATA_SIZE	0x3C

#define GDMA_EVENT_DATA_SIZE	0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE	32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	uint32_t cqe_data[GDMA_COMP_DATA_SIZE / 4];
	uint32_t wq_num;
	bool is_sq;
};

struct gdma_event {
	uint32_t details[GDMA_EVENT_DATA_SIZE / 4];
	uint8_t type;
};

struct gdma_queue;

#define CQE_POLLING_BUFFER	512

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
    struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	uint32_t id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	uint32_t queue_size;

	bool monitor_avl_buf;

	uint32_t head;
	uint32_t tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			uint32_t log2_throttle_limit;

			struct task cleanup_task;
			struct taskqueue *cleanup_tq;
			int cpu;
			bool do_not_ring_db;

			int work_done;
			int budget;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			/* For CQ/EQ relationship */
			struct gdma_queue *parent;
		} cq;
	};
};
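
/*
 * For SQ/RQ, the head/tail bookkeeping above means the free space in a work
 * queue follows from the producer/consumer distance.  A rough sketch of the
 * arithmetic (the authoritative version is mana_gd_wq_avail_space(),
 * declared later in this header):
 *
 *	used_space  = (wq->head - wq->tail) * GDMA_WQE_BU_SIZE;
 *	avail_space = wq->queue_size - used_space;
 *
 * The unsigned subtraction stays correct after 'head' wraps around, as long
 * as the queue never holds more than queue_size bytes of outstanding WQEs.
 */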

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;

			/* Only used by the MANA device. */
			struct ifnet *ndev;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

struct mana_eq {
	struct gdma_queue *eq;
	struct gdma_comp cqe_poll[CQE_POLLING_BUFFER];
};

struct gdma_irq_context {
	struct gdma_msix_entry msix_e;
	struct resource *res;
	driver_intr_t *handler;
	void *arg;
	void *cookie;
	bool requested;
	int cpu;
	char name[GDMA_IRQNAME_SZ];
};

struct gdma_context {
	device_t dev;

	struct gdma_bus gd_bus;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_resource msix_resource;
	struct gdma_irq_context *irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct sx eq_test_event_sx;
	struct completion eq_test_event;
	uint32_t test_event_eq_id;

	struct resource *bar0;
	struct resource *msix;
	int msix_rid;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	uint32_t db_page_size;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;
};

#define MAX_NUM_GDMA_DEVICES	4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

uint8_t *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, uint32_t wqe_offset);
uint32_t mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_arm_cq(struct gdma_queue *cq);

struct gdma_wqe {
	uint32_t reserved :24;
	uint32_t last_vbytes :8;

	union {
		uint32_t flags;

		struct {
			uint32_t num_sge :8;
			uint32_t inline_oob_size_div4 :3;
			uint32_t client_oob_in_sgl :1;
			uint32_t reserved1 :4;
			uint32_t client_data_unit :14;
			uint32_t reserved2 :2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE	8
#define INLINE_OOB_LARGE_SIZE	24

#define MAX_TX_WQE_SIZE		512
#define MAX_RX_WQE_SIZE		256

struct gdma_cqe {
	uint32_t cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		uint32_t as_uint32;

		struct {
			uint32_t wq_num :24;
			uint32_t is_sq :1;
			uint32_t reserved :4;
			uint32_t owner_bits :3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS	3

#define GDMA_CQE_OWNER_MASK	((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT		1
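
/*
 * Sketch of the owner-bits scheme referenced above (illustrative; the CQ
 * polling code behind mana_gd_poll_cq() is the authority).  The 3-bit owner
 * field written by the hardware advances by one each time the ring wraps,
 * so software can tell a freshly written CQE from a stale one without a
 * separate consumer index:
 *
 *	cqe = &cqe_ring[cq->head % num_cqe];
 *	old_owner = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
 *	new_owner = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *
 *	owner_bits == old_owner: no new completion yet
 *	owner_bits == new_owner: consume the CQE, then advance cq->head
 *	anything else: the CQ overflowed and completions were lost
 */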

#define GDMA_EQE_OWNER_BITS	3

union gdma_eqe_info {
	uint32_t as_uint32;

	struct {
		uint32_t type : 8;
		uint32_t reserved1 : 8;
		uint32_t client_id : 2;
		uint32_t reserved2 : 11;
		uint32_t owner_bits : 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK	((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries)	(1UL << (log2_num_entries))

struct gdma_eqe {
	uint32_t details[GDMA_EVENT_DATA_SIZE / 4];
	uint32_t eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

struct gdma_posted_wqe_info {
	uint32_t wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	uint32_t queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	uint64_t protocol_ver_min;
	uint64_t protocol_ver_max;
	uint64_t drv_cap_flags1;
	uint64_t drv_cap_flags2;
	uint64_t drv_cap_flags3;
	uint64_t drv_cap_flags4;

	/* Advisory fields */
	uint64_t drv_ver;
	uint32_t os_type;	/* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	uint32_t reserved;
	uint32_t os_ver_major;
	uint32_t os_ver_minor;
	uint32_t os_ver_build;
	uint32_t os_ver_platform;
	uint64_t reserved_2;
	uint8_t os_ver_str1[128];
	uint8_t os_ver_str2[128];
	uint8_t os_ver_str3[128];
	uint8_t os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	uint64_t gdma_protocol_ver;
	uint64_t pf_cap_flags1;
	uint64_t pf_cap_flags2;
	uint64_t pf_cap_flags3;
	uint64_t pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	uint32_t status;
	uint32_t max_sq;
	uint32_t max_rq;
	uint32_t max_cq;
	uint32_t max_eq;
	uint32_t max_db;
	uint32_t max_mst;
	uint32_t max_cq_mod_ctx;
	uint32_t max_mod_cq;
	uint32_t max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	uint32_t num_of_devs;
	uint32_t reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	uint32_t pdid;
	uint32_t gpa_mkey;
	uint32_t db_id;
}; /* HW DATA */

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	uint32_t type;
	uint32_t reserved1;
	uint32_t pdid;
	uint32_t doolbell_id;
	uint64_t gdma_region;
	uint32_t reserved2;
	uint32_t queue_size;
	uint32_t log2_throttle_limit;
	uint32_t eq_pci_msix_index;
	uint32_t cq_mod_ctx_id;
	uint32_t cq_parent_eq_id;
	uint8_t rq_drop_on_overrun;
	uint8_t rq_err_on_wqe_overflow;
	uint8_t rq_chain_rec_wqes;
	uint8_t sq_hw_db;
	uint32_t reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	uint32_t queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	uint32_t type;
	uint32_t queue_index;
	uint32_t alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	uint64_t length;

	/* The offset in the first page */
	uint32_t offset_in_page;

	/* enum gdma_page_type */
	uint32_t gdma_page_type;

	/* The total number of pages */
	uint32_t page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	uint32_t page_addr_list_len;
	uint64_t page_addr_list[];
}; /* HW DATA */
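
/*
 * Worked example (numbers are illustrative only): registering a 1 MB region
 * backed by 4 KB pages gives page_count = 256.  If the HWC message carrying
 * this request only has room for, say, 128 addresses, then
 * page_addr_list_len = 128 and the remaining 128 addresses are supplied by
 * follow-up GDMA_DMA_REGION_ADD_PAGES messages, which reference the
 * gdma_region handle returned in struct gdma_create_dma_region_resp below.
 */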

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	uint64_t gdma_region;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	uint64_t gdma_region;

	uint32_t page_addr_list_len;
	uint32_t reserved3;

	uint64_t page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	uint64_t gdma_region;
}; /* HW DATA */

int mana_gd_verify_vf_version(device_t dev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
    const struct gdma_wqe_request *wqe_req,
    struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
    const struct gdma_wqe_request *wqe,
    struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(uint32_t res_avil, struct gdma_resource *r,
    const char *lock_name);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
    struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
    struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

void mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);

int mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp);
#endif /* _GDMA_H */