/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE		= 0,
	GDMA_WR_OOB_IN_SGL	= BIT(0),
	GDMA_WR_PAD_BY_SGE0	= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
	GDMA_DEVICE_MANA_IB	= 3,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};
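/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * allocating one free resource id from a gdma_resource bitmap, showing how
 * the spinlock and bitmap fields above are meant to be used together.
 */
static inline int example_alloc_res_id(struct gdma_resource *r)
{
	unsigned long flags;
	u32 id;

	spin_lock_irqsave(&r->lock, flags);
	id = find_first_zero_bit(r->map, r->size);
	if (id < r->size)
		__set_bit(id, r->map);
	spin_unlock_irqrestore(&r->lock, flags);

	return id < r->size ? (int)id : -ENOSPC;
}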
union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
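/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * a caller typically zeroes a request, fills the common header with
 * mana_gd_init_req_hdr(), and then hands the buffers to
 * mana_gd_send_request() (declared later in this file). The general
 * req/resp pair stands in for a message-specific pair here.
 */
static inline void example_prepare_general_req(struct gdma_general_req *req)
{
	memset(req, 0, sizeof(*req));
	mana_gd_init_req_hdr(&req->hdr, GDMA_QUERY_MAX_RESOURCES,
			     sizeof(*req), sizeof(struct gdma_general_resp));
}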
/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE		64
#define GDMA_EQE_SIZE		16
#define GDMA_MAX_SQE_SIZE	512
#define GDMA_MAX_RQE_SIZE	256

#define GDMA_COMP_DATA_SIZE	0x3C

#define GDMA_EVENT_DATA_SIZE	0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE	32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
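/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * per the comment above, posting a WQE of 'wqe_size' bytes advances the
 * SQ/RQ 'head' by the WQE size expressed in 32-byte Basic Units, not in
 * bytes. The rounding assumes wqe_size may not yet be BU-aligned.
 */
static inline void example_advance_wq_head(struct gdma_queue *wq, u32 wqe_size)
{
	u32 wqe_size_in_bu = DIV_ROUND_UP(wqe_size, GDMA_WQE_BU_SIZE);

	wq->head += wqe_size_in_bu;
	/* The HW would then be notified via mana_gd_wq_ring_doorbell(). */
}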
struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device *dev;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct gdma_irq_context *irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	phys_addr_t phys_db_page_base;
	u32 db_page_size;
	int numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;

	/* Azure RDMA adapter */
	struct gdma_dev mana_ib;
};

#define MAX_NUM_GDMA_DEVICES 4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */
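/* Illustrative sketch (hypothetical helper, not part of the driver API) of
 * the owner-bits mechanism referenced in the 'head'/'tail' comment earlier:
 * each CQE carries a 3-bit owner value that advances every time the HW wraps
 * the queue. Comparing it with the value expected from the driver's head
 * counter tells whether the entry at 'head' is new.
 */
static inline bool example_cqe_is_new(const struct gdma_queue *cq)
{
	u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;
	const struct gdma_cqe *cqe;
	u32 expected_owner_bits;

	cqe = (const struct gdma_cqe *)cq->queue_mem_ptr +
	      cq->head % num_cqe;
	expected_owner_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;

	return cqe->cqe_info.owner_bits == expected_owner_bits;
}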
#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* GDMA driver capability flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */
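/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * filling in the mandatory fields of a GDMA_VERIFY_VF_DRIVER_VERSION
 * request from the protocol range and capability-flag macros above.
 */
static inline void example_fill_verify_ver_req(struct gdma_verify_ver_req *req)
{
	memset(req, 0, sizeof(*req));
	mana_gd_init_req_hdr(&req->hdr, GDMA_VERIFY_VF_DRIVER_VERSION,
			     sizeof(*req),
			     sizeof(struct gdma_verify_ver_resp));

	req->protocol_ver_min = GDMA_PROTOCOL_FIRST;
	req->protocol_ver_max = GDMA_PROTOCOL_LAST;

	req->gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
	req->gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
	req->gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
	req->gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
}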
/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doorbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */
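/* Illustrative sketch (hypothetical, not part of the driver API): the
 * flexible page_addr_list array means the number of page addresses carried
 * by a single GDMA_CREATE_DMA_REGION request is bounded by the maximum HWC
 * message size ('max_msg_size' is an assumed parameter here); any remaining
 * pages would be sent via GDMA_DMA_REGION_ADD_PAGES messages.
 */
static inline u32 example_pages_in_first_req(u32 page_count, u32 max_msg_size)
{
	u32 max_pages = (max_msg_size -
			 sizeof(struct gdma_create_dma_region_req)) /
			sizeof(u64);

	return min(page_count, max_pages);
}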
/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destroy_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST.
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */