/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105
#define GDMA_STATUS_CMD_UNSUPPORTED	0xffffffff

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_FPGA_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
	GDMA_EQE_HWC_SOC_SERVICE	= 134,
	GDMA_EQE_HWC_RESET_REQUEST	= 135,
	GDMA_EQE_RNIC_QP_FATAL		= 176,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
	GDMA_DEVICE_MANA_IB	= 3,
};

enum gdma_service_type {
	GDMA_SERVICE_TYPE_NONE		= 0,
	GDMA_SERVICE_TYPE_RDMA_SUSPEND	= 1,
	GDMA_SERVICE_TYPE_RDMA_RESUME	= 2,
};

struct mana_service_work {
	struct work_struct work;
	struct gdma_dev *gdma_dev;
	enum gdma_service_type event;
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};
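/* Illustrative sketch (not part of the driver API): a caller that owns a
 * gdma_resource typically claims a free slot under the lock, along these
 * lines ('r' is assumed to point to an initialized gdma_resource):
 *
 *	unsigned long flags;
 *	u32 id;
 *
 *	spin_lock_irqsave(&r->lock, flags);
 *	id = find_first_zero_bit(r->map, r->size);
 *	if (id < r->size)
 *		__set_bit(id, r->map);
 *	spin_unlock_irqrestore(&r->lock, flags);
 *
 * The bitmap itself is allocated and freed with mana_gd_alloc_res_map() and
 * mana_gd_free_res_map(), declared later in this header.
 */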
union gdma_doorbell_entry {
	u64	as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
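/* Illustrative sketch (not part of the driver API): a typical HWC request
 * pairs mana_gd_init_req_hdr() with mana_gd_send_request() (declared later
 * in this header). For example, querying the maximum resources, assuming
 * 'gc' is the adapter's struct gdma_context:
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		return err ? err : -EPROTO;
 */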
/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
	bool is_suspended;
	bool rdma_teardown;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

#define GDMA_CQE_SIZE		64
#define GDMA_EQE_SIZE		16
#define GDMA_MAX_SQE_SIZE	512
#define GDMA_MAX_RQE_SIZE	256

#define GDMA_COMP_DATA_SIZE	0x3C

#define GDMA_EVENT_DATA_SIZE	0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE	32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue	*eq;
	struct dentry		*mana_eq_debugfs;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
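/* Illustrative sketch (not part of the driver API): posting a single-SGE
 * work request with mana_gd_post_and_ring() (declared later in this header),
 * assuming 'wq' is an SQ/RQ gdma_queue and 'dma', 'len' and 'mkey' describe
 * a buffer that has already been DMA-mapped:
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_wqe_request wqe_req = {};
 *	struct gdma_sge sge = {};
 *	int err;
 *
 *	sge.address = dma;
 *	sge.mem_key = mkey;
 *	sge.size    = len;
 *
 *	wqe_req.sgl = &sge;
 *	wqe_req.num_sge = 1;
 *	wqe_req.flags = GDMA_WR_NONE;
 *
 *	err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */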
/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device		*dev;
	struct dentry		*mana_pci_debugfs;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct xarray		irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;
	bool			in_service;

	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u32			db_page_size;
	int			numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;

	/* Azure RDMA adapter */
	struct gdma_dev		mana_ib;

	u64			pf_cap_flags1;

	struct workqueue_struct *service_wq;
};

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
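/* Illustrative sketch (not part of the driver API): creating a completion
 * queue with mana_gd_create_mana_wq_cq(), assuming 'gd' is the MANA
 * gdma_dev, 'eq' is an already created parent EQ, and 'my_cq_handler' /
 * 'my_ctx' are a hypothetical gdma_cq_callback and context supplied by the
 * caller:
 *
 *	struct gdma_queue_spec spec = {};
 *	struct gdma_queue *cq;
 *	int err;
 *
 *	spec.type = GDMA_CQ;
 *	spec.monitor_avl_buf = false;
 *	spec.queue_size = MANA_MIN_QSIZE;
 *	spec.cq.callback = my_cq_handler;
 *	spec.cq.context = my_ctx;
 *	spec.cq.parent_eq = eq;
 *
 *	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq);
 */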
int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1
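/* Illustrative sketch (not part of the driver API) of the owner-bits scheme
 * described in the gdma_queue comment earlier in this header: the consumer
 * compares a CQE's owner bits against the value expected for the current
 * pass over the ring. Assuming 'cq' is a CQ gdma_queue whose buffer holds
 * 'num_cqe' entries:
 *
 *	struct gdma_cqe *ring = cq->queue_mem_ptr;
 *	struct gdma_cqe *cqe = &ring[cq->head % num_cqe];
 *	u32 old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
 *	u32 new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *
 *	if (cqe->cqe_info.owner_bits == old_bits)
 *		return 0;	// queue is empty, nothing new
 *	if (cqe->cqe_info.owner_bits != new_bits)
 *		return -1;	// overflow: HW has lapped the driver
 *	cq->head++;		// consume the entry
 *
 * This is roughly what mana_gd_poll_cq() does internally; callers normally
 * use that helper rather than touching the ring directly.
 */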
#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

/* Driver supports dynamic MSI-X vector allocation */
#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)

/* Driver can self reset on EQE notification */
#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)

/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
	 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};
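/* Illustrative sketch (not part of the driver API): requesting one doorbell
 * page with GDMA_ALLOCATE_RESOURCE_RANGE, assuming 'gc' is the adapter's
 * struct gdma_context:
 *
 *	struct gdma_allocate_resource_range_resp resp = {};
 *	struct gdma_allocate_resource_range_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
 *			     sizeof(req), sizeof(resp));
 *	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
 *	req.num_resources = 1;
 *	req.alignment = 1;
 *	req.allocated_resources = 0;	// have GDMA start searching from 0
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	// On success, resp.allocated_resources holds the first allocated ID.
 */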
/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
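/* Illustrative sketch (not part of the driver API): the request above ends
 * in a flexible page-address array, so it is sized and filled dynamically.
 * Assuming 'num_pages' MANA_PAGE_SIZE-sized pages whose DMA addresses are
 * in a hypothetical 'page_dma[]' array:
 *
 *	struct gdma_create_dma_region_req *req;
 *	u32 req_size = struct_size(req, page_addr_list, num_pages);
 *	int i;
 *
 *	req = kzalloc(req_size, GFP_KERNEL);
 *	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION, req_size,
 *			     sizeof(struct gdma_create_dma_region_resp));
 *	req->length = (u64)num_pages * MANA_PAGE_SIZE;
 *	req->offset_in_page = 0;
 *	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
 *	req->page_count = num_pages;
 *	req->page_addr_list_len = num_pages;
 *	for (i = 0; i < num_pages; i++)
 *		req->page_addr_list[i] = page_dma[i];
 *
 * If the full list does not fit in one request (e.g. it would exceed the HWC
 * message size), only a prefix is sent here and the remaining addresses
 * follow via GDMA_DMA_REGION_ADD_PAGES, as noted in the struct comment.
 */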
struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
	GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/*
	 * Guest Physical Address - MRs of this type allow access
	 * to any DMA-mapped memory using bus-logical address
	 */
	GDMA_MR_TYPE_GPA = 1,
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
	/* Guest zero-based address MRs */
	GDMA_MR_TYPE_ZBVA = 4,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);
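/* Illustrative sketch (not part of the driver API): describing a GVA memory
 * region with gdma_create_mr_params before it is translated into a
 * gdma_create_mr_request, assuming 'pd_handle' and 'dma_region_handle' were
 * returned by earlier GDMA_CREATE_PD / GDMA_CREATE_DMA_REGION requests and
 * 'user_va' is a hypothetical caller-provided virtual address:
 *
 *	struct gdma_create_mr_params params = {};
 *
 *	params.pd_handle = pd_handle;
 *	params.mr_type = GDMA_MR_TYPE_GVA;
 *	params.gva.dma_region_handle = dma_region_handle;
 *	params.gva.virtual_address = user_va;
 *	params.gva.access_flags = GDMA_ACCESS_FLAG_LOCAL_READ |
 *				  GDMA_ACCESS_FLAG_LOCAL_WRITE;
 */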
void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);

int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
int mana_gd_resume(struct pci_dev *pdev);

bool mana_need_log(struct gdma_context *gc, int err);

#endif /* _GDMA_H */