/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES 0x00000105
#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION = 1,
	GDMA_QUERY_MAX_RESOURCES = 2,
	GDMA_LIST_DEVICES = 3,
	GDMA_REGISTER_DEVICE = 4,
	GDMA_DEREGISTER_DEVICE = 5,
	GDMA_GENERATE_TEST_EQE = 10,
	GDMA_CREATE_QUEUE = 12,
	GDMA_DISABLE_QUEUE = 13,
	GDMA_ALLOCATE_RESOURCE_RANGE = 22,
	GDMA_DESTROY_RESOURCE_RANGE = 24,
	GDMA_CREATE_DMA_REGION = 25,
	GDMA_DMA_REGION_ADD_PAGES = 26,
	GDMA_DESTROY_DMA_REGION = 27,
	GDMA_CREATE_PD = 29,
	GDMA_DESTROY_PD = 30,
	GDMA_CREATE_MR = 31,
	GDMA_DESTROY_MR = 32,
	GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE 27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE = 0,
	GDMA_WR_OOB_IN_SGL = BIT(0),
	GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION = 3,
	GDMA_EQE_TEST_EVENT = 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
	GDMA_EQE_HWC_INIT_DATA = 130,
	GDMA_EQE_HWC_INIT_DONE = 131,
	GDMA_EQE_HWC_FPGA_RECONFIG = 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
	GDMA_EQE_HWC_SOC_SERVICE = 134,
	GDMA_EQE_HWC_RESET_REQUEST = 135,
	GDMA_EQE_RNIC_QP_FATAL = 176,
};

enum {
	GDMA_DEVICE_NONE = 0,
	GDMA_DEVICE_HWC = 1,
	GDMA_DEVICE_MANA = 2,
	GDMA_DEVICE_MANA_IB = 3,
};

enum gdma_service_type {
	GDMA_SERVICE_TYPE_NONE = 0,
	GDMA_SERVICE_TYPE_RDMA_SUSPEND = 1,
	GDMA_SERVICE_TYPE_RDMA_RESUME = 2,
};

struct mana_service_work {
	struct work_struct work;
	struct gdma_dev *gdma_dev;
	enum gdma_service_type event;
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};
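
/* Illustrative sketch (not part of the hardware interface): one way a caller
 * might use struct gdma_resource to hand out small resource indices, roughly
 * in the spirit of how the HWC tracks in-flight message slots. The bitmap
 * size and the "claim one index" flow below are placeholders;
 * mana_gd_alloc_res_map()/mana_gd_free_res_map() (declared later in this
 * header) and the <linux/bitmap.h> helpers are the real APIs.
 *
 *	struct gdma_resource r;
 *	unsigned long flags;
 *	u32 index;
 *	int err;
 *
 *	err = mana_gd_alloc_res_map(64, &r);	// a 64-entry bitmap
 *	if (err)
 *		return err;
 *
 *	spin_lock_irqsave(&r.lock, flags);
 *	index = find_first_zero_bit(r.map, r.size);
 *	if (index < r.size)
 *		bitmap_set(r.map, index, 1);	// claim one index
 *	spin_unlock_irqrestore(&r.lock, flags);
 *
 *	// Later, release the index with bitmap_clear(r.map, index, 1) under
 *	// the same lock, and tear down with mana_gd_free_res_map(&r).
 */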

union gdma_doorbell_entry {
	u64 as_uint64;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} cq;

	struct {
		u64 id : 24;
		u64 wqe_cnt : 8;
		u64 tail_ptr : 32;
	} rq;

	struct {
		u64 id : 24;
		u64 reserved : 8;
		u64 tail_ptr : 32;
	} sq;

	struct {
		u64 id : 16;
		u64 reserved : 16;
		u64 tail_ptr : 31;
		u64 arm : 1;
	} eq;
}; /* HW DATA */

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
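
/* Illustrative sketch: management-path messages embed a struct gdma_req_hdr /
 * struct gdma_resp_hdr pair, get their headers filled in by
 * mana_gd_init_req_hdr(), and are posted with mana_gd_send_request()
 * (declared later in this header). For example, asking the device to
 * generate a test EQE might look roughly like this; eq_id and the error
 * handling are placeholders.
 *
 *	struct gdma_generate_test_event_req req = {};
 *	struct gdma_general_resp resp = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
 *			     sizeof(req), sizeof(resp));
 *	req.queue_index = eq_id;	// caller-provided EQ number
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		// handle the failure; resp.hdr.status is the device's status
 *		;
 */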

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
	bool is_suspended;
	bool rdma_teardown;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID UINT_MAX
#define INVALID_DOORBELL UINT_MAX
#define INVALID_MEM_KEY UINT_MAX
#define INVALID_QUEUE_ID UINT_MAX
#define INVALID_PCI_MSIX_INDEX UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8 type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
	struct dentry *mana_eq_debugfs;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
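
/* Illustrative sketch: posting a WQE to an SQ/RQ goes through
 * struct gdma_wqe_request plus mana_gd_post_work_request() or
 * mana_gd_post_and_ring() (both declared later in this header). The buffer
 * names, sizes and error handling below are placeholders.
 *
 *	struct gdma_posted_wqe_info wqe_info = {};
 *	struct gdma_wqe_request wqe_req = {};
 *	struct gdma_sge sge;
 *	int err;
 *
 *	sge.address = dma_addr;		// DMA address of the data buffer
 *	sge.mem_key = gd->gpa_mkey;
 *	sge.size = len;
 *
 *	wqe_req.sgl = &sge;
 *	wqe_req.num_sge = 1;
 *	wqe_req.inline_oob_size = INLINE_OOB_SMALL_SIZE;
 *	wqe_req.inline_oob_data = &oob;	// caller-defined OOB blob
 *
 *	err = mana_gd_post_and_ring(queue, &wqe_req, &wqe_info);
 *
 * On success the queue's 'head' has advanced by wqe_info.wqe_size_in_bu
 * 32-byte Basic Units; 'tail' advances later, when the corresponding
 * completion is processed.
 */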

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;
		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device *dev;
	struct dentry *mana_pci_debugfs;

	/* Per-vPort max number of queues */
	unsigned int max_num_queues;
	unsigned int max_num_msix;
	unsigned int num_msix_usable;
	struct xarray irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int max_num_cqs;
	struct gdma_queue **cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex eq_test_event_mutex;
	struct completion eq_test_event;
	u32 test_event_eq_id;

	bool is_pf;
	bool in_service;

	phys_addr_t bar0_pa;
	void __iomem *bar0_va;
	void __iomem *shm_base;
	void __iomem *db_page_base;
	phys_addr_t phys_db_page_base;
	u32 db_page_size;
	int numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev hwc;

	/* Azure network adapter */
	struct gdma_dev mana;

	/* Azure RDMA adapter */
	struct gdma_dev mana_ib;

	u64 pf_cap_flags1;

	struct workqueue_struct *service_wq;
};

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
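
/* Illustrative sketch: queues are created from a struct gdma_queue_spec.
 * An EQ, for instance, is described by its size, callback and throttle limit
 * and then handed to one of the creation helpers above. The sizes, the
 * throttle value and the callback/context names here are placeholders, not
 * part of this header.
 *
 *	struct gdma_queue_spec spec = {};
 *	struct gdma_queue *eq;
 *	int err;
 *
 *	spec.type = GDMA_EQ;
 *	spec.monitor_avl_buf = false;
 *	spec.queue_size = 8 * MANA_PAGE_SIZE;	// example size
 *	spec.eq.callback = my_eq_handler;	// a gdma_eq_callback
 *	spec.eq.context = my_context;
 *	spec.eq.log2_throttle_limit = 4;	// example throttle limit
 *
 *	err = mana_gd_create_mana_eq(gd, &spec, &eq);
 *	...
 *	mana_gd_destroy_queue(gd->gdma_context, eq);
 */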

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
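
/* Illustrative sketch: a completion handler typically drains the CQ with
 * mana_gd_poll_cq() into a small gdma_comp array and then re-arms the CQ
 * with mana_gd_ring_cq() once nothing is left to process. The batch size
 * below is an arbitrary example; SET_ARM_BIT is defined later in this header.
 *
 *	struct gdma_comp comps[8];
 *	int i, n;
 *
 *	n = mana_gd_poll_cq(cq, comps, ARRAY_SIZE(comps));
 *	for (i = 0; i < n; i++) {
 *		// comps[i].wq_num / comps[i].is_sq identify the WQ that
 *		// generated the completion; comps[i].cqe_data carries the
 *		// device-specific payload.
 *	}
 *
 *	mana_gd_ring_cq(cq, SET_ARM_BIT);
 */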

struct gdma_wqe {
	u32 reserved :24;
	u32 last_vbytes :8;

	union {
		u32 flags;

		struct {
			u32 num_sge :8;
			u32 inline_oob_size_div4 :3;
			u32 client_oob_in_sgl :1;
			u32 reserved1 :4;
			u32 client_data_unit :14;
			u32 reserved2 :2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MANA_MAX_TX_WQE_SGL_ENTRIES 30

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
		sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
		sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \
		sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num : 24;
			u32 is_sq : 1;
			u32 reserved : 4;
			u32 owner_bits : 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type : 8;
		u32 reserved1 : 8;
		u32 client_id : 2;
		u32 reserved2 : 11;
		u32 owner_bits : 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
#define GDMA_PF_REG_SHM_OFF 0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1 = 1,
	GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

/* Driver supports dynamic MSI-X vector allocation */
#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)

/* Driver can self reset on EQE notification */
#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)

/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)

/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
#define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)

/* Driver can send HWC periodically to query stats */
#define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
	 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
	 GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
	 GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};
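
/* Illustrative sketch: GDMA_ALLOCATE_RESOURCE_RANGE uses the request/response
 * pair above. Allocating a single doorbell page, for example, could look
 * roughly like this; the alignment value and the error handling are
 * placeholders.
 *
 *	struct gdma_allocate_resource_range_resp resp = {};
 *	struct gdma_allocate_resource_range_req req = {};
 *	u32 doorbell_page;
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
 *			     sizeof(req), sizeof(resp));
 *	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
 *	req.num_resources = 1;
 *	req.alignment = 1;	// example alignment
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (!err && !resp.hdr.status)
 *		doorbell_page = resp.allocated_resources;
 */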

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8 rq_drop_on_overrun;
	u8 rq_err_on_wqe_overflow;
	u8 rq_chain_rec_wqes;
	u8 sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */
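
/* Illustrative sketch: registering memory with the PF driver means describing
 * it page by page through GDMA_CREATE_DMA_REGION. For a physically contiguous
 * buffer already mapped for DMA (see struct gdma_mem_info), the request could
 * be built roughly as below; the allocation flags and error handling are
 * placeholders.
 *
 *	struct gdma_create_dma_region_resp resp = {};
 *	struct gdma_create_dma_region_req *req;
 *	unsigned int num_pages, i;
 *	u32 req_size;
 *
 *	num_pages = MANA_PAGE_ALIGN(gmi->length) >> MANA_PAGE_SHIFT;
 *	req_size = struct_size(req, page_addr_list, num_pages);
 *	req = kzalloc(req_size, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
 *			     req_size, sizeof(resp));
 *	req->length = gmi->length;
 *	req->offset_in_page = 0;
 *	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
 *	req->page_count = num_pages;
 *	req->page_addr_list_len = num_pages;
 *	for (i = 0; i < num_pages; i++)
 *		req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
 *
 *	if (!mana_gd_send_request(gc, req_size, req, sizeof(resp), &resp) &&
 *	    !resp.hdr.status)
 *		gmi->dma_region_handle = resp.dma_region_handle;
 *	kfree(req);
 */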

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
	GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/*
	 * Guest Physical Address - MRs of this type allow access
	 * to any DMA-mapped memory using bus-logical address
	 */
	GDMA_MR_TYPE_GPA = 1,
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
	/* Guest zero-based address MRs */
	GDMA_MR_TYPE_ZBVA = 4,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
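
/* Illustrative sketch: an RDMA client creates a memory region by pointing a
 * GDMA_CREATE_MR request at an existing DMA region and protection domain.
 * The GVA case could look roughly like this; pd_handle, dma_region_handle,
 * iova and the chosen access flags are placeholders taken from earlier steps.
 *
 *	struct gdma_create_mr_response resp = {};
 *	struct gdma_create_mr_request req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
 *			     sizeof(resp));
 *	req.pd_handle = pd_handle;
 *	req.mr_type = GDMA_MR_TYPE_GVA;
 *	req.gva.dma_region_handle = dma_region_handle;
 *	req.gva.virtual_address = iova;
 *	req.gva.access_flags = GDMA_ACCESS_FLAG_LOCAL_READ |
 *			       GDMA_ACCESS_FLAG_LOCAL_WRITE;
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (!err && !resp.hdr.status) {
 *		// resp.mr_handle, resp.lkey and resp.rkey identify the new MR
 *	}
 */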

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);

int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
int mana_gd_resume(struct pci_dev *pdev);

bool mana_need_log(struct gdma_context *gc, int err);

#endif /* _GDMA_H */