/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES 0x00000105
#define GDMA_STATUS_CMD_UNSUPPORTED 0xffffffff

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
        GDMA_VERIFY_VF_DRIVER_VERSION = 1,
        GDMA_QUERY_MAX_RESOURCES = 2,
        GDMA_LIST_DEVICES = 3,
        GDMA_REGISTER_DEVICE = 4,
        GDMA_DEREGISTER_DEVICE = 5,
        GDMA_GENERATE_TEST_EQE = 10,
        GDMA_CREATE_QUEUE = 12,
        GDMA_DISABLE_QUEUE = 13,
        GDMA_ALLOCATE_RESOURCE_RANGE = 22,
        GDMA_DESTROY_RESOURCE_RANGE = 24,
        GDMA_CREATE_DMA_REGION = 25,
        GDMA_DMA_REGION_ADD_PAGES = 26,
        GDMA_DESTROY_DMA_REGION = 27,
        GDMA_CREATE_PD = 29,
        GDMA_DESTROY_PD = 30,
        GDMA_CREATE_MR = 31,
        GDMA_DESTROY_MR = 32,
        GDMA_QUERY_HWC_TIMEOUT = 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE 27

enum gdma_queue_type {
        GDMA_INVALID_QUEUE,
        GDMA_SQ,
        GDMA_RQ,
        GDMA_CQ,
        GDMA_EQ,
};

enum gdma_work_request_flags {
        GDMA_WR_NONE = 0,
        GDMA_WR_OOB_IN_SGL = BIT(0),
        GDMA_WR_PAD_BY_SGE0 = BIT(1),
};

enum gdma_eqe_type {
        GDMA_EQE_COMPLETION = 3,
        GDMA_EQE_TEST_EVENT = 64,
        GDMA_EQE_HWC_INIT_EQ_ID_DB = 129,
        GDMA_EQE_HWC_INIT_DATA = 130,
        GDMA_EQE_HWC_INIT_DONE = 131,
        GDMA_EQE_HWC_FPGA_RECONFIG = 132,
        GDMA_EQE_HWC_SOC_RECONFIG_DATA = 133,
        GDMA_EQE_HWC_SOC_SERVICE = 134,
        GDMA_EQE_HWC_RESET_REQUEST = 135,
        GDMA_EQE_RNIC_QP_FATAL = 176,
};

enum {
        GDMA_DEVICE_NONE = 0,
        GDMA_DEVICE_HWC = 1,
        GDMA_DEVICE_MANA = 2,
        GDMA_DEVICE_MANA_IB = 3,
};

enum gdma_service_type {
        GDMA_SERVICE_TYPE_NONE = 0,
        GDMA_SERVICE_TYPE_RDMA_SUSPEND = 1,
        GDMA_SERVICE_TYPE_RDMA_RESUME = 2,
};

struct mana_service_work {
        struct work_struct work;
        struct gdma_dev *gdma_dev;
        enum gdma_service_type event;
};

struct gdma_resource {
        /* Protect the bitmap */
        spinlock_t lock;

        /* The bitmap size in bits. */
        u32 size;

        /* The bitmap tracks the resources. */
        unsigned long *map;
};

union gdma_doorbell_entry {
        u64 as_uint64;

        struct {
                u64 id : 24;
                u64 reserved : 8;
                u64 tail_ptr : 31;
                u64 arm : 1;
        } cq;

        struct {
                u64 id : 24;
                u64 wqe_cnt : 8;
                u64 tail_ptr : 32;
        } rq;

        struct {
                u64 id : 24;
                u64 reserved : 8;
                u64 tail_ptr : 32;
        } sq;

        struct {
                u64 id : 16;
                u64 reserved : 16;
                u64 tail_ptr : 31;
                u64 arm : 1;
        } eq;
}; /* HW DATA */
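
/* Illustrative sketch (not part of this header): composing a CQ doorbell
 * value from the union above before writing it to the doorbell page. The
 * field usage follows the layout documented here; the target address, the
 * 'cq_head' bookkeeping and the dma_wmb()/writeq() calls are assumptions
 * made for the example only.
 *
 *      union gdma_doorbell_entry e = {};
 *
 *      e.cq.id = cq_id;                // which CQ is being rung
 *      e.cq.tail_ptr = cq_head;        // how far CQEs have been processed
 *      e.cq.arm = 1;                   // ask the HW to raise an interrupt
 *
 *      dma_wmb();                      // order CQE reads before the MMIO write
 *      writeq(e.as_uint64, db_page_va + cq_doorbell_offset);
 */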

struct gdma_msg_hdr {
        u32 hdr_type;
        u32 msg_type;
        u16 msg_version;
        u16 hwc_msg_id;
        u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
        union {
                struct {
                        u16 type;
                        u16 instance;
                };

                u32 as_uint32;
        };
}; /* HW DATA */

struct gdma_req_hdr {
        struct gdma_msg_hdr req;
        struct gdma_msg_hdr resp; /* The expected response */
        struct gdma_dev_id dev_id;
        u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
        struct gdma_msg_hdr response;
        struct gdma_dev_id dev_id;
        u32 activity_id;
        u32 status;
        u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
        struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

struct gdma_general_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
                                        u32 req_size, u32 resp_size)
{
        hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
        hdr->req.msg_type = code;
        hdr->req.msg_version = GDMA_MESSAGE_V1;
        hdr->req.msg_size = req_size;

        hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
        hdr->resp.msg_type = code;
        hdr->resp.msg_version = GDMA_MESSAGE_V1;
        hdr->resp.msg_size = resp_size;
}
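
/* Illustrative sketch of the request/response pattern built on the helper
 * above: initialize the two headers, send the message over the HWC with
 * mana_gd_send_request() (declared later in this file), then check the
 * response status. Error handling is abbreviated and 'gc' is assumed to be
 * a valid struct gdma_context.
 *
 *      struct gdma_query_max_resources_resp resp = {};
 *      struct gdma_general_req req = {};
 *      int err;
 *
 *      mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *                           sizeof(req), sizeof(resp));
 *
 *      err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *      if (err || resp.hdr.status)
 *              return err ? err : -EPROTO;
 */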

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
        u64 address;
        u32 mem_key;
        u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
        struct gdma_sge *sgl;
        u32 num_sge;

        u32 inline_oob_size;
        const void *inline_oob_data;

        u32 flags;
        u32 client_data_unit;
};

enum gdma_page_type {
        GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
        struct device *dev;

        dma_addr_t dma_handle;
        void *virt_addr;
        u64 length;

        /* Allocated by the PF driver */
        u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
        struct gdma_context *gdma_context;

        struct gdma_dev_id dev_id;

        u32 pdid;
        u32 doorbell;
        u32 gpa_mkey;

        /* GDMA driver specific pointer */
        void *driver_data;

        struct auxiliary_device *adev;
        bool is_suspended;
        bool rdma_teardown;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
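
/* For example, the number of MANA pages backing a buffer of 'length' bytes
 * that starts on a MANA page boundary can be derived from the helpers above
 * (an illustrative calculation, not a helper provided by this header):
 *
 *      unsigned int num_pages = MANA_PFN(MANA_PAGE_ALIGN(length));
 *
 * e.g. a 10000-byte buffer rounds up to 12288 bytes, i.e. three 4 KiB pages.
 */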

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32
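
/* Illustrative size calculation under the rule above (struct gdma_wqe,
 * struct gdma_sge and INLINE_OOB_SMALL_SIZE are defined later in this file):
 * an 8-byte WQE header, an 8-byte inline OOB area and two 16-byte SGEs total
 * 48 bytes, which rounds up to 64 bytes, i.e. 2 Basic Units:
 *
 *      wqe_size = ALIGN(sizeof(struct gdma_wqe) + INLINE_OOB_SMALL_SIZE +
 *                       2 * sizeof(struct gdma_sge), GDMA_WQE_BU_SIZE);
 *      wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;   // == 2
 */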

#define INVALID_PDID UINT_MAX
#define INVALID_DOORBELL UINT_MAX
#define INVALID_MEM_KEY UINT_MAX
#define INVALID_QUEUE_ID UINT_MAX
#define INVALID_PCI_MSIX_INDEX UINT_MAX

struct gdma_comp {
        u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
        u32 wq_num;
        bool is_sq;
};

struct gdma_event {
        u32 details[GDMA_EVENT_DATA_SIZE / 4];
        u8 type;
};

struct gdma_queue;

struct mana_eq {
        struct gdma_queue *eq;
        struct dentry *mana_eq_debugfs;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
                              struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 *
 * See the illustrative sketch following struct gdma_queue below.
 */
struct gdma_queue {
        struct gdma_dev *gdma_dev;

        enum gdma_queue_type type;
        u32 id;

        struct gdma_mem_info mem_info;

        void *queue_mem_ptr;
        u32 queue_size;

        bool monitor_avl_buf;

        u32 head;
        u32 tail;
        struct list_head entry;

        /* Extra fields specific to EQ/CQ. */
        union {
                struct {
                        bool disable_needed;

                        gdma_eq_callback *callback;
                        void *context;

                        unsigned int msix_index;

                        u32 log2_throttle_limit;
                } eq;

                struct {
                        gdma_cq_callback *callback;
                        void *context;

                        struct gdma_queue *parent; /* For CQ/EQ relationship */
                } cq;
        };
};
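
/* Illustrative sketch of the head/tail accounting described in the comment
 * above struct gdma_queue (everything except the struct fields and the
 * helpers declared in this file is a placeholder):
 *
 *      // Producer (SQ/RQ): post a WQE of wqe_size_in_bu Basic Units, then
 *      // advance the producer index and notify the HW.
 *      wq->head += wqe_size_in_bu;
 *      mana_gd_wq_ring_doorbell(gc, wq);
 *
 *      // Once the corresponding CQE has been processed, return the space by
 *      // advancing the consumer index; mana_gd_wq_avail_space() can then
 *      // report the remaining room.
 *      wq->tail += wqe_size_in_bu;
 *
 *      // EQ/CQ consumer: one entry per step, no tail tracking.
 *      cq->head++;
 */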

struct gdma_queue_spec {
        enum gdma_queue_type type;
        bool monitor_avl_buf;
        unsigned int queue_size;

        /* Extra fields specific to EQ/CQ. */
        union {
                struct {
                        gdma_eq_callback *callback;
                        void *context;

                        unsigned long log2_throttle_limit;
                        unsigned int msix_index;
                } eq;

                struct {
                        gdma_cq_callback *callback;
                        void *context;

                        struct gdma_queue *parent_eq;

                } cq;
        };
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
        void (*handler)(void *arg);
        /* Protect the eq_list */
        spinlock_t lock;
        struct list_head eq_list;
        char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
        struct device *dev;
        struct dentry *mana_pci_debugfs;

        /* Per-vPort max number of queues */
        unsigned int max_num_queues;
        unsigned int max_num_msix;
        unsigned int num_msix_usable;
        struct xarray irq_contexts;

        /* L2 MTU */
        u16 adapter_mtu;

        /* This maps a CQ index to the queue structure. */
        unsigned int max_num_cqs;
        struct gdma_queue **cq_table;

        /* Protect eq_test_event and test_event_eq_id */
        struct mutex eq_test_event_mutex;
        struct completion eq_test_event;
        u32 test_event_eq_id;

        bool is_pf;
        bool in_service;

        phys_addr_t bar0_pa;
        void __iomem *bar0_va;
        void __iomem *shm_base;
        void __iomem *db_page_base;
        phys_addr_t phys_db_page_base;
        u32 db_page_size;
        int numa_node;

        /* Shared memory channel (used to bootstrap HWC) */
        struct shm_channel shm_channel;

        /* Hardware communication channel (HWC) */
        struct gdma_dev hwc;

        /* Azure network adapter */
        struct gdma_dev mana;

        /* Azure RDMA adapter */
        struct gdma_dev mana_ib;

        u64 pf_cap_flags1;

        struct workqueue_struct *service_wq;
};

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
        return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
        return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
                             const struct gdma_queue_spec *spec,
                             struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
                           const struct gdma_queue_spec *spec,
                           struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
                              const struct gdma_queue_spec *spec,
                              struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
        u32 reserved :24;
        u32 last_vbytes :8;

        union {
                u32 flags;

                struct {
                        u32 num_sge :8;
                        u32 inline_oob_size_div4 :3;
                        u32 client_oob_in_sgl :1;
                        u32 reserved1 :4;
                        u32 client_data_unit :14;
                        u32 reserved2 :2;
                };
        };
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES ((GDMA_MAX_SQE_SIZE - \
                sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
                sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES ((GDMA_MAX_RQE_SIZE - \
                sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
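
/* With the 16-byte struct gdma_sge defined earlier, these evaluate to 30 SGL
 * entries for a TX WQE and 15 for an RX WQE.
 */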

struct gdma_cqe {
        u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

        union {
                u32 as_uint32;

                struct {
                        u32 wq_num : 24;
                        u32 is_sq : 1;
                        u32 reserved : 4;
                        u32 owner_bits : 3;
                };
        } cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
        u32 as_uint32;

        struct {
                u32 type : 8;
                u32 reserved1 : 8;
                u32 client_id : 2;
                u32 reserved2 : 11;
                u32 owner_bits : 3;
        };
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
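
/* Illustrative sketch of the owner-bits mechanism referred to in the queue
 * head/tail comment earlier in this file: the expected owner value is derived
 * from how often the (power-of-two sized) EQ has wrapped, so a stale entry
 * can be distinguished from a freshly written one without a consumer index
 * shared with the HW. This is a simplified model, not the actual handler.
 *
 *      num_eqe = eq->queue_size / GDMA_EQE_SIZE;
 *      eqe = &((struct gdma_eqe *)eq->queue_mem_ptr)[eq->head % num_eqe];
 *      info.as_uint32 = eqe->eqe_info;         // union gdma_eqe_info
 *
 *      old_bits = (eq->head / num_eqe - 1) & GDMA_EQE_OWNER_MASK;
 *      if (info.owner_bits == old_bits)
 *              return;                         // no new entry yet
 *
 *      new_bits = (eq->head / num_eqe) & GDMA_EQE_OWNER_MASK;
 *      if (info.owner_bits != new_bits)
 *              return;                         // unexpected: HW lapped the SW
 *
 *      // process eqe->details according to info.type, then:
 *      eq->head++;
 */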

struct gdma_eqe {
        u32 details[GDMA_EVENT_DATA_SIZE / 4];
        u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET 8
#define GDMA_REG_DB_PAGE_SIZE 0x10
#define GDMA_REG_SHM_OFFSET 0x18

#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0
#define GDMA_PF_REG_DB_PAGE_OFF 0xC8
#define GDMA_PF_REG_SHM_OFF 0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
        u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
        struct gdma_req_hdr hdr;
        u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
        GDMA_PROTOCOL_V1 = 1,
        GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1,
        GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

/* Driver supports dynamic MSI-X vector allocation */
#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)

/* Driver can self reset on EQE notification */
#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)

/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)

#define GDMA_DRV_CAP_FLAGS1 \
        (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
         GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
         GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
         GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
         GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
         GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
         GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
         GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
        struct gdma_req_hdr hdr;

        /* Mandatory fields required for protocol establishment */
        u64 protocol_ver_min;
        u64 protocol_ver_max;

        /* Gdma Driver Capability Flags */
        u64 gd_drv_cap_flags1;
        u64 gd_drv_cap_flags2;
        u64 gd_drv_cap_flags3;
        u64 gd_drv_cap_flags4;

        /* Advisory fields */
        u64 drv_ver;
        u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
        u32 reserved;
        u32 os_ver_major;
        u32 os_ver_minor;
        u32 os_ver_build;
        u32 os_ver_platform;
        u64 reserved_2;
        u8 os_ver_str1[128];
        u8 os_ver_str2[128];
        u8 os_ver_str3[128];
        u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
        struct gdma_resp_hdr hdr;
        u64 gdma_protocol_ver;
        u64 pf_cap_flags1;
        u64 pf_cap_flags2;
        u64 pf_cap_flags3;
        u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
        struct gdma_resp_hdr hdr;
        u32 status;
        u32 max_sq;
        u32 max_rq;
        u32 max_cq;
        u32 max_eq;
        u32 max_db;
        u32 max_mst;
        u32 max_cq_mod_ctx;
        u32 max_mod_cq;
        u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
        struct gdma_resp_hdr hdr;
        u32 num_of_devs;
        u32 reserved;
        struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
        struct gdma_resp_hdr hdr;
        u32 pdid;
        u32 gpa_mkey;
        u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
        struct gdma_req_hdr hdr;
        u32 resource_type;
        u32 num_resources;
        u32 alignment;
        u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
        struct gdma_resp_hdr hdr;
        u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
        struct gdma_req_hdr hdr;
        u32 resource_type;
        u32 num_resources;
        u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
        struct gdma_req_hdr hdr;
        u32 type;
        u32 reserved1;
        u32 pdid;
        u32 doolbell_id;
        u64 gdma_region;
        u32 reserved2;
        u32 queue_size;
        u32 log2_throttle_limit;
        u32 eq_pci_msix_index;
        u32 cq_mod_ctx_id;
        u32 cq_parent_eq_id;
        u8 rq_drop_on_overrun;
        u8 rq_err_on_wqe_overflow;
        u8 rq_chain_rec_wqes;
        u8 sq_hw_db;
        u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
        struct gdma_resp_hdr hdr;
        u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
        struct gdma_req_hdr hdr;
        u32 type;
        u32 queue_index;
        u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
        struct gdma_req_hdr hdr;
        u32 timeout_ms;
        u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
        struct gdma_resp_hdr hdr;
        u32 timeout_ms;
        u32 reserved;
};

enum gdma_mr_access_flags {
        GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
        GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
        GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
        GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
        GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
        struct gdma_req_hdr hdr;

        /* The total size of the DMA region */
        u64 length;

        /* The offset in the first page */
        u32 offset_in_page;

        /* enum gdma_page_type */
        u32 gdma_page_type;

        /* The total number of pages */
        u32 page_count;

        /* If page_addr_list_len is smaller than page_count,
         * the remaining page addresses will be added via the
         * message GDMA_DMA_REGION_ADD_PAGES.
         */
        u32 page_addr_list_len;
        u64 page_addr_list[];
}; /* HW DATA */

struct gdma_create_dma_region_resp {
        struct gdma_resp_hdr hdr;
        u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
        struct gdma_req_hdr hdr;

        u64 dma_region_handle;

        u32 page_addr_list_len;
        u32 reserved3;

        u64 page_addr_list[];
}; /* HW DATA */
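
/* Illustrative flow for a region whose page list does not fit into a single
 * GDMA_CREATE_DMA_REGION request (the actual per-message limit comes from the
 * HWC maximum message size, which is not defined in this header):
 *
 *      1. Send gdma_create_dma_region_req with page_count set to the total
 *         number of pages and page_addr_list_len set to however many
 *         addresses fit in this message.
 *      2. The response carries dma_region_handle.
 *      3. Send one or more gdma_dma_region_add_pages_req messages with that
 *         handle and the remaining page addresses.
 */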

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
        struct gdma_req_hdr hdr;

        u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
        GDMA_PD_FLAG_INVALID = 0,
        GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};

struct gdma_create_pd_req {
        struct gdma_req_hdr hdr;
        enum gdma_pd_flags flags;
        u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
        struct gdma_resp_hdr hdr;
        u64 pd_handle;
        u32 pd_id;
        u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
        struct gdma_req_hdr hdr;
        u64 pd_handle;
}; /* HW DATA */

struct gdma_destory_pd_resp {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
        /* Guest Physical Address - MRs of this type allow access
         * to any DMA-mapped memory using bus-logical address
         */
        GDMA_MR_TYPE_GPA = 1,
        /* Guest Virtual Address - MRs of this type allow access
         * to memory mapped by PTEs associated with this MR using a virtual
         * address that is set up in the MST
         */
        GDMA_MR_TYPE_GVA = 2,
        /* Guest zero-based address MRs */
        GDMA_MR_TYPE_ZBVA = 4,
};

struct gdma_create_mr_params {
        u64 pd_handle;
        enum gdma_mr_type mr_type;
        union {
                struct {
                        u64 dma_region_handle;
                        u64 virtual_address;
                        enum gdma_mr_access_flags access_flags;
                } gva;
                struct {
                        u64 dma_region_handle;
                        enum gdma_mr_access_flags access_flags;
                } zbva;
        };
};
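
/* Illustrative sketch (all values are placeholders): describing a GVA memory
 * region over an existing DMA region before issuing GDMA_CREATE_MR.
 *
 *      struct gdma_create_mr_params params = {};
 *
 *      params.pd_handle = pd_handle;
 *      params.mr_type = GDMA_MR_TYPE_GVA;
 *      params.gva.dma_region_handle = dma_region_handle;
 *      params.gva.virtual_address = user_va;
 *      params.gva.access_flags = GDMA_ACCESS_FLAG_LOCAL_READ |
 *                                GDMA_ACCESS_FLAG_LOCAL_WRITE;
 */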

struct gdma_create_mr_request {
        struct gdma_req_hdr hdr;
        u64 pd_handle;
        enum gdma_mr_type mr_type;
        u32 reserved_1;

        union {
                struct {
                        u64 dma_region_handle;
                        u64 virtual_address;
                        enum gdma_mr_access_flags access_flags;
                } gva;
                struct {
                        u64 dma_region_handle;
                        enum gdma_mr_access_flags access_flags;
                } zbva;
        };
        u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
        struct gdma_resp_hdr hdr;
        u64 mr_handle;
        u32 lkey;
        u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
        struct gdma_req_hdr hdr;
        u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
        struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
                              const struct gdma_wqe_request *wqe_req,
                              struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
                          const struct gdma_wqe_request *wqe,
                          struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
                              struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
                         struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
                         u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);

int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
int mana_gd_resume(struct pci_dev *pdev);

bool mana_need_log(struct gdma_context *gc, int err);

#endif /* _GDMA_H */