/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64	as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */
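/* Illustrative sketch (not part of this header): ringing a CQ doorbell
 * amounts to composing one of the entries above and writing the 64-bit
 * value to the device's doorbell page. The variables cq_id, new_tail and
 * cq_db_addr below are hypothetical:
 *
 *	union gdma_doorbell_entry e = {};
 *
 *	e.cq.id = cq_id;
 *	e.cq.tail_ptr = new_tail;
 *	e.cq.arm = 1;
 *	writeq(e.as_uint64, cq_db_addr);
 */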

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
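/* Usage sketch (illustrative): a typical HWC request fills in both headers
 * with this helper and then posts the message with mana_gd_send_request(),
 * declared below. The variables gc and eq_id are hypothetical:
 *
 *	struct gdma_generate_test_event_req req = {};
 *	struct gdma_general_resp resp = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
 *			     sizeof(req), sizeof(resp));
 *	req.queue_index = eq_id;
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *				   sizeof(resp), &resp);
 */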

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue *eq;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
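/* Illustrative sketch of the head/tail bookkeeping described above (the
 * driver does this internally; wq, cq, wqe_size and wqe_info are
 * hypothetical):
 *
 *	// SQ/RQ producer: a posted WQE advances 'head' in Basic Units.
 *	wq->head += wqe_size / GDMA_WQE_BU_SIZE;
 *
 *	// SQ/RQ consumer: the matching completion releases the same BUs.
 *	wq->tail += wqe_info->wqe_size_in_bu;
 *
 *	// EQ/CQ: 'head' simply counts processed entries.
 *	cq->head++;
 */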

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;

		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device		*dev;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct gdma_irq_context	*irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;
	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u32 db_page_size;
	int                     numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;
};

#define MAX_NUM_GDMA_DEVICES	4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
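/* Completion-processing sketch (illustrative; cq and the handling of the
 * returned entries are hypothetical):
 *
 *	struct gdma_comp comp[8];
 *	int n;
 *
 *	n = mana_gd_poll_cq(cq, comp, ARRAY_SIZE(comp));
 *	// ... consume the n completions (comp[i].wq_num, comp[i].is_sq) ...
 *	mana_gd_ring_cq(cq, SET_ARM_BIT); // re-arm; SET_ARM_BIT is defined below
 */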

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */
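/* Sizing sketch (illustrative): a posted WQE consists of this 8-byte header,
 * the inline client OOB area and the SGL, rounded up to Basic Units.
 * Assuming a request wqe_req and a client_oob_size of either
 * INLINE_OOB_SMALL_SIZE or INLINE_OOB_LARGE_SIZE (defined below):
 *
 *	wqe_size = ALIGN(sizeof(struct gdma_wqe) + client_oob_size +
 *			 wqe_req->num_sge * sizeof(struct gdma_sge),
 *			 GDMA_WQE_BU_SIZE);
 */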

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
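/* Worked out (illustrative): struct gdma_sge is 16 bytes, so with
 * GDMA_MAX_SQE_SIZE = 512 and INLINE_OOB_SMALL_SIZE = 8 this evaluates to
 * (512 - 16 - 8) / 16 = 30 TX SGL entries, and with GDMA_MAX_RQE_SIZE = 256
 * to (256 - 16) / 16 = 15 RX SGL entries.
 */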

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
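/* Request-sizing sketch (illustrative, with hypothetical variables; real
 * callers add error handling and may split large regions across
 * GDMA_DMA_REGION_ADD_PAGES messages). Assuming a physically contiguous
 * gmi set up by mana_gd_alloc_memory():
 *
 *	unsigned int num_pages = gmi->length / PAGE_SIZE;
 *	u32 req_len = struct_size(req, page_addr_list, num_pages);
 *	unsigned int i;
 *
 *	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
 *	req->length = gmi->length;
 *	req->offset_in_page = 0;
 *	req->page_count = num_pages;
 *	req->page_addr_list_len = num_pages;
 *	for (i = 0; i < num_pages; i++)
 *		req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
 */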

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
}; /* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;

	};
	u32 reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);

#endif /* _GDMA_H */