1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #ifndef _GDMA_H
5 #define _GDMA_H
6 
7 #include <linux/dma-mapping.h>
8 #include <linux/netdevice.h>
9 
10 #include "shm_channel.h"
11 
12 #define GDMA_STATUS_MORE_ENTRIES	0x00000105
13 
14 /* Structures labeled with "HW DATA" are exchanged with the hardware. All of
15  * them are naturally aligned and hence don't need __packed.
16  */
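/* Editorial worked example (not from the upstream header): for struct
 * gdma_msg_hdr defined below, natural alignment already yields a contiguous
 * layout with no implicit padding, which is why __packed is unnecessary:
 *
 *	u32 hdr_type	at offset 0
 *	u32 msg_type	at offset 4
 *	u16 msg_version	at offset 8
 *	u16 hwc_msg_id	at offset 10
 *	u32 msg_size	at offset 12	-> sizeof(struct gdma_msg_hdr) == 16
 */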
17 
18 enum gdma_request_type {
19 	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
20 	GDMA_QUERY_MAX_RESOURCES	= 2,
21 	GDMA_LIST_DEVICES		= 3,
22 	GDMA_REGISTER_DEVICE		= 4,
23 	GDMA_DEREGISTER_DEVICE		= 5,
24 	GDMA_GENERATE_TEST_EQE		= 10,
25 	GDMA_CREATE_QUEUE		= 12,
26 	GDMA_DISABLE_QUEUE		= 13,
27 	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
28 	GDMA_DESTROY_RESOURCE_RANGE	= 24,
29 	GDMA_CREATE_DMA_REGION		= 25,
30 	GDMA_DMA_REGION_ADD_PAGES	= 26,
31 	GDMA_DESTROY_DMA_REGION		= 27,
32 	GDMA_CREATE_PD			= 29,
33 	GDMA_DESTROY_PD			= 30,
34 	GDMA_CREATE_MR			= 31,
35 	GDMA_DESTROY_MR			= 32,
36 	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
37 };
38 
39 #define GDMA_RESOURCE_DOORBELL_PAGE	27
40 
41 enum gdma_queue_type {
42 	GDMA_INVALID_QUEUE,
43 	GDMA_SQ,
44 	GDMA_RQ,
45 	GDMA_CQ,
46 	GDMA_EQ,
47 };
48 
49 enum gdma_work_request_flags {
50 	GDMA_WR_NONE			= 0,
51 	GDMA_WR_OOB_IN_SGL		= BIT(0),
52 	GDMA_WR_PAD_BY_SGE0		= BIT(1),
53 };
54 
55 enum gdma_eqe_type {
56 	GDMA_EQE_COMPLETION		= 3,
57 	GDMA_EQE_TEST_EVENT		= 64,
58 	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
59 	GDMA_EQE_HWC_INIT_DATA		= 130,
60 	GDMA_EQE_HWC_INIT_DONE		= 131,
61 	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
62 	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
63 };
64 
65 enum {
66 	GDMA_DEVICE_NONE	= 0,
67 	GDMA_DEVICE_HWC		= 1,
68 	GDMA_DEVICE_MANA	= 2,
69 	GDMA_DEVICE_MANA_IB	= 3,
70 };
71 
72 struct gdma_resource {
73 	/* Protect the bitmap */
74 	spinlock_t lock;
75 
76 	/* The bitmap size in bits. */
77 	u32 size;
78 
79 	/* The bitmap tracks the resources. */
80 	unsigned long *map;
81 };
82 
83 union gdma_doorbell_entry {
84 	u64	as_uint64;
85 
86 	struct {
87 		u64 id		: 24;
88 		u64 reserved	: 8;
89 		u64 tail_ptr	: 31;
90 		u64 arm		: 1;
91 	} cq;
92 
93 	struct {
94 		u64 id		: 24;
95 		u64 wqe_cnt	: 8;
96 		u64 tail_ptr	: 32;
97 	} rq;
98 
99 	struct {
100 		u64 id		: 24;
101 		u64 reserved	: 8;
102 		u64 tail_ptr	: 32;
103 	} sq;
104 
105 	struct {
106 		u64 id		: 16;
107 		u64 reserved	: 16;
108 		u64 tail_ptr	: 31;
109 		u64 arm		: 1;
110 	} eq;
111 }; /* HW DATA */
112 
113 struct gdma_msg_hdr {
114 	u32 hdr_type;
115 	u32 msg_type;
116 	u16 msg_version;
117 	u16 hwc_msg_id;
118 	u32 msg_size;
119 }; /* HW DATA */
120 
121 struct gdma_dev_id {
122 	union {
123 		struct {
124 			u16 type;
125 			u16 instance;
126 		};
127 
128 		u32 as_uint32;
129 	};
130 }; /* HW DATA */
131 
132 struct gdma_req_hdr {
133 	struct gdma_msg_hdr req;
134 	struct gdma_msg_hdr resp; /* The expected response */
135 	struct gdma_dev_id dev_id;
136 	u32 activity_id;
137 }; /* HW DATA */
138 
139 struct gdma_resp_hdr {
140 	struct gdma_msg_hdr response;
141 	struct gdma_dev_id dev_id;
142 	u32 activity_id;
143 	u32 status;
144 	u32 reserved;
145 }; /* HW DATA */
146 
147 struct gdma_general_req {
148 	struct gdma_req_hdr hdr;
149 }; /* HW DATA */
150 
151 #define GDMA_MESSAGE_V1 1
152 #define GDMA_MESSAGE_V2 2
153 #define GDMA_MESSAGE_V3 3
154 
155 struct gdma_general_resp {
156 	struct gdma_resp_hdr hdr;
157 }; /* HW DATA */
158 
159 #define GDMA_STANDARD_HEADER_TYPE 0
160 
161 static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
162 					u32 req_size, u32 resp_size)
163 {
164 	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
165 	hdr->req.msg_type = code;
166 	hdr->req.msg_version = GDMA_MESSAGE_V1;
167 	hdr->req.msg_size = req_size;
168 
169 	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
170 	hdr->resp.msg_type = code;
171 	hdr->resp.msg_version = GDMA_MESSAGE_V1;
172 	hdr->resp.msg_size = resp_size;
173 }
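/* Editorial usage sketch (hypothetical caller, not part of the upstream
 * header). It assumes a valid struct gdma_context *gc and uses
 * mana_gd_send_request(), declared near the end of this file:
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		return;		// treat a non-zero GDMA status as failure
 */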
174 
175 /* The 16-byte struct is part of the GDMA work queue entry (WQE). */
176 struct gdma_sge {
177 	u64 address;
178 	u32 mem_key;
179 	u32 size;
180 }; /* HW DATA */
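/* Editorial illustration (not in the upstream header): the 16-byte claim
 * above (u64 + u32 + u32, naturally aligned) can be expressed as a
 * compile-time check, e.g.:
 *
 *	static_assert(sizeof(struct gdma_sge) == 16);
 */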
181 
182 struct gdma_wqe_request {
183 	struct gdma_sge *sgl;
184 	u32 num_sge;
185 
186 	u32 inline_oob_size;
187 	const void *inline_oob_data;
188 
189 	u32 flags;
190 	u32 client_data_unit;
191 };
192 
193 enum gdma_page_type {
194 	GDMA_PAGE_TYPE_4K,
195 };
196 
197 #define GDMA_INVALID_DMA_REGION 0
198 
199 struct gdma_mem_info {
200 	struct device *dev;
201 
202 	dma_addr_t dma_handle;
203 	void *virt_addr;
204 	u64 length;
205 
206 	/* Allocated by the PF driver */
207 	u64 dma_region_handle;
208 };
209 
210 #define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
211 
212 struct gdma_dev {
213 	struct gdma_context *gdma_context;
214 
215 	struct gdma_dev_id dev_id;
216 
217 	u32 pdid;
218 	u32 doorbell;
219 	u32 gpa_mkey;
220 
221 	/* GDMA driver specific pointer */
222 	void *driver_data;
223 
224 	struct auxiliary_device *adev;
225 };
226 
227 /* MANA_PAGE_SIZE is the DMA unit */
228 #define MANA_PAGE_SHIFT 12
229 #define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
230 #define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
231 #define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
232 #define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
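/* Editorial worked example of the macros above (not from the upstream
 * header): with MANA_PAGE_SHIFT == 12, MANA_PAGE_SIZE is 4096, so
 * MANA_PAGE_ALIGN(6000) == 8192, MANA_PAGE_ALIGNED(0x3000) is true, and
 * MANA_PFN(0x12345) == 0x12.
 */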
233 
234 /* Required by HW */
235 #define MANA_MIN_QSIZE MANA_PAGE_SIZE
236 
237 #define GDMA_CQE_SIZE 64
238 #define GDMA_EQE_SIZE 16
239 #define GDMA_MAX_SQE_SIZE 512
240 #define GDMA_MAX_RQE_SIZE 256
241 
242 #define GDMA_COMP_DATA_SIZE 0x3C
243 
244 #define GDMA_EVENT_DATA_SIZE 0xC
245 
246 /* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
247 #define GDMA_WQE_BU_SIZE 32
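/* Editorial worked example (assumes the WQE size formula used by the GDMA
 * driver, i.e. 8-byte WQE header + inline OOB + SGL, rounded up to the Basic
 * Unit): a WQE with an 8-byte inline OOB and two 16-byte SGEs needs
 * 8 + 8 + 2 * 16 = 48 bytes, which rounds up to 64 bytes, i.e. 2 BUs.
 */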
248 
249 #define INVALID_PDID		UINT_MAX
250 #define INVALID_DOORBELL	UINT_MAX
251 #define INVALID_MEM_KEY		UINT_MAX
252 #define INVALID_QUEUE_ID	UINT_MAX
253 #define INVALID_PCI_MSIX_INDEX  UINT_MAX
254 
255 struct gdma_comp {
256 	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
257 	u32 wq_num;
258 	bool is_sq;
259 };
260 
261 struct gdma_event {
262 	u32 details[GDMA_EVENT_DATA_SIZE / 4];
263 	u8  type;
264 };
265 
266 struct gdma_queue;
267 
268 struct mana_eq {
269 	struct gdma_queue *eq;
270 };
271 
272 typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
273 			      struct gdma_event *e);
274 
275 typedef void gdma_cq_callback(void *context, struct gdma_queue *q);
276 
277 /* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
278  * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
279  * driver increases the 'head' in BUs rather than in bytes, and notifies
280  * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
281  * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
282  *
283  * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
284  * processed, the driver increases the 'tail' to indicate that WQEs have
285  * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
286  *
287  * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
288  * that the EQ/CQ is big enough so they can't overflow, and the driver uses
289  * the owner bits mechanism to detect if the queue has become empty.
290  */
291 struct gdma_queue {
292 	struct gdma_dev *gdma_dev;
293 
294 	enum gdma_queue_type type;
295 	u32 id;
296 
297 	struct gdma_mem_info mem_info;
298 
299 	void *queue_mem_ptr;
300 	u32 queue_size;
301 
302 	bool monitor_avl_buf;
303 
304 	u32 head;
305 	u32 tail;
306 	struct list_head entry;
307 
308 	/* Extra fields specific to EQ/CQ. */
309 	union {
310 		struct {
311 			bool disable_needed;
312 
313 			gdma_eq_callback *callback;
314 			void *context;
315 
316 			unsigned int msix_index;
317 
318 			u32 log2_throttle_limit;
319 		} eq;
320 
321 		struct {
322 			gdma_cq_callback *callback;
323 			void *context;
324 
325 			struct gdma_queue *parent; /* For CQ/EQ relationship */
326 		} cq;
327 	};
328 };
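/* Editorial sketch of the head/tail convention documented above (pseudocode;
 * the authoritative logic lives in the GDMA/MANA .c files, not in this
 * header):
 *
 *	// SQ/RQ producer: advance 'head' by the posted WQE size in 32-byte BUs.
 *	wq->head += wqe_size_in_bytes / GDMA_WQE_BU_SIZE;
 *
 *	// SQ/RQ consumer: once the matching CQE has been processed, return the
 *	// space by advancing 'tail' by the same number of BUs.
 *	wq->tail += wqe_info->wqe_size_in_bu;
 *
 *	// EQ/CQ: 'head' counts processed entries; 'tail' is unused.
 *	cq->head++;
 */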
329 
330 struct gdma_queue_spec {
331 	enum gdma_queue_type type;
332 	bool monitor_avl_buf;
333 	unsigned int queue_size;
334 
335 	/* Extra fields specific to EQ/CQ. */
336 	union {
337 		struct {
338 			gdma_eq_callback *callback;
339 			void *context;
340 
341 			unsigned long log2_throttle_limit;
342 			unsigned int msix_index;
343 		} eq;
344 
345 		struct {
346 			gdma_cq_callback *callback;
347 			void *context;
348 
349 			struct gdma_queue *parent_eq;
350 
351 		} cq;
352 	};
353 };
354 
355 #define MANA_IRQ_NAME_SZ 32
356 
357 struct gdma_irq_context {
358 	void (*handler)(void *arg);
359 	/* Protect the eq_list */
360 	spinlock_t lock;
361 	struct list_head eq_list;
362 	char name[MANA_IRQ_NAME_SZ];
363 };
364 
365 struct gdma_context {
366 	struct device		*dev;
367 
368 	/* Per-vPort max number of queues */
369 	unsigned int		max_num_queues;
370 	unsigned int		max_num_msix;
371 	unsigned int		num_msix_usable;
372 	struct gdma_irq_context	*irq_contexts;
373 
374 	/* L2 MTU */
375 	u16 adapter_mtu;
376 
377 	/* This maps a CQ index to the queue structure. */
378 	unsigned int		max_num_cqs;
379 	struct gdma_queue	**cq_table;
380 
381 	/* Protect eq_test_event and test_event_eq_id  */
382 	struct mutex		eq_test_event_mutex;
383 	struct completion	eq_test_event;
384 	u32			test_event_eq_id;
385 
386 	bool			is_pf;
387 	phys_addr_t		bar0_pa;
388 	void __iomem		*bar0_va;
389 	void __iomem		*shm_base;
390 	void __iomem		*db_page_base;
391 	phys_addr_t		phys_db_page_base;
392 	u32 db_page_size;
393 	int                     numa_node;
394 
395 	/* Shared memory channel (used to bootstrap HWC) */
396 	struct shm_channel	shm_channel;
397 
398 	/* Hardware communication channel (HWC) */
399 	struct gdma_dev		hwc;
400 
401 	/* Azure network adapter */
402 	struct gdma_dev		mana;
403 
404 	/* Azure RDMA adapter */
405 	struct gdma_dev		mana_ib;
406 };
407 
408 #define MAX_NUM_GDMA_DEVICES	4
409 
410 static inline bool mana_gd_is_mana(struct gdma_dev *gd)
411 {
412 	return gd->dev_id.type == GDMA_DEVICE_MANA;
413 }
414 
415 static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
416 {
417 	return gd->dev_id.type == GDMA_DEVICE_HWC;
418 }
419 
420 u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
421 u32 mana_gd_wq_avail_space(struct gdma_queue *wq);
422 
423 int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);
424 
425 int mana_gd_create_hwc_queue(struct gdma_dev *gd,
426 			     const struct gdma_queue_spec *spec,
427 			     struct gdma_queue **queue_ptr);
428 
429 int mana_gd_create_mana_eq(struct gdma_dev *gd,
430 			   const struct gdma_queue_spec *spec,
431 			   struct gdma_queue **queue_ptr);
432 
433 int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
434 			      const struct gdma_queue_spec *spec,
435 			      struct gdma_queue **queue_ptr);
436 
437 void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
438 
439 int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);
440 
441 void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);
442 
443 struct gdma_wqe {
444 	u32 reserved	:24;
445 	u32 last_vbytes	:8;
446 
447 	union {
448 		u32 flags;
449 
450 		struct {
451 			u32 num_sge		:8;
452 			u32 inline_oob_size_div4:3;
453 			u32 client_oob_in_sgl	:1;
454 			u32 reserved1		:4;
455 			u32 client_data_unit	:14;
456 			u32 reserved2		:2;
457 		};
458 	};
459 }; /* HW DATA */
460 
461 #define INLINE_OOB_SMALL_SIZE 8
462 #define INLINE_OOB_LARGE_SIZE 24
463 
464 #define MAX_TX_WQE_SIZE 512
465 #define MAX_RX_WQE_SIZE 256
466 
467 #define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
468 			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
469 			sizeof(struct gdma_sge))
470 
471 #define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
472 			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
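/* Editorial note: with sizeof(struct gdma_sge) == 16, the macros above
 * evaluate to MAX_TX_WQE_SGL_ENTRIES == (512 - 16 - 8) / 16 == 30 and
 * MAX_RX_WQE_SGL_ENTRIES == (256 - 16) / 16 == 15.
 */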
473 
474 struct gdma_cqe {
475 	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
476 
477 	union {
478 		u32 as_uint32;
479 
480 		struct {
481 			u32 wq_num	: 24;
482 			u32 is_sq	: 1;
483 			u32 reserved	: 4;
484 			u32 owner_bits	: 3;
485 		};
486 	} cqe_info;
487 }; /* HW DATA */
488 
489 #define GDMA_CQE_OWNER_BITS 3
490 
491 #define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
492 
493 #define SET_ARM_BIT 1
494 
495 #define GDMA_EQE_OWNER_BITS 3
496 
497 union gdma_eqe_info {
498 	u32 as_uint32;
499 
500 	struct {
501 		u32 type	: 8;
502 		u32 reserved1	: 8;
503 		u32 client_id	: 2;
504 		u32 reserved2	: 11;
505 		u32 owner_bits	: 3;
506 	};
507 }; /* HW DATA */
508 
509 #define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
510 #define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))
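/* Editorial sketch of the owner-bits emptiness check these masks support
 * (approximate; the exact logic is in the GDMA driver .c code, not in this
 * header):
 *
 *	// num_cqe = cq->queue_size / sizeof(struct gdma_cqe);
 *	// Expected owner value for the current pass over the ring:
 *	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *	// Value left over from the previous pass:
 *	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
 *	// A CQE still carrying old_bits has not been written by HW yet
 *	// (queue empty); one carrying neither value indicates overflow.
 */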
511 
512 struct gdma_eqe {
513 	u32 details[GDMA_EVENT_DATA_SIZE / 4];
514 	u32 eqe_info;
515 }; /* HW DATA */
516 
517 #define GDMA_REG_DB_PAGE_OFFSET	8
518 #define GDMA_REG_DB_PAGE_SIZE	0x10
519 #define GDMA_REG_SHM_OFFSET	0x18
520 
521 #define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
522 #define GDMA_PF_REG_DB_PAGE_OFF		0xC8
523 #define GDMA_PF_REG_SHM_OFF		0x70
524 
525 #define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108
526 
527 #define MANA_PF_DEVICE_ID 0x00B9
528 #define MANA_VF_DEVICE_ID 0x00BA
529 
530 struct gdma_posted_wqe_info {
531 	u32 wqe_size_in_bu;
532 };
533 
534 /* GDMA_GENERATE_TEST_EQE */
535 struct gdma_generate_test_event_req {
536 	struct gdma_req_hdr hdr;
537 	u32 queue_index;
538 }; /* HW DATA */
539 
540 /* GDMA_VERIFY_VF_DRIVER_VERSION */
541 enum {
542 	GDMA_PROTOCOL_V1	= 1,
543 	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
544 	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
545 };
546 
547 #define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)
548 
549 /* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
550  * so the driver is able to reliably support features like busy_poll.
551  */
552 #define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
553 #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
554 #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
555 
556 #define GDMA_DRV_CAP_FLAGS1 \
557 	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
558 	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
559 	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
560 	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT)
561 
562 #define GDMA_DRV_CAP_FLAGS2 0
563 
564 #define GDMA_DRV_CAP_FLAGS3 0
565 
566 #define GDMA_DRV_CAP_FLAGS4 0
567 
568 struct gdma_verify_ver_req {
569 	struct gdma_req_hdr hdr;
570 
571 	/* Mandatory fields required for protocol establishment */
572 	u64 protocol_ver_min;
573 	u64 protocol_ver_max;
574 
575 	/* GDMA driver capability flags */
576 	u64 gd_drv_cap_flags1;
577 	u64 gd_drv_cap_flags2;
578 	u64 gd_drv_cap_flags3;
579 	u64 gd_drv_cap_flags4;
580 
581 	/* Advisory fields */
582 	u64 drv_ver;
583 	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
584 	u32 reserved;
585 	u32 os_ver_major;
586 	u32 os_ver_minor;
587 	u32 os_ver_build;
588 	u32 os_ver_platform;
589 	u64 reserved_2;
590 	u8 os_ver_str1[128];
591 	u8 os_ver_str2[128];
592 	u8 os_ver_str3[128];
593 	u8 os_ver_str4[128];
594 }; /* HW DATA */
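/* Editorial sketch (hypothetical values, not part of the upstream header) of
 * filling the mandatory fields before sending GDMA_VERIFY_VF_DRIVER_VERSION:
 *
 *	req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
 *	req.protocol_ver_max = GDMA_PROTOCOL_LAST;
 *	req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
 *	req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
 *	req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
 *	req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
 *	req.os_type = 0x10;	// Linux, per the os_type field comment above
 */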
595 
596 struct gdma_verify_ver_resp {
597 	struct gdma_resp_hdr hdr;
598 	u64 gdma_protocol_ver;
599 	u64 pf_cap_flags1;
600 	u64 pf_cap_flags2;
601 	u64 pf_cap_flags3;
602 	u64 pf_cap_flags4;
603 }; /* HW DATA */
604 
605 /* GDMA_QUERY_MAX_RESOURCES */
606 struct gdma_query_max_resources_resp {
607 	struct gdma_resp_hdr hdr;
608 	u32 status;
609 	u32 max_sq;
610 	u32 max_rq;
611 	u32 max_cq;
612 	u32 max_eq;
613 	u32 max_db;
614 	u32 max_mst;
615 	u32 max_cq_mod_ctx;
616 	u32 max_mod_cq;
617 	u32 max_msix;
618 }; /* HW DATA */
619 
620 /* GDMA_LIST_DEVICES */
621 struct gdma_list_devices_resp {
622 	struct gdma_resp_hdr hdr;
623 	u32 num_of_devs;
624 	u32 reserved;
625 	struct gdma_dev_id devs[64];
626 }; /* HW DATA */
627 
628 /* GDMA_REGISTER_DEVICE */
629 struct gdma_register_device_resp {
630 	struct gdma_resp_hdr hdr;
631 	u32 pdid;
632 	u32 gpa_mkey;
633 	u32 db_id;
634 }; /* HW DATA */
635 
636 struct gdma_allocate_resource_range_req {
637 	struct gdma_req_hdr hdr;
638 	u32 resource_type;
639 	u32 num_resources;
640 	u32 alignment;
641 	u32 allocated_resources;
642 };
643 
644 struct gdma_allocate_resource_range_resp {
645 	struct gdma_resp_hdr hdr;
646 	u32 allocated_resources;
647 };
648 
649 struct gdma_destroy_resource_range_req {
650 	struct gdma_req_hdr hdr;
651 	u32 resource_type;
652 	u32 num_resources;
653 	u32 allocated_resources;
654 };
655 
656 /* GDMA_CREATE_QUEUE */
657 struct gdma_create_queue_req {
658 	struct gdma_req_hdr hdr;
659 	u32 type;
660 	u32 reserved1;
661 	u32 pdid;
662 	u32 doolbell_id;
663 	u64 gdma_region;
664 	u32 reserved2;
665 	u32 queue_size;
666 	u32 log2_throttle_limit;
667 	u32 eq_pci_msix_index;
668 	u32 cq_mod_ctx_id;
669 	u32 cq_parent_eq_id;
670 	u8  rq_drop_on_overrun;
671 	u8  rq_err_on_wqe_overflow;
672 	u8  rq_chain_rec_wqes;
673 	u8  sq_hw_db;
674 	u32 reserved3;
675 }; /* HW DATA */
676 
677 struct gdma_create_queue_resp {
678 	struct gdma_resp_hdr hdr;
679 	u32 queue_index;
680 }; /* HW DATA */
681 
682 /* GDMA_DISABLE_QUEUE */
683 struct gdma_disable_queue_req {
684 	struct gdma_req_hdr hdr;
685 	u32 type;
686 	u32 queue_index;
687 	u32 alloc_res_id_on_creation;
688 }; /* HW DATA */
689 
690 /* GDMA_QUERY_HWC_TIMEOUT */
691 struct gdma_query_hwc_timeout_req {
692 	struct gdma_req_hdr hdr;
693 	u32 timeout_ms;
694 	u32 reserved;
695 };
696 
697 struct gdma_query_hwc_timeout_resp {
698 	struct gdma_resp_hdr hdr;
699 	u32 timeout_ms;
700 	u32 reserved;
701 };
702 
703 enum atb_page_size {
704 	ATB_PAGE_SIZE_4K,
705 	ATB_PAGE_SIZE_8K,
706 	ATB_PAGE_SIZE_16K,
707 	ATB_PAGE_SIZE_32K,
708 	ATB_PAGE_SIZE_64K,
709 	ATB_PAGE_SIZE_128K,
710 	ATB_PAGE_SIZE_256K,
711 	ATB_PAGE_SIZE_512K,
712 	ATB_PAGE_SIZE_1M,
713 	ATB_PAGE_SIZE_2M,
714 	ATB_PAGE_SIZE_MAX,
715 };
716 
717 enum gdma_mr_access_flags {
718 	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
719 	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
720 	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
721 	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
722 	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
723 };
724 
725 /* GDMA_CREATE_DMA_REGION */
726 struct gdma_create_dma_region_req {
727 	struct gdma_req_hdr hdr;
728 
729 	/* The total size of the DMA region */
730 	u64 length;
731 
732 	/* The offset in the first page */
733 	u32 offset_in_page;
734 
735 	/* enum gdma_page_type */
736 	u32 gdma_page_type;
737 
738 	/* The total number of pages */
739 	u32 page_count;
740 
741 	/* If page_addr_list_len is smaller than page_count,
742 	 * the remaining page addresses will be added via the
743 	 * message GDMA_DMA_REGION_ADD_PAGES.
744 	 */
745 	u32 page_addr_list_len;
746 	u64 page_addr_list[];
747 }; /* HW DATA */
748 
749 struct gdma_create_dma_region_resp {
750 	struct gdma_resp_hdr hdr;
751 	u64 dma_region_handle;
752 }; /* HW DATA */
753 
754 /* GDMA_DMA_REGION_ADD_PAGES */
755 struct gdma_dma_region_add_pages_req {
756 	struct gdma_req_hdr hdr;
757 
758 	u64 dma_region_handle;
759 
760 	u32 page_addr_list_len;
761 	u32 reserved3;
762 
763 	u64 page_addr_list[];
764 }; /* HW DATA */
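/* Editorial sketch of the split described in gdma_create_dma_region_req
 * above (max_pages_per_create_msg is a hypothetical name; the real limit
 * comes from the HWC maximum message size):
 *
 *	first = min(page_count, max_pages_per_create_msg);
 *	// GDMA_CREATE_DMA_REGION carries page_addr_list[0 .. first - 1],
 *	// with page_addr_list_len = first and page_count covering the whole
 *	// region. The remaining addresses are sent in one or more
 *	// GDMA_DMA_REGION_ADD_PAGES messages that reference the
 *	// dma_region_handle returned by the create response.
 */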
765 
766 /* GDMA_DESTROY_DMA_REGION */
767 struct gdma_destroy_dma_region_req {
768 	struct gdma_req_hdr hdr;
769 
770 	u64 dma_region_handle;
771 }; /* HW DATA */
772 
773 enum gdma_pd_flags {
774 	GDMA_PD_FLAG_INVALID = 0,
775 };
776 
777 struct gdma_create_pd_req {
778 	struct gdma_req_hdr hdr;
779 	enum gdma_pd_flags flags;
780 	u32 reserved;
781 };/* HW DATA */
782 
783 struct gdma_create_pd_resp {
784 	struct gdma_resp_hdr hdr;
785 	u64 pd_handle;
786 	u32 pd_id;
787 	u32 reserved;
788 };/* HW DATA */
789 
790 struct gdma_destroy_pd_req {
791 	struct gdma_req_hdr hdr;
792 	u64 pd_handle;
793 };/* HW DATA */
794 
795 struct gdma_destory_pd_resp {
796 	struct gdma_resp_hdr hdr;
797 };/* HW DATA */
798 
799 enum gdma_mr_type {
800 	/* Guest Virtual Address - MRs of this type allow access
801 	 * to memory mapped by PTEs associated with this MR using a virtual
802 	 * address that is set up in the MST
803 	 */
804 	GDMA_MR_TYPE_GVA = 2,
805 };
806 
807 struct gdma_create_mr_params {
808 	u64 pd_handle;
809 	enum gdma_mr_type mr_type;
810 	union {
811 		struct {
812 			u64 dma_region_handle;
813 			u64 virtual_address;
814 			enum gdma_mr_access_flags access_flags;
815 		} gva;
816 	};
817 };
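/* Editorial sketch (hypothetical handle/address names) of describing a GVA
 * memory region with the parameters above, per the gdma_mr_type comment:
 *
 *	struct gdma_create_mr_params params = {
 *		.pd_handle = pd_handle,
 *		.mr_type = GDMA_MR_TYPE_GVA,
 *		.gva.dma_region_handle = dma_region_handle,
 *		.gva.virtual_address = virt_addr,
 *		.gva.access_flags = GDMA_ACCESS_FLAG_LOCAL_READ |
 *				    GDMA_ACCESS_FLAG_LOCAL_WRITE,
 *	};
 */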
818 
819 struct gdma_create_mr_request {
820 	struct gdma_req_hdr hdr;
821 	u64 pd_handle;
822 	enum gdma_mr_type mr_type;
823 	u32 reserved_1;
824 
825 	union {
826 		struct {
827 			u64 dma_region_handle;
828 			u64 virtual_address;
829 			enum gdma_mr_access_flags access_flags;
830 		} gva;
831 
832 	};
833 	u32 reserved_2;
834 };/* HW DATA */
835 
836 struct gdma_create_mr_response {
837 	struct gdma_resp_hdr hdr;
838 	u64 mr_handle;
839 	u32 lkey;
840 	u32 rkey;
841 };/* HW DATA */
842 
843 struct gdma_destroy_mr_request {
844 	struct gdma_req_hdr hdr;
845 	u64 mr_handle;
846 };/* HW DATA */
847 
848 struct gdma_destroy_mr_response {
849 	struct gdma_resp_hdr hdr;
850 };/* HW DATA */
851 
852 int mana_gd_verify_vf_version(struct pci_dev *pdev);
853 
854 int mana_gd_register_device(struct gdma_dev *gd);
855 int mana_gd_deregister_device(struct gdma_dev *gd);
856 
857 int mana_gd_post_work_request(struct gdma_queue *wq,
858 			      const struct gdma_wqe_request *wqe_req,
859 			      struct gdma_posted_wqe_info *wqe_info);
860 
861 int mana_gd_post_and_ring(struct gdma_queue *queue,
862 			  const struct gdma_wqe_request *wqe,
863 			  struct gdma_posted_wqe_info *wqe_info);
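/* Editorial sketch (hypothetical buffer, OOB and length names) of posting a
 * single-SGE work request through the API above:
 *
 *	struct gdma_sge sge = {
 *		.address = dma_addr,
 *		.mem_key = gd->gpa_mkey,
 *		.size = len,
 *	};
 *	struct gdma_wqe_request wqe_req = {
 *		.sgl = &sge,
 *		.num_sge = 1,
 *		.inline_oob_size = INLINE_OOB_SMALL_SIZE,
 *		.inline_oob_data = &oob,
 *	};
 *	struct gdma_posted_wqe_info wqe_info;
 *	int err = mana_gd_post_and_ring(queue, &wqe_req, &wqe_info);
 */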
864 
865 int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
866 void mana_gd_free_res_map(struct gdma_resource *r);
867 
868 void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
869 			      struct gdma_queue *queue);
870 
871 int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
872 			 struct gdma_mem_info *gmi);
873 
874 void mana_gd_free_memory(struct gdma_mem_info *gmi);
875 
876 int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
877 			 u32 resp_len, void *resp);
878 
879 int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
880 
881 #endif /* _GDMA_H */
882