/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef _GDMA_H
#define _GDMA_H

#include <sys/bus.h>
#include <sys/bus_dma.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/sx.h>

#include "gdma_util.h"
#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

#define GDMA_BAR0		0

#define GDMA_IRQNAME_SZ		40

struct gdma_bus {
	bus_space_handle_t	bar0_h;
	bus_space_tag_t		bar0_t;
};

struct gdma_msix_entry {
	int			entry;
	int			vector;
};

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
};

typedef uint64_t gdma_obj_handle_t;

struct gdma_resource {
	/* Protect the bitmap */
	struct mtx		lock_spin;

	/* The bitmap size in bits. */
	uint32_t		size;

	/* The bitmap tracks the resources. */
	unsigned long		*map;
};
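
/*
 * A gdma_resource is set up with mana_gd_alloc_res_map() and torn down
 * with mana_gd_free_res_map(), both declared later in this file.  A
 * minimal usage sketch (the lock name is illustrative and error
 * handling is omitted):
 *
 *	err = mana_gd_alloc_res_map(gc->num_msix_usable,
 *	    &gc->msix_resource, "gdma msix res lock");
 *	...
 *	mana_gd_free_res_map(&gc->msix_resource);
 */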

union gdma_doorbell_entry {
	uint64_t		as_uint64;

	struct {
		uint64_t id		: 24;
		uint64_t reserved	: 8;
		uint64_t tail_ptr	: 31;
		uint64_t arm		: 1;
	} cq;

	struct {
		uint64_t id		: 24;
		uint64_t wqe_cnt	: 8;
		uint64_t tail_ptr	: 32;
	} rq;

	struct {
		uint64_t id		: 24;
		uint64_t reserved	: 8;
		uint64_t tail_ptr	: 32;
	} sq;

	struct {
		uint64_t id		: 16;
		uint64_t reserved	: 16;
		uint64_t tail_ptr	: 31;
		uint64_t arm		: 1;
	} eq;
}; /* HW DATA */
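
/*
 * Illustrative sketch only (not the driver's doorbell path): how a CQ
 * doorbell value might be composed from the layout above before the
 * 64-bit value is written to the device's doorbell page.  The field
 * choices below are assumptions for illustration.
 *
 *	union gdma_doorbell_entry e = { .as_uint64 = 0 };
 *
 *	e.cq.id = cq->id;		// which CQ is being rung
 *	e.cq.tail_ptr = cq->head;	// how far the driver has processed
 *	e.cq.arm = 1;			// request an interrupt for new CQEs
 *	// write e.as_uint64 to the CQ doorbell offset in the doorbell page
 */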

struct gdma_msg_hdr {
	uint32_t	hdr_type;
	uint32_t	msg_type;
	uint16_t	msg_version;
	uint16_t	hwc_msg_id;
	uint32_t	msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			uint16_t type;
			uint16_t instance;
		};

		uint32_t as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr	req;
	struct gdma_msg_hdr	resp; /* The expected response */
	struct gdma_dev_id	dev_id;
	uint32_t		activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr	response;
	struct gdma_dev_id	dev_id;
	uint32_t		activity_id;
	uint32_t		status;
	uint32_t		reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr	hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1

struct gdma_general_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE	0

static inline void
mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, uint32_t code,
    uint32_t req_size, uint32_t resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
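
/*
 * Typical request/response usage (sketch, error handling omitted):
 * build the request and expected-response headers with
 * mana_gd_init_req_hdr() and post the message over the HWC with
 * mana_gd_send_request(), declared later in this file.
 *
 *	struct gdma_query_max_resources_resp resp = { 0 };
 *	struct gdma_general_req req = { 0 };
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *	    sizeof(req), sizeof(resp));
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *	    sizeof(resp), &resp);
 */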

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	uint64_t		address;
	uint32_t		mem_key;
	uint32_t		size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge		*sgl;
	uint32_t		num_sge;

	uint32_t		inline_oob_size;
	const void		*inline_oob_data;

	uint32_t		flags;
	uint32_t		client_data_unit;
};
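
/*
 * Sketch of a single-SGE work request posted with
 * mana_gd_post_work_request() (declared later in this file).  The
 * buffer address, length and out-of-band data are placeholders.
 *
 *	struct gdma_posted_wqe_info wqe_info;
 *	struct gdma_wqe_request wqe_req = { 0 };
 *	struct gdma_sge sge;
 *
 *	sge.address = buf_busaddr;		// bus address of the buffer
 *	sge.mem_key = gd->gpa_mkey;		// key from device registration
 *	sge.size = buf_len;
 *
 *	wqe_req.sgl = &sge;
 *	wqe_req.num_sge = 1;
 *	wqe_req.inline_oob_size = INLINE_OOB_SMALL_SIZE;
 *	wqe_req.inline_oob_data = &oob;		// client-specific OOB data
 *
 *	err = mana_gd_post_work_request(wq, &wqe_req, &wqe_info);
 */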

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION		0

struct gdma_mem_info {
	device_t		dev;

	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
	bus_addr_t		dma_handle;	/* Physical address	*/
	void			*virt_addr;	/* Virtual address	*/
	uint64_t		length;

	/* Allocated by the PF driver */
	gdma_obj_handle_t	dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context	*gdma_context;

	struct gdma_dev_id	dev_id;

	uint32_t		pdid;
	uint32_t		doorbell;
	uint32_t		gpa_mkey;

	/* GDMA driver specific pointer */
	void			*driver_data;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE		64
#define GDMA_EQE_SIZE		16
#define GDMA_MAX_SQE_SIZE	512
#define GDMA_MAX_RQE_SIZE	256

#define GDMA_COMP_DATA_SIZE	0x3C

#define GDMA_EVENT_DATA_SIZE	0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE	32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	uint32_t		cqe_data[GDMA_COMP_DATA_SIZE / 4];
	uint32_t		wq_num;
	bool			is_sq;
};

struct gdma_event {
	uint32_t		details[GDMA_EVENT_DATA_SIZE / 4];
	uint8_t			type;
};

struct gdma_queue;

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
    struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev		*gdma_dev;

	enum gdma_queue_type	type;
	uint32_t		id;

	struct gdma_mem_info	mem_info;

	void			*queue_mem_ptr;
	uint32_t		queue_size;

	bool			monitor_avl_buf;

	uint32_t		head;
	uint32_t		tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool			disable_needed;

			gdma_eq_callback	*callback;
			void			*context;

			unsigned int		msix_index;

			uint32_t		log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback	*callback;
			void			*context;

			/* For CQ/EQ relationship */
			struct gdma_queue	*parent;
		} cq;
	};
};
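
/*
 * Example of the 'head' accounting described above (illustrative only):
 * after a WQE of wqe_size bytes is written into an SQ/RQ, the producer
 * index advances by the WQE size expressed in 32-byte Basic Units,
 * while an EQ/CQ head advances by one per processed entry.
 *
 *	wq->head += wqe_size / GDMA_WQE_BU_SIZE;	// SQ/RQ
 *	cq->head++;					// EQ/CQ
 */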

struct gdma_queue_spec {
	enum gdma_queue_type	type;
	bool			monitor_avl_buf;
	unsigned int		queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback	*callback;
			void			*context;

			unsigned long		log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback	*callback;
			void			*context;

			struct gdma_queue	*parent_eq;
		} cq;
	};
};
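
/*
 * Sketch of creating a MANA EQ from a spec with mana_gd_create_mana_eq()
 * (declared later in this file).  The callback, context, size and
 * throttle limit are placeholders.
 *
 *	struct gdma_queue_spec spec = { 0 };
 *	struct gdma_queue *eq;
 *
 *	spec.type = GDMA_EQ;
 *	spec.monitor_avl_buf = false;
 *	spec.queue_size = eq_size;		// multiple of GDMA_EQE_SIZE
 *	spec.eq.callback = my_eq_callback;	// hypothetical gdma_eq_callback
 *	spec.eq.context = my_context;
 *	spec.eq.log2_throttle_limit = log2_throttle;
 *
 *	err = mana_gd_create_mana_eq(gd, &spec, &eq);
 */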

struct mana_eq {
	struct gdma_queue	*eq;
};

struct gdma_irq_context {
	struct gdma_msix_entry	msix_e;
	struct resource		*res;
	driver_intr_t		*handler;
	void			*arg;
	void			*cookie;
	bool			requested;
	int			cpu;
	char			name[GDMA_IRQNAME_SZ];
};

struct gdma_context {
	device_t		dev;

	struct gdma_bus		gd_bus;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct gdma_resource	msix_resource;
	struct gdma_irq_context	*irq_contexts;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct sx		eq_test_event_sx;
	struct completion	eq_test_event;
	uint32_t		test_event_eq_id;

	struct resource		*bar0;
	struct resource		*msix;
	int			msix_rid;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	vm_paddr_t		phys_db_page_base;
	uint32_t		db_page_size;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;
};

#define MAX_NUM_GDMA_DEVICES	4

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

uint8_t *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, uint32_t wqe_offset);
uint32_t mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, uint8_t arm_bit);

struct gdma_wqe {
	uint32_t reserved	:24;
	uint32_t last_vbytes	:8;

	union {
		uint32_t flags;

		struct {
			uint32_t num_sge		:8;
			uint32_t inline_oob_size_div4	:3;
			uint32_t client_oob_in_sgl	:1;
			uint32_t reserved1		:4;
			uint32_t client_data_unit	:14;
			uint32_t reserved2		:2;
		};
	};
}; /* HW DATA */
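
/*
 * How the header fields relate to a gdma_wqe_request (a sketch inferred
 * from the field names, not a literal excerpt from the WQE writer):
 *
 *	wqe->num_sge = wqe_req->num_sge;
 *	wqe->inline_oob_size_div4 = inline_oob_size / sizeof(uint32_t);
 *	wqe->client_oob_in_sgl = (wqe_req->flags & GDMA_WR_OOB_IN_SGL) != 0;
 *	wqe->client_data_unit = wqe_req->client_data_unit;
 */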

#define INLINE_OOB_SMALL_SIZE	8
#define INLINE_OOB_LARGE_SIZE	24

#define MAX_TX_WQE_SIZE		512
#define MAX_RX_WQE_SIZE		256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
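
/*
 * With a 16-byte struct gdma_sge these work out to
 * (512 - 16 - 8) / 16 = 30 SGL entries per TX WQE and
 * (256 - 16) / 16 = 15 SGL entries per RX WQE.
 */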

struct gdma_cqe {
	uint32_t cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		uint32_t as_uint32;

		struct {
			uint32_t wq_num		:24;
			uint32_t is_sq		:1;
			uint32_t reserved	:4;
			uint32_t owner_bits	:3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS	3

#define GDMA_CQE_OWNER_MASK	((1 << GDMA_CQE_OWNER_BITS) - 1)
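
/*
 * Sketch of the owner-bits freshness check used when polling a CQ (an
 * illustration of the mechanism, not the exact driver code): the
 * expected owner value is derived from how many times the head has
 * wrapped around the ring.
 *
 *	num_cqe = cq->queue_size / GDMA_CQE_SIZE;
 *	old_bits = (cq->head / num_cqe - 1) & GDMA_CQE_OWNER_MASK;
 *	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *	owner_bits = cqe->cqe_info.owner_bits;
 *
 *	if (owner_bits == old_bits)
 *		return (0);	// no new completion yet
 *	if (owner_bits != new_bits)
 *		return (-1);	// overflow: the CQE was overwritten
 */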

#define SET_ARM_BIT		1

#define GDMA_EQE_OWNER_BITS	3

union gdma_eqe_info {
	uint32_t as_uint32;

	struct {
		uint32_t type		: 8;
		uint32_t reserved1	: 8;
		uint32_t client_id	: 2;
		uint32_t reserved2	: 11;
		uint32_t owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK	((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries)	(1UL << (log2_num_entries))

struct gdma_eqe {
	uint32_t details[GDMA_EVENT_DATA_SIZE / 4];
	uint32_t eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

struct gdma_posted_wqe_info {
	uint32_t wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	uint32_t queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	uint64_t protocol_ver_min;
	uint64_t protocol_ver_max;
	uint64_t drv_cap_flags1;
	uint64_t drv_cap_flags2;
	uint64_t drv_cap_flags3;
	uint64_t drv_cap_flags4;

	/* Advisory fields */
	uint64_t drv_ver;
	uint32_t os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	uint32_t reserved;
	uint32_t os_ver_major;
	uint32_t os_ver_minor;
	uint32_t os_ver_build;
	uint32_t os_ver_platform;
	uint64_t reserved_2;
	uint8_t os_ver_str1[128];
	uint8_t os_ver_str2[128];
	uint8_t os_ver_str3[128];
	uint8_t os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	uint64_t gdma_protocol_ver;
	uint64_t pf_cap_flags1;
	uint64_t pf_cap_flags2;
	uint64_t pf_cap_flags3;
	uint64_t pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	uint32_t status;
	uint32_t max_sq;
	uint32_t max_rq;
	uint32_t max_cq;
	uint32_t max_eq;
	uint32_t max_db;
	uint32_t max_mst;
	uint32_t max_cq_mod_ctx;
	uint32_t max_mod_cq;
	uint32_t max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	uint32_t num_of_devs;
	uint32_t reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	uint32_t pdid;
	uint32_t gpa_mkey;
	uint32_t db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	uint32_t resource_type;
	uint32_t num_resources;
	uint32_t alignment;
	uint32_t allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	uint32_t allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	uint32_t resource_type;
	uint32_t num_resources;
	uint32_t allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	uint32_t type;
	uint32_t reserved1;
	uint32_t pdid;
	uint32_t doolbell_id;
	gdma_obj_handle_t gdma_region;
	uint32_t reserved2;
	uint32_t queue_size;
	uint32_t log2_throttle_limit;
	uint32_t eq_pci_msix_index;
	uint32_t cq_mod_ctx_id;
	uint32_t cq_parent_eq_id;
	uint8_t  rq_drop_on_overrun;
	uint8_t  rq_err_on_wqe_overflow;
	uint8_t  rq_chain_rec_wqes;
	uint8_t  sq_hw_db;
	uint32_t reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	uint32_t queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	uint32_t type;
	uint32_t queue_index;
	uint32_t alloc_res_id_on_creation;
}; /* HW DATA */

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	uint64_t length;

	/* The offset in the first page */
	uint32_t offset_in_page;

	/* enum gdma_page_type */
	uint32_t gdma_page_type;

	/* The total number of pages */
	uint32_t page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	uint32_t page_addr_list_len;
	uint64_t page_addr_list[];
}; /* HW DATA */
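
/*
 * Because page_addr_list is a flexible array member, the request is
 * allocated with room for the page addresses it carries.  A sketch,
 * assuming all pages fit in a single message (allocation flags and
 * error handling are illustrative):
 *
 *	req_msg_size = sizeof(struct gdma_create_dma_region_req) +
 *	    num_pages * sizeof(uint64_t);
 *	req = malloc(req_msg_size, M_DEVBUF, M_WAITOK | M_ZERO);
 *
 *	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
 *	    req_msg_size, sizeof(resp));
 *	req->length = gmi->length;
 *	req->offset_in_page = 0;
 *	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
 *	req->page_count = num_pages;
 *	req->page_addr_list_len = num_pages;
 */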

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	gdma_obj_handle_t dma_region_handle;

	uint32_t page_addr_list_len;
	uint32_t reserved3;

	uint64_t page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	uint32_t reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t pd_handle;
	uint32_t pd_id;
	uint32_t reserved;
};/* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t pd_handle;
};/* HW DATA */

struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	gdma_obj_handle_t pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			gdma_obj_handle_t dma_region_handle;
			uint64_t virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t pd_handle;
	enum gdma_mr_type mr_type;
	uint32_t reserved_1;

	union {
		struct {
			gdma_obj_handle_t dma_region_handle;
			uint64_t virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;

	};
	uint32_t reserved_2;
};/* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t mr_handle;
	uint32_t lkey;
	uint32_t rkey;
};/* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

int mana_gd_verify_vf_version(device_t dev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
    const struct gdma_wqe_request *wqe_req,
    struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
    const struct gdma_wqe_request *wqe,
    struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(uint32_t res_avail, struct gdma_resource *r,
    const char *lock_name);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
    struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
    struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

void mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);

int mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp);

int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
    int *doorbell_page);

int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
    int doorbell_page);

int mana_gd_destroy_dma_region(struct gdma_context *gc,
    gdma_obj_handle_t dma_region_handle);
#endif /* _GDMA_H */