/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef _GDMA_H
#define _GDMA_H

#include <sys/bus.h>
#include <sys/bus_dma.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/sx.h>

#include "gdma_util.h"
#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

#define GDMA_BAR0		0

#define GDMA_IRQNAME_SZ		40

struct gdma_bus {
	bus_space_handle_t	bar0_h;
	bus_space_tag_t		bar0_t;
};

struct gdma_msix_entry {
	int			entry;
	int			vector;
};

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
};

typedef uint64_t gdma_obj_handle_t;

struct gdma_resource {
	/* Protect the bitmap */
	struct mtx		lock_spin;

	/* The bitmap size in bits. */
	uint32_t		size;

	/* The bitmap tracks the resources. */
	unsigned long		*map;
};

union gdma_doorbell_entry {
	uint64_t		as_uint64;

	struct {
		uint64_t id		: 24;
		uint64_t reserved	: 8;
		uint64_t tail_ptr	: 31;
		uint64_t arm		: 1;
	} cq;

	struct {
		uint64_t id		: 24;
		uint64_t wqe_cnt	: 8;
		uint64_t tail_ptr	: 32;
	} rq;

	struct {
		uint64_t id		: 24;
		uint64_t reserved	: 8;
		uint64_t tail_ptr	: 32;
	} sq;

	struct {
		uint64_t id		: 16;
		uint64_t reserved	: 16;
		uint64_t tail_ptr	: 31;
		uint64_t arm		: 1;
	} eq;
}; /* HW DATA */
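
/*
 * Example (illustrative sketch, not part of the API): arming a CQ by
 * composing a doorbell entry. The field names follow the union above;
 * writing the resulting 64-bit value to the doorbell page is handled
 * elsewhere (see mana_gd_ring_cq() below).
 *
 *	union gdma_doorbell_entry e = { .as_uint64 = 0 };
 *
 *	e.cq.id = cq->id;
 *	e.cq.tail_ptr = cq->head;
 *	e.cq.arm = 1;
 *	// e.as_uint64 now holds the value to write to the CQ doorbell.
 */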

struct gdma_msg_hdr {
	uint32_t	hdr_type;
	uint32_t	msg_type;
	uint16_t	msg_version;
	uint16_t	hwc_msg_id;
	uint32_t	msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			uint16_t type;
			uint16_t instance;
		};

		uint32_t as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr	req;
	struct gdma_msg_hdr	resp; /* The expected response */
	struct gdma_dev_id	dev_id;
	uint32_t		activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr	response;
	struct gdma_dev_id	dev_id;
	uint32_t		activity_id;
	uint32_t		status;
	uint32_t		reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr	hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

struct gdma_general_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE	0

static inline void
mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, uint32_t code,
    uint32_t req_size, uint32_t resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
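
/*
 * Example (illustrative sketch): a typical request/response exchange.
 * Every message starts by initializing the request header with the
 * message code and the exact sizes of the request and expected response;
 * the reply's status field must be checked in addition to the return
 * value of mana_gd_send_request() (declared below). The variables 'gc'
 * and 'eq' are assumptions for illustration only.
 *
 *	struct gdma_generate_test_event_req req = { 0 };
 *	struct gdma_general_resp resp = { 0 };
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_GENERATE_TEST_EQE,
 *	    sizeof(req), sizeof(resp));
 *	req.hdr.dev_id = gc->mana.dev_id;
 *	req.queue_index = eq->id;
 *
 *	err = mana_gd_send_request(gc, sizeof(req), &req,
 *	    sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		; // handle the failure
 */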

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	uint64_t		address;
	uint32_t		mem_key;
	uint32_t		size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge		*sgl;
	uint32_t		num_sge;

	uint32_t		inline_oob_size;
	const void		*inline_oob_data;

	uint32_t		flags;
	uint32_t		client_data_unit;
};
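
/*
 * Example (illustrative sketch): describing a one-segment work request.
 * The SGE carries a DMA address, the memory key of the registered
 * region, and the segment length; the OOB pointer/size and flags are
 * filled in per the definitions above. The variables 'dma_addr', 'len',
 * 'oob', 'gd', and 'wq' are assumptions for illustration only.
 *
 *	struct gdma_sge sge = {
 *		.address = dma_addr,
 *		.mem_key = gd->gpa_mkey,
 *		.size = len,
 *	};
 *	struct gdma_wqe_request wqe_req = {
 *		.sgl = &sge,
 *		.num_sge = 1,
 *		.inline_oob_size = sizeof(oob),
 *		.inline_oob_data = &oob,
 *		.flags = GDMA_WR_NONE,
 *	};
 *	struct gdma_posted_wqe_info wqe_info;
 *
 *	// err = mana_gd_post_and_ring(wq, &wqe_req, &wqe_info);
 */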

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION		0

struct gdma_mem_info {
	device_t		dev;

	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
	bus_addr_t		dma_handle;	/* Physical address	*/
	void			*virt_addr;	/* Virtual address	*/
	uint64_t		length;

	/* Allocated by the PF driver */
	gdma_obj_handle_t	dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context	*gdma_context;

	struct gdma_dev_id	dev_id;

	uint32_t		pdid;
	uint32_t		doorbell;
	uint32_t		gpa_mkey;

	/* GDMA driver specific pointer */
	void			*driver_data;
};

#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE

#define GDMA_CQE_SIZE		64
#define GDMA_EQE_SIZE		16
#define GDMA_MAX_SQE_SIZE	512
#define GDMA_MAX_RQE_SIZE	256

#define GDMA_COMP_DATA_SIZE	0x3C

#define GDMA_EVENT_DATA_SIZE	0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE	32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX	UINT_MAX

struct gdma_comp {
	uint32_t		cqe_data[GDMA_COMP_DATA_SIZE / 4];
	uint32_t		wq_num;
	bool			is_sq;
};

struct gdma_event {
	uint32_t		details[GDMA_EVENT_DATA_SIZE / 4];
	uint8_t			type;
};

struct gdma_queue;

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
    struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev		*gdma_dev;

	enum gdma_queue_type	type;
	uint32_t		id;

	struct gdma_mem_info	mem_info;

	void			*queue_mem_ptr;
	uint32_t		queue_size;

	bool			monitor_avl_buf;

	uint32_t		head;
	uint32_t		tail;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool			disable_needed;

			gdma_eq_callback	*callback;
			void			*context;

			unsigned int		msix_index;

			uint32_t		log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback	*callback;
			void			*context;

			/* For CQ/EQ relationship */
			struct gdma_queue	*parent;
		} cq;
	};
};
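
/*
 * Example (illustrative sketch): head/tail bookkeeping for an SQ, per
 * the comment above. This only shows the shape of the arithmetic;
 * 'wqe_size_in_bu' comes from struct gdma_posted_wqe_info (defined
 * below), and 'wq' is an assumption for illustration.
 *
 *	// After posting a WQE:
 *	wq->head += wqe_info.wqe_size_in_bu;
 *
 *	// After the WQE's completion is processed:
 *	wq->tail += wqe_info.wqe_size_in_bu;
 *
 *	// Free space, in BUs, is bounded by the queue size:
 *	avail = wq->queue_size / GDMA_WQE_BU_SIZE - (wq->head - wq->tail);
 */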

struct gdma_queue_spec {
	enum gdma_queue_type	type;
	bool			monitor_avl_buf;
	unsigned int		queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback	*callback;
			void			*context;

			unsigned long		log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback	*callback;
			void			*context;

			struct gdma_queue	*parent_eq;
		} cq;
	};
};

struct mana_eq {
	struct gdma_queue	*eq;
};

struct gdma_irq_context {
	struct gdma_msix_entry	msix_e;
	struct resource		*res;
	driver_intr_t		*handler;
	void			*arg;
	void			*cookie;
	bool			requested;
	int			cpu;
	char			name[GDMA_IRQNAME_SZ];
};

struct gdma_context {
	device_t		dev;

	struct gdma_bus		gd_bus;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct gdma_resource	msix_resource;
	struct gdma_irq_context	*irq_contexts;

	/* L2 MTU */
	uint16_t		adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct sx		eq_test_event_sx;
	struct completion	eq_test_event;
	uint32_t		test_event_eq_id;

	struct resource		*bar0;
	struct resource		*msix;
	int			msix_rid;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	vm_paddr_t		phys_db_page_base;
	uint32_t		db_page_size;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;
};

#define MAX_NUM_GDMA_DEVICES	4

static inline bool
mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool
mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

uint8_t *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, uint32_t wqe_offset);
uint32_t mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
    const struct gdma_queue_spec *spec,
    struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, uint8_t arm_bit);

struct gdma_wqe {
	uint32_t reserved	:24;
	uint32_t last_vbytes	:8;

	union {
		uint32_t flags;

		struct {
			uint32_t num_sge		:8;
			uint32_t inline_oob_size_div4	:3;
			uint32_t client_oob_in_sgl	:1;
			uint32_t reserved1		:4;
			uint32_t client_data_unit	:14;
			uint32_t reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE	8
#define INLINE_OOB_LARGE_SIZE	24

#define MAX_TX_WQE_SIZE		512
#define MAX_RX_WQE_SIZE		256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
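
/*
 * Worked out with the sizes above (sizeof(struct gdma_sge) == 16):
 * MAX_TX_WQE_SGL_ENTRIES = (512 - 16 - 8) / 16 = 30, and
 * MAX_RX_WQE_SGL_ENTRIES = (256 - 16) / 16 = 15.
 */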

struct gdma_cqe {
	uint32_t cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		uint32_t as_uint32;

		struct {
			uint32_t wq_num		:24;
			uint32_t is_sq		:1;
			uint32_t reserved	:4;
			uint32_t owner_bits	:3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS	3

#define GDMA_CQE_OWNER_MASK	((1 << GDMA_CQE_OWNER_BITS) - 1)
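
/*
 * Example (illustrative sketch): detecting a new CQE with the owner
 * bits. The hardware stamps each CQE with a 3-bit counter that wraps
 * with the queue, so the consumer can compare the stamped value against
 * the value expected from its own 'head' position. 'cq' and 'cqe' are
 * assumptions for illustration only.
 *
 *	num_cqe = cq->queue_size / GDMA_CQE_SIZE;
 *	owner_bits = cqe->cqe_info.owner_bits;
 *	new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *	if (owner_bits == new_bits)
 *		; // a new CQE is available at index cq->head % num_cqe
 */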

#define SET_ARM_BIT		1

#define GDMA_EQE_OWNER_BITS	3

union gdma_eqe_info {
	uint32_t as_uint32;

	struct {
		uint32_t type		: 8;
		uint32_t reserved1	: 8;
		uint32_t client_id	: 2;
		uint32_t reserved2	: 11;
		uint32_t owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK	((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries)	(1UL << (log2_num_entries))

struct gdma_eqe {
	uint32_t details[GDMA_EVENT_DATA_SIZE / 4];
	uint32_t eqe_info;
}; /* HW DATA */

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

struct gdma_posted_wqe_info {
	uint32_t wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	uint32_t queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	uint64_t protocol_ver_min;
	uint64_t protocol_ver_max;
	uint64_t drv_cap_flags1;
	uint64_t drv_cap_flags2;
	uint64_t drv_cap_flags3;
	uint64_t drv_cap_flags4;

	/* Advisory fields */
	uint64_t drv_ver;
	uint32_t os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	uint32_t reserved;
	uint32_t os_ver_major;
	uint32_t os_ver_minor;
	uint32_t os_ver_build;
	uint32_t os_ver_platform;
	uint64_t reserved_2;
	uint8_t os_ver_str1[128];
	uint8_t os_ver_str2[128];
	uint8_t os_ver_str3[128];
	uint8_t os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	uint64_t gdma_protocol_ver;
	uint64_t pf_cap_flags1;
	uint64_t pf_cap_flags2;
	uint64_t pf_cap_flags3;
	uint64_t pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	uint32_t status;
	uint32_t max_sq;
	uint32_t max_rq;
	uint32_t max_cq;
	uint32_t max_eq;
	uint32_t max_db;
	uint32_t max_mst;
	uint32_t max_cq_mod_ctx;
	uint32_t max_mod_cq;
	uint32_t max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	uint32_t num_of_devs;
	uint32_t reserved;
	struct gdma_dev_id devs[64];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	uint32_t pdid;
	uint32_t gpa_mkey;
	uint32_t db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	uint32_t resource_type;
	uint32_t num_resources;
	uint32_t alignment;
	uint32_t allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	uint32_t allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	uint32_t resource_type;
	uint32_t num_resources;
	uint32_t allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	uint32_t type;
	uint32_t reserved1;
	uint32_t pdid;
	uint32_t doorbell_id;
	gdma_obj_handle_t gdma_region;
	uint32_t reserved2;
	uint32_t queue_size;
	uint32_t log2_throttle_limit;
	uint32_t eq_pci_msix_index;
	uint32_t cq_mod_ctx_id;
	uint32_t cq_parent_eq_id;
	uint8_t  rq_drop_on_overrun;
	uint8_t  rq_err_on_wqe_overflow;
	uint8_t  rq_chain_rec_wqes;
	uint8_t  sq_hw_db;
	uint32_t reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	uint32_t queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	uint32_t type;
	uint32_t queue_index;
	uint32_t alloc_res_id_on_creation;
}; /* HW DATA */

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	uint64_t length;

	/* The offset in the first page */
	uint32_t offset_in_page;

	/* enum gdma_page_type */
	uint32_t gdma_page_type;

	/* The total number of pages */
	uint32_t page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	uint32_t page_addr_list_len;
	uint64_t page_addr_list[];
}; /* HW DATA */
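
/*
 * Example (illustrative sketch): sizing a GDMA_CREATE_DMA_REGION
 * request. Because page_addr_list is a flexible array member, the
 * request length must account for the page addresses sent inline;
 * 'num_pages' is an assumption for illustration only.
 *
 *	uint32_t req_size = sizeof(struct gdma_create_dma_region_req) +
 *	    num_pages * sizeof(uint64_t);
 *
 * When the full list does not fit in one request, only a prefix is sent
 * (page_addr_list_len < page_count) and the rest follows via
 * GDMA_DMA_REGION_ADD_PAGES, as noted above.
 */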

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	gdma_obj_handle_t dma_region_handle;

	uint32_t page_addr_list_len;
	uint32_t reserved3;

	uint64_t page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	uint32_t reserved;
}; /* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t pd_handle;
	uint32_t pd_id;
	uint32_t reserved;
}; /* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t pd_handle;
}; /* HW DATA */
struct gdma_destroy_pd_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

enum gdma_mr_type {
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	gdma_obj_handle_t pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			gdma_obj_handle_t dma_region_handle;
			uint64_t virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t pd_handle;
	enum gdma_mr_type mr_type;
	uint32_t reserved_1;

	union {
		struct {
			gdma_obj_handle_t dma_region_handle;
			uint64_t virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
	uint32_t reserved_2;
}; /* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	gdma_obj_handle_t mr_handle;
	uint32_t lkey;
	uint32_t rkey;
}; /* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	gdma_obj_handle_t mr_handle;
}; /* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

int mana_gd_verify_vf_version(device_t dev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
    const struct gdma_wqe_request *wqe_req,
    struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
    const struct gdma_wqe_request *wqe,
    struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(uint32_t res_avail, struct gdma_resource *r,
    const char *lock_name);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
    struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
    struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

void mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);

int mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp);

int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
    int *doorbell_page);

int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
    int doorbell_page);

int mana_gd_destroy_dma_region(struct gdma_context *gc,
    gdma_obj_handle_t dma_region_handle);

#endif /* _GDMA_H */