/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _GDMA_H
#define _GDMA_H

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

#include "shm_channel.h"

#define GDMA_STATUS_MORE_ENTRIES	0x00000105

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
};

#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_SOC_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
	GDMA_EQE_RNIC_QP_FATAL		= 176,
};

enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
	GDMA_DEVICE_MANA_IB	= 3,
};

struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

union gdma_doorbell_entry {
	u64	as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */
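
/* Illustrative sketch (not lifted from the driver): arming a CQ by composing
 * a doorbell entry and writing it to the mapped doorbell page. 'db_page',
 * 'cq_db_offset' and 'tail' are hypothetical names for the mapped doorbell
 * page, the CQ doorbell register offset and the current CQ position.
 *
 *	union gdma_doorbell_entry e = {};
 *
 *	e.cq.id = cq->id;
 *	e.cq.tail_ptr = tail;
 *	e.cq.arm = 1;
 *	writeq(e.as_uint64, db_page + cq_db_offset);
 */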

struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0

static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
					u32 req_size, u32 resp_size)
{
	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->req.msg_type = code;
	hdr->req.msg_version = GDMA_MESSAGE_V1;
	hdr->req.msg_size = req_size;

	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
	hdr->resp.msg_type = code;
	hdr->resp.msg_version = GDMA_MESSAGE_V1;
	hdr->resp.msg_size = resp_size;
}
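
/* mana_gd_init_req_hdr() defaults both headers to GDMA_MESSAGE_V1. A caller
 * that speaks a newer revision of a given message can bump the version
 * afterwards, e.g. (illustrative sketch; 'code', 'req' and 'resp' are
 * placeholders):
 *
 *	mana_gd_init_req_hdr(&req.hdr, code, sizeof(req), sizeof(resp));
 *	req.hdr.req.msg_version = GDMA_MESSAGE_V2;
 */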

/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
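
/* Worked example of the helpers above: MANA_PAGE_ALIGN(6000) == 8192,
 * MANA_PAGE_ALIGNED(8192) is true, and MANA_PFN(0x12345000) == 0x12345.
 */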

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX

struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue	*eq;
	struct dentry		*mana_eq_debugfs;
};

typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
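
/* Illustrative sketch of the bookkeeping described above (simplified, not the
 * driver's exact code; 'posted' and 'completed_size_in_bu' are placeholders):
 *
 *	wq->head += posted.wqe_size_in_bu;	(SQ/RQ: producer, in BUs)
 *	wq->tail += completed_size_in_bu;	(SQ/RQ: after its CQE is seen)
 *	cq->head++;				(CQ/EQ: one per processed entry)
 */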

struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;

		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

struct gdma_context {
	struct device		*dev;
	struct dentry		*mana_pci_debugfs;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct gdma_irq_context	*irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;
	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u32 db_page_size;
	int                     numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;

	/* Azure RDMA adapter */
	struct gdma_dev		mana_ib;
};

static inline bool mana_gd_is_mana(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_MANA;
}

static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
{
	return gd->dev_id.type == GDMA_DEVICE_HWC;
}

u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
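
/* With sizeof(struct gdma_sge) == 16, the limits above evaluate to:
 *	MAX_TX_WQE_SGL_ENTRIES == (512 - 16 - 8) / 16 == 30
 *	MAX_RX_WQE_SGL_ENTRIES == (256 - 16) / 16 == 15
 */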

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)
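
/* Illustrative sketch of an owner-bits check when polling a CQ (simplified;
 * 'cq_ring' and 'num_cqe' are placeholders for the CQE array and its entry
 * count). If the slot still carries the previous pass's owner value, HW has
 * not written it yet; any value other than the current pass's indicates an
 * overflow.
 *
 *	struct gdma_cqe *cqe = &cq_ring[cq->head % num_cqe];
 *	u32 owner = cqe->cqe_info.owner_bits;
 *	u32 cur = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK;
 *	u32 prev = (cur - 1) & GDMA_CQE_OWNER_MASK;
 *
 *	if (owner == prev)
 *		return 0;
 */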

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */
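
/* Illustrative sketch: decoding an EQE's type. The owner-bits check mirrors
 * the CQ case, using GDMA_EQE_OWNER_MASK instead.
 *
 *	union gdma_eqe_info info;
 *
 *	info.as_uint32 = eqe->eqe_info;
 *	if (info.type == GDMA_EQE_COMPLETION)
 *		(handle a completion event)
 */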

#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0

struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};

/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	u32 doorbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

enum atb_page_size {
	ATB_PAGE_SIZE_4K,
	ATB_PAGE_SIZE_8K,
	ATB_PAGE_SIZE_16K,
	ATB_PAGE_SIZE_32K,
	ATB_PAGE_SIZE_64K,
	ATB_PAGE_SIZE_128K,
	ATB_PAGE_SIZE_256K,
	ATB_PAGE_SIZE_512K,
	ATB_PAGE_SIZE_1M,
	ATB_PAGE_SIZE_2M,
	ATB_PAGE_SIZE_MAX,
};

enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */
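
/* Illustrative sketch of building the request with its flexible page list;
 * 'total_pages' and 'num_inline_pages' are placeholders chosen by the caller,
 * and any remaining pages would follow via GDMA_DMA_REGION_ADD_PAGES:
 *
 *	req_len = struct_size(req, page_addr_list, num_inline_pages);
 *	req = kzalloc(req_len, GFP_KERNEL);
 *	mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION, req_len,
 *			     sizeof(resp));
 *	req->length = (u64)total_pages * MANA_PAGE_SIZE;
 *	req->gdma_page_type = GDMA_PAGE_TYPE_4K;
 *	req->page_count = total_pages;
 *	req->page_addr_list_len = num_inline_pages;
 */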

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
	GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};

struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
};/* HW DATA */

struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
};/* HW DATA */

struct gdma_destroy_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

enum gdma_mr_type {
	/*
	 * Guest Physical Address - MRs of this type allow access
	 * to any DMA-mapped memory using bus-logical address
	 */
	GDMA_MR_TYPE_GPA = 1,
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
};

struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
	};
};

struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;

	};
	u32 reserved_2;
};/* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
};/* HW DATA */

struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);
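
/* Illustrative sketch of the request/response round trip over the HWC
 * ('gc' stands for the adapter's struct gdma_context):
 *
 *	struct gdma_query_max_resources_resp resp = {};
 *	struct gdma_general_req req = {};
 *	int err;
 *
 *	mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
 *			     sizeof(req), sizeof(resp));
 *	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 *	if (err || resp.hdr.status)
 *		(handle the failure; resp.hdr.status carries the HW status)
 */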

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

#endif /* _GDMA_H */