xref: /linux/include/net/mana/gdma.h (revision 2ed4b46b4fc77749cb0f8dd31a01441b82c8dbaa)
1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #ifndef _GDMA_H
5 #define _GDMA_H
6 
7 #include <linux/dma-mapping.h>
8 #include <linux/netdevice.h>
9 
10 #include "shm_channel.h"
11 
12 #define GDMA_STATUS_MORE_ENTRIES	0x00000105
13 #define GDMA_STATUS_CMD_UNSUPPORTED	0xffffffff
14 
15 /* Structures labeled with "HW DATA" are exchanged with the hardware. All of
16  * them are naturally aligned and hence don't need __packed.
17  */
18 
/* Message types for requests sent to the management device over the HWC.
 * The numeric values are part of the device protocol; do not renumber.
 */
enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
	GDMA_ALLOC_DM			= 96, /* 0x60 */
	GDMA_DESTROY_DM			= 97, /* 0x61 */
};

/* Resource type used with the GDMA_*_RESOURCE_RANGE requests */
#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

/* Flag bits for gdma_wqe_request::flags */
enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

/* Event types carried in EQ entries (see union gdma_eqe_info::type) */
enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_FPGA_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
	GDMA_EQE_HWC_SOC_SERVICE	= 134,
	GDMA_EQE_HWC_RESET_REQUEST	= 135,
	GDMA_EQE_RNIC_QP_FATAL		= 176,
};

/* GDMA client device types; compared against gdma_dev_id::type */
enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
	GDMA_DEVICE_MANA_IB	= 3,
};

/* Service events delivered to a device (see mana_rdma_service_event()) */
enum gdma_service_type {
	GDMA_SERVICE_TYPE_NONE		= 0,
	GDMA_SERVICE_TYPE_RDMA_SUSPEND	= 1,
	GDMA_SERVICE_TYPE_RDMA_RESUME	= 2,
};
83 
/* Deferred-work item carrying a service event for a GDMA device */
struct mana_service_work {
	struct work_struct work;
	struct gdma_dev *gdma_dev;
	enum gdma_service_type event;
};

/* Bitmap-based allocator of small integer IDs; managed with
 * mana_gd_alloc_res_map()/mana_gd_free_res_map().
 */
struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};
100 
/* 64-bit value written to the doorbell page to notify the HW of new
 * producer/consumer positions; the layout differs per queue type.
 */
union gdma_doorbell_entry {
	u64	as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;	/* request an interrupt/callback */
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */

/* Common header prefixed to every HWC message (request and response) */
struct gdma_msg_hdr {
	u32 hdr_type;	/* GDMA_STANDARD_HEADER_TYPE */
	u32 msg_type;	/* enum gdma_request_type */
	u16 msg_version;	/* GDMA_MESSAGE_V* */
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

/* Identifies a GDMA client device: type (GDMA_DEVICE_*) plus instance */
struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;	/* 0 on success; e.g. GDMA_STATUS_* otherwise */
	u32 reserved;
}; /* HW DATA */

/* Request with no payload beyond the common header */
struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

/* Response with no payload beyond the common header */
struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0
179 
180 static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
181 					u32 req_size, u32 resp_size)
182 {
183 	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
184 	hdr->req.msg_type = code;
185 	hdr->req.msg_version = GDMA_MESSAGE_V1;
186 	hdr->req.msg_size = req_size;
187 
188 	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
189 	hdr->resp.msg_type = code;
190 	hdr->resp.msg_version = GDMA_MESSAGE_V1;
191 	hdr->resp.msg_size = resp_size;
192 }
193 
/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;	/* DMA address of the buffer */
	u32 mem_key;
	u32 size;	/* buffer length in bytes */
}; /* HW DATA */

/* Driver-side description of a work request to post to a WQ
 * (see mana_gd_post_work_request()).
 */
struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;	/* enum gdma_work_request_flags */
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

/* Deferred-work item used to service an EQE-triggered event */
struct mana_serv_work {
	struct work_struct serv_work;
	struct pci_dev *pdev;
	enum gdma_eqe_type type;
};

/* A DMA-coherent memory block and its device-side region handle */
struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
236 
/* Per-client-device state (HWC, MANA Ethernet, or MANA RDMA) */
struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;	/* protection domain ID (INVALID_PDID if unset) */
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
	bool is_suspended;
	bool rdma_teardown;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

/* Entry sizes, in bytes */
#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX
281 
/* A completion record returned by mana_gd_poll_cq() */
struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;	/* the WQ this completion belongs to */
	bool is_sq;	/* true: SQ completion; false: RQ completion */
};

/* An event record delivered via an EQ */
struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;	/* enum gdma_eqe_type */
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue	*eq;
	struct dentry		*mana_eq_debugfs;
};

/* Client callback invoked for each event arriving on an EQ */
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

/* Client callback invoked to process completions on a CQ */
typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
/* Driver state for one GDMA queue (SQ/RQ/CQ/EQ); see the head/tail
 * semantics described in the comment above.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;	/* backing DMA memory */

	void *queue_mem_ptr;
	u32 queue_size;	/* in bytes */

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};

/* Parameters for creating a queue; consumed by the
 * mana_gd_create_*() functions below.
 */
struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;

		} cq;
	};
};
382 
#define MANA_IRQ_NAME_SZ 32

/* Per-IRQ state: the handler and the list of EQs served by this IRQ */
struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

/* Bit numbers for gdma_context::flags */
enum gdma_context_flags {
	GC_PROBE_SUCCEEDED	= 0,
	GC_IN_SERVICE		= 1,
};
397 
/* Per-PCI-device GDMA state shared by all client devices */
struct gdma_context {
	struct device		*dev;
	struct dentry		*mana_pci_debugfs;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct xarray		irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;

	/* BAR0 mappings and derived register areas */
	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u32 db_page_size;
	int                     numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;

	/* Azure RDMA adapter */
	struct gdma_dev		mana_ib;

	u64 pf_cap_flags1;

	struct workqueue_struct *service_wq;

	/* Bitmask of enum gdma_context_flags */
	unsigned long		flags;
};
448 
449 static inline bool mana_gd_is_mana(struct gdma_dev *gd)
450 {
451 	return gd->dev_id.type == GDMA_DEVICE_MANA;
452 }
453 
454 static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
455 {
456 	return gd->dev_id.type == GDMA_DEVICE_HWC;
457 }
458 
/* WQ helpers: locate a WQE by offset, and query the available WQ space */
u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

/* Ask the device to generate a test event on @eq */
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

/* Queue creation, per client type; all fill *queue_ptr on success */
int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

/* Pop up to @num_cqe completions from @cq into @comp */
int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

/* Ring the CQ doorbell; @arm_bit requests further event notification */
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

/* Queue servicing work for an EQE of @type on gc->service_wq */
int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type);
483 
/* On-queue header of a work queue entry (WQE) */
struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;	/* inline OOB size / 4 */
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MANA_MAX_TX_WQE_SGL_ENTRIES 30

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

/* Max SGEs that fit in a max-sized SQE after the OOB area */
#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))

/* Completion queue entry as written by the HW */
struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;	/* queue-wraparound detection */
		};
	} cqe_info;
}; /* HW DATA */
531 
#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

/* Decoded view of gdma_eqe::eqe_info */
union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;	/* enum gdma_eqe_type */
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;	/* queue-wraparound detection */
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

/* Event queue entry as written by the HW */
struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;	/* see union gdma_eqe_info */
}; /* HW DATA */
559 
/* BAR0 register offsets (VF) */
#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

/* BAR0 register offsets (PF) */
#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

/* PCI device IDs */
#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

/* Filled in by mana_gd_post_work_request() for the posted WQE */
struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;	/* WQE size in 32-byte Basic Units */
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};
589 
590 #define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)
591 
592 /* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
593  * so the driver is able to reliably support features like busy_poll.
594  */
595 #define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
596 #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
597 #define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
598 #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
599 
600 /* Driver can handle holes (zeros) in the device list */
601 #define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
602 
603 /* Driver supports dynamic MSI-X vector allocation */
604 #define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)
605 
606 /* Driver can self reset on EQE notification */
607 #define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)
608 
609 /* Driver can self reset on FPGA Reconfig EQE notification */
610 #define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)
611 
612 /* Driver detects stalled send queues and recovers them */
613 #define GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY BIT(18)
614 
615 #define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
616 
617 /* Driver supports linearizing the skb when num_sge exceeds hardware limit */
618 #define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)
619 
620 /* Driver can send HWC periodically to query stats */
621 #define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)
622 
623 /* Driver can handle hardware recovery events during probe */
624 #define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)
625 
626 /* Driver supports self recovery on Hardware Channel timeouts */
627 #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY BIT(25)
628 
629 #define GDMA_DRV_CAP_FLAGS1 \
630 	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
631 	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
632 	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
633 	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
634 	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
635 	 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
636 	 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
637 	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
638 	 GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
639 	 GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
640 	 GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
641 	 GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
642 	 GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY | \
643 	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY)
644 
645 #define GDMA_DRV_CAP_FLAGS2 0
646 
647 #define GDMA_DRV_CAP_FLAGS3 0
648 
649 #define GDMA_DRV_CAP_FLAGS4 0
650 
/* GDMA_VERIFY_VF_DRIVER_VERSION request: negotiate protocol version and
 * advertise driver capabilities to the device.
 */
struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;	/* negotiated protocol version */
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */
702 
/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

/* GDMA_ALLOCATE_RESOURCE_RANGE */
struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;	/* e.g. GDMA_RESOURCE_DOORBELL_PAGE */
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;	/* first ID of the allocated range */
};

/* GDMA_DESTROY_RESOURCE_RANGE */
struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};
739 
740 /* GDMA_CREATE_QUEUE */
741 struct gdma_create_queue_req {
742 	struct gdma_req_hdr hdr;
743 	u32 type;
744 	u32 reserved1;
745 	u32 pdid;
746 	u32 doolbell_id;
747 	u64 gdma_region;
748 	u32 reserved2;
749 	u32 queue_size;
750 	u32 log2_throttle_limit;
751 	u32 eq_pci_msix_index;
752 	u32 cq_mod_ctx_id;
753 	u32 cq_parent_eq_id;
754 	u8  rq_drop_on_overrun;
755 	u8  rq_err_on_wqe_overflow;
756 	u8  rq_chain_rec_wqes;
757 	u8  sq_hw_db;
758 	u32 reserved3;
759 }; /* HW DATA */
760 
struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;	/* ID of the newly created queue */
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;	/* enum gdma_queue_type */
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;	/* HWC timeout reported by the device */
	u32 reserved;
};
786 
/* Access rights for memory regions (bitmask) */
enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};

/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];	/* flexible array of page DMA addresses */
}; /* HW DATA */

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

/* Protection domain creation flags */
enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
	GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};
847 
/* GDMA_CREATE_PD */
struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
};/* HW DATA */

/* GDMA_DESTROY_PD */
struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
};/* HW DATA */

/* NOTE: "destory" is a historical misspelling of "destroy"; the tag is
 * kept as-is because existing callers reference it.
 */
struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

enum gdma_mr_type {
	/*
	 * Guest Physical Address - MRs of this type allow access
	 * to any DMA-mapped memory using bus-logical address
	 */
	GDMA_MR_TYPE_GPA = 1,
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
	/* Guest zero-based address MRs */
	GDMA_MR_TYPE_ZBVA = 4,
	/* Device address MRs */
	GDMA_MR_TYPE_DM = 5,
};
886 
/* Driver-side parameters for creating a memory region; the per-type
 * union member is selected by @mr_type.
 */
struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
		struct {
			u64 dm_handle;
			u64 offset;
			u64 length;
			enum gdma_mr_access_flags access_flags;
		} da;
	};
};

/* GDMA_CREATE_MR */
struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	/* __packed: the union members are not naturally aligned, so the
	 * wire layout must be pinned explicitly here.
	 */
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} __packed gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} __packed zbva;
		struct {
			u64 dm_handle;
			u64 offset;
			enum gdma_mr_access_flags access_flags;
		} __packed da;
	} __packed;
	u32 reserved_2;
	union {
		struct {
			u64 length;
		} da_ext;
	};
};/* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
};/* HW DATA */

/* GDMA_DESTROY_MR */
struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */

/* GDMA_ALLOC_DM */
struct gdma_alloc_dm_req {
	struct gdma_req_hdr hdr;
	u64 length;
	u32 alignment;
	u32 flags;
}; /* HW Data */

struct gdma_alloc_dm_resp {
	struct gdma_resp_hdr hdr;
	u64 dm_handle;
}; /* HW Data */

/* GDMA_DESTROY_DM */
struct gdma_destroy_dm_req {
	struct gdma_req_hdr hdr;
	u64 dm_handle;
}; /* HW Data */

struct gdma_destroy_dm_resp {
	struct gdma_resp_hdr hdr;
}; /* HW Data */
975 
/* Negotiate protocol version and capabilities with the device */
int mana_gd_verify_vf_version(struct pci_dev *pdev);

/* Register/deregister a client device with the management device */
int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

/* Post a work request to @wq; on success fills @wqe_info */
int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

/* Post a work request and ring the queue's doorbell */
int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

/* Allocate/free the bitmap backing a gdma_resource */
int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

/* Allocate/free DMA-coherent memory tracked by @gmi */
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

/* Send a request over the HWC and wait for the response */
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

/* Deliver a service event (suspend/resume) to the RDMA client device */
int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);

int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
int mana_gd_resume(struct pci_dev *pdev);

bool mana_need_log(struct gdma_context *gc, int err);
1013 
1014 #endif /* _GDMA_H */
1015