xref: /linux/include/net/mana/gdma.h (revision dfecb0c5af3b07ebfa84be63a7a21bfc9e29a872)
1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #ifndef _GDMA_H
5 #define _GDMA_H
6 
7 #include <linux/dma-mapping.h>
8 #include <linux/netdevice.h>
9 
10 #include "shm_channel.h"
11 
/* HWC response status codes returned by the PF/firmware */
#define GDMA_STATUS_MORE_ENTRIES	0x00000105
#define GDMA_STATUS_CMD_UNSUPPORTED	0xffffffff

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* Request opcodes carried in gdma_msg_hdr.msg_type (see mana_gd_init_req_hdr).
 * Values are part of the HWC protocol and must not be renumbered.
 */
enum gdma_request_type {
	GDMA_VERIFY_VF_DRIVER_VERSION	= 1,
	GDMA_QUERY_MAX_RESOURCES	= 2,
	GDMA_LIST_DEVICES		= 3,
	GDMA_REGISTER_DEVICE		= 4,
	GDMA_DEREGISTER_DEVICE		= 5,
	GDMA_GENERATE_TEST_EQE		= 10,
	GDMA_CREATE_QUEUE		= 12,
	GDMA_DISABLE_QUEUE		= 13,
	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
	GDMA_DESTROY_RESOURCE_RANGE	= 24,
	GDMA_CREATE_DMA_REGION		= 25,
	GDMA_DMA_REGION_ADD_PAGES	= 26,
	GDMA_DESTROY_DMA_REGION		= 27,
	GDMA_CREATE_PD			= 29,
	GDMA_DESTROY_PD			= 30,
	GDMA_CREATE_MR			= 31,
	GDMA_DESTROY_MR			= 32,
	GDMA_QUERY_HWC_TIMEOUT		= 84, /* 0x54 */
	GDMA_ALLOC_DM			= 96, /* 0x60 */
	GDMA_DESTROY_DM			= 97, /* 0x61 */
};

/* resource_type for GDMA_ALLOCATE/DESTROY_RESOURCE_RANGE requests */
#define GDMA_RESOURCE_DOORBELL_PAGE	27

enum gdma_queue_type {
	GDMA_INVALID_QUEUE,
	GDMA_SQ,
	GDMA_RQ,
	GDMA_CQ,
	GDMA_EQ,
};

/* Flags for gdma_wqe_request.flags */
enum gdma_work_request_flags {
	GDMA_WR_NONE			= 0,
	GDMA_WR_OOB_IN_SGL		= BIT(0),
	GDMA_WR_PAD_BY_SGE0		= BIT(1),
};

/* Event types reported in gdma_event.type / gdma_eqe_info.type */
enum gdma_eqe_type {
	GDMA_EQE_COMPLETION		= 3,
	GDMA_EQE_TEST_EVENT		= 64,
	GDMA_EQE_HWC_INIT_EQ_ID_DB	= 129,
	GDMA_EQE_HWC_INIT_DATA		= 130,
	GDMA_EQE_HWC_INIT_DONE		= 131,
	GDMA_EQE_HWC_FPGA_RECONFIG	= 132,
	GDMA_EQE_HWC_SOC_RECONFIG_DATA	= 133,
	GDMA_EQE_HWC_SOC_SERVICE	= 134,
	GDMA_EQE_HWC_RESET_REQUEST	= 135,
	GDMA_EQE_RNIC_QP_FATAL		= 176,
};

/* Device types used in gdma_dev_id.type */
enum {
	GDMA_DEVICE_NONE	= 0,
	GDMA_DEVICE_HWC		= 1,
	GDMA_DEVICE_MANA	= 2,
	GDMA_DEVICE_MANA_IB	= 3,
};

enum gdma_service_type {
	GDMA_SERVICE_TYPE_NONE		= 0,
	GDMA_SERVICE_TYPE_RDMA_SUSPEND	= 1,
	GDMA_SERVICE_TYPE_RDMA_RESUME	= 2,
};

/* Deferred-work wrapper used to deliver a service event to a gdma_dev */
struct mana_service_work {
	struct work_struct work;
	struct gdma_dev *gdma_dev;
	enum gdma_service_type event;
};
83 
84 struct mana_service_work {
85 	struct work_struct work;
86 	struct gdma_dev *gdma_dev;
87 	enum gdma_service_type event;
88 };
89 
/* A bitmap-based ID allocator (see mana_gd_alloc_res_map/mana_gd_free_res_map) */
struct gdma_resource {
	/* Protect the bitmap */
	spinlock_t lock;

	/* The bitmap size in bits. */
	u32 size;

	/* The bitmap tracks the resources. */
	unsigned long *map;
};

/* Layout of the 64-bit value written to a doorbell page; the active view
 * depends on the queue type being rung.
 */
union gdma_doorbell_entry {
	u64	as_uint64;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} cq;

	struct {
		u64 id		: 24;
		u64 wqe_cnt	: 8;
		u64 tail_ptr	: 32;
	} rq;

	struct {
		u64 id		: 24;
		u64 reserved	: 8;
		u64 tail_ptr	: 32;
	} sq;

	struct {
		u64 id		: 16;
		u64 reserved	: 16;
		u64 tail_ptr	: 31;
		u64 arm		: 1;
	} eq;
}; /* HW DATA */
130 
/* Common header on every HWC message (both directions) */
struct gdma_msg_hdr {
	u32 hdr_type;
	u32 msg_type;
	u16 msg_version;
	u16 hwc_msg_id;
	u32 msg_size;
}; /* HW DATA */

struct gdma_dev_id {
	union {
		struct {
			u16 type;
			u16 instance;
		};

		u32 as_uint32;
	};
}; /* HW DATA */

struct gdma_req_hdr {
	struct gdma_msg_hdr req;
	struct gdma_msg_hdr resp; /* The expected response */
	struct gdma_dev_id dev_id;
	u32 activity_id;
}; /* HW DATA */

struct gdma_resp_hdr {
	struct gdma_msg_hdr response;
	struct gdma_dev_id dev_id;
	u32 activity_id;
	u32 status;
	u32 reserved;
}; /* HW DATA */

/* A request/response consisting of only the header */
struct gdma_general_req {
	struct gdma_req_hdr hdr;
}; /* HW DATA */

#define GDMA_MESSAGE_V1 1
#define GDMA_MESSAGE_V2 2
#define GDMA_MESSAGE_V3 3
#define GDMA_MESSAGE_V4 4

struct gdma_general_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define GDMA_STANDARD_HEADER_TYPE 0
179 
180 static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code,
181 					u32 req_size, u32 resp_size)
182 {
183 	hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE;
184 	hdr->req.msg_type = code;
185 	hdr->req.msg_version = GDMA_MESSAGE_V1;
186 	hdr->req.msg_size = req_size;
187 
188 	hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE;
189 	hdr->resp.msg_type = code;
190 	hdr->resp.msg_version = GDMA_MESSAGE_V1;
191 	hdr->resp.msg_size = resp_size;
192 }
193 
/* The 16-byte struct is part of the GDMA work queue entry (WQE). */
struct gdma_sge {
	u64 address;
	u32 mem_key;
	u32 size;
}; /* HW DATA */

/* Driver-side description of a WQE to be posted (see mana_gd_post_work_request) */
struct gdma_wqe_request {
	struct gdma_sge *sgl;
	u32 num_sge;

	u32 inline_oob_size;
	const void *inline_oob_data;

	u32 flags;
	u32 client_data_unit;
};

enum gdma_page_type {
	GDMA_PAGE_TYPE_4K,
};

#define GDMA_INVALID_DMA_REGION 0

/* Deferred-work wrapper used to deliver an EQE-triggered service action
 * for a PCI device (see mana_schedule_serv_work).
 */
struct mana_serv_work {
	struct work_struct serv_work;
	struct pci_dev *pdev;
	enum gdma_eqe_type type;
};

/* A DMA-coherent buffer plus the PF-assigned region handle that covers it */
struct gdma_mem_info {
	struct device *dev;

	dma_addr_t dma_handle;
	void *virt_addr;
	u64 length;

	/* Allocated by the PF driver */
	u64 dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8

/* Per-client device (HWC, MANA, MANA_IB) registered on a GDMA context */
struct gdma_dev {
	struct gdma_context *gdma_context;

	struct gdma_dev_id dev_id;

	u32 pdid;
	u32 doorbell;
	u32 gpa_mkey;

	/* GDMA driver specific pointer */
	void *driver_data;

	struct auxiliary_device *adev;
	bool is_suspended;
	bool rdma_teardown;
};

/* MANA_PAGE_SIZE is the DMA unit */
#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

/* Required by HW */
#define MANA_MIN_QSIZE MANA_PAGE_SIZE

#define GDMA_CQE_SIZE 64
#define GDMA_EQE_SIZE 16
#define GDMA_MAX_SQE_SIZE 512
#define GDMA_MAX_RQE_SIZE 256

#define GDMA_COMP_DATA_SIZE 0x3C

#define GDMA_EVENT_DATA_SIZE 0xC

/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */
#define GDMA_WQE_BU_SIZE 32

/* Sentinel values for not-yet-assigned resource IDs */
#define INVALID_PDID		UINT_MAX
#define INVALID_DOORBELL	UINT_MAX
#define INVALID_MEM_KEY		UINT_MAX
#define INVALID_QUEUE_ID	UINT_MAX
#define INVALID_PCI_MSIX_INDEX  UINT_MAX
281 
/* A completion record copied out of a CQE by mana_gd_poll_cq() */
struct gdma_comp {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];
	u32 wq_num;
	bool is_sq;
};

/* An event record extracted from an EQE */
struct gdma_event {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u8  type;
};

struct gdma_queue;

struct mana_eq {
	struct gdma_queue	*eq;
	struct dentry		*mana_eq_debugfs;
};

/* Callback invoked for each event delivered on an EQ */
typedef void gdma_eq_callback(void *context, struct gdma_queue *q,
			      struct gdma_event *e);

/* Callback invoked when a CQ has completions to process */
typedef void gdma_cq_callback(void *context, struct gdma_queue *q);

/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE
 * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the
 * driver increases the 'head' in BUs rather than in bytes, and notifies
 * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track
 * the HW head, and increases the 'head' by 1 for every processed EQE/CQE.
 *
 * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is
 * processed, the driver increases the 'tail' to indicate that WQEs have
 * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ.
 *
 * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures
 * that the EQ/CQ is big enough so they can't overflow, and the driver uses
 * the owner bits mechanism to detect if the queue has become empty.
 */
struct gdma_queue {
	struct gdma_dev *gdma_dev;

	enum gdma_queue_type type;
	u32 id;

	struct gdma_mem_info mem_info;

	void *queue_mem_ptr;
	u32 queue_size;

	bool monitor_avl_buf;

	u32 head;
	u32 tail;
	struct list_head entry;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			bool disable_needed;

			gdma_eq_callback *callback;
			void *context;

			unsigned int msix_index;

			u32 log2_throttle_limit;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent; /* For CQ/EQ relationship */
		} cq;
	};
};
357 
/* Parameters for creating a queue (see mana_gd_create_*_queue/eq/wq_cq) */
struct gdma_queue_spec {
	enum gdma_queue_type type;
	bool monitor_avl_buf;
	unsigned int queue_size;

	/* Extra fields specific to EQ/CQ. */
	union {
		struct {
			gdma_eq_callback *callback;
			void *context;

			unsigned long log2_throttle_limit;
			unsigned int msix_index;
		} eq;

		struct {
			gdma_cq_callback *callback;
			void *context;

			struct gdma_queue *parent_eq;

		} cq;
	};
};

#define MANA_IRQ_NAME_SZ 32

/* Per-MSI-X-vector state; one interrupt may serve multiple EQs via eq_list */
struct gdma_irq_context {
	void (*handler)(void *arg);
	/* Protect the eq_list */
	spinlock_t lock;
	struct list_head eq_list;
	char name[MANA_IRQ_NAME_SZ];
};

/* Bit numbers for gdma_context.flags */
enum gdma_context_flags {
	GC_PROBE_SUCCEEDED	= 0,
	GC_IN_SERVICE		= 1,
};
397 
/* Per-PCI-device adapter state shared by all GDMA client devices */
struct gdma_context {
	struct device		*dev;
	struct dentry		*mana_pci_debugfs;

	/* Per-vPort max number of queues */
	unsigned int		max_num_queues;
	unsigned int		max_num_msix;
	unsigned int		num_msix_usable;
	struct xarray		irq_contexts;

	/* L2 MTU */
	u16 adapter_mtu;

	/* This maps a CQ index to the queue structure. */
	unsigned int		max_num_cqs;
	struct gdma_queue	**cq_table;

	/* Protect eq_test_event and test_event_eq_id */
	struct mutex		eq_test_event_mutex;
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;

	phys_addr_t		bar0_pa;
	void __iomem		*bar0_va;
	resource_size_t		bar0_size;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
	phys_addr_t		phys_db_page_base;
	u64 db_page_off;
	u64 db_page_size;
	int                     numa_node;

	/* Shared memory channel (used to bootstrap HWC) */
	struct shm_channel	shm_channel;

	/* Hardware communication channel (HWC) */
	struct gdma_dev		hwc;

	/* Azure network adapter */
	struct gdma_dev		mana;

	/* Azure RDMA adapter */
	struct gdma_dev		mana_ib;

	u64 pf_cap_flags1;

	struct workqueue_struct *service_wq;

	/* Bitmask of enum gdma_context_flags bits */
	unsigned long		flags;
};
450 
451 static inline bool mana_gd_is_mana(struct gdma_dev *gd)
452 {
453 	return gd->dev_id.type == GDMA_DEVICE_MANA;
454 }
455 
456 static inline bool mana_gd_is_hwc(struct gdma_dev *gd)
457 {
458 	return gd->dev_id.type == GDMA_DEVICE_HWC;
459 }
460 
/* WQ helpers: locate a WQE by offset, and query free space in BUs */
u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset);
u32 mana_gd_wq_avail_space(struct gdma_queue *wq);

/* Generate a test EQE on @eq and wait for its delivery */
int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq);

/* Queue creation for the HWC, MANA EQs, and MANA WQ/CQ pairs */
int mana_gd_create_hwc_queue(struct gdma_dev *gd,
			     const struct gdma_queue_spec *spec,
			     struct gdma_queue **queue_ptr);

int mana_gd_create_mana_eq(struct gdma_dev *gd,
			   const struct gdma_queue_spec *spec,
			   struct gdma_queue **queue_ptr);

int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
			      const struct gdma_queue_spec *spec,
			      struct gdma_queue **queue_ptr);

void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);

/* Drain up to @num_cqe completions from @cq; returns the number consumed */
int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe);

void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit);

int mana_schedule_serv_work(struct gdma_context *gc, enum gdma_eqe_type type);
485 
/* On-queue header of a work queue entry */
struct gdma_wqe {
	u32 reserved	:24;
	u32 last_vbytes	:8;

	union {
		u32 flags;

		struct {
			u32 num_sge		:8;
			u32 inline_oob_size_div4:3;
			u32 client_oob_in_sgl	:1;
			u32 reserved1		:4;
			u32 client_data_unit	:14;
			u32 reserved2		:2;
		};
	};
}; /* HW DATA */

#define INLINE_OOB_SMALL_SIZE 8
#define INLINE_OOB_LARGE_SIZE 24

#define MANA_MAX_TX_WQE_SGL_ENTRIES 30

#define MAX_TX_WQE_SIZE 512
#define MAX_RX_WQE_SIZE 256

/* Max SGEs that fit in a max-size SQE/RQE alongside the OOB data */
#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))

struct gdma_cqe {
	u32 cqe_data[GDMA_COMP_DATA_SIZE / 4];

	union {
		u32 as_uint32;

		struct {
			u32 wq_num	: 24;
			u32 is_sq	: 1;
			u32 reserved	: 4;
			u32 owner_bits	: 3;
		};
	} cqe_info;
}; /* HW DATA */

#define GDMA_CQE_OWNER_BITS 3

#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1)

#define SET_ARM_BIT 1

#define GDMA_EQE_OWNER_BITS 3

union gdma_eqe_info {
	u32 as_uint32;

	struct {
		u32 type	: 8;
		u32 reserved1	: 8;
		u32 client_id	: 2;
		u32 reserved2	: 11;
		u32 owner_bits	: 3;
	};
}; /* HW DATA */

#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1)
#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries))

struct gdma_eqe {
	u32 details[GDMA_EVENT_DATA_SIZE / 4];
	u32 eqe_info;
}; /* HW DATA */
561 
/* BAR0 register offsets (VF) */
#define GDMA_REG_DB_PAGE_OFFSET	8
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

/* BAR0 register offsets (PF) */
#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};

/* GDMA_GENERATE_TEST_EQE */
struct gdma_generate_test_event_req {
	struct gdma_req_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_VERIFY_VF_DRIVER_VERSION */
enum {
	GDMA_PROTOCOL_V1	= 1,
	GDMA_PROTOCOL_FIRST	= GDMA_PROTOCOL_V1,
	GDMA_PROTOCOL_LAST	= GDMA_PROTOCOL_V1,
};

#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)

/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
 * so the driver is able to reliably support features like busy_poll.
 */
#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
#define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)

/* Driver can handle holes (zeros) in the device list */
#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)

/* Driver supports dynamic MSI-X vector allocation */
#define GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT BIT(13)

/* Driver can self reset on EQE notification */
#define GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE BIT(14)

/* Driver can self reset on FPGA Reconfig EQE notification */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE BIT(17)

/* Driver detects stalled send queues and recovers them */
#define GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY BIT(18)

/* NOTE(review): defined out of numeric order (value is BIT(6)) */
#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)

/* Driver supports linearizing the skb when num_sge exceeds hardware limit */
#define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)

/* Driver can send HWC periodically to query stats */
#define GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY BIT(21)

/* Driver can handle hardware recovery events during probe */
#define GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY BIT(22)

/* Driver supports self recovery on Hardware Channel timeouts */
#define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY BIT(25)

/* Capability mask advertised in gdma_verify_ver_req.gd_drv_cap_flags1.
 * Note: GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB is defined above but
 * intentionally not included here.
 */
#define GDMA_DRV_CAP_FLAGS1 \
	(GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
	 GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \
	 GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP | \
	 GDMA_DRV_CAP_FLAG_1_DYNAMIC_IRQ_ALLOC_SUPPORT | \
	 GDMA_DRV_CAP_FLAG_1_SELF_RESET_ON_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HANDLE_RECONFIG_EQE | \
	 GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE | \
	 GDMA_DRV_CAP_FLAG_1_PERIODIC_STATS_QUERY | \
	 GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
	 GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
	 GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY | \
	 GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY)

#define GDMA_DRV_CAP_FLAGS2 0

#define GDMA_DRV_CAP_FLAGS3 0

#define GDMA_DRV_CAP_FLAGS4 0
652 
struct gdma_verify_ver_req {
	struct gdma_req_hdr hdr;

	/* Mandatory fields required for protocol establishment */
	u64 protocol_ver_min;
	u64 protocol_ver_max;

	/* Gdma Driver Capability Flags */
	u64 gd_drv_cap_flags1;
	u64 gd_drv_cap_flags2;
	u64 gd_drv_cap_flags3;
	u64 gd_drv_cap_flags4;

	/* Advisory fields */
	u64 drv_ver;
	u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */
	u32 reserved;
	u32 os_ver_major;
	u32 os_ver_minor;
	u32 os_ver_build;
	u32 os_ver_platform;
	u64 reserved_2;
	u8 os_ver_str1[128];
	u8 os_ver_str2[128];
	u8 os_ver_str3[128];
	u8 os_ver_str4[128];
}; /* HW DATA */

struct gdma_verify_ver_resp {
	struct gdma_resp_hdr hdr;
	u64 gdma_protocol_ver;
	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;
}; /* HW DATA */

/* GDMA_QUERY_MAX_RESOURCES */
struct gdma_query_max_resources_resp {
	struct gdma_resp_hdr hdr;
	u32 status;
	u32 max_sq;
	u32 max_rq;
	u32 max_cq;
	u32 max_eq;
	u32 max_db;
	u32 max_mst;
	u32 max_cq_mod_ctx;
	u32 max_mod_cq;
	u32 max_msix;
}; /* HW DATA */

/* GDMA_LIST_DEVICES */
#define GDMA_DEV_LIST_SIZE 64
struct gdma_list_devices_resp {
	struct gdma_resp_hdr hdr;
	u32 num_of_devs;
	u32 reserved;
	struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE];
}; /* HW DATA */

/* GDMA_REGISTER_DEVICE */
struct gdma_register_device_resp {
	struct gdma_resp_hdr hdr;
	u32 pdid;
	u32 gpa_mkey;
	u32 db_id;
}; /* HW DATA */

/* GDMA_ALLOCATE_RESOURCE_RANGE */
struct gdma_allocate_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 alignment;
	u32 allocated_resources;
};

struct gdma_allocate_resource_range_resp {
	struct gdma_resp_hdr hdr;
	u32 allocated_resources;
};

/* GDMA_DESTROY_RESOURCE_RANGE */
struct gdma_destroy_resource_range_req {
	struct gdma_req_hdr hdr;
	u32 resource_type;
	u32 num_resources;
	u32 allocated_resources;
};
741 
/* GDMA_CREATE_QUEUE */
struct gdma_create_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 reserved1;
	u32 pdid;
	/* NOTE(review): historical misspelling of "doorbell_id"; the name is
	 * presumably used as-is by callers, so it is kept for compatibility.
	 */
	u32 doolbell_id;
	u64 gdma_region;
	u32 reserved2;
	u32 queue_size;
	u32 log2_throttle_limit;
	u32 eq_pci_msix_index;
	u32 cq_mod_ctx_id;
	u32 cq_parent_eq_id;
	u8  rq_drop_on_overrun;
	u8  rq_err_on_wqe_overflow;
	u8  rq_chain_rec_wqes;
	u8  sq_hw_db;
	u32 reserved3;
}; /* HW DATA */

struct gdma_create_queue_resp {
	struct gdma_resp_hdr hdr;
	u32 queue_index;
}; /* HW DATA */

/* GDMA_DISABLE_QUEUE */
struct gdma_disable_queue_req {
	struct gdma_req_hdr hdr;
	u32 type;
	u32 queue_index;
	u32 alloc_res_id_on_creation;
}; /* HW DATA */

/* GDMA_QUERY_HWC_TIMEOUT */
struct gdma_query_hwc_timeout_req {
	struct gdma_req_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

struct gdma_query_hwc_timeout_resp {
	struct gdma_resp_hdr hdr;
	u32 timeout_ms;
	u32 reserved;
};

/* Access rights for memory regions */
enum gdma_mr_access_flags {
	GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
	GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
};
796 
/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
	struct gdma_req_hdr hdr;

	/* The total size of the DMA region */
	u64 length;

	/* The offset in the first page */
	u32 offset_in_page;

	/* enum gdma_page_type */
	u32 gdma_page_type;

	/* The total number of pages */
	u32 page_count;

	/* If page_addr_list_len is smaller than page_count,
	 * the remaining page addresses will be added via the
	 * message GDMA_DMA_REGION_ADD_PAGES.
	 */
	u32 page_addr_list_len;
	u64 page_addr_list[];
}; /* HW DATA */

struct gdma_create_dma_region_resp {
	struct gdma_resp_hdr hdr;
	u64 dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;

	u32 page_addr_list_len;
	u32 reserved3;

	u64 page_addr_list[];
}; /* HW DATA */

/* GDMA_DESTROY_DMA_REGION */
struct gdma_destroy_dma_region_req {
	struct gdma_req_hdr hdr;

	u64 dma_region_handle;
}; /* HW DATA */

enum gdma_pd_flags {
	GDMA_PD_FLAG_INVALID = 0,
	GDMA_PD_FLAG_ALLOW_GPA_MR = 1,
};

/* GDMA_CREATE_PD */
struct gdma_create_pd_req {
	struct gdma_req_hdr hdr;
	enum gdma_pd_flags flags;
	u32 reserved;
};/* HW DATA */

struct gdma_create_pd_resp {
	struct gdma_resp_hdr hdr;
	u64 pd_handle;
	u32 pd_id;
	u32 reserved;
};/* HW DATA */

/* GDMA_DESTROY_PD */
struct gdma_destroy_pd_req {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
};/* HW DATA */

/* NOTE(review): "destory" is a historical misspelling of "destroy"; the
 * struct tag is presumably referenced by callers, so it is kept as-is.
 */
struct gdma_destory_pd_resp {
	struct gdma_resp_hdr hdr;
};/* HW DATA */
871 
enum gdma_mr_type {
	/*
	 * Guest Physical Address - MRs of this type allow access
	 * to any DMA-mapped memory using bus-logical address
	 */
	GDMA_MR_TYPE_GPA = 1,
	/* Guest Virtual Address - MRs of this type allow access
	 * to memory mapped by PTEs associated with this MR using a virtual
	 * address that is set up in the MST
	 */
	GDMA_MR_TYPE_GVA = 2,
	/* Guest zero-based address MRs */
	GDMA_MR_TYPE_ZBVA = 4,
	/* Device address MRs */
	GDMA_MR_TYPE_DM = 5,
};

/* Driver-side parameters for creating a memory region; the active union
 * member is selected by mr_type.
 */
struct gdma_create_mr_params {
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} zbva;
		struct {
			u64 dm_handle;
			u64 offset;
			u64 length;
			enum gdma_mr_access_flags access_flags;
		} da;
	};
};

/* GDMA_CREATE_MR */
struct gdma_create_mr_request {
	struct gdma_req_hdr hdr;
	u64 pd_handle;
	enum gdma_mr_type mr_type;
	u32 reserved_1;

	union {
		struct {
			u64 dma_region_handle;
			u64 virtual_address;
			enum gdma_mr_access_flags access_flags;
		} __packed gva;
		struct {
			u64 dma_region_handle;
			enum gdma_mr_access_flags access_flags;
		} __packed zbva;
		struct {
			u64 dm_handle;
			u64 offset;
			enum gdma_mr_access_flags access_flags;
		} __packed da;
	} __packed;
	u32 reserved_2;
	union {
		struct {
			u64 length;
		} da_ext;
	};
};/* HW DATA */

struct gdma_create_mr_response {
	struct gdma_resp_hdr hdr;
	u64 mr_handle;
	u32 lkey;
	u32 rkey;
};/* HW DATA */

/* GDMA_DESTROY_MR */
struct gdma_destroy_mr_request {
	struct gdma_req_hdr hdr;
	u64 mr_handle;
};/* HW DATA */

struct gdma_destroy_mr_response {
	struct gdma_resp_hdr hdr;
};/* HW DATA */
956 
/* GDMA_ALLOC_DM */
struct gdma_alloc_dm_req {
	struct gdma_req_hdr hdr;
	u64 length;
	u32 alignment;
	u32 flags;
}; /* HW DATA */

struct gdma_alloc_dm_resp {
	struct gdma_resp_hdr hdr;
	u64 dm_handle;
}; /* HW DATA */

/* GDMA_DESTROY_DM */
struct gdma_destroy_dm_req {
	struct gdma_req_hdr hdr;
	u64 dm_handle;
}; /* HW DATA */

struct gdma_destroy_dm_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */
977 
/* Negotiate the GDMA protocol version and capability flags with the PF */
int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
int mana_gd_deregister_device(struct gdma_dev *gd);

/* Post a WQE described by @wqe_req; mana_gd_post_and_ring also rings the
 * doorbell afterwards.
 */
int mana_gd_post_work_request(struct gdma_queue *wq,
			      const struct gdma_wqe_request *wqe_req,
			      struct gdma_posted_wqe_info *wqe_info);

int mana_gd_post_and_ring(struct gdma_queue *queue,
			  const struct gdma_wqe_request *wqe,
			  struct gdma_posted_wqe_info *wqe_info);

/* Bitmap-based ID allocator management for struct gdma_resource */
int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r);
void mana_gd_free_res_map(struct gdma_resource *r);

void mana_gd_wq_ring_doorbell(struct gdma_context *gc,
			      struct gdma_queue *queue);

/* DMA-coherent buffer allocation tracked by struct gdma_mem_info */
int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi);

void mana_gd_free_memory(struct gdma_mem_info *gmi);

/* Send an HWC request and wait for the response */
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
			 u32 resp_len, void *resp);

int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle);
void mana_register_debugfs(void);
void mana_unregister_debugfs(void);

int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event);

int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state);
int mana_gd_resume(struct pci_dev *pdev);

/* Rate-limit helper: whether an error is worth logging for this adapter */
bool mana_need_log(struct gdma_context *gc, int err);
1015 
1016 #endif /* _GDMA_H */
1017