/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef _MANA_H
#define _MANA_H

#include <sys/types.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/counter.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/tcp_lro.h>

#include "gdma.h"
#include "hw_channel.h"


/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

#define DRV_MODULE_NAME		"mana"

#ifndef DRV_MODULE_VERSION
#define DRV_MODULE_VERSION				\
	__XSTRING(MANA_MAJOR_VERSION) "."		\
	__XSTRING(MANA_MINOR_VERSION) "."		\
	__XSTRING(MANA_MICRO_VERSION)
#endif
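/*
 * Note: with the version macros above, DRV_MODULE_VERSION expands (via
 * __XSTRING) to the string "0.1.1".
 */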
#define DEVICE_NAME	"Microsoft Azure Network Adapter (MANA)"
#define DEVICE_DESC	"MANA adapter"

/*
 * Supported PCI vendor and device IDs
 */
#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT	0x1414
#endif

#define PCI_DEV_ID_MANA_VF	0x00ba

typedef struct _mana_vendor_id_t {
	uint16_t vendor_id;
	uint16_t device_id;
} mana_vendor_id_t;

typedef uint64_t mana_handle_t;
#define INVALID_MANA_HANDLE	((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* Number of entries in the hardware indirection table; must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE	64
#define MANA_INDIRECT_TABLE_MASK	(MANA_INDIRECT_TABLE_SIZE - 1)
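
/*
 * Illustrative sketch (not code from this driver): because the table size is
 * a power of two, an RSS hash maps to an indirection table slot with a mask
 * rather than a modulo, e.g.:
 *
 *	uint32_t slot = pkt_hash & MANA_INDIRECT_TABLE_MASK;
 *	uint32_t rxq_idx = apc->indir_table[slot];
 *
 * where 'apc' is assumed to be a struct mana_port_context (defined later in
 * this file) and 'pkt_hash' a received packet's RSS hash.
 */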

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE		40

#define COMP_ENTRY_SIZE			64

#define MIN_FRAME_SIZE			146

/* Number of RX buffers per queue. Must be a power of two.
 * A higher number could fail at allocation.
 */
#define MAX_RX_BUFFERS_PER_QUEUE	8192
#define DEF_RX_BUFFERS_PER_QUEUE	1024
#define MIN_RX_BUFFERS_PER_QUEUE	128

/* Number of TX buffers per queue. Must be a power of two.
 * A higher number could fail at allocation.
 * The maximum value was derived, through testing, from the
 * maximum number of allocatable pages the host supports per
 * guest. A TX buffer size beyond this value is rejected by
 * the hardware.
 */
#define MAX_SEND_BUFFERS_PER_QUEUE	16384
#define DEF_SEND_BUFFERS_PER_QUEUE	1024
#define MIN_SEND_BUFFERS_PER_QUEUE	128

#define EQ_SIZE				(8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE		3

#define MAX_PORTS_IN_MANA_DEV		8

struct mana_send_buf_info {
	struct mbuf			*mbuf;
	bus_dmamap_t			dma_map;

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info	wqe_inf;
};

struct mana_stats {
	counter_u64_t			packets;		/* rx, tx */
	counter_u64_t			bytes;			/* rx, tx */
	counter_u64_t			stop;			/* tx */
	counter_u64_t			wakeup;			/* tx */
	counter_u64_t			collapse;		/* tx */
	counter_u64_t			collapse_err;		/* tx */
	counter_u64_t			dma_mapping_err;	/* rx, tx */
	counter_u64_t			mbuf_alloc_fail;	/* rx */
	counter_u64_t			partial_refill;		/* rx */
	counter_u64_t			alt_chg;		/* tx */
	counter_u64_t			alt_reset;		/* tx */
	counter_u64_t			cqe_err;		/* tx */
	counter_u64_t			cqe_unknown_type;	/* tx */
};

struct mana_txq {
	struct gdma_queue	*gdma_sq;

	union {
		uint32_t	gdma_txq_id;
		struct {
			uint32_t	reserved1	:10;
			uint32_t	vsq_frame	:14;
			uint32_t	reserved2	:8;
		};
	};

	uint16_t		vp_offset;

	if_t			ndev;
	/* Index into the tx_qp array in the port structure */
	int			idx;
	/* The alternative txq idx to use when this txq is under heavy load */
	int			alt_txq_idx;

	/* The mbufs are sent to the HW and we are waiting for the CQEs. */
	struct mana_send_buf_info	*tx_buf_info;
	uint16_t		next_to_use;
	uint16_t		next_to_complete;

	atomic_t		pending_sends;

	struct buf_ring		*txq_br;
	struct mtx		txq_mtx;
	char			txq_mtx_name[16];

	uint64_t		tso_pkts;
	uint64_t		tso_bytes;

	struct task		enqueue_task;
	struct taskqueue	*enqueue_tq;

	struct mana_stats	stats;
};


/*
 * The max WQE size is 512B. The first 8B is for the GDMA Out of Band (OOB),
 * followed by the Client OOB, which can be either 8B or 24B. Thus, the max
 * space for SGL entries in a single WQE is 512 - 8 - 8 = 496B. Since each
 * SGL entry is 16B in size, the max number of SGLs in a WQE is 496/16 = 31.
 * Save one for emergency use and set the allowed MAX_MBUF_FRAGS to 30.
 */
#define	MAX_MBUF_FRAGS		30
#define MANA_TSO_MAXSEG_SZ	PAGE_SIZE
#define MANA_TSO_MAX_SZ		IP_MAXPACKET
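
/*
 * Illustrative check (a sketch, not part of the driver): the limit above is
 * consistent with the arithmetic in the comment, since 30 SGL entries of 16B
 * each occupy 480B, which fits within the 496B of SGL space, e.g.:
 *
 *	CTASSERT(MAX_MBUF_FRAGS * sizeof(struct gdma_sge) <= 496);
 *
 * assuming struct gdma_sge (from gdma.h) is the 16-byte SGL entry described
 * above.
 */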

/* mbuf data and frags dma mappings */
struct mana_mbuf_head {
	bus_addr_t dma_handle[MAX_MBUF_FRAGS + 1];

	uint32_t size[MAX_MBUF_FRAGS + 1];
};

#define MANA_HEADROOM		sizeof(struct mana_mbuf_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	uint32_t pkt_fmt		:2;
	uint32_t is_outer_ipv4		:1;
	uint32_t is_outer_ipv6		:1;
	uint32_t comp_iphdr_csum	:1;
	uint32_t comp_tcp_csum		:1;
	uint32_t comp_udp_csum		:1;
	uint32_t supress_txcqe_gen	:1;
	uint32_t vcq_num		:24;

	uint32_t trans_off		:10; /* Transport header offset */
	uint32_t vsq_frame		:14;
	uint32_t short_vp_offset	:8;
}; /* HW DATA */

struct mana_tx_long_oob {
	uint32_t is_encap		:1;
	uint32_t inner_is_ipv6		:1;
	uint32_t inner_tcp_opt		:1;
	uint32_t inject_vlan_pri_tag	:1;
	uint32_t reserved1		:12;
	uint32_t pcp			:3;  /* 802.1Q */
	uint32_t dei			:1;  /* 802.1Q */
	uint32_t vlan_id		:12; /* 802.1Q */

	uint32_t inner_frame_offset	:10;
	uint32_t inner_ip_rel_offset	:6;
	uint32_t long_vp_offset		:12;
	uint32_t reserved2		:4;

	uint32_t reserved3;
	uint32_t reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob	s_oob;
	struct mana_tx_long_oob		l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION	1

struct mana_cqe_header {
	uint32_t cqe_type	:6;
	uint32_t client_type	:2;
	uint32_t vendor_err	:24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

#define NDIS_HASH_IPV4_L3_MASK	(NDIS_HASH_IPV4)
#define NDIS_HASH_IPV4_L4_MASK	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4)
#define NDIS_HASH_IPV6_L3_MASK	(NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define NDIS_HASH_IPV6_L4_MASK						\
    (NDIS_HASH_TCP_IPV6 | NDIS_HASH_UDP_IPV6 |				\
    NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
#define NDIS_HASH_IPV4_MASK						\
    (NDIS_HASH_IPV4_L3_MASK | NDIS_HASH_IPV4_L4_MASK)
#define NDIS_HASH_IPV6_MASK						\
    (NDIS_HASH_IPV6_L3_MASK | NDIS_HASH_IPV6_L4_MASK)
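
/*
 * Illustrative sketch (assumed RX-path usage, not code from this header):
 * the rx_hashtype reported in the RX completion OOB below can be tested
 * against these masks to pick an mbuf RSS hash type, e.g.:
 *
 *	if (cqe->rx_hashtype & NDIS_HASH_TCP_IPV4)
 *		M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
 *	else if (cqe->rx_hashtype & NDIS_HASH_IPV4)
 *		M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
 *
 * where 'cqe' is assumed to be a struct mana_rxcomp_oob (defined below).
 */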


struct mana_rxcomp_perpkt_info {
	uint32_t pkt_len	:16;
	uint32_t reserved1	:16;
	uint32_t reserved2;
	uint32_t pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	uint32_t rx_vlan_id			:12;
	uint32_t rx_vlantag_present		:1;
	uint32_t rx_outer_iphdr_csum_succeed	:1;
	uint32_t rx_outer_iphdr_csum_fail	:1;
	uint32_t reserved1			:1;
	uint32_t rx_hashtype			:9;
	uint32_t rx_iphdr_csum_succeed		:1;
	uint32_t rx_iphdr_csum_fail		:1;
	uint32_t rx_tcp_csum_succeed		:1;
	uint32_t rx_tcp_csum_fail		:1;
	uint32_t rx_udp_csum_succeed		:1;
	uint32_t rx_udp_csum_fail		:1;
	uint32_t reserved2			:1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	uint32_t rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header	cqe_hdr;

	uint32_t tx_data_offset;

	uint32_t tx_sgl_offset		:5;
	uint32_t tx_wqe_offset		:27;

	uint32_t reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER	512

struct mana_cq {
	struct gdma_queue	*gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	uint32_t		gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type	type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * It is non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq		*rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * It is non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq		*txq;

	/* Taskqueue and related structs */
	struct task		cleanup_task;
	struct taskqueue	*cleanup_tq;
	int			cpu;
	bool			do_not_ring_db;

	/* Budget for one cleanup task */
	int			work_done;
	int			budget;

	/* Buffer into which the CQ handler can copy the CQEs. */
	struct gdma_comp	gdma_comp_buf[CQE_POLLING_BUFFER];
};
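
/*
 * Illustrative sketch (assumed cleanup-task pattern, not code from this
 * header): a CQ handler polls at most 'budget' completions per run and
 * requeues its task when the whole budget was consumed, roughly:
 *
 *	if (cq->work_done >= cq->budget)
 *		taskqueue_enqueue(cq->cleanup_tq, &cq->cleanup_task);
 */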

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request		wqe_req;

	struct mbuf			*mbuf;
	bus_dmamap_t			dma_map;

	/* SGL of the buffer going to be sent as part of the work request. */
	uint32_t			num_sge;
	struct gdma_sge			sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info	wqe_inf;
};

struct mana_rxq {
	struct gdma_queue		*gdma_rq;
	/* Cache the gdma receive queue id */
	uint32_t			gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	uint32_t			rxq_idx;

	uint32_t			datasize;

	mana_handle_t			rxobj;

	struct completion		fence_event;

	struct mana_cq			rx_cq;

	if_t				ndev;
	struct lro_ctrl			lro;

	/* Total number of receive buffers to be allocated */
	uint32_t			num_rx_buf;

	uint32_t			buf_index;
	uint32_t			next_to_refill;
	uint32_t			refill_thresh;

	uint64_t			lro_tried;
	uint64_t			lro_failed;
	struct mana_stats		stats;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob	rx_oobs[];
};
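
/*
 * Illustrative sketch (not code from this header): because rx_oobs[] is a
 * flexible array member, a receive queue is allocated as one block sized
 * for its buffer count, along the lines of:
 *
 *	struct mana_rxq *rxq = malloc(sizeof(struct mana_rxq) +
 *	    num_rx_buf * sizeof(struct mana_recv_buf_oob),
 *	    M_DEVBUF, M_WAITOK | M_ZERO);
 *
 * where 'num_rx_buf' and the malloc type M_DEVBUF are illustrative here.
 */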

struct mana_tx_qp {
	struct mana_txq			txq;

	struct mana_cq			tx_cq;

	mana_handle_t			tx_object;
};

struct mana_port_stats {
	counter_u64_t		rx_packets;
	counter_u64_t		tx_packets;

	counter_u64_t		rx_bytes;
	counter_u64_t		tx_bytes;

	counter_u64_t		rx_drops;
	counter_u64_t		tx_drops;

	counter_u64_t		stop_queue;
	counter_u64_t		wake_queue;
};

struct mana_context {
	struct gdma_dev		*gdma_dev;

	uint16_t		num_ports;

	struct mana_eq		*eqs;

	if_t			ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context	*ac;
	if_t			ndev;
	struct ifmedia		media;

	struct sx		apc_lock;

	/* DMA tag used for queue bufs of the entire port */
	bus_dma_tag_t		rx_buf_tag;
	bus_dma_tag_t		tx_buf_tag;

	uint8_t			mac_addr[ETHER_ADDR_LEN];

	enum TRI_STATE		rss_state;

	mana_handle_t		default_rxobj;
	bool			tx_shortform_allowed;
	uint16_t		tx_vp_offset;

	struct mana_tx_qp	*tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	uint32_t		indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t		rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	uint8_t			hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq		**rxqs;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int		max_queues;
	unsigned int		num_queues;

	unsigned int		tx_queue_size;
	unsigned int		rx_queue_size;

	mana_handle_t		port_handle;

	int			vport_use_count;

	uint16_t		port_idx;

	uint16_t		frame_size;
	uint16_t		max_mtu;
	uint16_t		min_mtu;
	uint16_t		mtu;

	bool			port_is_up;
	bool			port_st_save; /* Saved port state */

	bool			enable_tx_altq;

	bool			bind_cleanup_thread_cpu;
	int			last_tx_cq_bind_cpu;
	int			last_rx_cq_bind_cpu;

	struct mana_port_stats	port_stats;

	struct sysctl_oid_list	*port_list;
	struct sysctl_ctx_list	que_sysctl_ctx;
};

#define MANA_APC_LOCK_INIT(apc)			\
	sx_init(&(apc)->apc_lock, "MANA port lock")
#define MANA_APC_LOCK_DESTROY(apc)		sx_destroy(&(apc)->apc_lock)
#define MANA_APC_LOCK_LOCK(apc)			sx_xlock(&(apc)->apc_lock)
#define MANA_APC_LOCK_UNLOCK(apc)		sx_unlock(&(apc)->apc_lock)
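
/*
 * Illustrative usage sketch (assumed caller pattern, not code from this
 * header): configuration paths take the port lock around state changes,
 * e.g.:
 *
 *	MANA_APC_LOCK_LOCK(apc);
 *	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
 *	MANA_APC_LOCK_UNLOCK(apc);
 */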

int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
    bool update_hash, bool update_tab);

int mana_alloc_queues(if_t ndev);
int mana_attach(if_t ndev);
int mana_detach(if_t ndev);

int mana_probe(struct gdma_dev *gd);
void mana_remove(struct gdma_dev *gd);

struct mana_obj_spec {
	uint32_t	queue_index;
	uint64_t	gdma_region;
	uint32_t	queue_size;
	uint32_t	attached_eq;
	uint32_t	modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr	hdr;

	/* Driver Capability flags */
	uint64_t		drv_cap_flags1;
	uint64_t		drv_cap_flags2;
	uint64_t		drv_cap_flags3;
	uint64_t		drv_cap_flags4;

	uint32_t		proto_major_ver;
	uint32_t		proto_minor_ver;
	uint32_t		proto_micro_ver;

	uint32_t		reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr	hdr;

	uint64_t		pf_cap_flags1;
	uint64_t		pf_cap_flags2;
	uint64_t		pf_cap_flags3;
	uint64_t		pf_cap_flags4;

	uint16_t		max_num_vports;
	uint16_t		reserved;
	uint32_t		max_num_eqs;

	/* response v2: */
	uint16_t adapter_mtu;
	uint16_t reserved2;
	uint32_t reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr	hdr;
	uint32_t		vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr	hdr;
	uint32_t		max_num_sq;
	uint32_t		max_num_rq;
	uint32_t		num_indirection_ent;
	uint32_t		reserved1;
	uint8_t			mac_addr[6];
	uint8_t			reserved2[2];
	mana_handle_t		vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint32_t		pdid;
	uint32_t		doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr	hdr;
	uint16_t		tx_vport_offset;
	uint8_t			short_form_allowed;
	uint8_t			reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint32_t		wq_type;
	uint32_t		reserved;
	uint64_t		wq_gdma_region;
	uint64_t		cq_gdma_region;
	uint32_t		wq_size;
	uint32_t		cq_size;
	uint32_t		cq_moderation_ctx_id;
	uint32_t		cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr	hdr;
	uint32_t		wq_id;
	uint32_t		cq_id;
	mana_handle_t		wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr	hdr;
	uint32_t		wq_type;
	uint32_t		reserved;
	mana_handle_t		wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint16_t		num_indir_entries;
	uint16_t		indir_tab_offset;
	uint32_t		rx_enable;
	uint32_t		rss_enable;
	uint8_t			update_default_rxobj;
	uint8_t			update_hashkey;
	uint8_t			update_indir_tab;
	uint8_t			reserved;
	mana_handle_t		default_rxobj;
	uint8_t			hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES		16

#define MANA_SHORT_VPORT_OFFSET_MAX	((1U << 8) - 1)

#define MANA_IDX_NEXT(idx, size)	(((idx) + 1) & ((size) - 1))
#define MANA_GET_SPACE(start_idx, end_idx, size)			\
	(((end_idx) >= (start_idx)) ?					\
	((end_idx) - (start_idx)) : ((size) - (start_idx) + (end_idx)))
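
/*
 * Illustrative sketch (not code from this header): for a power-of-two ring
 * of 'size' entries, MANA_IDX_NEXT advances an index with wrap-around and
 * MANA_GET_SPACE computes how many entries lie between two indexes, e.g.
 * for a TX buffer ring:
 *
 *	txq->next_to_use = MANA_IDX_NEXT(txq->next_to_use, tx_queue_size);
 *	used = MANA_GET_SPACE(txq->next_to_complete, txq->next_to_use,
 *	    tx_queue_size);
 *
 * where 'tx_queue_size' and 'used' are illustrative local names.
 */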

#define MANA_RX_REFILL_THRESH		256

struct mana_tx_package {
	struct gdma_wqe_request		wqe_req;
	struct gdma_sge			sgl_array[MAX_MBUF_FRAGS];

	struct mana_tx_oob		tx_oob;

	struct gdma_posted_wqe_info	wqe_info;
};

int mana_restart(struct mana_port_context *apc);

int mana_create_wq_obj(struct mana_port_context *apc,
    mana_handle_t vport,
    uint32_t wq_type, struct mana_obj_spec *wq_spec,
    struct mana_obj_spec *cq_spec,
    mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
    mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
    uint32_t doorbell_pg_id);

void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */