xref: /freebsd/sys/dev/mana/mana.h (revision 9b8701b81f14f0fa0787425eb9761b765d5faab0)
1ce110ea1SWei Hu /*-
2ce110ea1SWei Hu  * SPDX-License-Identifier: BSD-2-Clause
3ce110ea1SWei Hu  *
4ce110ea1SWei Hu  * Copyright (c) 2021 Microsoft Corp.
5ce110ea1SWei Hu  * All rights reserved.
6ce110ea1SWei Hu  *
7ce110ea1SWei Hu  * Redistribution and use in source and binary forms, with or without
8ce110ea1SWei Hu  * modification, are permitted provided that the following conditions
9ce110ea1SWei Hu  * are met:
10ce110ea1SWei Hu  *
11ce110ea1SWei Hu  * 1. Redistributions of source code must retain the above copyright
12ce110ea1SWei Hu  *    notice, this list of conditions and the following disclaimer.
13ce110ea1SWei Hu  *
14ce110ea1SWei Hu  * 2. Redistributions in binary form must reproduce the above copyright
15ce110ea1SWei Hu  *    notice, this list of conditions and the following disclaimer in the
16ce110ea1SWei Hu  *    documentation and/or other materials provided with the distribution.
17ce110ea1SWei Hu  *
18ce110ea1SWei Hu  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19ce110ea1SWei Hu  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20ce110ea1SWei Hu  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21ce110ea1SWei Hu  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22ce110ea1SWei Hu  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23ce110ea1SWei Hu  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24ce110ea1SWei Hu  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25ce110ea1SWei Hu  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26ce110ea1SWei Hu  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27ce110ea1SWei Hu  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28ce110ea1SWei Hu  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29ce110ea1SWei Hu  *
30ce110ea1SWei Hu  */
31ce110ea1SWei Hu 
32ce110ea1SWei Hu #ifndef _MANA_H
33ce110ea1SWei Hu #define _MANA_H
34ce110ea1SWei Hu 
35ce110ea1SWei Hu #include <sys/types.h>
36ce110ea1SWei Hu #include <sys/proc.h>
37ce110ea1SWei Hu #include <sys/socket.h>
38ce110ea1SWei Hu #include <sys/sysctl.h>
39ce110ea1SWei Hu #include <sys/taskqueue.h>
40ce110ea1SWei Hu #include <sys/counter.h>
41ce110ea1SWei Hu 
42ce110ea1SWei Hu #include <net/ethernet.h>
43ce110ea1SWei Hu #include <net/if.h>
44ce110ea1SWei Hu #include <net/if_media.h>
45ce110ea1SWei Hu #include <netinet/tcp_lro.h>
46ce110ea1SWei Hu 
47ce110ea1SWei Hu #include "gdma.h"
48ce110ea1SWei Hu #include "hw_channel.h"
49ce110ea1SWei Hu 
50ce110ea1SWei Hu 
51ce110ea1SWei Hu /* Microsoft Azure Network Adapter (MANA)'s definitions
52ce110ea1SWei Hu  *
53ce110ea1SWei Hu  * Structures labeled with "HW DATA" are exchanged with the hardware. All of
54ce110ea1SWei Hu  * them are naturally aligned and hence don't need __packed.
55ce110ea1SWei Hu  */
56ce110ea1SWei Hu /* MANA protocol version */
57ce110ea1SWei Hu #define MANA_MAJOR_VERSION	0
58ce110ea1SWei Hu #define MANA_MINOR_VERSION	1
59ce110ea1SWei Hu #define MANA_MICRO_VERSION	1
60ce110ea1SWei Hu 
61ce110ea1SWei Hu #define DRV_MODULE_NAME		"mana"
62ce110ea1SWei Hu 
63ce110ea1SWei Hu #ifndef DRV_MODULE_VERSION
64ce110ea1SWei Hu #define DRV_MODULE_VERSION				\
65ce110ea1SWei Hu 	__XSTRING(MANA_MAJOR_VERSION) "."		\
66ce110ea1SWei Hu 	__XSTRING(MANA_MINOR_VERSION) "."		\
67ce110ea1SWei Hu 	__XSTRING(MANA_MICRO_VERSION)
68ce110ea1SWei Hu #endif
69ce110ea1SWei Hu #define DEVICE_NAME	"Microsoft Azure Network Adapter (MANA)"
70ce110ea1SWei Hu #define DEVICE_DESC	"MANA adapter"
71ce110ea1SWei Hu 
72ce110ea1SWei Hu /*
73ce110ea1SWei Hu  * Supported PCI vendor and devices IDs
74ce110ea1SWei Hu  */
75ce110ea1SWei Hu #ifndef PCI_VENDOR_ID_MICROSOFT
76ce110ea1SWei Hu #define PCI_VENDOR_ID_MICROSOFT	0x1414
77ce110ea1SWei Hu #endif
78ce110ea1SWei Hu 
79ce110ea1SWei Hu #define PCI_DEV_ID_MANA_VF	0x00ba
80ce110ea1SWei Hu 
/* PCI vendor/device ID pair used for device probe matching. */
typedef struct _mana_vendor_id_t {
	uint16_t vendor_id;
	uint16_t device_id;
} mana_vendor_id_t;

/* Opaque 64-bit handle to a HW-managed object (vport, WQ object, RxObject). */
typedef uint64_t mana_handle_t;
#define INVALID_MANA_HANDLE	((mana_handle_t)-1)
88ce110ea1SWei Hu 
/* Tri-state value: unknown / false / true (used e.g. for rss_state). */
enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};
94ce110ea1SWei Hu 
95ce110ea1SWei Hu /* Number of entries for hardware indirection table must be in power of 2 */
96ce110ea1SWei Hu #define MANA_INDIRECT_TABLE_SIZE	64
97ce110ea1SWei Hu #define MANA_INDIRECT_TABLE_MASK	(MANA_INDIRECT_TABLE_SIZE - 1)
98ce110ea1SWei Hu 
99ce110ea1SWei Hu /* The Toeplitz hash key's length in bytes: should be multiple of 8 */
100ce110ea1SWei Hu #define MANA_HASH_KEY_SIZE		40
101ce110ea1SWei Hu 
102ce110ea1SWei Hu #define COMP_ENTRY_SIZE			64
103ce110ea1SWei Hu 
104ce110ea1SWei Hu #define MIN_FRAME_SIZE			146
105ce110ea1SWei Hu #define ADAPTER_MTU_SIZE		1500
106ce110ea1SWei Hu #define DEFAULT_FRAME_SIZE		(ADAPTER_MTU_SIZE + 14)
107ce110ea1SWei Hu #define MAX_FRAME_SIZE			4096
108ce110ea1SWei Hu 
/* Number of RX buffers per queue. Must be a power of two.
 * Allocation may fail for larger values.
 */
112a18e9994SWei Hu #define MAX_RX_BUFFERS_PER_QUEUE	8192
113a18e9994SWei Hu #define DEF_RX_BUFFERS_PER_QUEUE	1024
114a18e9994SWei Hu #define MIN_RX_BUFFERS_PER_QUEUE	128
115ce110ea1SWei Hu 
/* Number of TX buffers per queue. Must be a power of two.
 * Allocation may fail for larger values.
 * The max value was derived, through testing, as the maximum
 * number of allocatable pages supported on the host per guest.
 * TX buffer sizes beyond this value are rejected by the hardware.
 */
123a18e9994SWei Hu #define MAX_SEND_BUFFERS_PER_QUEUE	16384
124a18e9994SWei Hu #define DEF_SEND_BUFFERS_PER_QUEUE	1024
125a18e9994SWei Hu #define MIN_SEND_BUFFERS_PER_QUEUE	128
126ce110ea1SWei Hu 
127ce110ea1SWei Hu #define EQ_SIZE				(8 * PAGE_SIZE)
128ce110ea1SWei Hu #define LOG2_EQ_THROTTLE		3
129ce110ea1SWei Hu 
1301833cf13SWei Hu #define MAX_PORTS_IN_MANA_DEV		8
131ce110ea1SWei Hu 
/* Per-TX-buffer bookkeeping: the mbuf handed to the HW and its DMA map. */
struct mana_send_buf_info {
	struct mbuf			*mbuf;
	bus_dmamap_t			dma_map;

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info	wqe_inf;
};
142ce110ea1SWei Hu 
/* Per-queue software statistics. The trailing comment on each counter
 * notes whether it is maintained on the RX path, the TX path, or both.
 */
struct mana_stats {
	counter_u64_t			packets;		/* rx, tx */
	counter_u64_t			bytes;			/* rx, tx */
	counter_u64_t			stop;			/* tx */
	counter_u64_t			wakeup;			/* tx */
	counter_u64_t			collapse;		/* tx */
	counter_u64_t			collapse_err;		/* tx */
	counter_u64_t			dma_mapping_err;	/* rx, tx */
	counter_u64_t			mbuf_alloc_fail;	/* rx */
	counter_u64_t			partial_refill;		/* rx */
	counter_u64_t			alt_chg;		/* tx */
	counter_u64_t			alt_reset;		/* tx */
	counter_u64_t			cqe_err;		/* tx */
	counter_u64_t			cqe_unknown_type;	/* tx */
};
158ce110ea1SWei Hu 
/* Software state of one transmit queue (SQ). */
struct mana_txq {
	struct gdma_queue	*gdma_sq;

	/* GDMA SQ id, with bitfield access to the vsq_frame portion that
	 * is also carried in the TX short OOB (see mana_tx_short_oob).
	 */
	union {
		uint32_t	gdma_txq_id;
		struct {
			uint32_t	reserved1	:10;
			uint32_t	vsq_frame	:14;
			uint32_t	reserved2	:8;
		};
	};

	uint16_t		vp_offset;

	if_t			ndev;
	/* Store index to the array of tx_qp in port structure */
	int			idx;
	/* The alternative txq idx when this txq is under heavy load */
	int			alt_txq_idx;

	/* The mbufs are sent to the HW and we are waiting for the CQEs. */
	struct mana_send_buf_info	*tx_buf_info;
	uint16_t		next_to_use;
	uint16_t		next_to_complete;

	/* Count of sends posted to HW but not yet completed. */
	atomic_t		pending_sends;

	/* Software ring feeding this SQ, and its lock. */
	struct buf_ring		*txq_br;
	struct mtx		txq_mtx;
	char			txq_mtx_name[16];

	/* TSO byte/packet counts (plain uint64_t, not counter(9)). */
	uint64_t		tso_pkts;
	uint64_t		tso_bytes;

	/* Deferred enqueue task and its taskqueue. */
	struct task		enqueue_task;
	struct taskqueue	*enqueue_tq;

	struct mana_stats	stats;
};
198ce110ea1SWei Hu 
199ce110ea1SWei Hu 
/*
 * Max WQE size is 512B. The first 8B is for the GDMA Out of Band (OOB);
 * next, the Client OOB can be either 8B or 24B. Thus, the max
 * space for SGL entries in a single WQE is 512 - 8 - 8 = 496B. Since each
 * SGL is 16B in size, the max number of SGLs in a WQE is 496/16 = 31.
 * Save one for emergency use, set the MAX_MBUF_FRAGS allowed to 30.
 */
207ce110ea1SWei Hu #define	MAX_MBUF_FRAGS		30
208ce110ea1SWei Hu #define MANA_TSO_MAXSEG_SZ	PAGE_SIZE
209643fd7b4SWei Hu #define MANA_TSO_MAX_SZ		IP_MAXPACKET
210ce110ea1SWei Hu 
/* mbuf data and frags dma mappings: one DMA address and length per
 * segment (data buffer plus up to MAX_MBUF_FRAGS fragments).
 */
struct mana_mbuf_head {
	bus_addr_t dma_handle[MAX_MBUF_FRAGS + 1];

	uint32_t size[MAX_MBUF_FRAGS + 1];
};

/* Space reserved to hold a struct mana_mbuf_head. */
#define MANA_HEADROOM		sizeof(struct mana_mbuf_head)

/* TX WQE OOB format: short (s_oob only) or long (s_oob + l_oob). */
enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};
224ce110ea1SWei Hu 
/* Short TX out-of-band data, present in every TX WQE. */
struct mana_tx_short_oob {
	uint32_t pkt_fmt		:2;  /* enum mana_tx_pkt_format */
	uint32_t is_outer_ipv4		:1;
	uint32_t is_outer_ipv6		:1;
	uint32_t comp_iphdr_csum	:1;  /* request IP header csum offload */
	uint32_t comp_tcp_csum		:1;  /* request TCP csum offload */
	uint32_t comp_udp_csum		:1;  /* request UDP csum offload */
	uint32_t supress_txcqe_gen	:1;  /* (sic) suppress TX CQE generation */
	uint32_t vcq_num		:24;

	uint32_t trans_off		:10; /* Transport header offset */
	uint32_t vsq_frame		:14;
	uint32_t short_vp_offset	:8;
}; /* HW DATA */

/* Long TX OOB extension: encapsulation and 802.1Q VLAN fields. */
struct mana_tx_long_oob {
	uint32_t is_encap		:1;
	uint32_t inner_is_ipv6		:1;
	uint32_t inner_tcp_opt		:1;
	uint32_t inject_vlan_pri_tag	:1;
	uint32_t reserved1		:12;
	uint32_t pcp			:3;  /* 802.1Q */
	uint32_t dei			:1;  /* 802.1Q */
	uint32_t vlan_id		:12; /* 802.1Q */

	uint32_t inner_frame_offset	:10;
	uint32_t inner_ip_rel_offset	:6;
	uint32_t long_vp_offset		:12;
	uint32_t reserved2		:4;

	uint32_t reserved3;
	uint32_t reserved4;
}; /* HW DATA */

/* Full TX OOB; l_oob is only consumed in MANA_LONG_PKT_FMT. */
struct mana_tx_oob {
	struct mana_tx_short_oob	s_oob;
	struct mana_tx_long_oob		l_oob;
}; /* HW DATA */
263ce110ea1SWei Hu 
/* Whether a completion queue carries RX or TX completions. */
enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

/* CQE types reported in mana_cqe_header.cqe_type; RX types are < 32,
 * TX types start at 32.
 */
enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION	1

/* Common header at the start of every MANA CQE. */
struct mana_cqe_header {
	uint32_t cqe_type	:6;  /* enum mana_cqe_type */
	uint32_t client_type	:2;  /* cf. MANA_CQE_COMPLETION */
	uint32_t vendor_err	:24;
}; /* HW DATA */
295ce110ea1SWei Hu 
296ce110ea1SWei Hu /* NDIS HASH Types */
297ce110ea1SWei Hu #define NDIS_HASH_IPV4		BIT(0)
298ce110ea1SWei Hu #define NDIS_HASH_TCP_IPV4	BIT(1)
299ce110ea1SWei Hu #define NDIS_HASH_UDP_IPV4	BIT(2)
300ce110ea1SWei Hu #define NDIS_HASH_IPV6		BIT(3)
301ce110ea1SWei Hu #define NDIS_HASH_TCP_IPV6	BIT(4)
302ce110ea1SWei Hu #define NDIS_HASH_UDP_IPV6	BIT(5)
303ce110ea1SWei Hu #define NDIS_HASH_IPV6_EX	BIT(6)
304ce110ea1SWei Hu #define NDIS_HASH_TCP_IPV6_EX	BIT(7)
305ce110ea1SWei Hu #define NDIS_HASH_UDP_IPV6_EX	BIT(8)
306ce110ea1SWei Hu 
307ce110ea1SWei Hu #define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
308ce110ea1SWei Hu #define MANA_HASH_L4                                                         \
309ce110ea1SWei Hu 	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
310ce110ea1SWei Hu 	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
311ce110ea1SWei Hu 
312ce110ea1SWei Hu #define NDIS_HASH_IPV4_L3_MASK	(NDIS_HASH_IPV4)
313ce110ea1SWei Hu #define NDIS_HASH_IPV4_L4_MASK	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4)
314ce110ea1SWei Hu #define NDIS_HASH_IPV6_L3_MASK	(NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
315ce110ea1SWei Hu #define NDIS_HASH_IPV6_L4_MASK						\
316ce110ea1SWei Hu     (NDIS_HASH_TCP_IPV6 | NDIS_HASH_UDP_IPV6 |				\
317ce110ea1SWei Hu     NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
318ce110ea1SWei Hu #define NDIS_HASH_IPV4_MASK						\
319ce110ea1SWei Hu     (NDIS_HASH_IPV4_L3_MASK | NDIS_HASH_IPV4_L4_MASK)
320ce110ea1SWei Hu #define NDIS_HASH_IPV6_MASK						\
321ce110ea1SWei Hu     (NDIS_HASH_IPV6_L3_MASK | NDIS_HASH_IPV6_L4_MASK)
322ce110ea1SWei Hu 
323ce110ea1SWei Hu 
/* Per-packet info within an RX completion (one per coalesced packet). */
struct mana_rxcomp_perpkt_info {
	uint32_t pkt_len	:16;
	uint32_t reserved1	:16;
	uint32_t reserved2;
	uint32_t pkt_hash;	/* RSS hash; type given by rx_hashtype */
}; /* HW DATA */
330ce110ea1SWei Hu 
/* Max per-packet info entries in one RX completion (CQE_RX_COALESCED_4). */
#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	uint32_t rx_vlan_id			:12;
	uint32_t rx_vlantag_present		:1;
	uint32_t rx_outer_iphdr_csum_succeed	:1;
	uint32_t rx_outer_iphdr_csum_fail	:1;
	uint32_t reserved1			:1;
	uint32_t rx_hashtype			:9;  /* NDIS_HASH_* bits */
	uint32_t rx_iphdr_csum_succeed		:1;
	uint32_t rx_iphdr_csum_fail		:1;
	uint32_t rx_tcp_csum_succeed		:1;
	uint32_t rx_tcp_csum_fail		:1;
	uint32_t rx_udp_csum_succeed		:1;
	uint32_t rx_udp_csum_fail		:1;
	uint32_t reserved2			:1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	uint32_t rx_wqe_offset;
}; /* HW DATA */
355ce110ea1SWei Hu 
/* Transmit completion OOB. */
struct mana_tx_comp_oob {
	struct mana_cqe_header	cqe_hdr;

	uint32_t tx_data_offset;

	uint32_t tx_sgl_offset		:5;
	uint32_t tx_wqe_offset		:27;

	uint32_t reserved[12];
}; /* HW DATA */
366ce110ea1SWei Hu 
367ce110ea1SWei Hu struct mana_rxq;
368ce110ea1SWei Hu 
/* Max CQEs copied out of a CQ in one polling pass. */
#define CQE_POLLING_BUFFER	512

/* Software state of one completion queue; serves either an RX or a TX
 * queue, selected by 'type'.
 */
struct mana_cq {
	struct gdma_queue	*gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ. */
	uint32_t		gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type	type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Only and must be non-NULL if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq		*rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Only and must be non-NULL if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq		*txq;

	/* Taskqueue and related structs */
	struct task		cleanup_task;
	struct taskqueue	*cleanup_tq;
	int			cpu;		/* CPU the cleanup task binds to */
	bool			do_not_ring_db;

	/* Budget for one cleanup task */
	int			work_done;
	int			budget;

	/* Buffer which the CQ handler can copy the CQE's into. */
	struct gdma_comp	gdma_comp_buf[CQE_POLLING_BUFFER];
};
403ce110ea1SWei Hu 
/* Per-RX-buffer bookkeeping: the posted work request, the mbuf backing
 * it, and its DMA mapping.
 */
struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request		wqe_req;

	struct mbuf			*mbuf;
	bus_dmamap_t			dma_map;

	/* SGL of the buffer going to be sent as part of the work request. */
	uint32_t			num_sge;
	struct gdma_sge			sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info	wqe_inf;
};
421ce110ea1SWei Hu 
/* Software state of one receive queue (RQ). */
struct mana_rxq {
	struct gdma_queue		*gdma_rq;
	/* Cache the gdma receive queue id */
	uint32_t			gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	uint32_t			rxq_idx;

	/* Size of each receive buffer posted to this RQ. */
	uint32_t			datasize;

	mana_handle_t			rxobj;

	/* Completion used when fencing this RQ (cf. MANA_FENCE_RQ and
	 * CQE_RX_OBJECT_FENCE).
	 */
	struct completion		fence_event;

	struct mana_cq			rx_cq;

	if_t				ndev;
	struct lro_ctrl			lro;

	/* Total number of receive buffers to be allocated */
	uint32_t			num_rx_buf;

	/* Ring indices for buffer refill; cf. MANA_RX_REFILL_THRESH. */
	uint32_t			buf_index;
	uint32_t			next_to_refill;
	uint32_t			refill_thresh;

	/* LRO attempt/failure counts (plain uint64_t, not counter(9)). */
	uint64_t			lro_tried;
	uint64_t			lro_failed;
	struct mana_stats		stats;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob	rx_oobs[];
};
457ce110ea1SWei Hu 
/* A TX queue pair: the send queue, its completion queue, and the HW
 * object handle that binds them.
 */
struct mana_tx_qp {
	struct mana_txq			txq;

	struct mana_cq			tx_cq;

	mana_handle_t			tx_object;
};

/* Per-port software statistics, aggregated over all queues. */
struct mana_port_stats {
	counter_u64_t		rx_packets;
	counter_u64_t		tx_packets;

	counter_u64_t		rx_bytes;
	counter_u64_t		tx_bytes;

	counter_u64_t		rx_drops;
	counter_u64_t		tx_drops;

	counter_u64_t		stop_queue;
	counter_u64_t		wake_queue;
};
479ce110ea1SWei Hu 
/* Per-device context shared by all ports of one MANA device. */
struct mana_context {
	struct gdma_dev		*gdma_dev;

	uint16_t		num_ports;

	/* EQs shared by the ports of this device. */
	struct mana_eq		*eqs;

	if_t			ports[MAX_PORTS_IN_MANA_DEV];
};
489ce110ea1SWei Hu 
/* Per-port (per-ifnet) context. */
struct mana_port_context {
	struct mana_context	*ac;
	if_t			ndev;
	struct ifmedia		media;

	/* Port-level lock; cf. the MANA_APC_LOCK_* macros below. */
	struct sx		apc_lock;

	/* DMA tag used for queue bufs of the entire port */
	bus_dma_tag_t		rx_buf_tag;
	bus_dma_tag_t		tx_buf_tag;

	uint8_t			mac_addr[ETHER_ADDR_LEN];

	/* Whether RSS is enabled; TRI_STATE_UNKNOWN before configuration. */
	enum TRI_STATE		rss_state;

	mana_handle_t		default_rxobj;
	bool			tx_shortform_allowed;
	uint16_t		tx_vp_offset;

	struct mana_tx_qp	*tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	uint32_t		indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t		rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/*  Hash key used by the NIC */
	uint8_t			hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq		**rxqs;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int		max_queues;
	unsigned int		num_queues;

	/* Configured ring sizes; bounded by the *_BUFFERS_PER_QUEUE limits. */
	unsigned int		tx_queue_size;
	unsigned int		rx_queue_size;

	mana_handle_t		port_handle;

	/* Reference count of vport users; cf. mana_cfg_vport(). */
	int			vport_use_count;

	uint16_t		port_idx;

	uint16_t		frame_size;

	bool			port_is_up;
	bool			port_st_save; /* Saved port state */

	bool			enable_tx_altq;

	/* CPU binding state for the cleanup tasks (cf. mana_cq.cpu). */
	bool			bind_cleanup_thread_cpu;
	int			last_tx_cq_bind_cpu;
	int			last_rx_cq_bind_cpu;

	struct mana_port_stats	port_stats;

	struct sysctl_oid_list	*port_list;
	struct sysctl_ctx_list	que_sysctl_ctx;
};
552ce110ea1SWei Hu 
/* Convenience wrappers around the per-port sx(9) lock (apc_lock). */
#define MANA_APC_LOCK_INIT(apc)			\
	sx_init(&(apc)->apc_lock, "MANA port lock")
#define MANA_APC_LOCK_DESTROY(apc)		sx_destroy(&(apc)->apc_lock)
#define MANA_APC_LOCK_LOCK(apc)			sx_xlock(&(apc)->apc_lock)
#define MANA_APC_LOCK_UNLOCK(apc)		sx_unlock(&(apc)->apc_lock)
558ce110ea1SWei Hu 
559ce110ea1SWei Hu int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
560ce110ea1SWei Hu     bool update_hash, bool update_tab);
561ce110ea1SWei Hu 
56237d22ce0SJustin Hibbits int mana_alloc_queues(if_t ndev);
56337d22ce0SJustin Hibbits int mana_attach(if_t ndev);
56437d22ce0SJustin Hibbits int mana_detach(if_t ndev);
565ce110ea1SWei Hu 
566ce110ea1SWei Hu int mana_probe(struct gdma_dev *gd);
567ce110ea1SWei Hu void mana_remove(struct gdma_dev *gd);
568ce110ea1SWei Hu 
/* Specification of a queue handed to / returned by WQ object creation
 * (cf. mana_create_wq_obj()).
 */
struct mana_obj_spec {
	uint32_t	queue_index;
	uint64_t	gdma_region;
	uint32_t	queue_size;
	uint32_t	attached_eq;
	uint32_t	modr_ctx_id;
};

/* Management command opcodes carried in the gdma request header. */
enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
};
587ce110ea1SWei Hu 
/* Query Device Configuration (MANA_QUERY_DEV_CONFIG) */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr	hdr;

	/* Driver Capability flags */
	uint64_t		drv_cap_flags1;
	uint64_t		drv_cap_flags2;
	uint64_t		drv_cap_flags3;
	uint64_t		drv_cap_flags4;

	/* Driver's MANA protocol version (MANA_*_VERSION above). */
	uint32_t		proto_major_ver;
	uint32_t		proto_minor_ver;
	uint32_t		proto_micro_ver;

	uint32_t		reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr	hdr;

	/* PF capability flags reported by the device. */
	uint64_t		pf_cap_flags1;
	uint64_t		pf_cap_flags2;
	uint64_t		pf_cap_flags3;
	uint64_t		pf_cap_flags4;

	uint16_t		max_num_vports;
	uint16_t		reserved;
	uint32_t		max_num_eqs;
}; /* HW DATA */

/* Query vPort Configuration (MANA_QUERY_VPORT_CONFIG) */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr	hdr;
	uint32_t		vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr	hdr;
	uint32_t		max_num_sq;
	uint32_t		max_num_rq;
	uint32_t		num_indirection_ent;
	uint32_t		reserved1;
	uint8_t			mac_addr[6];
	uint8_t			reserved2[2];
	mana_handle_t		vport;
}; /* HW DATA */

/* Configure vPort (MANA_CONFIG_VPORT_TX) */
struct mana_config_vport_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint32_t		pdid;
	uint32_t		doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr	hdr;
	uint16_t		tx_vport_offset;
	uint8_t			short_form_allowed;
	uint8_t			reserved;
}; /* HW DATA */
649ce110ea1SWei Hu 
/* Create WQ Object (MANA_CREATE_WQ_OBJ) */
struct mana_create_wqobj_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint32_t		wq_type;
	uint32_t		reserved;
	uint64_t		wq_gdma_region;
	uint64_t		cq_gdma_region;
	uint32_t		wq_size;
	uint32_t		cq_size;
	uint32_t		cq_moderation_ctx_id;
	uint32_t		cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr	hdr;
	uint32_t		wq_id;
	uint32_t		cq_id;
	mana_handle_t		wq_obj;
}; /* HW DATA */

/* Destroy WQ Object (MANA_DESTROY_WQ_OBJ) */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr	hdr;
	uint32_t		wq_type;
	uint32_t		reserved;
	mana_handle_t		wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

/* Fence RQ (MANA_FENCE_RQ) */
struct mana_fence_rq_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering (MANA_CONFIG_VPORT_RX) */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint16_t		num_indir_entries;
	uint16_t		indir_tab_offset;
	uint32_t		rx_enable;
	uint32_t		rss_enable;
	uint8_t			update_default_rxobj;
	uint8_t			update_hashkey;
	uint8_t			update_indir_tab;
	uint8_t			reserved;
	mana_handle_t		default_rxobj;
	uint8_t			hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */
712ce110ea1SWei Hu 
#define MANA_MAX_NUM_QUEUES		16

#define MANA_SHORT_VPORT_OFFSET_MAX	((1U << 8) - 1)

/* Advance a ring index by one with wrap-around; 'size' must be a
 * power of two for the mask to be a valid modulo.
 */
#define MANA_IDX_NEXT(idx, size)	(((idx) + 1) & ((size) - 1))
/* Number of ring entries from start_idx up to end_idx, accounting for
 * wrap-around in a ring of 'size' entries.
 */
#define MANA_GET_SPACE(start_idx, end_idx, size)			\
	(((end_idx) >= (start_idx)) ?					\
	((end_idx) - (start_idx)) : ((size) - (start_idx) + (end_idx)))

#define MANA_RX_REFILL_THRESH		256
723*9b8701b8SWei Hu 
/* Everything needed to post one TX WQE: the work request, its SGL,
 * the TX OOB, and the posted-WQE result.
 */
struct mana_tx_package {
	struct gdma_wqe_request		wqe_req;
	struct gdma_sge			sgl_array[MAX_MBUF_FRAGS];

	struct mana_tx_oob		tx_oob;

	struct gdma_posted_wqe_info	wqe_info;
};
732ce110ea1SWei Hu 
733ce110ea1SWei Hu int mana_restart(struct mana_port_context *apc);
734ce110ea1SWei Hu 
735b685df31SWei Hu int mana_create_wq_obj(struct mana_port_context *apc,
736b685df31SWei Hu     mana_handle_t vport,
737b685df31SWei Hu     uint32_t wq_type, struct mana_obj_spec *wq_spec,
738b685df31SWei Hu     struct mana_obj_spec *cq_spec,
739b685df31SWei Hu     mana_handle_t *wq_obj);
740b685df31SWei Hu 
741b685df31SWei Hu void mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
742b685df31SWei Hu     mana_handle_t wq_obj);
743b685df31SWei Hu 
744b685df31SWei Hu int mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
745b685df31SWei Hu     uint32_t doorbell_pg_id);
746b685df31SWei Hu 
747b685df31SWei Hu void mana_uncfg_vport(struct mana_port_context *apc);
748ce110ea1SWei Hu #endif /* _MANA_H */
749