/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef _MANA_H
#define _MANA_H

#include <sys/types.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/counter.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/tcp_lro.h>

#include "gdma.h"
#include "hw_channel.h"


/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */
/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

#define DRV_MODULE_NAME		"mana"

#ifndef DRV_MODULE_VERSION
#define DRV_MODULE_VERSION				\
	__XSTRING(MANA_MAJOR_VERSION) "."		\
	__XSTRING(MANA_MINOR_VERSION) "."		\
	__XSTRING(MANA_MICRO_VERSION)
#endif
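/*
 * Note: with the version numbers above, DRV_MODULE_VERSION expands to the
 * string "0.1.1" (__XSTRING() from <sys/cdefs.h> stringifies each component).
 */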
#define DEVICE_NAME	"Microsoft Azure Network Adapter (MANA)"
#define DEVICE_DESC	"MANA adapter"

/*
 * Supported PCI vendor and device IDs
 */
#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT	0x1414
#endif

#define PCI_DEV_ID_MANA_VF	0x00ba

typedef struct _mana_vendor_id_t {
	uint16_t vendor_id;
	uint16_t device_id;
} mana_vendor_id_t;

typedef uint64_t mana_handle_t;
#define INVALID_MANA_HANDLE	((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE	64
#define MANA_INDIRECT_TABLE_MASK	(MANA_INDIRECT_TABLE_SIZE - 1)
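/*
 * Illustrative note (not a definition from this header): an RSS hash value
 * is reduced to an indirection table slot by masking, e.g.
 *	idx = rss_hash & MANA_INDIRECT_TABLE_MASK;
 * which only works because the table size above is a power of 2.
 */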

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE		40

#define COMP_ENTRY_SIZE			64

#define MIN_FRAME_SIZE			146
#define ADAPTER_MTU_SIZE		1500
#define DEFAULT_FRAME_SIZE		(ADAPTER_MTU_SIZE + 14)
#define MAX_FRAME_SIZE			4096

#define RX_BUFFERS_PER_QUEUE		512

#define MAX_SEND_BUFFERS_PER_QUEUE	256

#define EQ_SIZE				(8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE		3

#if 1 /* XXX */
#define MAX_PORTS_IN_MANA_DEV		1
#else
#define MAX_PORTS_IN_MANA_DEV		16
#endif

struct mana_send_buf_info {
	struct mbuf			*mbuf;
	bus_dmamap_t			dma_map;

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info	wqe_inf;
};

struct mana_stats {
	counter_u64_t			packets;		/* rx, tx */
	counter_u64_t			bytes;			/* rx, tx */
	counter_u64_t			stop;			/* tx */
	counter_u64_t			wakeup;			/* tx */
	counter_u64_t			collapse;		/* tx */
	counter_u64_t			collapse_err;		/* tx */
	counter_u64_t			dma_mapping_err;	/* rx, tx */
	counter_u64_t			mbuf_alloc_fail;	/* rx */
	counter_u64_t			alt_chg;		/* tx */
	counter_u64_t			alt_reset;		/* tx */
};

struct mana_txq {
	struct gdma_queue	*gdma_sq;

	union {
		uint32_t	gdma_txq_id;
		struct {
			uint32_t	reserved1	:10;
			uint32_t	vsq_frame	:14;
			uint32_t	reserved2	:8;
		};
	};

	uint16_t		vp_offset;

	struct ifnet		*ndev;
	/* Store index to the array of tx_qp in port structure */
	int			idx;
	/* The alternative txq idx when this txq is under heavy load */
	int			alt_txq_idx;

	/* The mbufs are sent to the HW and we are waiting for the CQEs. */
	struct mana_send_buf_info	*tx_buf_info;
	uint16_t		next_to_use;
	uint16_t		next_to_complete;

	atomic_t		pending_sends;

	struct buf_ring		*txq_br;
	struct mtx		txq_mtx;
	char			txq_mtx_name[16];

	struct task		enqueue_task;
	struct taskqueue	*enqueue_tq;

	struct mana_stats	stats;
};


/*
 * Max WQE size is 512B. The first 8B is for the GDMA Out of Band (OOB),
 * followed by the Client OOB, which can be either 8B or 24B. Thus, with the
 * 8B Client OOB, the max space for SGL entries in a single WQE is
 * 512 - 8 - 8 = 496B. Since each SGL is 16B in size, the max number of SGLs
 * in a WQE is 496/16 = 31. Save one for emergency use, so MAX_MBUF_FRAGS is
 * set to 30.
 */
#define	MAX_MBUF_FRAGS		30
#define MANA_TSO_MAXSEG_SZ	PAGE_SIZE

/* mbuf data and frags dma mappings */
struct mana_mbuf_head {
	bus_addr_t dma_handle[MAX_MBUF_FRAGS + 1];

	uint32_t size[MAX_MBUF_FRAGS + 1];
};

#define MANA_HEADROOM		sizeof(struct mana_mbuf_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	uint32_t pkt_fmt		:2;
	uint32_t is_outer_ipv4		:1;
	uint32_t is_outer_ipv6		:1;
	uint32_t comp_iphdr_csum	:1;
	uint32_t comp_tcp_csum		:1;
	uint32_t comp_udp_csum		:1;
	uint32_t supress_txcqe_gen	:1;
	uint32_t vcq_num		:24;

	uint32_t trans_off		:10; /* Transport header offset */
	uint32_t vsq_frame		:14;
	uint32_t short_vp_offset	:8;
}; /* HW DATA */

struct mana_tx_long_oob {
	uint32_t is_encap		:1;
	uint32_t inner_is_ipv6		:1;
	uint32_t inner_tcp_opt		:1;
	uint32_t inject_vlan_pri_tag	:1;
	uint32_t reserved1		:12;
	uint32_t pcp			:3;  /* 802.1Q */
	uint32_t dei			:1;  /* 802.1Q */
	uint32_t vlan_id		:12; /* 802.1Q */

	uint32_t inner_frame_offset	:10;
	uint32_t inner_ip_rel_offset	:6;
	uint32_t long_vp_offset		:12;
	uint32_t reserved2		:4;

	uint32_t reserved3;
	uint32_t reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob	s_oob;
	struct mana_tx_long_oob		l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION	1

struct mana_cqe_header {
	uint32_t cqe_type	:6;
	uint32_t client_type	:2;
	uint32_t vendor_err	:24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

#define NDIS_HASH_IPV4_L3_MASK	(NDIS_HASH_IPV4)
#define NDIS_HASH_IPV4_L4_MASK	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4)
#define NDIS_HASH_IPV6_L3_MASK	(NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define NDIS_HASH_IPV6_L4_MASK						\
    (NDIS_HASH_TCP_IPV6 | NDIS_HASH_UDP_IPV6 |				\
    NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
#define NDIS_HASH_IPV4_MASK						\
    (NDIS_HASH_IPV4_L3_MASK | NDIS_HASH_IPV4_L4_MASK)
#define NDIS_HASH_IPV6_MASK						\
    (NDIS_HASH_IPV6_L3_MASK | NDIS_HASH_IPV6_L4_MASK)
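/*
 * Illustrative sketch of how these masks relate to the receive path: the
 * 9-bit rx_hashtype field of struct mana_rxcomp_oob (below) carries the NDIS
 * hash type bits above, so a receive handler can test it when choosing an
 * mbuf RSS hash type, e.g.
 *	if (oob->rx_hashtype & NDIS_HASH_TCP_IPV4)
 *		M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
 * The actual mapping is done in the driver code, not in this header.
 */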


struct mana_rxcomp_perpkt_info {
	uint32_t pkt_len	:16;
	uint32_t reserved1	:16;
	uint32_t reserved2;
	uint32_t pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	uint32_t rx_vlan_id			:12;
	uint32_t rx_vlantag_present		:1;
	uint32_t rx_outer_iphdr_csum_succeed	:1;
	uint32_t rx_outer_iphdr_csum_fail	:1;
	uint32_t reserved1			:1;
	uint32_t rx_hashtype			:9;
	uint32_t rx_iphdr_csum_succeed		:1;
	uint32_t rx_iphdr_csum_fail		:1;
	uint32_t rx_tcp_csum_succeed		:1;
	uint32_t rx_tcp_csum_fail		:1;
	uint32_t rx_udp_csum_succeed		:1;
	uint32_t rx_udp_csum_fail		:1;
	uint32_t reserved2			:1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	uint32_t rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header	cqe_hdr;

	uint32_t tx_data_offset;

	uint32_t tx_sgl_offset		:5;
	uint32_t tx_wqe_offset		:27;

	uint32_t reserved[12];
}; /* HW DATA */

struct mana_rxq;

struct mana_cq {
	struct gdma_queue	*gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	uint32_t		gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type	type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * It must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq		*rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * It must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq		*txq;

	/* Pointer to a buffer which the CQ handler can copy the CQE's into. */
	struct gdma_comp	*gdma_comp_buf;
};

#define GDMA_MAX_RQE_SGES	15

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request		wqe_req;

	struct mbuf			*mbuf;
	bus_dmamap_t			dma_map;

	/* SGL of the buffer going to be sent as part of the work request. */
	uint32_t			num_sge;
	struct gdma_sge			sgl[GDMA_MAX_RQE_SGES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info	wqe_inf;
};

struct mana_rxq {
	struct gdma_queue		*gdma_rq;
	/* Cache the gdma receive queue id */
	uint32_t			gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	uint32_t			rxq_idx;

	uint32_t			datasize;

	mana_handle_t			rxobj;

	struct mana_cq			rx_cq;

	struct ifnet			*ndev;
	struct lro_ctrl			lro;

	/* Total number of receive buffers to be allocated */
	uint32_t			num_rx_buf;

	uint32_t			buf_index;

	struct mana_stats		stats;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob	rx_oobs[];
};

struct mana_tx_qp {
	struct mana_txq			txq;

	struct mana_cq			tx_cq;

	mana_handle_t			tx_object;
};

struct mana_port_stats {
	counter_u64_t		rx_packets;
	counter_u64_t		tx_packets;

	counter_u64_t		rx_bytes;
	counter_u64_t		tx_bytes;

	counter_u64_t		rx_drops;
	counter_u64_t		tx_drops;

	counter_u64_t		stop_queue;
	counter_u64_t		wake_queue;
};

struct mana_context {
	struct gdma_dev		*gdma_dev;

	uint16_t		num_ports;

	struct ifnet		*ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context	*ac;
	struct ifnet		*ndev;
	struct ifmedia		media;

	struct sx		apc_lock;

	/* DMA tag used for queue bufs of the entire port */
	bus_dma_tag_t		rx_buf_tag;
	bus_dma_tag_t		tx_buf_tag;

	uint8_t			mac_addr[ETHER_ADDR_LEN];

	struct mana_eq		*eqs;

	enum TRI_STATE		rss_state;

	mana_handle_t		default_rxobj;
	bool			tx_shortform_allowed;
	uint16_t		tx_vp_offset;

	struct mana_tx_qp	*tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	uint32_t		indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t		rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/*  Hash key used by the NIC */
	uint8_t			hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq		**rxqs;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int		max_queues;
	unsigned int		num_queues;

	mana_handle_t		port_handle;

	uint16_t		port_idx;

	uint16_t		frame_size;

	bool			port_is_up;
	bool			port_st_save; /* Saved port state */

	bool			enable_tx_altq;
	bool			bind_cleanup_thread_cpu;

	struct mana_port_stats	port_stats;

	struct sysctl_oid_list	*port_list;
	struct sysctl_ctx_list	que_sysctl_ctx;
};

#define MANA_APC_LOCK_INIT(apc)			\
	sx_init(&(apc)->apc_lock, "MANA port lock")
#define MANA_APC_LOCK_DESTROY(apc)		sx_destroy(&(apc)->apc_lock)
#define MANA_APC_LOCK_LOCK(apc)			sx_xlock(&(apc)->apc_lock)
#define MANA_APC_LOCK_UNLOCK(apc)		sx_unlock(&(apc)->apc_lock)
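/*
 * Illustrative sketch (not the driver's exact code) of how the wrappers
 * above are meant to be used: the per-port sx lock serializes
 * reconfiguration, e.g.
 *	MANA_APC_LOCK_LOCK(apc);
 *	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
 *	MANA_APC_LOCK_UNLOCK(apc);
 * with MANA_APC_LOCK_INIT()/MANA_APC_LOCK_DESTROY() bracketing the port
 * context's lifetime.
 */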

int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
    bool update_hash, bool update_tab);

int mana_alloc_queues(struct ifnet *ndev);
int mana_attach(struct ifnet *ndev);
int mana_detach(struct ifnet *ndev);

int mana_probe(struct gdma_dev *gd);
void mana_remove(struct gdma_dev *gd);

struct mana_obj_spec {
	uint32_t	queue_index;
	uint64_t	gdma_region;
	uint32_t	queue_size;
	uint32_t	attached_eq;
	uint32_t	modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,
};
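/*
 * Most of the command codes above have a matching request/response structure
 * pair below; every request begins with a struct gdma_req_hdr and every
 * response with a struct gdma_resp_hdr (both declared in gdma.h).
 */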

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr	hdr;

	/* Driver Capability flags */
	uint64_t		drv_cap_flags1;
	uint64_t		drv_cap_flags2;
	uint64_t		drv_cap_flags3;
	uint64_t		drv_cap_flags4;

	uint32_t		proto_major_ver;
	uint32_t		proto_minor_ver;
	uint32_t		proto_micro_ver;

	uint32_t		reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr	hdr;

	uint64_t		pf_cap_flags1;
	uint64_t		pf_cap_flags2;
	uint64_t		pf_cap_flags3;
	uint64_t		pf_cap_flags4;

	uint16_t		max_num_vports;
	uint16_t		reserved;
	uint32_t		max_num_eqs;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr	hdr;
	uint32_t		vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr	hdr;
	uint32_t		max_num_sq;
	uint32_t		max_num_rq;
	uint32_t		num_indirection_ent;
	uint32_t		reserved1;
	uint8_t			mac_addr[6];
	uint8_t			reserved2[2];
	mana_handle_t		vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint32_t		pdid;
	uint32_t		doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr	hdr;
	uint16_t		tx_vport_offset;
	uint8_t			short_form_allowed;
	uint8_t			reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint32_t		wq_type;
	uint32_t		reserved;
	uint64_t		wq_gdma_region;
	uint64_t		cq_gdma_region;
	uint32_t		wq_size;
	uint32_t		cq_size;
	uint32_t		cq_moderation_ctx_id;
	uint32_t		cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr	hdr;
	uint32_t		wq_id;
	uint32_t		cq_id;
	mana_handle_t		wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr	hdr;
	uint32_t		wq_type;
	uint32_t		reserved;
	mana_handle_t		wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr	hdr;
	mana_handle_t		vport;
	uint16_t		num_indir_entries;
	uint16_t		indir_tab_offset;
	uint32_t		rx_enable;
	uint32_t		rss_enable;
	uint8_t			update_default_rxobj;
	uint8_t			update_hashkey;
	uint8_t			update_indir_tab;
	uint8_t			reserved;
	mana_handle_t		default_rxobj;
	uint8_t			hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr	hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES		16

#define MANA_SHORT_VPORT_OFFSET_MAX	((1U << 8) - 1)
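/*
 * The short TX OOB format can only carry an 8-bit vport offset
 * (short_vp_offset in struct mana_tx_short_oob), hence this limit; a larger
 * tx_vp_offset is expected to require the long format (MANA_LONG_PKT_FMT).
 */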

struct mana_tx_package {
	struct gdma_wqe_request		wqe_req;
	struct gdma_sge			sgl_array[MAX_MBUF_FRAGS];

	struct mana_tx_oob		tx_oob;

	struct gdma_posted_wqe_info	wqe_info;
};

int mana_restart(struct mana_port_context *apc);

#endif /* _MANA_H */