/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA) definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64

/* The Toeplitz hash key's length in bytes: must be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

/* This max value for RX buffers is derived from __alloc_pages()'s max page
 * allocation calculation, which allows at most 2^(MAX_ORDER - 1) pages. An RX
 * buffer size beyond this value gets rejected by the __alloc_pages() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE 8192
#define DEF_RX_BUFFERS_PER_QUEUE 512
#define MIN_RX_BUFFERS_PER_QUEUE 128

/* This max value for TX buffers was derived, through testing, as the maximum
 * number of allocatable pages the host supports per guest. A TX queue size
 * beyond this value is rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE 16384
#define DEF_TX_BUFFERS_PER_QUEUE 256
#define MIN_TX_BUFFERS_PER_QUEUE 128

#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update these counts whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};

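/* Example (illustrative; these guards and the update pattern are not part of
 * this header): the *_COUNT macros above are meant to equal the number of u64
 * counters that precede syncp, so a compile-time check could catch drift,
 * assuming static_assert()/offsetof() are available here:
 *
 *	static_assert(offsetof(struct mana_stats_rx, syncp) ==
 *		      MANA_STATS_RX_COUNT * sizeof(u64));
 *	static_assert(offsetof(struct mana_stats_tx, syncp) ==
 *		      MANA_STATS_TX_COUNT * sizeof(u64));
 *
 * Writers bump the counters under the usual u64_stats_sync pattern:
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->packets++;
 *	stats->bytes += len;
 *	u64_stats_update_end(&stats->syncp);
 */
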
struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* skb data and frags DMA mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

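/* Example (hedged sketch, not the driver's exact code): with
 * ndev->needed_headroom set to MANA_HEADROOM, the TX path can stash each
 * skb's DMA mappings in the skb headroom and find them again at completion
 * time for unmapping. The variable names here are assumptions:
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *	dma_addr_t da;
 *
 *	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, da))
 *		return -ENOMEM;
 *	ash->dma_handle[0] = da;
 *	ash->size[0] = skb_headlen(skb);
 */
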
enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

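/* Example (a minimal sketch under assumptions, not the driver's TX path
 * verbatim): filling the short-form OOB for an outer-IPv4 TCP packet with
 * checksum offload. txq and cq are assumed to be the mana_txq and mana_cq
 * structures declared in this header; skb handling is elided:
 *
 *	struct mana_tx_oob tx_oob = {};
 *
 *	tx_oob.s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
 *	tx_oob.s_oob.vcq_num = cq->gdma_id;
 *	tx_oob.s_oob.vsq_frame = txq->vsq_frame;
 *	tx_oob.s_oob.short_vp_offset = txq->vp_offset;
 *	tx_oob.s_oob.is_outer_ipv4 = 1;
 *	tx_oob.s_oob.comp_tcp_csum = 1;
 *	tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 */
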
enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

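/* Example (illustrative; assumes the polled struct gdma_comp from gdma.h
 * exposes the raw CQE bytes as cqe_data[]): every MANA completion begins
 * with struct mana_cqe_header, so a handler can dispatch on cqe_type:
 *
 *	struct mana_cqe_header *hdr = (struct mana_cqe_header *)comp->cqe_data;
 *
 *	switch (hdr->cqe_type) {
 *	case CQE_RX_OKAY:
 *		break;		// normal receive
 *	case CQE_RX_TRUNCATED:
 *		break;		// frame exceeded the posted buffer
 *	default:
 *		break;		// see vendor_err for details
 *	}
 */
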
/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

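/* Example (hedged sketch): mapping the 9-bit rx_hashtype reported in the RX
 * completion OOB (struct mana_rxcomp_oob below) onto an skb hash type. The
 * L4 bits take precedence because they indicate a 4-tuple hash:
 *
 *	if (hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, hashval, PKT_HASH_TYPE_L4);
 *	else if (hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, hashval, PKT_HASH_TYPE_L3);
 *	else
 *		skb_set_hash(skb, hashval, PKT_HASH_TYPE_NONE);
 */
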
struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify that each CQE comes to the right CQ) */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be posted as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)

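/* Example (a minimal sketch of how these macros combine; the helper and its
 * exact policy are assumptions, not necessarily the driver's): deriving the
 * RX buffer geometry for a given MTU. XDP headroom is only reserved when the
 * MTU is small enough for an XDP program to run:
 *
 *	static void get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
 *				  u32 *headroom)
 *	{
 *		if (mtu > MANA_XDP_MTU_MAX)
 *			*headroom = 0;		// XDP not usable at this MTU
 *		else
 *			*headroom = XDP_PACKET_HEADROOM;
 *
 *		*alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
 *		*datasize = mtu + ETH_HLEN;
 *	}
 */
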
struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not the gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reuse */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};

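/* Example (illustrative): since rx_oobs[] is a flexible array annotated with
 * __counted_by(num_rx_buf), a mana_rxq is allocated in one shot and the
 * counter is set before the array is used:
 *
 *	struct mana_rxq *rxq;
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, num_rx_buf), GFP_KERNEL);
 *	if (!rxq)
 *		return NULL;
 *	rxq->num_rx_buf = num_rx_buf;
 */
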
struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes. */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	unsigned int rx_queue_size;
	unsigned int tx_queue_size;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;

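/* Example (hedged sketch of the lifecycle implied by the prototypes above;
 * error handling elided and the calling context assumed): a GDMA device is
 * probed once, after which each port's net_device can be detached and
 * re-attached, e.g. around a queue-count change:
 *
 *	err = mana_probe(gd, false);
 *	...
 *	err = mana_detach(ndev, false);
 *	err = mana_attach(ndev);
 *	...
 *	mana_remove(gd, false);
 */
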
/* A CQ can be created not associated with any EQ */
#define GDMA_CQ_NO_EQ  0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */

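/* Example (illustrative; local variable names assumed): the v2 RX steering
 * request ends in a flexible indirection table counted by num_indir_entries,
 * so the request length and table offset are computed before it is sent:
 *
 *	struct mana_cfg_rx_steer_req_v2 *req;
 *	u32 req_buf_size = struct_size(req, indir_tab, num_entries);
 *
 *	req = kzalloc(req_buf_size, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->num_indir_entries = num_entries;
 *	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
 *					 indir_tab);
 */
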
struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* Rx discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS		\
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT			\
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000

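/* Example (illustrative): req_stats in struct mana_query_gf_stat_req is a
 * bitmask of the STATISTICS_FLAGS_* values above; requesting only the RX and
 * TX byte counters could look like:
 *
 *	struct mana_query_gf_stat_req req = {};
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_TX_BYTES;
 */
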
#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

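/* Example (hedged sketch; the region handles and sizes are placeholders):
 * creating a WQ/CQ pair as one hardware object and tearing it down again.
 * The specs describe previously created GDMA regions; on success the
 * hardware-assigned queue indices are returned in the specs:
 *
 *	struct mana_obj_spec wq_spec = {};
 *	struct mana_obj_spec cq_spec = {};
 *	mana_handle_t wq_obj = INVALID_MANA_HANDLE;
 *	int err;
 *
 *	wq_spec.gdma_region = wq_gdma_region;	// assumed: created earlier
 *	wq_spec.queue_size = wq_size;
 *	cq_spec.gdma_region = cq_gdma_region;	// assumed: created earlier
 *	cq_spec.queue_size = cq_size;
 *	cq_spec.attached_eq = eq_id;		// or GDMA_CQ_NO_EQ
 *
 *	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
 *				 &wq_spec, &cq_spec, &wq_obj);
 *	...
 *	mana_destroy_wq_obj(apc, GDMA_SQ, wq_obj);
 */
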
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index);
#endif /* _MANA_H */