/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA) definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
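
/*
 * Illustrative sketch (not part of this header): with a power-of-2 table
 * size, RSS queue selection can reduce to a mask instead of a modulo.
 * The names key, tuple and toeplitz_hash below are hypothetical.
 *
 *	u32 hash = toeplitz_hash(key, MANA_HASH_KEY_SIZE, tuple);
 *	u32 queue = indir_table[hash & (MANA_INDIRECT_TABLE_DEF_SIZE - 1)];
 */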

#define COMP_ENTRY_SIZE 64

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};
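
/*
 * Illustrative sketch (not from this header): the u64 counters above are
 * protected by the u64_stats_sync seqcount, so a per-queue writer in the
 * datapath and a reader in ethtool can run concurrently on 32-bit kernels.
 *
 * Writer (TX path):
 *
 *	u64_stats_update_begin(&txq->stats.syncp);
 *	txq->stats.packets++;
 *	txq->stats.bytes += skb->len;
 *	u64_stats_update_end(&txq->stats.syncp);
 *
 * Reader:
 *
 *	unsigned int start;
 *	u64 packets, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&txq->stats.syncp);
 *		packets = txq->stats.packets;
 *		bytes = txq->stats.bytes;
 *	} while (u64_stats_fetch_retry(&txq->stats.syncp, start));
 */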

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
};

/* SKB data and frags DMA mappings */
struct mana_skb_head {
	/* GSO packets may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
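
/*
 * Illustrative sketch (not from this header, assuming the driver reserves
 * MANA_HEADROOM via ndev->needed_headroom): the DMA mapping bookkeeping
 * for an SKB is stashed at the start of its own buffer, so the TX
 * completion path can unmap without a separate allocation.
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = dma_map_single(dev, skb->data, len,
 *					    DMA_TO_DEVICE);
 *	ash->size[0] = len;
 */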

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
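
/*
 * Illustrative sketch (not from this header): filling the short-format
 * OOB for a TCP packet with checksum offload. Field usage is inferred
 * from the definitions above; txq and cq are hypothetical locals.
 *
 *	struct mana_tx_oob tx_oob = {};
 *
 *	tx_oob.s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
 *	tx_oob.s_oob.vcq_num = cq->gdma_id;
 *	tx_oob.s_oob.vsq_frame = txq->vsq_frame;
 *	tx_oob.s_oob.short_vp_offset = txq->vp_offset;
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 *		tx_oob.s_oob.is_outer_ipv4 = 1;
 *		tx_oob.s_oob.comp_tcp_csum = 1;
 *		tx_oob.s_oob.trans_off = skb_transport_offset(skb);
 *	}
 */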

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
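
/*
 * Illustrative sketch (not from this header): mapping the hardware hash
 * type to the kernel's notion of hash quality when building the SKB.
 * Here oob points at the struct mana_rxcomp_oob defined below.
 *
 *	if (oob->ppi[0].pkt_hash) {
 *		if (oob->rx_hashtype & MANA_HASH_L4)
 *			skb_set_hash(skb, oob->ppi[0].pkt_hash,
 *				     PKT_HASH_TYPE_L4);
 *		else if (oob->rx_hashtype & MANA_HASH_L3)
 *			skb_set_hash(skb, oob->ppi[0].pkt_hash,
 *				     PKT_HASH_TYPE_L3);
 *	}
 */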

struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
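
/*
 * Illustrative sketch (not from this header): translating the checksum
 * status bits above into skb->ip_summed on the RX path, and surfacing a
 * stripped VLAN tag to the stack.
 *
 *	if (oob->rx_iphdr_csum_succeed &&
 *	    (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 *
 *	if (oob->rx_vlantag_present)
 *		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 *				       oob->rx_vlan_id);
 */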

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
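
/*
 * Illustrative sketch (not from this header): sizing an RX buffer for a
 * given MTU. XDP headroom is only reserved while the MTU still lets the
 * frame plus padding fit in a single page.
 *
 *	u32 headroom, alloc_size, datasize;
 *
 *	headroom = (mtu > MANA_XDP_MTU_MAX) ? 0 : XDP_PACKET_HEADROOM;
 *	datasize = mtu + ETH_HLEN;
 *	alloc_size = SKB_DATA_ALIGN(headroom + mtu + MANA_RXBUF_PAD);
 */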

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
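
/*
 * Illustrative sketch (not from this header): because rx_oobs[] is a
 * flexible array annotated with __counted_by(num_rx_buf), the queue is
 * allocated in one shot with struct_size() and the counter must be set
 * before the array is indexed.
 *
 *	struct mana_rxq *rxq;
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
 *		      GFP_KERNEL);
 *	if (!rxq)
 *		return NULL;
 *	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
 */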

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};
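
/*
 * Illustrative sketch (not from this header): a default spread of the
 * RSS indirection table over the active queues, using the generic
 * ethtool helper (index modulo queue count).
 *
 *	u32 i;
 *
 *	for (i = 0; i < apc->indir_table_sz; i++)
 *		apc->indir_table[i] =
 *			ethtool_rxfh_indir_default(i, apc->num_queues);
 */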

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ  0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Configure vPort RX Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */
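
/*
 * Illustrative sketch (not from this header): indir_tab[] is a flexible
 * array counted by num_indir_entries, so the request is sized with
 * struct_size() and the count is written before the table is filled.
 * num_entries is a hypothetical local.
 *
 *	struct mana_cfg_rx_steer_req_v2 *req;
 *
 *	req = kzalloc(struct_size(req, indir_tab, num_entries), GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->num_indir_entries = num_entries;
 *	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
 *					 indir_tab);
 */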

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* RX discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* RX bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* TX errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS		\
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT			\
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* TX bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* TX error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
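
/*
 * Illustrative sketch (not from this header): requesting a subset of the
 * GF stats by OR-ing the flags above into req_stats before issuing
 * MANA_QUERY_GF_STAT.
 *
 *	struct mana_query_gf_stat_req req = {};
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_TX_BYTES |
 *			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
 */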

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index);
#endif /* _MANA_H */