/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __ECORE_HSI_COMMON__
#define __ECORE_HSI_COMMON__
/********************************/
/* Add include to common target */
/********************************/
#include "common_hsi.h"

/*
 * opcodes for the event ring
 */
enum common_event_opcode
{
	COMMON_EVENT_PF_START,
	COMMON_EVENT_PF_STOP,
	COMMON_EVENT_VF_START,
	COMMON_EVENT_VF_STOP,
	COMMON_EVENT_VF_PF_CHANNEL,
	COMMON_EVENT_VF_FLR,
	COMMON_EVENT_PF_UPDATE,
	COMMON_EVENT_MALICIOUS_VF,
	COMMON_EVENT_RL_UPDATE,
	COMMON_EVENT_EMPTY,
	MAX_COMMON_EVENT_OPCODE
};

/*
 * Common Ramrod Command IDs
 */
enum common_ramrod_cmd_id
{
	COMMON_RAMROD_UNUSED,
	COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
	COMMON_RAMROD_VF_START /* VF Function Start */,
	COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
	COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
	COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
	COMMON_RAMROD_EMPTY /* Empty Ramrod */,
	MAX_COMMON_RAMROD_CMD_ID
};

/*
 * How ll2 should deal with packet upon errors
 */
enum core_error_handle
{
	LL2_DROP_PACKET /* If error occurs drop packet */,
	LL2_DO_NOTHING /* If error occurs do nothing */,
	LL2_ASSERT /* If error occurs assert */,
	MAX_CORE_ERROR_HANDLE
};

/*
 * opcodes for the event ring
 */
enum core_event_opcode
{
	CORE_EVENT_TX_QUEUE_START,
	CORE_EVENT_TX_QUEUE_STOP,
	CORE_EVENT_RX_QUEUE_START,
	CORE_EVENT_RX_QUEUE_STOP,
	CORE_EVENT_RX_QUEUE_FLUSH,
	CORE_EVENT_TX_QUEUE_UPDATE,
	MAX_CORE_EVENT_OPCODE
};

/*
 * The L4 pseudo checksum mode for Core
 */
enum core_l4_pseudo_checksum_mode
{
	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH /* Pseudo Checksum on packet is calculated with the correct packet length. */,
	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH /* Pseudo Checksum on packet is calculated with zero length. */,
	MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};

/*
 * Light-L2 (GSI) port statistics in Tstorm RAM
 */
struct core_ll2_port_stats
{
	struct regpair gsi_invalid_hdr;
	struct regpair gsi_invalid_pkt_length;
	struct regpair gsi_unsupported_pkt_typ;
	struct regpair gsi_crcchksm_error;
};

/*
 * Ethernet TX Per Queue Stats
 */
struct core_ll2_pstorm_per_queue_stat
{
	struct regpair sent_ucast_bytes /* number of total bytes sent without errors */;
	struct regpair sent_mcast_bytes /* number of total bytes sent without errors */;
	struct regpair sent_bcast_bytes /* number of total bytes sent without errors */;
	struct regpair sent_ucast_pkts /* number of total packets sent without errors */;
	struct regpair sent_mcast_pkts /* number of total packets sent without errors */;
	struct regpair sent_bcast_pkts /* number of total packets sent without errors */;
};

/*
 * Light-L2 RX Producers in Tstorm RAM
 */
struct core_ll2_rx_prod
{
	__le16 bd_prod /* BD Producer */;
	__le16 cqe_prod /* CQE Producer */;
	__le32 reserved;
};

struct core_ll2_tstorm_per_queue_stat
{
	struct regpair packet_too_big_discard /* Number of packets discarded because they are bigger than MTU */;
	struct regpair no_buff_discard /* Number of packets discarded due to lack of host buffers */;
};

struct core_ll2_ustorm_per_queue_stat
{
	struct regpair rcv_ucast_bytes;
	struct regpair rcv_mcast_bytes;
	struct regpair rcv_bcast_bytes;
	struct regpair rcv_ucast_pkts;
	struct regpair rcv_mcast_pkts;
	struct regpair rcv_bcast_pkts;
};

/*
 * Core Ramrod Command IDs (light L2)
 */
enum core_ramrod_cmd_id
{
	CORE_RAMROD_UNUSED,
	CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
	CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
	CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
	CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
	CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
	CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
	MAX_CORE_RAMROD_CMD_ID
};

/*
 * Core RoCE flavor for Light L2
 */
enum core_roce_flavor_type
{
	CORE_ROCE,
	CORE_RROCE,
	MAX_CORE_ROCE_FLAVOR_TYPE
};

/*
 * Specifies how ll2 should deal with packets errors: packet_too_big and no_buff
 */
struct core_rx_action_on_error
{
	u8 error_type;
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK  0x3 /* ll2 how to handle error packet_too_big (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK         0x3 /* ll2 how to handle error with no_buff  (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT        2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK        0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT       4
};

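/*
 * Illustrative sketch (not part of the HSI): shows how the *_MASK / *_SHIFT
 * pairs above are meant to be combined when composing the error_type byte.
 * The open-coded shift/mask below is equivalent to what a SET_FIELD()-style
 * helper (assumed to exist elsewhere in the ecore headers) would do.
 */
static inline void
ecore_example_set_rx_error_actions(struct core_rx_action_on_error *act,
				   enum core_error_handle too_big,
				   enum core_error_handle no_buff)
{
	act->error_type = 0;
	act->error_type |= ((u8)too_big & CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK) <<
	    CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT;
	act->error_type |= ((u8)no_buff & CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK) <<
	    CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT;
}
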
/*
 * Core RX BD for Light L2
 */
struct core_rx_bd
{
	struct regpair addr;
	__le16 reserved[4];
};

/*
 * Core RX CM offload BD for Light L2
 */
struct core_rx_bd_with_buff_len
{
	struct regpair addr;
	__le16 buff_length;
	__le16 reserved[3];
};

/*
 * Core RX BD union for Light L2
 */
union core_rx_bd_union
{
	struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
	struct core_rx_bd_with_buff_len rx_bd_with_len /* Core Rx Bd with dynamic buffer length */;
};

/*
 * Opaque Data for Light L2 RX CQE .
 */
struct core_rx_cqe_opaque_data
{
	__le32 data[2] /* Opaque CQE Data */;
};

/*
 * Core RX CQE Type for Light L2
 */
enum core_rx_cqe_type
{
	CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */,
	CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */,
	CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */,
	CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */,
	MAX_CORE_RX_CQE_TYPE
};

/*
 * Core RX CQE for Light L2 .
 */
struct core_rx_fast_path_cqe
{
	u8 type /* CQE type (use enum core_rx_cqe_type) */;
	u8 placement_offset /* Offset (in bytes) of the packet from start of the buffer */;
	struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
	__le16 packet_length /* Total packet length (from the parser) */;
	__le16 vlan /* 802.1q VLAN tag */;
	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
	struct parsing_err_flags err_flags /* bitmap: each bit represents a specific error. Error indications are provided by the cracker; see spec for detailed description */;
	__le16 reserved0;
	__le32 reserved1[3];
};

/*
 * Core Rx CM offload CQE .
 */
struct core_rx_gsi_offload_cqe
{
	u8 type /* CQE type (use enum core_rx_cqe_type) */;
	u8 data_length_error /* set if gsi data is bigger than buff */;
	struct parsing_and_err_flags parse_flags /* Parsing and error flags from the parser */;
	__le16 data_length /* Total packet length (from the parser) */;
	__le16 vlan /* 802.1q VLAN tag */;
	__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
	__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
	__le16 qp_id /* These are the lower 16 bit of QP id in RoCE BTH header */;
	__le32 src_qp /* Source QP from DETH header */;
	__le32 reserved[3];
};

/*
 * Core RX CQE for Light L2 .
 */
struct core_rx_slow_path_cqe
{
	u8 type /* CQE type (use enum core_rx_cqe_type) */;
	u8 ramrod_cmd_id;
	__le16 echo;
	struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
	__le32 reserved1[5];
};

/*
 * Core RX CQE union for Light L2
 */
union core_rx_cqe_union
{
	struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
	struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
	struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
};

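/*
 * Illustrative sketch (not part of the HSI): the leading type byte is common
 * to all three CQE layouts, so a receive handler can switch on it before
 * touching type-specific fields. Byte-order conversion of the returned value
 * is left to the caller.
 */
static inline __le16
ecore_example_rx_cqe_data_length(const union core_rx_cqe_union *cqe)
{
	switch ((enum core_rx_cqe_type)cqe->rx_cqe_fp.type) {
	case CORE_RX_CQE_TYPE_REGULAR:
		return cqe->rx_cqe_fp.packet_length;
	case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		return cqe->rx_cqe_gsi.data_length;
	default:
		return 0; /* slow path / illegal type carries no data length */
	}
}
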
/*
 * Ramrod data for rx queue start ramrod
 */
struct core_rx_start_ramrod_data
{
	struct regpair bd_base /* bd address of the first bd page */;
	struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
	__le16 mtu /* Maximum transmission unit */;
	__le16 sb_id /* Status block ID */;
	u8 sb_index /* Status block protocol index */;
	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
	u8 complete_event_flg /* post completion to the event ring if set */;
	u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
	__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
	u8 inner_vlan_stripping_en /* if set, 802.1q tags will be removed and copied to CQE */;
	u8 report_outer_vlan /* if set and an inner vlan does not exist, the outer vlan will be copied to the CQE as the inner vlan. Should be used in MF_OVLAN mode only. */;
	u8 queue_id /* Light L2 RX Queue ID */;
	u8 main_func_queue /* Is this the main queue for the PF */;
	u8 mf_si_bcast_accept_all /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */;
	u8 mf_si_mcast_accept_all /* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if main_func_queue is set. */;
	struct core_rx_action_on_error action_on_error /* Specifies how ll2 should deal with packets errors: packet_too_big and no_buff */;
	u8 gsi_offload_flag /* set when in GSI offload mode on ROCE connection */;
	u8 reserved[6];
};

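/*
 * Illustrative sketch (not part of the HSI): a minimal population of the RX
 * queue start ramrod data. The chosen values are placeholders; a real driver
 * fills them from its status-block and queue bookkeeping and converts the
 * multi-byte fields to little-endian.
 */
static inline void
ecore_example_init_rx_start(struct core_rx_start_ramrod_data *p_data,
			    __le16 mtu, __le16 sb_id, u8 queue_id)
{
	p_data->mtu = mtu;
	p_data->sb_id = sb_id;
	p_data->sb_index = 0;           /* protocol index within the status block */
	p_data->queue_id = queue_id;
	p_data->complete_event_flg = 1; /* complete on the event ring */
	p_data->complete_cqe_flg = 0;   /* no CQE-ring completion */
	p_data->drop_ttl0_flg = 0;
	p_data->inner_vlan_stripping_en = 1;
}
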
/*
 * Ramrod data for rx queue stop ramrod
 */
struct core_rx_stop_ramrod_data
{
	u8 complete_cqe_flg /* post completion to the CQE ring if set */;
	u8 complete_event_flg /* post completion to the event ring if set */;
	u8 queue_id /* Light L2 RX Queue ID */;
	u8 reserved1;
	__le16 reserved2[2];
};

/*
 * Flags for Core TX BD
 */
struct core_tx_bd_data
{
	__le16 as_bitfield;
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK         0x1 /* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT        0
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK          0x1 /* Insert VLAN into packet. Cannot be set for LB packets (tx_dst == CORE_TX_DEST_LB) */
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT         1
#define CORE_TX_BD_DATA_START_BD_MASK                0x1 /* This is the first BD of the packet (for debug) */
#define CORE_TX_BD_DATA_START_BD_SHIFT               2
#define CORE_TX_BD_DATA_IP_CSUM_MASK                 0x1 /* Calculate the IP checksum for the packet */
#define CORE_TX_BD_DATA_IP_CSUM_SHIFT                3
#define CORE_TX_BD_DATA_L4_CSUM_MASK                 0x1 /* Calculate the L4 checksum for the packet */
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT                4
#define CORE_TX_BD_DATA_IPV6_EXT_MASK                0x1 /* Packet is IPv6 with extensions */
#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT               5
#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK             0x1 /* If IPv6+ext and l4_csum is 1, then this field indicates the L4 protocol: 0-TCP, 1-UDP */
#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT            6
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK     0x1 /* The pseudo checksum mode to place in the L4 checksum field. Required only when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode) */
#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT    7
#define CORE_TX_BD_DATA_NBDS_MASK                    0xF /* Number of BDs that make up one packet - width wide enough to present CORE_LL2_TX_MAX_BDS_PER_PACKET */
#define CORE_TX_BD_DATA_NBDS_SHIFT                   8
#define CORE_TX_BD_DATA_ROCE_FLAV_MASK               0x1 /* RoCE flavor (use enum core_roce_flavor_type) - differentiates between RoCE flavors; valid only when connType is ROCE */
#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT              12
#define CORE_TX_BD_DATA_IP_LEN_MASK                  0x1 /* Calculate ip length */
#define CORE_TX_BD_DATA_IP_LEN_SHIFT                 13
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK  0x1 /* disables the STAG insertion, relevant only in MF OVLAN mode. */
#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
#define CORE_TX_BD_DATA_RESERVED0_MASK               0x1
#define CORE_TX_BD_DATA_RESERVED0_SHIFT              15
};

/*
 * Core TX BD for Light L2
 */
struct core_tx_bd
{
	struct regpair addr /* Buffer Address */;
	__le16 nbytes /* Number of Bytes in Buffer */;
	__le16 nw_vlan_or_lb_echo /* Network packets: VLAN to insert to packet (if insertion flag set) LoopBack packets: echo data to pass to Rx */;
	struct core_tx_bd_data bd_data /* BD flags */;
	__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK  0x3FFF /* L4 Header Offset from start of packet (in Words). This is needed if both l4_csum and ipv6_ext are set */
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK           0x3 /* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
#define CORE_TX_BD_TX_DST_SHIFT          14
};

/*
 * Light L2 TX Destination
 */
enum core_tx_dest
{
	CORE_TX_DEST_NW /* TX Destination to the Network */,
	CORE_TX_DEST_LB /* TX Destination to the Loopback */,
	CORE_TX_DEST_RESERVED,
	CORE_TX_DEST_DROP /* TX Drop */,
	MAX_CORE_TX_DEST
};

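/*
 * Illustrative sketch (not part of the HSI): composing a single-fragment TX
 * BD using the mask/shift pairs above. A real driver typically goes through
 * a SET_FIELD()-style helper and converts multi-byte fields to little-endian;
 * both are elided here, so this shows intent rather than a wire-exact encoding.
 */
static inline void
ecore_example_fill_tx_bd(struct core_tx_bd *bd, struct regpair buf_addr,
			 __le16 nbytes)
{
	bd->addr = buf_addr;
	bd->nbytes = nbytes;
	bd->nw_vlan_or_lb_echo = 0; /* no VLAN insertion requested */
	bd->bd_data.as_bitfield =
	    (1 << CORE_TX_BD_DATA_START_BD_SHIFT) | /* first (and only) BD */
	    (1 << CORE_TX_BD_DATA_NBDS_SHIFT);      /* one BD for the whole packet */
	bd->bitfield1 = CORE_TX_DEST_NW << CORE_TX_BD_TX_DST_SHIFT;
}
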
/*
 * Ramrod data for tx queue start ramrod
 */
struct core_tx_start_ramrod_data
{
	struct regpair pbl_base_addr /* Address of the pbl page */;
	__le16 mtu /* Maximum transmission unit */;
	__le16 sb_id /* Status block ID */;
	u8 sb_index /* Status block protocol index */;
	u8 stats_en /* Statistics Enable */;
	u8 stats_id /* Statistics Counter ID */;
	u8 conn_type /* connection type that loaded ll2 (use enum protocol_type) */;
	__le16 pbl_size /* Number of BD pages pointed by PBL */;
	__le16 qm_pq_id /* QM PQ ID */;
	u8 gsi_offload_flag /* set when in GSI offload mode on ROCE connection */;
	u8 resrved[3];
};

/*
 * Ramrod data for tx queue stop ramrod
 */
struct core_tx_stop_ramrod_data
{
	__le32 reserved0[2];
};

/*
 * Ramrod data for tx queue update ramrod
 */
struct core_tx_update_ramrod_data
{
	u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
	u8 reserved0;
	__le16 qm_pq_id /* Updated QM PQ ID */;
	__le32 reserved1[1];
};

/*
 * Enum flag for what type of DCB data to update
 */
enum dcb_dscp_update_mode
{
	DONT_UPDATE_DCB_DSCP /* use when no change should be done to DCB data */,
	UPDATE_DCB /* use to update only L2 (vlan) priority */,
	UPDATE_DSCP /* use to update only IP DSCP */,
	UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
	MAX_DCB_DSCP_UPDATE_MODE
};

/*
 * The core storm context for the Ystorm
 */
struct ystorm_core_conn_st_ctx
{
	__le32 reserved[4];
};

/*
 * The core storm context for the Pstorm
 */
struct pstorm_core_conn_st_ctx
{
	__le32 reserved[4];
};

/*
 * Core Slowpath Connection storm context of Xstorm
 */
struct xstorm_core_conn_st_ctx
{
	__le32 spq_base_lo /* SPQ Ring Base Address low dword */;
	__le32 spq_base_hi /* SPQ Ring Base Address high dword */;
	struct regpair consolid_base_addr /* Consolidation Ring Base Address */;
	__le16 spq_cons /* SPQ Ring Consumer */;
	__le16 consolid_cons /* Consolidation Ring Consumer */;
	__le32 reserved0[55] /* Pad to 15 cycles */;
};

struct e4_xstorm_core_conn_ag_ctx
{
	u8 reserved0 /* cdu_validation */;
	u8 state /* state */;
	u8 flags0;
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1 /* cf_array_active */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
	u8 flags1;
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
	u8 flags2;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3 /* timer_stop_all */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
	u8 flags3;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
	u8 flags4;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
	u8 flags5;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
	u8 flags6;
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
	u8 flags7;
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
	u8 flags8;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
	u8 flags9;
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1 /* cf_array_cf_en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
	u8 flags10;
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
	u8 flags11;
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
	u8 flags12;
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
	u8 flags13;
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
	u8 flags14;
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
	u8 byte2 /* byte2 */;
	__le16 physical_q0 /* physical_q0 */;
	__le16 consolid_prod /* physical_q1 */;
	__le16 reserved16 /* physical_q2 */;
	__le16 tx_bd_cons /* word3 */;
	__le16 tx_bd_or_spq_prod /* word4 */;
	__le16 word5 /* word5 */;
	__le16 conn_dpi /* conn_dpi */;
	u8 byte3 /* byte3 */;
	u8 byte4 /* byte4 */;
	u8 byte5 /* byte5 */;
	u8 byte6 /* byte6 */;
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le32 reg4 /* reg4 */;
	__le32 reg5 /* cf_array0 */;
	__le32 reg6 /* cf_array1 */;
	__le16 word7 /* word7 */;
	__le16 word8 /* word8 */;
	__le16 word9 /* word9 */;
	__le16 word10 /* word10 */;
	__le32 reg7 /* reg7 */;
	__le32 reg8 /* reg8 */;
	__le32 reg9 /* reg9 */;
	u8 byte7 /* byte7 */;
	u8 byte8 /* byte8 */;
	u8 byte9 /* byte9 */;
	u8 byte10 /* byte10 */;
	u8 byte11 /* byte11 */;
	u8 byte12 /* byte12 */;
	u8 byte13 /* byte13 */;
	u8 byte14 /* byte14 */;
	u8 byte15 /* byte15 */;
	u8 e5_reserved /* e5_reserved */;
	__le16 word11 /* word11 */;
	__le32 reg10 /* reg10 */;
	__le32 reg11 /* reg11 */;
	__le32 reg12 /* reg12 */;
	__le32 reg13 /* reg13 */;
	__le32 reg14 /* reg14 */;
	__le32 reg15 /* reg15 */;
	__le32 reg16 /* reg16 */;
	__le32 reg17 /* reg17 */;
	__le32 reg18 /* reg18 */;
	__le32 reg19 /* reg19 */;
	__le16 word12 /* word12 */;
	__le16 word13 /* word13 */;
	__le16 word14 /* word14 */;
	__le16 word15 /* word15 */;
};

struct e4_tstorm_core_conn_ag_ctx
{
	u8 byte0 /* cdu_validation */;
	u8 byte1 /* state */;
	u8 flags0;
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1 /* bit2 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1 /* bit3 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1 /* bit4 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1 /* bit5 */
#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
	u8 flags1;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
	u8 flags2;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3 /* cf7 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3 /* cf8 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
	u8 flags3;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3 /* cf9 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3 /* cf10 */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
	u8 flags4;
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1 /* cf7en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1 /* cf8en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1 /* cf9en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1 /* cf10en */
#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
	u8 flags5;
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le32 reg4 /* reg4 */;
	__le32 reg5 /* reg5 */;
	__le32 reg6 /* reg6 */;
	__le32 reg7 /* reg7 */;
	__le32 reg8 /* reg8 */;
	u8 byte2 /* byte2 */;
	u8 byte3 /* byte3 */;
	__le16 word0 /* word0 */;
	u8 byte4 /* byte4 */;
	u8 byte5 /* byte5 */;
	__le16 word1 /* word1 */;
	__le16 word2 /* conn_dpi */;
	__le16 word3 /* word3 */;
	__le32 reg9 /* reg9 */;
	__le32 reg10 /* reg10 */;
};

struct e4_ustorm_core_conn_ag_ctx
{
	u8 reserved /* cdu_validation */;
	u8 byte1 /* state */;
	u8 flags0;
#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* timer0cf */
#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* timer1cf */
#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* timer2cf */
#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
	u8 flags1;
#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3 /* timer_stop_all */
#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3 /* cf4 */
#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3 /* cf5 */
#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3 /* cf6 */
#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
	u8 flags2;
#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1 /* cf3en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1 /* cf4en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1 /* cf5en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1 /* cf6en */
#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
	u8 flags3;
#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1 /* rule5en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1 /* rule6en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1 /* rule7en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1 /* rule8en */
#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
	u8 byte2 /* byte2 */;
	u8 byte3 /* byte3 */;
	__le16 word0 /* conn_dpi */;
	__le16 word1 /* word1 */;
	__le32 rx_producers /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le16 word2 /* word2 */;
	__le16 word3 /* word3 */;
};

/*
 * The core storm context for the Mstorm
 */
struct mstorm_core_conn_st_ctx
{
	__le32 reserved[24];
};

/*
 * The core storm context for the Ustorm
 */
struct ustorm_core_conn_st_ctx
{
	__le32 reserved[4];
};

/*
 * core connection context
 */
struct e4_core_conn_context
{
	struct ystorm_core_conn_st_ctx ystorm_st_context /* ystorm storm context */;
	struct regpair ystorm_st_padding[2] /* padding */;
	struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
	struct regpair pstorm_st_padding[2] /* padding */;
	struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
	struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
	struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
	struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
	struct mstorm_core_conn_st_ctx mstorm_st_context /* mstorm storm context */;
	struct ustorm_core_conn_st_ctx ustorm_st_context /* ustorm storm context */;
	struct regpair ustorm_st_padding[2] /* padding */;
};

struct e5_xstorm_core_conn_ag_ctx
{
	u8 reserved0 /* cdu_validation */;
	u8 state_and_core_id /* state_and_core_id */;
	u8 flags0;
#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1 /* exist_in_qm0 */
#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1 /* exist_in_qm1 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1 /* exist_in_qm2 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1 /* exist_in_qm3 */
#define E5_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1 /* bit4 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1 /* cf_array_active */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1 /* bit6 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1 /* bit7 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
	u8 flags1;
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1 /* bit8 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1 /* bit9 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1 /* bit10 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1 /* bit11 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1 /* bit12 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1 /* bit13 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
#define E5_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1 /* bit14 */
#define E5_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1 /* bit15 */
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
	u8 flags2;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3 /* timer0cf */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3 /* timer1cf */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3 /* timer2cf */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3 /* timer_stop_all */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
	u8 flags3;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3 /* cf4 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3 /* cf5 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3 /* cf6 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3 /* cf7 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
	u8 flags4;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3 /* cf8 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3 /* cf9 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3 /* cf10 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3 /* cf11 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
	u8 flags5;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3 /* cf12 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3 /* cf13 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3 /* cf14 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3 /* cf15 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
	u8 flags6;
#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3 /* cf16 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3 /* cf_array_cf */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3 /* cf18 */
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3 /* cf19 */
#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
	u8 flags7;
#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3 /* cf20 */
#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3 /* cf21 */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3 /* cf22 */
#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1 /* cf0en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
#define E5_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1 /* cf1en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
	u8 flags8;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1 /* cf2en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1 /* cf3en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
#define E5_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1 /* cf4en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1 /* cf5en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
#define E5_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1 /* cf6en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1 /* cf7en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
#define E5_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1 /* cf8en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
#define E5_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1 /* cf9en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
	u8 flags9;
#define E5_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1 /* cf10en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
#define E5_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1 /* cf11en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
#define E5_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1 /* cf12en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
#define E5_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1 /* cf13en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
#define E5_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1 /* cf14en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1 /* cf15en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1 /* cf16en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
#define E5_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1 /* cf_array_cf_en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
	u8 flags10;
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1 /* cf18en */
#define E5_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1 /* cf19en */
#define E5_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1 /* cf20en */
#define E5_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1 /* cf21en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1 /* cf22en */
#define E5_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
#define E5_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1 /* cf23en */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1 /* rule0en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1 /* rule1en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
	u8 flags11;
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1 /* rule2en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1 /* rule3en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
#define E5_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1 /* rule4en */
#define E5_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1 /* rule5en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1 /* rule6en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1 /* rule7en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1 /* rule8en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1 /* rule9en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
	u8 flags12;
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1 /* rule10en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1 /* rule11en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1 /* rule12en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1 /* rule13en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1 /* rule14en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1 /* rule15en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1 /* rule16en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1 /* rule17en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
	u8 flags13;
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1 /* rule18en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1 /* rule19en */
#define E5_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1 /* rule20en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1 /* rule21en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1 /* rule22en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1 /* rule23en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1 /* rule24en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1 /* rule25en */
#define E5_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
	u8 flags14;
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1 /* bit16 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1 /* bit17 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1 /* bit18 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1 /* bit19 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1 /* bit20 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1 /* bit21 */
#define E5_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
#define E5_XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3 /* cf23 */
#define E5_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
	u8 byte2 /* byte2 */;
	__le16 physical_q0 /* physical_q0 */;
	__le16 consolid_prod /* physical_q1 */;
	__le16 reserved16 /* physical_q2 */;
	__le16 tx_bd_cons /* word3 */;
	__le16 tx_bd_or_spq_prod /* word4 */;
	__le16 word5 /* word5 */;
	__le16 conn_dpi /* conn_dpi */;
	u8 byte3 /* byte3 */;
	u8 byte4 /* byte4 */;
	u8 byte5 /* byte5 */;
	u8 byte6 /* byte6 */;
	__le32 reg0 /* reg0 */;
	__le32 reg1 /* reg1 */;
	__le32 reg2 /* reg2 */;
	__le32 reg3 /* reg3 */;
	__le32 reg4 /* reg4 */;
	__le32 reg5 /* cf_array0 */;
	__le32 reg6 /* cf_array1 */;
	u8 flags15;
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK         0x1 /* bit22 */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT        0
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK         0x1 /* bit23 */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT        1
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK         0x1 /* bit24 */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT        2
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK         0x3 /* cf24 */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT        3
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK         0x1 /* cf24en */
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT        5
#define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK         0x1 /* rule26en */
1189 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT        6
1190 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_MASK         0x1 /* rule27en */
1191 #define E5_XSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_SHIFT        7
1192 	u8 byte7 /* byte7 */;
1193 	__le16 word7 /* word7 */;
1194 	__le16 word8 /* word8 */;
1195 	__le16 word9 /* word9 */;
1196 	__le16 word10 /* word10 */;
1197 	__le16 word11 /* word11 */;
1198 	__le32 reg7 /* reg7 */;
1199 	__le32 reg8 /* reg8 */;
1200 	__le32 reg9 /* reg9 */;
1201 	u8 byte8 /* byte8 */;
1202 	u8 byte9 /* byte9 */;
1203 	u8 byte10 /* byte10 */;
1204 	u8 byte11 /* byte11 */;
1205 	u8 byte12 /* byte12 */;
1206 	u8 byte13 /* byte13 */;
1207 	u8 byte14 /* byte14 */;
1208 	u8 byte15 /* byte15 */;
1209 	__le32 reg10 /* reg10 */;
1210 	__le32 reg11 /* reg11 */;
1211 	__le32 reg12 /* reg12 */;
1212 	__le32 reg13 /* reg13 */;
1213 	__le32 reg14 /* reg14 */;
1214 	__le32 reg15 /* reg15 */;
1215 	__le32 reg16 /* reg16 */;
1216 	__le32 reg17 /* reg17 */;
1217 	__le32 reg18 /* reg18 */;
1218 	__le32 reg19 /* reg19 */;
1219 	__le16 word12 /* word12 */;
1220 	__le16 word13 /* word13 */;
1221 	__le16 word14 /* word14 */;
1222 	__le16 word15 /* word15 */;
1223 };
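
/*
 * Usage sketch (the helper names below are illustrative, not part of the
 * HSI): each MASK/SHIFT pair above describes one sub-field of the flagsN
 * byte that precedes it, so a field is read by shifting right and masking,
 * and written by clearing its bits and OR-ing in the new value. Example for
 * BIT16 in flags14 of the Xstorm aggregative context:
 */
static inline u8
e5_xstorm_core_conn_ag_ctx_get_bit16(const struct e5_xstorm_core_conn_ag_ctx *ctx)
{
	return (ctx->flags14 >> E5_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT) &
	    E5_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK;
}

static inline void
e5_xstorm_core_conn_ag_ctx_set_bit16(struct e5_xstorm_core_conn_ag_ctx *ctx, u8 val)
{
	/* clear the old value of the field, then place the new one */
	ctx->flags14 &= ~(E5_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK <<
	    E5_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT);
	ctx->flags14 |= (val & E5_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK) <<
	    E5_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT;
}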
1224 
1225 struct e5_tstorm_core_conn_ag_ctx
1226 {
1227 	u8 byte0 /* cdu_validation */;
1228 	u8 byte1 /* state_and_core_id */;
1229 	u8 flags0;
1230 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
1231 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT         0
1232 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
1233 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT         1
1234 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK          0x1 /* bit2 */
1235 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT         2
1236 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK          0x1 /* bit3 */
1237 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT         3
1238 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK          0x1 /* bit4 */
1239 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT         4
1240 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK          0x1 /* bit5 */
1241 #define E5_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT         5
1242 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
1243 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT          6
1244 	u8 flags1;
1245 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
1246 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT          0
1247 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
1248 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT          2
1249 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
1250 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT          4
1251 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
1252 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT          6
1253 	u8 flags2;
1254 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
1255 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT          0
1256 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
1257 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT          2
1258 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7_MASK           0x3 /* cf7 */
1259 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT          4
1260 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8_MASK           0x3 /* cf8 */
1261 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT          6
1262 	u8 flags3;
1263 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9_MASK           0x3 /* cf9 */
1264 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT          0
1265 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10_MASK          0x3 /* cf10 */
1266 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT         2
1267 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
1268 #define E5_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT        4
1269 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
1270 #define E5_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT        5
1271 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
1272 #define E5_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT        6
1273 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
1274 #define E5_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT        7
1275 	u8 flags4;
1276 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
1277 #define E5_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT        0
1278 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
1279 #define E5_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT        1
1280 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
1281 #define E5_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT        2
1282 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK         0x1 /* cf7en */
1283 #define E5_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT        3
1284 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK         0x1 /* cf8en */
1285 #define E5_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT        4
1286 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK         0x1 /* cf9en */
1287 #define E5_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT        5
1288 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK        0x1 /* cf10en */
1289 #define E5_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT       6
1290 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
1291 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT      7
1292 	u8 flags5;
1293 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
1294 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT      0
1295 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
1296 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT      1
1297 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
1298 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT      2
1299 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
1300 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT      3
1301 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
1302 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT      4
1303 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
1304 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT      5
1305 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
1306 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT      6
1307 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
1308 #define E5_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT      7
1309 	u8 flags6;
1310 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit6 */
1311 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
1312 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit7 */
1313 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
1314 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK  0x1 /* bit8 */
1315 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
1316 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf11 */
1317 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
1318 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf11en */
1319 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
1320 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* rule9en */
1321 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
1322 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_MASK  0x1 /* rule10en */
1323 #define E5_TSTORM_CORE_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
1324 	u8 byte2 /* byte2 */;
1325 	__le16 word0 /* word0 */;
1326 	__le32 reg0 /* reg0 */;
1327 	__le32 reg1 /* reg1 */;
1328 	__le32 reg2 /* reg2 */;
1329 	__le32 reg3 /* reg3 */;
1330 	__le32 reg4 /* reg4 */;
1331 	__le32 reg5 /* reg5 */;
1332 	__le32 reg6 /* reg6 */;
1333 	__le32 reg7 /* reg7 */;
1334 	__le32 reg8 /* reg8 */;
1335 	u8 byte3 /* byte3 */;
1336 	u8 byte4 /* byte4 */;
1337 	u8 byte5 /* byte5 */;
1338 	u8 e4_reserved8 /* byte6 */;
1339 	__le16 word1 /* word1 */;
1340 	__le16 word2 /* conn_dpi */;
1341 	__le32 reg9 /* reg9 */;
1342 	__le16 word3 /* word3 */;
1343 	__le16 e4_reserved9 /* word4 */;
1344 };
1345 
1346 struct e5_ustorm_core_conn_ag_ctx
1347 {
1348 	u8 reserved /* cdu_validation */;
1349 	u8 byte1 /* state_and_core_id */;
1350 	u8 flags0;
1351 #define E5_USTORM_CORE_CONN_AG_CTX_BIT0_MASK          0x1 /* exist_in_qm0 */
1352 #define E5_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT         0
1353 #define E5_USTORM_CORE_CONN_AG_CTX_BIT1_MASK          0x1 /* exist_in_qm1 */
1354 #define E5_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT         1
1355 #define E5_USTORM_CORE_CONN_AG_CTX_CF0_MASK           0x3 /* timer0cf */
1356 #define E5_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT          2
1357 #define E5_USTORM_CORE_CONN_AG_CTX_CF1_MASK           0x3 /* timer1cf */
1358 #define E5_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT          4
1359 #define E5_USTORM_CORE_CONN_AG_CTX_CF2_MASK           0x3 /* timer2cf */
1360 #define E5_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT          6
1361 	u8 flags1;
1362 #define E5_USTORM_CORE_CONN_AG_CTX_CF3_MASK           0x3 /* timer_stop_all */
1363 #define E5_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT          0
1364 #define E5_USTORM_CORE_CONN_AG_CTX_CF4_MASK           0x3 /* cf4 */
1365 #define E5_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT          2
1366 #define E5_USTORM_CORE_CONN_AG_CTX_CF5_MASK           0x3 /* cf5 */
1367 #define E5_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT          4
1368 #define E5_USTORM_CORE_CONN_AG_CTX_CF6_MASK           0x3 /* cf6 */
1369 #define E5_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT          6
1370 	u8 flags2;
1371 #define E5_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK         0x1 /* cf0en */
1372 #define E5_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT        0
1373 #define E5_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK         0x1 /* cf1en */
1374 #define E5_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT        1
1375 #define E5_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK         0x1 /* cf2en */
1376 #define E5_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT        2
1377 #define E5_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK         0x1 /* cf3en */
1378 #define E5_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT        3
1379 #define E5_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK         0x1 /* cf4en */
1380 #define E5_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT        4
1381 #define E5_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK         0x1 /* cf5en */
1382 #define E5_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT        5
1383 #define E5_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK         0x1 /* cf6en */
1384 #define E5_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT        6
1385 #define E5_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK       0x1 /* rule0en */
1386 #define E5_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT      7
1387 	u8 flags3;
1388 #define E5_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK       0x1 /* rule1en */
1389 #define E5_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT      0
1390 #define E5_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK       0x1 /* rule2en */
1391 #define E5_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT      1
1392 #define E5_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK       0x1 /* rule3en */
1393 #define E5_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT      2
1394 #define E5_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK       0x1 /* rule4en */
1395 #define E5_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT      3
1396 #define E5_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK       0x1 /* rule5en */
1397 #define E5_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT      4
1398 #define E5_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK       0x1 /* rule6en */
1399 #define E5_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT      5
1400 #define E5_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK       0x1 /* rule7en */
1401 #define E5_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT      6
1402 #define E5_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK       0x1 /* rule8en */
1403 #define E5_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT      7
1404 	u8 flags4;
1405 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED1_MASK  0x1 /* bit2 */
1406 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
1407 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED2_MASK  0x1 /* bit3 */
1408 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
1409 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED3_MASK  0x3 /* cf7 */
1410 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
1411 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED4_MASK  0x3 /* cf8 */
1412 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED4_SHIFT 4
1413 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED5_MASK  0x1 /* cf7en */
1414 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED5_SHIFT 6
1415 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED6_MASK  0x1 /* cf8en */
1416 #define E5_USTORM_CORE_CONN_AG_CTX_E4_RESERVED6_SHIFT 7
1417 	u8 byte2 /* byte2 */;
1418 	__le16 word0 /* conn_dpi */;
1419 	__le16 word1 /* word1 */;
1420 	__le32 rx_producers /* reg0 */;
1421 	__le32 reg1 /* reg1 */;
1422 	__le32 reg2 /* reg2 */;
1423 	__le32 reg3 /* reg3 */;
1424 	__le16 word2 /* word2 */;
1425 	__le16 word3 /* word3 */;
1426 };
1427 
1428 /*
1429  * core connection context
1430  */
1431 struct e5_core_conn_context
1432 {
1433 	struct ystorm_core_conn_st_ctx ystorm_st_context /* ystorm storm context */;
1434 	struct regpair ystorm_st_padding[2] /* padding */;
1435 	struct pstorm_core_conn_st_ctx pstorm_st_context /* pstorm storm context */;
1436 	struct regpair pstorm_st_padding[2] /* padding */;
1437 	struct xstorm_core_conn_st_ctx xstorm_st_context /* xstorm storm context */;
1438 	struct regpair xstorm_st_padding[2] /* padding */;
1439 	struct e5_xstorm_core_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
1440 	struct e5_tstorm_core_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
1441 	struct e5_ustorm_core_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
1442 	struct mstorm_core_conn_st_ctx mstorm_st_context /* mstorm storm context */;
1443 	struct ustorm_core_conn_st_ctx ustorm_st_context /* ustorm storm context */;
1444 	struct regpair ustorm_st_padding[2] /* padding */;
1445 };
1446 
1447 struct eth_mstorm_per_pf_stat
1448 {
1449 	struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
1450 	struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
1451 	struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
1452 	struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
1453 };
1454 
1455 struct eth_mstorm_per_queue_stat
1456 {
1457 	struct regpair ttl0_discard /* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (in IPv6) */;
1458 	struct regpair packet_too_big_discard /* Number of packets discarded because they are bigger than MTU */;
1459 	struct regpair no_buff_discard /* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */;
1460 	struct regpair not_active_discard /* Number of packets discarded because of no active Rx connection */;
1461 	struct regpair tpa_coalesced_pkts /* number of coalesced packets in all TPA aggregations */;
1462 	struct regpair tpa_coalesced_events /* total number of TPA aggregations */;
1463 	struct regpair tpa_aborts_num /* number of aggregations that ended abnormally */;
1464 	struct regpair tpa_coalesced_bytes /* total TCP payload length in all TPA aggregations */;
1465 };
1466 
1467 /*
1468  * Ethernet TX Per PF
1469  */
1470 struct eth_pstorm_per_pf_stat
1471 {
1472 	struct regpair sent_lb_ucast_bytes /* number of total ucast bytes sent on loopback port without errors */;
1473 	struct regpair sent_lb_mcast_bytes /* number of total mcast bytes sent on loopback port without errors */;
1474 	struct regpair sent_lb_bcast_bytes /* number of total bcast bytes sent on loopback port without errors */;
1475 	struct regpair sent_lb_ucast_pkts /* number of total ucast packets sent on loopback port without errors */;
1476 	struct regpair sent_lb_mcast_pkts /* number of total mcast packets sent on loopback port without errors */;
1477 	struct regpair sent_lb_bcast_pkts /* number of total bcast packets sent on loopback port without errors */;
1478 	struct regpair sent_gre_bytes /* Sent GRE bytes */;
1479 	struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
1480 	struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
1481 	struct regpair sent_gre_pkts /* Sent GRE packets */;
1482 	struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
1483 	struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
1484 	struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
1485 	struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
1486 	struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
1487 };
1488 
1489 /*
1490  * Ethernet TX Per Queue Stats
1491  */
1492 struct eth_pstorm_per_queue_stat
1493 {
1494 	struct regpair sent_ucast_bytes /* number of total bytes sent without errors */;
1495 	struct regpair sent_mcast_bytes /* number of total bytes sent without errors */;
1496 	struct regpair sent_bcast_bytes /* number of total bytes sent without errors */;
1497 	struct regpair sent_ucast_pkts /* number of total packets sent without errors */;
1498 	struct regpair sent_mcast_pkts /* number of total packets sent without errors */;
1499 	struct regpair sent_bcast_pkts /* number of total packets sent without errors */;
1500 	struct regpair error_drop_pkts /* number of total packets dropped due to errors */;
1501 };
1502 
1503 /*
1504  * ETH Rx rate limit data
1505  */
1506 struct eth_rx_rate_limit
1507 {
1508 	__le16 mult /* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */;
1509 	__le16 cnst /* Constant term to add to (or subtract from) the number of cycles */;
1510 	u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
1511 	u8 reserved0;
1512 	__le16 reserved1;
1513 };
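
/*
 * Worked example (hypothetical helper; the clock and bandwidth figures are
 * assumptions chosen only to illustrate the formula in the mult comment
 * above): with a 1000 MHz storm clock and a desired bandwidth of 4000 MB/s,
 * mult = 1000 * 8 / 4000 = 2. Multi-byte fields are shown host-endian for
 * brevity; real code must convert them to little-endian.
 */
static inline void eth_rx_rate_limit_example_fill(struct eth_rx_rate_limit *rl)
{
	rl->mult = 2;		/* 1000 MHz * 8 / 4000 MB/s */
	rl->cnst = 0;		/* no constant correction term */
	rl->add_sub_cnst = 1;	/* add (rather than subtract) the constant term */
	rl->reserved0 = 0;
	rl->reserved1 = 0;
}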
1514 
1515 struct eth_ustorm_per_pf_stat
1516 {
1517 	struct regpair rcv_lb_ucast_bytes /* number of total ucast bytes received on loopback port without errors */;
1518 	struct regpair rcv_lb_mcast_bytes /* number of total mcast bytes received on loopback port without errors */;
1519 	struct regpair rcv_lb_bcast_bytes /* number of total bcast bytes received on loopback port without errors */;
1520 	struct regpair rcv_lb_ucast_pkts /* number of total ucast packets received on loopback port without errors */;
1521 	struct regpair rcv_lb_mcast_pkts /* number of total mcast packets received on loopback port without errors */;
1522 	struct regpair rcv_lb_bcast_pkts /* number of total bcast packets received on loopback port without errors */;
1523 	struct regpair rcv_gre_bytes /* Received GRE bytes */;
1524 	struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
1525 	struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
1526 	struct regpair rcv_gre_pkts /* Received GRE packets */;
1527 	struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
1528 	struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
1529 };
1530 
1531 struct eth_ustorm_per_queue_stat
1532 {
1533 	struct regpair rcv_ucast_bytes;
1534 	struct regpair rcv_mcast_bytes;
1535 	struct regpair rcv_bcast_bytes;
1536 	struct regpair rcv_ucast_pkts;
1537 	struct regpair rcv_mcast_pkts;
1538 	struct regpair rcv_bcast_pkts;
1539 };
1540 
1541 /*
1542  * Event Ring VF-PF Channel data
1543  */
1544 struct vf_pf_channel_eqe_data
1545 {
1546 	struct regpair msg_addr /* VF-PF message address */;
1547 };
1548 
1549 /*
1550  * Event Ring malicious VF data
1551  */
1552 struct malicious_vf_eqe_data
1553 {
1554 	u8 vf_id /* Malicious VF ID */;
1555 	u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
1556 	__le16 reserved[3];
1557 };
1558 
1559 /*
1560  * Event Ring initial cleanup data
1561  */
1562 struct initial_cleanup_eqe_data
1563 {
1564 	u8 vf_id /* VF ID */;
1565 	u8 reserved[7];
1566 };
1567 
1568 /*
1569  * Event Data Union
1570  */
1571 union event_ring_data
1572 {
1573 	u8 bytes[8] /* Byte Array */;
1574 	struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
1575 	struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
1576 	struct iscsi_connect_done_results iscsi_conn_done_info /* Dedicated fields to iscsi connect done results */;
1577 	union rdma_eqe_data rdma_data /* Dedicated field for RDMA data */;
1578 	struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
1579 	struct initial_cleanup_eqe_data vf_init_cleanup /* VF Initial Cleanup data */;
1580 };
1581 
1582 /*
1583  * Event Ring Entry
1584  */
1585 struct event_ring_entry
1586 {
1587 	u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
1588 	u8 opcode /* Event Opcode */;
1589 	__le16 reserved0 /* Reserved */;
1590 	__le16 echo /* Echo value from ramrod data on the host */;
1591 	u8 fw_return_code /* FW return code for SP ramrods */;
1592 	u8 flags;
1593 #define EVENT_RING_ENTRY_ASYNC_MASK      0x1 /* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
1594 #define EVENT_RING_ENTRY_ASYNC_SHIFT     0
1595 #define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
1596 #define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
1597 	union event_ring_data data;
1598 };
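
/*
 * Usage sketch (hypothetical helper): an EQE is asynchronous when the ASYNC
 * bit of the flags byte is set; it is extracted with the MASK/SHIFT pair
 * defined above.
 */
static inline u8 event_ring_entry_is_async(const struct event_ring_entry *eqe)
{
	return (eqe->flags >> EVENT_RING_ENTRY_ASYNC_SHIFT) &
	    EVENT_RING_ENTRY_ASYNC_MASK;
}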
1599 
1600 /*
1601  * Event Ring Next Page Address
1602  */
1603 struct event_ring_next_addr
1604 {
1605 	struct regpair addr /* Next Page Address */;
1606 	__le32 reserved[2] /* Reserved */;
1607 };
1608 
1609 /*
1610  * Event Ring Element
1611  */
1612 union event_ring_element
1613 {
1614 	struct event_ring_entry entry /* Event Ring Entry */;
1615 	struct event_ring_next_addr next_addr /* Event Ring Next Page Address */;
1616 };
1617 
1618 /*
1619  * Flow control mode
1620  */
1621 enum fw_flow_ctrl_mode
1622 {
1623 	flow_ctrl_pause,
1624 	flow_ctrl_pfc,
1625 	MAX_FW_FLOW_CTRL_MODE
1626 };
1627 
1628 /*
1629  * GFT profile type.
1630  */
1631 enum gft_profile_type
1632 {
1633 	GFT_PROFILE_TYPE_4_TUPLE /* tunnel type, inner 4 tuple, IP type and L4 type match. */,
1634 	GFT_PROFILE_TYPE_L4_DST_PORT /* tunnel type, inner L4 destination port, IP type and L4 type match. */,
1635 	GFT_PROFILE_TYPE_IP_DST_ADDR /* tunnel type, inner IP destination address and IP type match. */,
1636 	GFT_PROFILE_TYPE_IP_SRC_ADDR /* tunnel type, inner IP source address and IP type match. */,
1637 	GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
1638 	MAX_GFT_PROFILE_TYPE
1639 };
1640 
1641 /*
1642  * Major and Minor hsi Versions
1643  */
1644 struct hsi_fp_ver_struct
1645 {
1646 	u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
1647 	u8 major_ver_arr[2] /* Major Version of driver loading pf */;
1648 };
1649 
1650 /*
1651  * Integration Phase
1652  */
1653 enum integ_phase
1654 {
1655 	INTEG_PHASE_BB_A0_LATEST=3 /* BB A0 latest integration phase */,
1656 	INTEG_PHASE_BB_B0_NO_MCP=10 /* BB B0 without MCP */,
1657 	INTEG_PHASE_BB_B0_WITH_MCP=11 /* BB B0 with MCP */,
1658 	MAX_INTEG_PHASE
1659 };
1660 
1661 /*
1662  * iWARP LL2 TX queues
1663  */
1664 enum iwarp_ll2_tx_queues
1665 {
1666 	IWARP_LL2_IN_ORDER_TX_QUEUE=1 /* LL2 queue for OOO packets sent in-order by the driver */,
1667 	IWARP_LL2_ALIGNED_TX_QUEUE /* LL2 queue for unaligned packets sent aligned by the driver */,
1668 	IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE /* LL2 queue for unaligned packets sent aligned after being right-trimmed by the driver */,
1669 	IWARP_LL2_ERROR /* Error indication */,
1670 	MAX_IWARP_LL2_TX_QUEUES
1671 };
1672 
1673 /*
1674  * Malicious VF error ID
1675  */
1676 enum malicious_vf_error_id
1677 {
1678 	MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
1679 	VF_PF_CHANNEL_NOT_READY /* Writing to VF/PF channel when it is not ready */,
1680 	VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
1681 	VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
1682 	ETH_PACKET_TOO_SMALL /* TX packet is shorter than reported on the BDs or than the minimal packet size */,
1683 	ETH_ILLEGAL_VLAN_MODE /* TX packet marked for VLAN insertion when it is illegal */,
1684 	ETH_MTU_VIOLATION /* TX packet is greater than the MTU */,
1685 	ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
1686 	ETH_VLAN_INSERT_AND_INBAND_VLAN /* VLAN cannot be added to a packet with an inband tag */,
1687 	ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */,
1688 	ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
1689 	ETH_INSUFFICIENT_BDS /* There are not enough BDs for transmission of even one packet */,
1690 	ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
1691 	ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
1692 	ETH_ZERO_SIZE_BD /* empty BD (which does not contain control flags) is illegal */,
1693 	ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
1694 	ETH_INSUFFICIENT_PAYLOAD /* In LSO, the local BD ring is expected to contain at least MSS bytes of data */,
1695 	ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
1696 	ETH_TUNN_IPV6_EXT_NBD_ERR /* Tunneled packet with IPv6+Ext without a proper number of BDs */,
1697 	ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
1698 	ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
1699 	ETH_PACKET_SIZE_TOO_LARGE /* packet scanned is too large (can be 9700 at most) */,
1700 	MAX_MALICIOUS_VF_ERROR_ID
1701 };
1702 
1703 /*
1704  * Mstorm non-triggering VF zone
1705  */
1706 struct mstorm_non_trigger_vf_zone
1707 {
1708 	struct eth_mstorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
1709 	struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD] /* VF RX queues producers */;
1710 };
1711 
1712 /*
1713  * Mstorm VF zone
1714  */
1715 struct mstorm_vf_zone
1716 {
1717 	struct mstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1718 };
1719 
1720 /*
1721  * vlan header including TPID and TCI fields
1722  */
1723 struct vlan_header
1724 {
1725 	__le16 tpid /* Tag Protocol Identifier */;
1726 	__le16 tci /* Tag Control Information */;
1727 };
1728 
1729 /*
1730  * outer tag configurations
1731  */
1732 struct outer_tag_config_struct
1733 {
1734 	u8 enable_stag_pri_change /* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette Davis, UFP with Host Control mode, and UFP with DCB over base interface; otherwise 0. */;
1735 	u8 pri_map_valid /* If inner_to_outer_pri_map is initialized, then set pri_map_valid */;
1736 	u8 reserved[2];
1737 	struct vlan_header outer_tag /* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol identifier and outer tag control information */;
1738 	u8 inner_to_outer_pri_map[8] /* Map from inner to outer priority. Set pri_map_valid when initializing the map */;
1739 };
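
/*
 * Illustrative sketch (hypothetical helper): per the comments above,
 * pri_map_valid must be set whenever inner_to_outer_pri_map is initialized.
 * The identity mapping used here is only an example.
 */
static inline void
outer_tag_config_set_identity_pri_map(struct outer_tag_config_struct *cfg)
{
	u8 pri;

	for (pri = 0; pri < 8; pri++)
		cfg->inner_to_outer_pri_map[pri] = pri; /* inner pri i -> outer pri i */
	cfg->pri_map_valid = 1; /* the map above is initialized */
}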
1740 
1741 /*
1742  * personality per PF
1743  */
1744 enum personality_type
1745 {
1746 	BAD_PERSONALITY_TYP,
1747 	PERSONALITY_ISCSI /* iSCSI and LL2 */,
1748 	PERSONALITY_FCOE /* Fcoe and LL2 */,
1749 	PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
1750 	PERSONALITY_RDMA /* Roce and LL2 */,
1751 	PERSONALITY_CORE /* CORE(LL2) */,
1752 	PERSONALITY_ETH /* Ethernet */,
1753 	PERSONALITY_TOE /* Toe and LL2 */,
1754 	MAX_PERSONALITY_TYPE
1755 };
1756 
1757 /*
1758  * tunnel configuration
1759  */
1760 struct pf_start_tunnel_config
1761 {
1762 	u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set - FW will use a default port */;
1763 	u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set - FW will use a default port */;
1764 	u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. (use enum tunnel_clss) */;
1765 	u8 tunnel_clss_l2geneve /* Rx classification scheme for l2 GENEVE tunnel. (use enum tunnel_clss) */;
1766 	u8 tunnel_clss_ipgeneve /* Rx classification scheme for ip GENEVE tunnel. (use enum tunnel_clss) */;
1767 	u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. (use enum tunnel_clss) */;
1768 	u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. (use enum tunnel_clss) */;
1769 	u8 reserved;
1770 	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */;
1771 	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */;
1772 };
1773 
1774 /*
1775  * Ramrod data for PF start ramrod
1776  */
1777 struct pf_start_ramrod_data
1778 {
1779 	struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
1780 	struct regpair consolid_q_pbl_addr /* PBL address of consolidation queue */;
1781 	struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */;
1782 	__le16 event_ring_sb_id /* Status block ID */;
1783 	u8 base_vf_id /* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */;
1784 	u8 num_vfs /* Amount of vfs owned by PF */;
1785 	u8 event_ring_num_pages /* Number of PBL pages in event ring */;
1786 	u8 event_ring_sb_index /* Status block index */;
1787 	u8 path_id /* HW path ID (engine ID) */;
1788 	u8 warning_as_error /* In FW asserts, treat warning as error */;
1789 	u8 dont_log_ramrods /* If not set - throw a warning for each ramrod (for debug) */;
1790 	u8 personality /* defines the personality type of the new PF (use enum personality_type) */;
1791 	__le16 log_type_mask /* Log type mask. Each bit set enables a corresponding event type logging. Event types are defined as ASSERT_LOG_TYPE_xxx */;
1792 	u8 mf_mode /* Multi function mode (use enum mf_mode) */;
1793 	u8 integ_phase /* Integration phase (use enum integ_phase) */;
1794 	u8 allow_npar_tx_switching /* If set, inter-pf tx switching is allowed in Switch Independent function mode */;
1795 	u8 reserved0;
1796 	struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
1797 	struct outer_tag_config_struct outer_tag_config /* Outer tag configurations */;
1798 };
1799 
1800 /*
1801  * Per protocol DCB data
1802  */
1803 struct protocol_dcb_data
1804 {
1805 	u8 dcb_enable_flag /* Enable DCB */;
1806 	u8 dscp_enable_flag /* Enable updating DSCP value */;
1807 	u8 dcb_priority /* DCB priority */;
1808 	u8 dcb_tc /* DCB TC */;
1809 	u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
1810 	u8 dcb_dont_add_vlan0 /* When DCB is enabled - if this flag is set, dont add VLAN 0 tag to untagged frames */;
1811 };
1812 
1813 /*
1814  * Update tunnel configuration
1815  */
1816 struct pf_update_tunnel_config
1817 {
1818 	u8 update_rx_pf_clss /* Update RX per PF tunnel classification scheme. */;
1819 	u8 update_rx_def_ucast_clss /* Update per PORT default tunnel RX classification scheme for traffic with unknown unicast outer MAC in NPAR mode. */;
1820 	u8 update_rx_def_non_ucast_clss /* Update per PORT default tunnel RX classification scheme for traffic with non unicast outer MAC in NPAR mode. */;
1821 	u8 set_vxlan_udp_port_flg /* Update VXLAN tunnel UDP destination port. */;
1822 	u8 set_geneve_udp_port_flg /* Update GENEVE tunnel UDP destination port. */;
1823 	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. (use enum tunnel_clss) */;
1824 	u8 tunnel_clss_l2geneve /* Classification scheme for l2 GENEVE tunnel. (use enum tunnel_clss) */;
1825 	u8 tunnel_clss_ipgeneve /* Classification scheme for ip GENEVE tunnel. (use enum tunnel_clss) */;
1826 	u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. (use enum tunnel_clss) */;
1827 	u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. (use enum tunnel_clss) */;
1828 	__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
1829 	__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
1830 	__le16 reserved;
1831 };
1832 
1833 /*
1834  * Data for port update ramrod
1835  */
1836 struct pf_update_ramrod_data
1837 {
1838 	u8 update_eth_dcb_data_mode /* Update Eth DCB  data indication (use enum dcb_dscp_update_mode) */;
1839 	u8 update_fcoe_dcb_data_mode /* Update FCOE DCB  data indication (use enum dcb_dscp_update_mode) */;
1840 	u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB  data indication (use enum dcb_dscp_update_mode) */;
1841 	u8 update_roce_dcb_data_mode /* Update ROCE DCB  data indication (use enum dcb_dscp_update_mode) */;
1842 	u8 update_rroce_dcb_data_mode /* Update RROCE (RoceV2) DCB  data indication (use enum dcb_dscp_update_mode) */;
1843 	u8 update_iwarp_dcb_data_mode /* Update IWARP DCB  data indication (use enum dcb_dscp_update_mode) */;
1844 	u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
1845 	u8 update_enable_stag_pri_change /* Update Enable STAG Priority Change indication */;
1846 	struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
1847 	struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
1848 	struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */;
1849 	struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
1850 	struct protocol_dcb_data rroce_dcb_data /* core roce related fields */;
1851 	struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */;
1852 	__le16 mf_vlan /* new outer vlan id value */;
1853 	u8 enable_stag_pri_change /* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette Davis, UFP with Host Control mode, and UFP with DCB over base interface; otherwise 0. */;
1854 	u8 reserved;
1855 	struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */;
1856 };
1857 
1858 /*
1859  * Ports mode
1860  */
1861 enum ports_mode
1862 {
1863 	ENGX2_PORTX1 /* 2 engines x 1 port */,
1864 	ENGX2_PORTX2 /* 2 engines x 2 ports */,
1865 	ENGX1_PORTX1 /* 1 engine  x 1 port */,
1866 	ENGX1_PORTX2 /* 1 engine  x 2 ports */,
1867 	ENGX1_PORTX4 /* 1 engine  x 4 ports */,
1868 	MAX_PORTS_MODE
1869 };
1870 
1871 /*
1872  * use to index in hsi_fp_[major|minor]_ver_arr per protocol
1873  */
1874 enum protocol_version_array_key
1875 {
1876 	ETH_VER_KEY=0,
1877 	ROCE_VER_KEY,
1878 	MAX_PROTOCOL_VERSION_ARRAY_KEY
1879 };
1880 
1881 /*
1882  * RDMA TX Stats
1883  */
1884 struct rdma_sent_stats
1885 {
1886 	struct regpair sent_bytes /* number of total RDMA bytes sent */;
1887 	struct regpair sent_pkts /* number of total RDMA packets sent */;
1888 };
1889 
1890 /*
1891  * Pstorm non-triggering VF zone
1892  */
1893 struct pstorm_non_trigger_vf_zone
1894 {
1895 	struct eth_pstorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
1896 	struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
1897 };
1898 
1899 /*
1900  * Pstorm VF zone
1901  */
1902 struct pstorm_vf_zone
1903 {
1904 	struct pstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1905 	struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
1906 };
1907 
1908 /*
1909  * Ramrod Header of SPQE
1910  */
1911 struct ramrod_header
1912 {
1913 	__le32 cid /* Slowpath Connection CID */;
1914 	u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
1915 	u8 protocol_id /* Ramrod Protocol ID (use enum protocol_type) */;
1916 	__le16 echo /* Ramrod echo */;
1917 };
1918 
1919 /*
1920  * RDMA RX Stats
1921  */
1922 struct rdma_rcv_stats
1923 {
1924 	struct regpair rcv_bytes /* number of total RDMA bytes received */;
1925 	struct regpair rcv_pkts /* number of total RDMA packets received */;
1926 };
1927 
1928 /*
1929  * Data for update QCN/DCQCN RL ramrod
1930  */
1931 struct rl_update_ramrod_data
1932 {
1933 	u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
1934 	u8 dcqcn_update_param_flg /* Update DCQCN global params: timeout, g, k. */;
1935 	u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
1936 	u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
1937 	u8 rl_stop_flg /* Stop RL. */;
1938 	u8 rl_id_first /* ID of the first (or single) RL to be updated. */;
1939 	u8 rl_id_last /* ID of the last RL to be updated. If clear, a single RL will be updated. */;
1940 	u8 rl_dc_qcn_flg /* If set, the RL will be used for DCQCN. */;
1941 	__le32 rl_bc_rate /* Byte Counter Limit. */;
1942 	__le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
1943 	__le16 rl_r_ai /* Active increase rate. */;
1944 	__le16 rl_r_hai /* Hyper active increase rate. */;
1945 	__le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution. */;
1946 	__le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
1947 	__le32 dcqcn_timeuot_us /* DCQCN timeout. */;
1948 	__le32 qcn_timeuot_us /* QCN timeout. */;
1949 	__le32 reserved[2];
1950 };
1951 
1952 /*
1953  * Slowpath Element (SPQE)
1954  */
1955 struct slow_path_element
1956 {
1957 	struct ramrod_header hdr /* Ramrod Header */;
1958 	struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
1959 };
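
/*
 * Usage sketch (hypothetical helper; cid, echo and the data address are
 * placeholders supplied by the caller, and PROTOCOLID_COMMON is assumed to
 * come from common_hsi.h): an SPQE for a PF start ramrod carries the COMMON
 * ramrod header plus the host address of the pf_start_ramrod_data buffer.
 * Multi-byte fields are shown host-endian for brevity; real code must
 * convert them to little-endian.
 */
static inline void
slow_path_element_init_pf_start(struct slow_path_element *spqe, u32 cid,
				u16 echo, u32 data_addr_lo, u32 data_addr_hi)
{
	spqe->hdr.cid = cid;
	spqe->hdr.cmd_id = COMMON_RAMROD_PF_START;
	spqe->hdr.protocol_id = PROTOCOLID_COMMON;
	spqe->hdr.echo = echo;
	spqe->data_ptr.lo = data_addr_lo;
	spqe->data_ptr.hi = data_addr_hi;
}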
1960 
1961 /*
1962  * Tstorm non-triggering VF zone
1963  */
1964 struct tstorm_non_trigger_vf_zone
1965 {
1966 	struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
1967 };
1968 
1969 struct tstorm_per_port_stat
1970 {
1971 	struct regpair trunc_error_discard /* packet is dropped because it was truncated in NIG */;
1972 	struct regpair mac_error_discard /* packet is dropped because of Ethernet FCS error */;
1973 	struct regpair mftag_filter_discard /* packet is dropped because classification was unsuccessful */;
1974 	struct regpair eth_mac_filter_discard /* packet was passed to Ethernet and dropped because of no mac filter match */;
1975 	struct regpair ll2_mac_filter_discard /* packet passed to Light L2 and dropped because Light L2 is not configured for this PF */;
1976 	struct regpair ll2_conn_disabled_discard /* packet passed to Light L2 and dropped because the Light L2 connection is disabled */;
1977 	struct regpair iscsi_irregular_pkt /* packet is an ISCSI irregular packet */;
1978 	struct regpair fcoe_irregular_pkt /* packet is an FCOE irregular packet */;
1979 	struct regpair roce_irregular_pkt /* packet is a RoCE irregular packet */;
1980 	struct regpair iwarp_irregular_pkt /* packet is an IWARP irregular packet */;
1981 	struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */;
1982 	struct regpair toe_irregular_pkt /* packet is a TOE irregular packet */;
1983 	struct regpair preroce_irregular_pkt /* packet is a PREROCE irregular packet */;
1984 	struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
1985 	struct regpair eth_vxlan_tunn_filter_discard /* VXLAN dropped packets */;
1986 	struct regpair eth_geneve_tunn_filter_discard /* GENEVE dropped packets */;
1987 	struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
1988 };
1989 
1990 /*
1991  * Tstorm VF zone
1992  */
1993 struct tstorm_vf_zone
1994 {
1995 	struct tstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
1996 };
1997 
1998 /*
1999  * Tunnel classification scheme
2000  */
2001 enum tunnel_clss
2002 {
2003 	TUNNEL_CLSS_MAC_VLAN=0 /* Use MAC and VLAN from first L2 header for vport classification. */,
2004 	TUNNEL_CLSS_MAC_VNI /* Use MAC from first L2 header and VNI from tunnel header for vport classification */,
2005 	TUNNEL_CLSS_INNER_MAC_VLAN /* Use MAC and VLAN from last L2 header for vport classification */,
2006 	TUNNEL_CLSS_INNER_MAC_VNI /* Use MAC from last L2 header and VNI from tunnel header for vport classification */,
2007 	TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE /* Use MAC and VLAN from last L2 header for vport classification. If no exact match, use MAC and VLAN from first L2 header for classification. */,
2008 	MAX_TUNNEL_CLSS
2009 };
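
/*
 * Configuration sketch (hypothetical helper; the chosen classification
 * scheme and the use of the IANA VXLAN port 4789 are assumptions for
 * illustration): fills the pf_start_tunnel_config structure defined earlier,
 * requesting a fixed VXLAN UDP destination port and first-L2-header MAC/VLAN
 * classification for every tunnel type, while leaving the GENEVE port at the
 * FW default. vxlan_udp_port is shown host-endian for brevity; real code
 * must convert it to little-endian.
 */
static inline void
pf_start_tunnel_config_example(struct pf_start_tunnel_config *cfg)
{
	cfg->set_vxlan_udp_port_flg = 1;	/* use vxlan_udp_port below */
	cfg->set_geneve_udp_port_flg = 0;	/* keep the FW default GENEVE port */
	cfg->tunnel_clss_vxlan = TUNNEL_CLSS_MAC_VLAN;
	cfg->tunnel_clss_l2geneve = TUNNEL_CLSS_MAC_VLAN;
	cfg->tunnel_clss_ipgeneve = TUNNEL_CLSS_MAC_VLAN;
	cfg->tunnel_clss_l2gre = TUNNEL_CLSS_MAC_VLAN;
	cfg->tunnel_clss_ipgre = TUNNEL_CLSS_MAC_VLAN;
	cfg->reserved = 0;
	cfg->vxlan_udp_port = 4789;		/* IANA-assigned VXLAN port */
	cfg->geneve_udp_port = 0;		/* ignored, flag above is clear */
}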
2010 
2011 /*
2012  * Ustorm non-triggering VF zone
2013  */
2014 struct ustorm_non_trigger_vf_zone
2015 {
2016 	struct eth_ustorm_per_queue_stat eth_queue_stat /* VF statistic bucket */;
2017 	struct regpair vf_pf_msg_addr /* VF-PF message address */;
2018 };
2019 
2020 /*
2021  * Ustorm triggering VF zone
2022  */
2023 struct ustorm_trigger_vf_zone
2024 {
2025 	u8 vf_pf_msg_valid /* VF-PF message valid flag */;
2026 	u8 reserved[7];
2027 };
2028 
2029 /*
2030  * Ustorm VF zone
2031  */
2032 struct ustorm_vf_zone
2033 {
2034 	struct ustorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
2035 	struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
2036 };
2037 
2038 /*
2039  * VF-PF channel data
2040  */
2041 struct vf_pf_channel_data
2042 {
2043 	__le32 ready /* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel is ready for a new transaction. */;
2044 	u8 valid /* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is valid. */;
2045 	u8 reserved0;
2046 	__le16 reserved1;
2047 };
2048 
2049 /*
2050  * Ramrod data for VF start ramrod
2051  */
2052 struct vf_start_ramrod_data
2053 {
2054 	u8 vf_id /* VF ID */;
2055 	u8 enable_flr_ack /* If set, initial cleanup ack will be sent to parent PF SP event queue */;
2056 	__le16 opaque_fid /* VF opaque FID */;
2057 	u8 personality /* defines the personality type of the new VF (use enum personality_type) */;
2058 	u8 reserved[7];
2059 	struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
2060 };
2061 
2062 /*
2063  * Ramrod data for VF stop ramrod
2064  */
2065 struct vf_stop_ramrod_data
2066 {
2067 	u8 vf_id /* VF ID */;
2068 	u8 reserved0;
2069 	__le16 reserved1;
2070 	__le32 reserved2;
2071 };
2072 
2073 /*
2074  * VF zone size mode.
2075  */
2076 enum vf_zone_size_mode
2077 {
2078 	VF_ZONE_SIZE_MODE_DEFAULT /* Default VF zone size. Up to 192 VF supported. */,
2079 	VF_ZONE_SIZE_MODE_DOUBLE /* Doubled VF zone size. Up to 96 VF supported. */,
2080 	VF_ZONE_SIZE_MODE_QUAD /* Quad VF zone size. Up to 48 VF supported. */,
2081 	MAX_VF_ZONE_SIZE_MODE
2082 };
2083 
2084 /*
2085  * Attentions status block
2086  */
2087 struct atten_status_block
2088 {
2089 	__le32 atten_bits;
2090 	__le32 atten_ack;
2091 	__le16 reserved0;
2092 	__le16 sb_index /* status block running index */;
2093 	__le32 reserved1;
2094 };
2095 
2096 /*
2097  * DMAE command
2098  */
2099 struct dmae_cmd
2100 {
2101 	__le32 opcode;
2102 #define DMAE_CMD_SRC_MASK              0x1 /* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
2103 #define DMAE_CMD_SRC_SHIFT             0
2104 #define DMAE_CMD_DST_MASK              0x3 /* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None (use enum dmae_cmd_dst_enum) */
2105 #define DMAE_CMD_DST_SHIFT             1
2106 #define DMAE_CMD_C_DST_MASK            0x1 /* Completion destination. 0 - PCIe, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
2107 #define DMAE_CMD_C_DST_SHIFT           3
2108 #define DMAE_CMD_CRC_RESET_MASK        0x1 /* Reset the CRC result (do not use the previous result as the seed) */
2109 #define DMAE_CMD_CRC_RESET_SHIFT       4
2110 #define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1 /* Reset the source address in the next go to the same source address of the previous go */
2111 #define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
2112 #define DMAE_CMD_DST_ADDR_RESET_MASK   0x1 /* Reset the destination address in the next go to the same destination address of the previous go */
2113 #define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
2114 #define DMAE_CMD_COMP_FUNC_MASK        0x1 /* 0 - completion function is the same as src function, 1 - completion function is the same as dst function (use enum dmae_cmd_comp_func_enum) */
2115 #define DMAE_CMD_COMP_FUNC_SHIFT       7
2116 #define DMAE_CMD_COMP_WORD_EN_MASK     0x1 /* 0 - Do not write a completion word, 1 - Write a completion word (use enum dmae_cmd_comp_word_en_enum) */
2117 #define DMAE_CMD_COMP_WORD_EN_SHIFT    8
2118 #define DMAE_CMD_COMP_CRC_EN_MASK      0x1 /* 0 - Do not write a CRC word, 1 - Write a CRC word (use enum dmae_cmd_comp_crc_en_enum) */
2119 #define DMAE_CMD_COMP_CRC_EN_SHIFT     9
2120 #define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7 /* The CRC word should be taken from the DMAE address space from address 9+X, where X is the value in these bits. */
2121 #define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
2122 #define DMAE_CMD_RESERVED1_MASK        0x1
2123 #define DMAE_CMD_RESERVED1_SHIFT       13
2124 #define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
2125 #define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
2126 #define DMAE_CMD_ERR_HANDLING_MASK     0x3 /* The field specifies how the completion word is affected by PCIe read error. 0 - Send a regular completion, 1 - Send a completion with an error indication, 2 - Do not send a completion (use enum dmae_cmd_error_handling_enum) */
2127 #define DMAE_CMD_ERR_HANDLING_SHIFT    16
2128 #define DMAE_CMD_PORT_ID_MASK          0x3 /* The port ID to be placed on the RF FID field of the GRC bus. This field is used both when GRC is the destination and when it is the source of the DMAE transaction. */
2129 #define DMAE_CMD_PORT_ID_SHIFT         18
2130 #define DMAE_CMD_SRC_PF_ID_MASK        0xF /* Source PCI function number [3:0] */
2131 #define DMAE_CMD_SRC_PF_ID_SHIFT       20
2132 #define DMAE_CMD_DST_PF_ID_MASK        0xF /* Destination PCI function number [3:0] */
2133 #define DMAE_CMD_DST_PF_ID_SHIFT       24
2134 #define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1 /* Source VFID valid */
2135 #define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
2136 #define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1 /* Destination VFID valid */
2137 #define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
2138 #define DMAE_CMD_RESERVED2_MASK        0x3
2139 #define DMAE_CMD_RESERVED2_SHIFT       30
2140 	__le32 src_addr_lo /* PCIe source address low in bytes or GRC source address in DW */;
2141 	__le32 src_addr_hi /* PCIe source address high in bytes or reserved (if source is GRC) */;
2142 	__le32 dst_addr_lo /* PCIe destination address low in bytes or GRC destination address in DW */;
2143 	__le32 dst_addr_hi /* PCIe destination address high in bytes or reserved (if destination is GRC) */;
2144 	__le16 length_dw /* Length in DW */;
2145 	__le16 opcode_b;
2146 #define DMAE_CMD_SRC_VF_ID_MASK        0xFF /* Source VF id */
2147 #define DMAE_CMD_SRC_VF_ID_SHIFT       0
2148 #define DMAE_CMD_DST_VF_ID_MASK        0xFF /* Destination VF id */
2149 #define DMAE_CMD_DST_VF_ID_SHIFT       8
2150 	__le32 comp_addr_lo /* PCIe completion address low in bytes or GRC completion address in DW */;
2151 	__le32 comp_addr_hi /* PCIe completion address high in bytes or reserved (if completion address is GRC) */;
2152 	__le32 comp_val /* Value to write to completion address */;
2153 	__le32 crc32 /* crc32 result */;
2154 	__le32 crc_32_c /* crc32_c result */;
2155 	__le16 crc16 /* crc16 result */;
2156 	__le16 crc16_c /* crc16_c result */;
2157 	__le16 crc10 /* crc_t10 result */;
2158 	__le16 reserved;
2159 	__le16 xsum16 /* checksum16 result  */;
2160 	__le16 xsum8 /* checksum8 result  */;
2161 };
2162 
2163 enum dmae_cmd_comp_crc_en_enum
2164 {
2165 	dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
2166 	dmae_cmd_comp_crc_enabled /* Write a CRC word */,
2167 	MAX_DMAE_CMD_COMP_CRC_EN_ENUM
2168 };
2169 
2170 enum dmae_cmd_comp_func_enum
2171 {
2172 	dmae_cmd_comp_func_to_src /* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */,
2173 	dmae_cmd_comp_func_to_dst /* completion word and/or CRC will be sent to DST-PCI function/DST VFID */,
2174 	MAX_DMAE_CMD_COMP_FUNC_ENUM
2175 };
2176 
2177 enum dmae_cmd_comp_word_en_enum
2178 {
2179 	dmae_cmd_comp_word_disabled /* Do not write a completion word */,
2180 	dmae_cmd_comp_word_enabled /* Write the completion word */,
2181 	MAX_DMAE_CMD_COMP_WORD_EN_ENUM
2182 };
2183 
2184 enum dmae_cmd_c_dst_enum
2185 {
2186 	dmae_cmd_c_dst_pcie,
2187 	dmae_cmd_c_dst_grc,
2188 	MAX_DMAE_CMD_C_DST_ENUM
2189 };
2190 
2191 enum dmae_cmd_dst_enum
2192 {
2193 	dmae_cmd_dst_none_0,
2194 	dmae_cmd_dst_pcie,
2195 	dmae_cmd_dst_grc,
2196 	dmae_cmd_dst_none_3,
2197 	MAX_DMAE_CMD_DST_ENUM
2198 };
2199 
2200 enum dmae_cmd_error_handling_enum
2201 {
2202 	dmae_cmd_error_handling_send_regular_comp /* Send a regular completion (with no error indication) */,
2203 	dmae_cmd_error_handling_send_comp_with_err /* Send a completion with an error indication (i.e. set bit 31 of the completion word) */,
2204 	dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
2205 	MAX_DMAE_CMD_ERROR_HANDLING_ENUM
2206 };
2207 
2208 enum dmae_cmd_src_enum
2209 {
2210 	dmae_cmd_src_pcie /* The source is the PCIe */,
2211 	dmae_cmd_src_grc /* The source is the GRC */,
2212 	MAX_DMAE_CMD_SRC_ENUM
2213 };
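
/*
 * Usage sketch (hypothetical helper): the opcode dword is assembled by
 * OR-ing each field, masked and shifted into place as described by the
 * DMAE_CMD_* MASK/SHIFT pairs above. This example requests a PCIe -> GRC
 * copy that writes a completion word; the result is shown host-endian and
 * must be converted to little-endian before being placed in dmae_cmd.opcode.
 */
static inline u32 dmae_cmd_build_opcode_example(void)
{
	u32 opcode = 0;

	opcode |= (dmae_cmd_src_pcie & DMAE_CMD_SRC_MASK) << DMAE_CMD_SRC_SHIFT;
	opcode |= (dmae_cmd_dst_grc & DMAE_CMD_DST_MASK) << DMAE_CMD_DST_SHIFT;
	opcode |= (dmae_cmd_comp_word_enabled & DMAE_CMD_COMP_WORD_EN_MASK) <<
	    DMAE_CMD_COMP_WORD_EN_SHIFT;
	return opcode;
}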
2214 
2215 struct e4_mstorm_core_conn_ag_ctx
2216 {
2217 	u8 byte0 /* cdu_validation */;
2218 	u8 byte1 /* state */;
2219 	u8 flags0;
2220 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2221 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2222 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2223 #define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2224 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2225 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2226 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2227 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2228 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2229 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2230 	u8 flags1;
2231 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2232 #define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2233 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2234 #define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2235 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2236 #define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2237 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2238 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2239 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2240 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2241 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2242 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2243 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2244 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2245 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2246 #define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2247 	__le16 word0 /* word0 */;
2248 	__le16 word1 /* word1 */;
2249 	__le32 reg0 /* reg0 */;
2250 	__le32 reg1 /* reg1 */;
2251 };
2252 
2253 struct e4_ystorm_core_conn_ag_ctx
2254 {
2255 	u8 byte0 /* cdu_validation */;
2256 	u8 byte1 /* state */;
2257 	u8 flags0;
2258 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2259 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2260 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2261 #define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2262 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2263 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2264 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2265 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2266 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2267 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2268 	u8 flags1;
2269 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2270 #define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2271 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2272 #define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2273 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2274 #define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2275 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2276 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2277 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2278 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2279 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2280 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2281 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2282 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2283 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2284 #define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2285 	u8 byte2 /* byte2 */;
2286 	u8 byte3 /* byte3 */;
2287 	__le16 word0 /* word0 */;
2288 	__le32 reg0 /* reg0 */;
2289 	__le32 reg1 /* reg1 */;
2290 	__le16 word1 /* word1 */;
2291 	__le16 word2 /* word2 */;
2292 	__le16 word3 /* word3 */;
2293 	__le16 word4 /* word4 */;
2294 	__le32 reg2 /* reg2 */;
2295 	__le32 reg3 /* reg3 */;
2296 };
2297 
2298 struct e5_mstorm_core_conn_ag_ctx
2299 {
2300 	u8 byte0 /* cdu_validation */;
2301 	u8 byte1 /* state_and_core_id */;
2302 	u8 flags0;
2303 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2304 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2305 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2306 #define E5_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2307 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2308 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2309 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2310 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2311 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2312 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2313 	u8 flags1;
2314 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2315 #define E5_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2316 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2317 #define E5_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2318 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2319 #define E5_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2320 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2321 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2322 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2323 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2324 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2325 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2326 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2327 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2328 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2329 #define E5_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2330 	__le16 word0 /* word0 */;
2331 	__le16 word1 /* word1 */;
2332 	__le32 reg0 /* reg0 */;
2333 	__le32 reg1 /* reg1 */;
2334 };
2335 
2336 struct e5_ystorm_core_conn_ag_ctx
2337 {
2338 	u8 byte0 /* cdu_validation */;
2339 	u8 byte1 /* state_and_core_id */;
2340 	u8 flags0;
2341 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1 /* exist_in_qm0 */
2342 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
2343 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1 /* exist_in_qm1 */
2344 #define E5_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
2345 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3 /* cf0 */
2346 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
2347 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3 /* cf1 */
2348 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
2349 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3 /* cf2 */
2350 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
2351 	u8 flags1;
2352 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1 /* cf0en */
2353 #define E5_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
2354 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1 /* cf1en */
2355 #define E5_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
2356 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1 /* cf2en */
2357 #define E5_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
2358 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1 /* rule0en */
2359 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
2360 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1 /* rule1en */
2361 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
2362 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1 /* rule2en */
2363 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
2364 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1 /* rule3en */
2365 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
2366 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1 /* rule4en */
2367 #define E5_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
2368 	u8 byte2 /* byte2 */;
2369 	u8 byte3 /* byte3 */;
2370 	__le16 word0 /* word0 */;
2371 	__le32 reg0 /* reg0 */;
2372 	__le32 reg1 /* reg1 */;
2373 	__le16 word1 /* word1 */;
2374 	__le16 word2 /* word2 */;
2375 	__le16 word3 /* word3 */;
2376 	__le16 word4 /* word4 */;
2377 	__le32 reg2 /* reg2 */;
2378 	__le32 reg3 /* reg3 */;
2379 };
2380 
2381 struct fw_asserts_ram_section
2382 {
2383 	__le16 section_ram_line_offset /* The offset of the section in the RAM, in RAM lines (64-bit units) */;
2384 	__le16 section_ram_line_size /* The size of the section, in RAM lines (64-bit units) */;
2385 	u8 list_dword_offset /* The offset of the asserts list within the section in dwords */;
2386 	u8 list_element_dword_size /* The size of an assert list element in dwords */;
2387 	u8 list_num_elements /* The number of elements in the asserts list */;
2388 	u8 list_next_index_dword_offset /* The offset of the next list index field within the section in dwords */;
2389 };
2390 
2391 struct fw_ver_num
2392 {
2393 	u8 major /* Firmware major version number */;
2394 	u8 minor /* Firmware minor version number */;
2395 	u8 rev /* Firmware revision version number */;
2396 	u8 eng /* Firmware engineering version number (for bootleg versions) */;
2397 };
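
/*
 * Illustrative sketch, not part of the original HSI definitions: one common
 * way to consume fw_ver_num is to fold the four version bytes into a single
 * value so firmware versions can be compared numerically. The helper name is
 * hypothetical.
 */
static inline u32
example_fw_ver_to_u32(const struct fw_ver_num *ver)
{
	return ((u32)ver->major << 24) | ((u32)ver->minor << 16) |
	    ((u32)ver->rev << 8) | (u32)ver->eng;
}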
2398 
2399 struct fw_ver_info
2400 {
2401 	__le16 tools_ver /* Tools version number */;
2402 	u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
2403 	u8 reserved1;
2404 	struct fw_ver_num num /* FW version number */;
2405 	__le32 timestamp /* FW timestamp in Unix time (seconds since 1970) */;
2406 	__le32 reserved2;
2407 };
2408 
2409 struct fw_info
2410 {
2411 	struct fw_ver_info ver /* FW version information */;
2412 	struct fw_asserts_ram_section fw_asserts_section /* Info regarding the FW asserts section in the Storm RAM */;
2413 };
2414 
2415 struct fw_info_location
2416 {
2417 	__le32 grc_addr /* GRC address where the fw_info struct is located. */;
2418 	__le32 size /* Size of the fw_info structure (located at grc_addr). */;
2419 };
2420 
2421 /*
2422  * IGU cleanup command
2423  */
2424 struct igu_cleanup
2425 {
2426 	__le32 sb_id_and_flags;
2427 #define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
2428 #define IGU_CLEANUP_RESERVED0_SHIFT    0
2429 #define IGU_CLEANUP_CLEANUP_SET_MASK   0x1 /* 0 - cleanup clear, 1 - cleanup set */
2430 #define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
2431 #define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
2432 #define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
2433 #define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1 /* must always be set (use enum command_type_bit) */
2434 #define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
2435 	__le32 reserved1;
2436 };
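
/*
 * Illustrative sketch, not part of the original HSI definitions: packs the
 * sb_id_and_flags dword of an IGU cleanup command from the MASK/SHIFT pairs
 * above. The helper name is hypothetical and the result is shown in host
 * byte order; real code converts it to little-endian before handing it to
 * the device.
 */
static inline u32
example_igu_cleanup_flags(u8 cleanup_type)
{
	u32 flags = 0;

	/* Request a "set" cleanup rather than a clear. */
	flags |= 1U << IGU_CLEANUP_CLEANUP_SET_SHIFT;
	/* 3-bit cleanup type. */
	flags |= ((u32)cleanup_type & IGU_CLEANUP_CLEANUP_TYPE_MASK) <<
	    IGU_CLEANUP_CLEANUP_TYPE_SHIFT;
	/* The command type bit must always be set. */
	flags |= 1U << IGU_CLEANUP_COMMAND_TYPE_SHIFT;

	return flags;
}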
2437 
2438 /*
2439  * IGU firmware driver command
2440  */
2441 union igu_command
2442 {
2443 	struct igu_prod_cons_update prod_cons_update;
2444 	struct igu_cleanup cleanup;
2445 };
2446 
2447 /*
2448  * IGU firmware driver command
2449  */
2450 struct igu_command_reg_ctrl
2451 {
2452 	__le16 opaque_fid;
2453 	__le16 igu_command_reg_ctrl_fields;
2454 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
2455 #define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
2456 #define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
2457 #define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
2458 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1 /* command type: 0 - read, 1 - write */
2459 #define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
2460 };
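
/*
 * Illustrative sketch, not part of the original HSI definitions: packs the
 * 16-bit control word of an IGU register-control command. The helper name
 * and parameters are hypothetical, and the value is built in host byte
 * order; real code converts it to little-endian before use.
 */
static inline u16
example_igu_ctrl_fields(u16 pxp_bar_addr, u8 is_write)
{
	u16 fields = 0;

	fields |= (pxp_bar_addr & IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK) <<
	    IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT;
	/* 0 - read, 1 - write. */
	fields |= (u16)((is_write & IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK) <<
	    IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT);

	return fields;
}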
2461 
2462 /*
2463  * IGU mapping line structure
2464  */
2465 struct igu_mapping_line
2466 {
2467 	__le32 igu_mapping_line_fields;
2468 #define IGU_MAPPING_LINE_VALID_MASK            0x1
2469 #define IGU_MAPPING_LINE_VALID_SHIFT           0
2470 #define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
2471 #define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
2472 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF /* In BB: VFs 0-120, PFs 0-7; in K2: VFs 0-191, PFs 0-15 */
2473 #define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
2474 #define IGU_MAPPING_LINE_PF_VALID_MASK         0x1 /* PF-1, VF-0 */
2475 #define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
2476 #define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
2477 #define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
2478 #define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
2479 #define IGU_MAPPING_LINE_RESERVED_SHIFT        24
2480 };
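
/*
 * Illustrative sketch, not part of the original HSI definitions: extracting
 * a field from an IGU mapping line is the mirror of packing - shift first,
 * then mask to the field width. The helper name is hypothetical and assumes
 * the dword has already been converted to host byte order.
 */
static inline u8
example_igu_line_vector(u32 line_fields)
{
	return (line_fields >> IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT) &
	    IGU_MAPPING_LINE_VECTOR_NUMBER_MASK;
}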
2481 
2482 /*
2483  * IGU MSIX line structure
2484  */
2485 struct igu_msix_vector
2486 {
2487 	struct regpair address;
2488 	__le32 data;
2489 	__le32 msix_vector_fields;
2490 #define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
2491 #define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
2492 #define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
2493 #define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
2494 #define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
2495 #define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
2496 #define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
2497 #define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
2498 };
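
/*
 * Illustrative sketch, not part of the original HSI definitions: sets the
 * per-vector mask bit in msix_vector_fields. The helper name is hypothetical
 * and the dword is treated as host byte order for brevity.
 */
static inline u32
example_msix_fields_masked(u32 msix_fields)
{
	return msix_fields |
	    (IGU_MSIX_VECTOR_MASK_BIT_MASK << IGU_MSIX_VECTOR_MASK_BIT_SHIFT);
}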
2499 
2500 /*
2501  * per encapsulation type enabling flags
2502  */
2503 struct prs_reg_encapsulation_type_en
2504 {
2505 	u8 flags;
2506 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1 /* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
2507 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
2508 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1 /* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
2509 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
2510 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1 /* Enable bit for VXLAN encapsulation. */
2511 #define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
2512 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1 /* Enable bit for T-Tag encapsulation. */
2513 #define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
2514 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1 /* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
2515 #define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
2516 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1 /* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
2517 #define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
2518 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
2519 #define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
2520 };
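
/*
 * Illustrative sketch, not part of the original HSI definitions: builds the
 * parser encapsulation-enable flags byte with VXLAN and Ethernet-over-GENEVE
 * turned on. The helper name is hypothetical.
 */
static inline u8
example_prs_encap_vxlan_geneve(void)
{
	u8 flags = 0;

	flags |= 1 << PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	flags |= 1 << PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;

	return flags;
}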
2521 
2522 enum pxp_tph_st_hint
2523 {
2524 	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
2525 	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
2526 	TPH_ST_HINT_TARGET /* Device Write and Host Read, or Host Write and Device Read */,
2527 	TPH_ST_HINT_TARGET_PRIO /* Device Write and Host Read, or Host Write and Device Read - with temporal reuse */,
2528 	MAX_PXP_TPH_ST_HINT
2529 };
2530 
2531 /*
2532  * QM hardware structure of enable bypass credit mask
2533  */
2534 struct qm_rf_bypass_mask
2535 {
2536 	u8 flags;
2537 #define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
2538 #define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
2539 #define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
2540 #define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
2541 #define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
2542 #define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
2543 #define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
2544 #define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
2545 #define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
2546 #define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
2547 #define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
2548 #define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
2549 #define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
2550 #define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
2551 #define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
2552 #define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
2553 };
2554 
2555 /*
2556  * QM hardware structure of opportunistic credit mask
2557  */
2558 struct qm_rf_opportunistic_mask
2559 {
2560 	__le16 flags;
2561 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK     0x1
2562 #define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT    0
2563 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK     0x1
2564 #define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT    1
2565 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK       0x1
2566 #define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT      2
2567 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK       0x1
2568 #define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT      3
2569 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK        0x1
2570 #define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT       4
2571 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK     0x1
2572 #define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT    5
2573 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK     0x1
2574 #define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT    6
2575 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK   0x1
2576 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT  7
2577 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
2578 #define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
2579 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK   0x7F
2580 #define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT  9
2581 };
2582 
2583 /*
2584  * E4 QM hardware structure of QM map memory
2585  */
2586 struct qm_rf_pq_map_e4
2587 {
2588 	__le32 reg;
2589 #define QM_RF_PQ_MAP_E4_PQ_VALID_MASK          0x1 /* PQ active */
2590 #define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT         0
2591 #define QM_RF_PQ_MAP_E4_RL_ID_MASK             0xFF /* RL ID */
2592 #define QM_RF_PQ_MAP_E4_RL_ID_SHIFT            1
2593 #define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK          0x1FF /* the first PQ associated with the VPORT and VOQ of this PQ */
2594 #define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT         9
2595 #define QM_RF_PQ_MAP_E4_VOQ_MASK               0x1F /* VOQ */
2596 #define QM_RF_PQ_MAP_E4_VOQ_SHIFT              18
2597 #define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
2598 #define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
2599 #define QM_RF_PQ_MAP_E4_RL_VALID_MASK          0x1 /* RL active */
2600 #define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT         25
2601 #define QM_RF_PQ_MAP_E4_RESERVED_MASK          0x3F
2602 #define QM_RF_PQ_MAP_E4_RESERVED_SHIFT         26
2603 };
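
/*
 * Illustrative sketch, not part of the original HSI definitions: packs one
 * E4 QM PQ-map register from its component fields. The helper name and
 * parameters are hypothetical; the result is in host byte order and would be
 * converted to little-endian by real code.
 */
static inline u32
example_qm_pq_map_e4(u16 vp_pq_id, u8 rl_id, u8 rl_valid, u8 voq, u8 wrr_group)
{
	u32 reg = 0;

	/* Mark the PQ active. */
	reg |= 1U << QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT;
	reg |= ((u32)rl_id & QM_RF_PQ_MAP_E4_RL_ID_MASK) <<
	    QM_RF_PQ_MAP_E4_RL_ID_SHIFT;
	reg |= ((u32)vp_pq_id & QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK) <<
	    QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT;
	reg |= ((u32)voq & QM_RF_PQ_MAP_E4_VOQ_MASK) <<
	    QM_RF_PQ_MAP_E4_VOQ_SHIFT;
	reg |= ((u32)wrr_group & QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK) <<
	    QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT;
	reg |= ((u32)rl_valid & QM_RF_PQ_MAP_E4_RL_VALID_MASK) <<
	    QM_RF_PQ_MAP_E4_RL_VALID_SHIFT;

	return reg;
}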
2604 
2605 /*
2606  * E5 QM hardware structure of QM map memory
2607  */
2608 struct qm_rf_pq_map_e5
2609 {
2610 	__le32 reg;
2611 #define QM_RF_PQ_MAP_E5_PQ_VALID_MASK          0x1 /* PQ active */
2612 #define QM_RF_PQ_MAP_E5_PQ_VALID_SHIFT         0
2613 #define QM_RF_PQ_MAP_E5_RL_ID_MASK             0xFF /* RL ID */
2614 #define QM_RF_PQ_MAP_E5_RL_ID_SHIFT            1
2615 #define QM_RF_PQ_MAP_E5_VP_PQ_ID_MASK          0x1FF /* the first PQ associated with the VPORT and VOQ of this PQ */
2616 #define QM_RF_PQ_MAP_E5_VP_PQ_ID_SHIFT         9
2617 #define QM_RF_PQ_MAP_E5_VOQ_MASK               0x3F /* VOQ */
2618 #define QM_RF_PQ_MAP_E5_VOQ_SHIFT              18
2619 #define QM_RF_PQ_MAP_E5_WRR_WEIGHT_GROUP_MASK  0x3 /* WRR weight */
2620 #define QM_RF_PQ_MAP_E5_WRR_WEIGHT_GROUP_SHIFT 24
2621 #define QM_RF_PQ_MAP_E5_RL_VALID_MASK          0x1 /* RL active */
2622 #define QM_RF_PQ_MAP_E5_RL_VALID_SHIFT         26
2623 #define QM_RF_PQ_MAP_E5_RESERVED_MASK          0x1F
2624 #define QM_RF_PQ_MAP_E5_RESERVED_SHIFT         27
2625 };
2626 
2627 /*
2628  * Completion params for aggregated interrupt completion
2629  */
2630 struct sdm_agg_int_comp_params
2631 {
2632 	__le16 params;
2633 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK      0x3F /* the aggregated interrupt index, 0-31 */
2634 #define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT     0
2635 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK  0x1 /* 1 - set a bit in the aggregated vector, 0 - don't set */
2636 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
2637 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK     0x1FF /* Bit number in the aggregated vector, 0-279 (TBD) */
2638 #define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT    7
2639 };
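
/*
 * Illustrative sketch, not part of the original HSI definitions: packs the
 * 16-bit aggregated-interrupt completion parameters. The helper name is
 * hypothetical and the result is in host byte order.
 */
static inline u16
example_sdm_agg_int_params(u8 agg_int_index, u16 agg_vector_bit)
{
	u16 params = 0;

	params |= (u16)(agg_int_index & SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK) <<
	    SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	/* Set a bit in the aggregated vector on completion... */
	params |= (u16)(1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT);
	/* ...and select which bit. */
	params |= (u16)((agg_vector_bit & SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK) <<
	    SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT);

	return params;
}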
2640 
2641 /*
2642  * SDM operation gen command (generate aggregative interrupt)
2643  */
2644 struct sdm_op_gen
2645 {
2646 	__le32 command;
2647 #define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF /* completion parameters, bits 0-15 */
2648 #define SDM_OP_GEN_COMP_PARAM_SHIFT 0
2649 #define SDM_OP_GEN_COMP_TYPE_MASK   0xF /* completion type, bits 16-19 */
2650 #define SDM_OP_GEN_COMP_TYPE_SHIFT  16
2651 #define SDM_OP_GEN_RESERVED_MASK    0xFFF /* reserved, bits 20-31 */
2652 #define SDM_OP_GEN_RESERVED_SHIFT   20
2653 };
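
/*
 * Illustrative sketch, not part of the original HSI definitions: the low 16
 * bits of the SDM operation-gen command carry the completion parameters (the
 * sdm_agg_int_comp_params layout above is one producer of those bits) and
 * the next 4 bits carry the completion type. The helper name and arguments
 * are hypothetical; the result is in host byte order.
 */
static inline u32
example_sdm_op_gen_command(u16 comp_params, u8 comp_type)
{
	u32 command = 0;

	command |= ((u32)comp_params & SDM_OP_GEN_COMP_PARAM_MASK) <<
	    SDM_OP_GEN_COMP_PARAM_SHIFT;
	command |= ((u32)comp_type & SDM_OP_GEN_COMP_TYPE_MASK) <<
	    SDM_OP_GEN_COMP_TYPE_SHIFT;

	return command;
}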
2654 
2655 #endif /* __ECORE_HSI_COMMON__ */
2656