/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   (1) Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 *   (2) Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 *   (3) The name of the author may not be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* File aq_hw_llh.h: Declarations of bitfield and register access functions for
 * Atlantic registers.
 */

#ifndef HW_ATL_LLH_H
#define HW_ATL_LLH_H

#include "aq_common.h"

struct aq_hw;

/* global */


void reg_glb_fw_image_id1_set(struct aq_hw* hw, u32 value);
u32 reg_glb_fw_image_id1_get(struct aq_hw* hw);

/* set global microprocessor semaphore */
void reg_glb_cpu_sem_set(struct aq_hw *aq_hw, u32 sem_value, u32 sem_index);

/* get global microprocessor semaphore */
u32 reg_glb_cpu_sem_get(struct aq_hw *aq_hw, u32 sem_index);
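
/*
 * Usage sketch (illustrative only; the acquire/release semantics are an
 * assumption, not something this header defines): the two semaphore
 * accessors above are normally paired to serialize access to a resource
 * shared with the firmware.
 *
 *	while (reg_glb_cpu_sem_get(aq_hw, sem_index) == 0U)
 *		;	// spin until the semaphore is granted
 *	// ... touch the shared resource ...
 *	reg_glb_cpu_sem_set(aq_hw, 1U, sem_index);	// release
 */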

/*
*  \brief Get Global Standard Control 1
*  \return GlobalStandardControl1
*/
u32 reg_glb_standard_ctl1_get(struct aq_hw* hw);
/*
*  \brief Set Global Standard Control 1
*/
void reg_glb_standard_ctl1_set(struct aq_hw* hw, u32 glb_standard_ctl1);

/*
*  \brief Set Global Control 2
*/
void reg_global_ctl2_set(struct aq_hw* hw, u32 global_ctl2);
/*
*  \brief Get Global Control 2
*  \return GlobalControl2
*/
u32 reg_global_ctl2_get(struct aq_hw* hw);


/*
*  \brief Set Global Daisy Chain Status 1
*/
void reg_glb_daisy_chain_status1_set(struct aq_hw* hw, u32 glb_daisy_chain_status1);
/*
*  \brief Get Global Daisy Chain Status 1
*  \return glb_daisy_chain_status1
*/
u32 reg_glb_daisy_chain_status1_get(struct aq_hw* hw);


/*
*  \brief Set Global General Provisioning 9
*/
void reg_glb_general_provisioning9_set(struct aq_hw* hw, u32 value);
/*
*  \brief Get Global General Provisioning 9
*  \return GlobalGeneralProvisioning9
*/
u32 reg_glb_general_provisioning9_get(struct aq_hw* hw);

/*
*  \brief Set Global NVR Provisioning 2
*/
void reg_glb_nvr_provisioning2_set(struct aq_hw* hw, u32 value);
/*
*  \brief Get Global NVR Provisioning 2
*  \return GlobalNvrProvisioning2
*/
u32 reg_glb_nvr_provisioning2_get(struct aq_hw* hw);

/*
*  \brief Set Global NVR Interface 1
*/
void reg_glb_nvr_interface1_set(struct aq_hw* hw, u32 value);
/*
*  \brief Get Global NVR Interface 1
*  \return GlobalNvrInterface1
*/
u32 reg_glb_nvr_interface1_get(struct aq_hw* hw);


/* set global register reset disable */
void glb_glb_reg_res_dis_set(struct aq_hw *aq_hw, u32 glb_reg_res_dis);

/* set soft reset */
void glb_soft_res_set(struct aq_hw *aq_hw, u32 soft_res);

/* get soft reset */
u32 glb_soft_res_get(struct aq_hw *aq_hw);

/* stats */

u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw *aq_hw);

/* get rx dma good octet counter lsw */
u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw);

/* get rx dma good packet counter lsw */
u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw);

/* get tx dma good octet counter lsw */
u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw);

/* get tx dma good packet counter lsw */
u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw);

/* get rx dma good octet counter msw */
u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw);

/* get rx dma good packet counter msw */
u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw);

/* get tx dma good octet counter msw */
u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw);

/* get tx dma good packet counter msw */
u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw);
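
/*
 * Illustrative sketch: the DMA counters above are exposed as 32-bit
 * LSW/MSW halves, so the caller combines them itself.  Re-reading the
 * MSW guards against the LSW wrapping between the two reads (assumed
 * behaviour, shown here as an example only).
 *
 *	u32 msw = stats_rx_dma_good_octet_countermsw_get(aq_hw);
 *	u32 lsw = stats_rx_dma_good_octet_counterlsw_get(aq_hw);
 *	while (msw != stats_rx_dma_good_octet_countermsw_get(aq_hw)) {
 *		msw = stats_rx_dma_good_octet_countermsw_get(aq_hw);
 *		lsw = stats_rx_dma_good_octet_counterlsw_get(aq_hw);
 *	}
 *	u64 rx_octets = ((u64)msw << 32) | lsw;
 */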

/* get rx lro coalesced packet count lsw */
u32 stats_rx_lro_coalesced_pkt_count0_get(struct aq_hw *aq_hw);

/* get msm rx errors counter register */
u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw *aq_hw);

/* get msm rx unicast frames counter register */
u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw *aq_hw);

/* get msm rx multicast frames counter register */
u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw *aq_hw);

/* get msm rx broadcast frames counter register */
u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw *aq_hw);

/* get msm rx broadcast octets counter register 1 */
u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw *aq_hw);

/* get msm rx unicast octets counter register 0 */
u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw *aq_hw);

/* get rx dma statistics counter 7 */
u32 reg_rx_dma_stat_counter7get(struct aq_hw *aq_hw);

/* get msm tx errors counter register */
u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw *aq_hw);

/* get msm tx unicast frames counter register */
u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw *aq_hw);

/* get msm tx multicast frames counter register */
u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw *aq_hw);

/* get msm tx broadcast frames counter register */
u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw *aq_hw);

/* get msm tx multicast octets counter register 1 */
u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw *aq_hw);

/* get msm tx broadcast octets counter register 1 */
u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw *aq_hw);

/* get msm tx unicast octets counter register 0 */
u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw *aq_hw);

/* get global mif identification */
u32 reg_glb_mif_id_get(struct aq_hw *aq_hw);

/** \brief Set Tx Register Reset Disable
*   \param txRegisterResetDisable 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers
*   \note Default value: 0x1
*   \note PORT="pif_mpi_reg_reset_dsbl_i"
*/
void mpi_tx_reg_res_dis_set(struct aq_hw* hw, u32 mpi_tx_reg_res_dis);
/** \brief Get Tx Register Reset Disable
*   \return 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers
*   \note Default value: 0x1
*   \note PORT="pif_mpi_reg_reset_dsbl_i"
*/
u32 mpi_tx_reg_res_dis_get(struct aq_hw* hw);


/* interrupt */

/* set interrupt auto mask lsw */
void itr_irq_auto_masklsw_set(struct aq_hw *aq_hw, u32 irq_auto_masklsw);

/* set interrupt mapping enable rx */
void itr_irq_map_en_rx_set(struct aq_hw *aq_hw, u32 irq_map_en_rx, u32 rx);

/* set interrupt mapping enable tx */
void itr_irq_map_en_tx_set(struct aq_hw *aq_hw, u32 irq_map_en_tx, u32 tx);

/* set interrupt mapping rx */
void itr_irq_map_rx_set(struct aq_hw *aq_hw, u32 irq_map_rx, u32 rx);

/* set interrupt mapping tx */
void itr_irq_map_tx_set(struct aq_hw *aq_hw, u32 irq_map_tx, u32 tx);

/* set interrupt mask clear lsw */
void itr_irq_msk_clearlsw_set(struct aq_hw *aq_hw, u32 irq_msk_clearlsw);

/* set interrupt mask set lsw */
void itr_irq_msk_setlsw_set(struct aq_hw *aq_hw, u32 irq_msk_setlsw);

/* set interrupt register reset disable */
void itr_irq_reg_res_dis_set(struct aq_hw *aq_hw, u32 irq_reg_res_dis);

/* set interrupt status clear lsw */
void itr_irq_status_clearlsw_set(struct aq_hw *aq_hw,
                 u32 irq_status_clearlsw);

/* get interrupt status lsw */
u32 itr_irq_statuslsw_get(struct aq_hw *aq_hw);

/* get reset interrupt */
u32 itr_res_irq_get(struct aq_hw *aq_hw);

/* set reset interrupt */
void itr_res_irq_set(struct aq_hw *aq_hw, u32 res_irq);

void itr_irq_mode_set(struct aq_hw *aq_hw, u32 irq_mode);

/* Set Link Interrupt Mapping Enable */
void itr_link_int_map_en_set(struct aq_hw *aq_hw, u32 link_int_en_map_en);

/* Get Link Interrupt Mapping Enable */
u32 itr_link_int_map_en_get(struct aq_hw *aq_hw);

/* Set Link Interrupt Mapping */
void itr_link_int_map_set(struct aq_hw *aq_hw, u32 link_int_map);

/* Get Link Interrupt Mapping */
u32 itr_link_int_map_get(struct aq_hw *aq_hw);


/* Set MIF Interrupt Mapping Enable */
void itr_mif_int_map_en_set(struct aq_hw *aq_hw, u32 mif_int_map_en, u32 mif);

/* Get MIF Interrupt Mapping Enable */
u32 itr_mif_int_map_en_get(struct aq_hw *aq_hw, u32 mif);

/* Set MIF Interrupt Mapping */
void itr_mif_int_map_set(struct aq_hw *aq_hw, u32 mif_int_map, u32 mif);

/* Get MIF Interrupt Mapping */
u32 itr_mif_int_map_get(struct aq_hw *aq_hw, u32 mif);

void itr_irq_status_cor_en_set(struct aq_hw *aq_hw, u32 irq_status_cor_enable);

void itr_irq_auto_mask_clr_en_set(struct aq_hw *aq_hw, u32 irq_auto_mask_clr_en);
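
/*
 * Illustrative sketch (the values are assumptions, not defined here):
 * mapping RX/TX queue 0 to interrupt vector 0 and then enabling that
 * vector, assuming the mask-set register takes one bit per vector.
 *
 *	itr_irq_map_rx_set(aq_hw, 0U, 0U);	// queue 0 -> vector 0
 *	itr_irq_map_en_rx_set(aq_hw, 1U, 0U);
 *	itr_irq_map_tx_set(aq_hw, 0U, 0U);
 *	itr_irq_map_en_tx_set(aq_hw, 1U, 0U);
 *	itr_irq_msk_setlsw_set(aq_hw, 1U << 0);	// unmask vector 0
 */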

/* rdm */

/* set cpu id */
void rdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca);

/* set rx dca enable */
void rdm_rx_dca_en_set(struct aq_hw *aq_hw, u32 rx_dca_en);

/* set rx dca mode */
void rdm_rx_dca_mode_set(struct aq_hw *aq_hw, u32 rx_dca_mode);

/* set rx descriptor data buffer size */
void rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw,
                    u32 rx_desc_data_buff_size,
                    u32 descriptor);

/* set rx descriptor dca enable */
void rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, u32 rx_desc_dca_en,
                u32 dca);

/* set rx descriptor enable */
void rdm_rx_desc_en_set(struct aq_hw *aq_hw, u32 rx_desc_en,
            u32 descriptor);

/* set rx descriptor header splitting */
void rdm_rx_desc_head_splitting_set(struct aq_hw *aq_hw,
                    u32 rx_desc_head_splitting,
                    u32 descriptor);

/* get rx descriptor head pointer */
u32 rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor);

/* set rx descriptor length */
void rdm_rx_desc_len_set(struct aq_hw *aq_hw, u32 rx_desc_len,
             u32 descriptor);

/* set rx descriptor write-back interrupt enable */
void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
                  u32 rx_desc_wr_wb_irq_en);

/* set rx header dca enable */
void rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, u32 rx_head_dca_en,
                u32 dca);

/* set rx payload dca enable */
void rdm_rx_pld_dca_en_set(struct aq_hw *aq_hw, u32 rx_pld_dca_en, u32 dca);

/* set rx descriptor header buffer size */
void rdm_rx_desc_head_buff_size_set(struct aq_hw *aq_hw,
                    u32 rx_desc_head_buff_size,
                    u32 descriptor);

/* set rx descriptor reset */
void rdm_rx_desc_res_set(struct aq_hw *aq_hw, u32 rx_desc_res,
             u32 descriptor);

/* Set RDM Interrupt Moderation Enable */
void rdm_rdm_intr_moder_en_set(struct aq_hw *aq_hw, u32 rdm_intr_moder_en);

/* reg */

/* set general interrupt mapping register */
void reg_gen_irq_map_set(struct aq_hw *aq_hw, u32 gen_intr_map, u32 regidx);

/* get general interrupt status register */
u32 reg_gen_irq_status_get(struct aq_hw *aq_hw);

/* set interrupt global control register */
void reg_irq_glb_ctl_set(struct aq_hw *aq_hw, u32 intr_glb_ctl);

/* set interrupt throttle register */
void reg_irq_thr_set(struct aq_hw *aq_hw, u32 intr_thr, u32 throttle);

/* set rx dma descriptor base address lsw */
void reg_rx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
                    u32 rx_dma_desc_base_addrlsw,
                    u32 descriptor);

/* set rx dma descriptor base address msw */
void reg_rx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
                    u32 rx_dma_desc_base_addrmsw,
                    u32 descriptor);

/* get rx dma descriptor status register */
u32 reg_rx_dma_desc_status_get(struct aq_hw *aq_hw, u32 descriptor);

/* set rx dma descriptor tail pointer register */
void reg_rx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
                  u32 rx_dma_desc_tail_ptr,
                  u32 descriptor);
/* get rx dma descriptor tail pointer register */
u32 reg_rx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor);
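
/*
 * Illustrative sketch: bringing up one RX descriptor ring with the
 * helpers above and in the rdm section.  The variables and the units of
 * the length field are placeholders for the example only.
 *
 *	reg_rx_dma_desc_base_addresslswset(aq_hw, ring_pa_lo, ring);
 *	reg_rx_dma_desc_base_addressmswset(aq_hw, ring_pa_hi, ring);
 *	rdm_rx_desc_len_set(aq_hw, ring_len, ring);
 *	rdm_rx_desc_en_set(aq_hw, 1U, ring);
 *	reg_rx_dma_desc_tail_ptr_set(aq_hw, tail, ring);
 */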

/* set rx filter multicast filter mask register */
void reg_rx_flr_mcst_flr_msk_set(struct aq_hw *aq_hw,
                 u32 rx_flr_mcst_flr_msk);

/* set rx filter multicast filter register */
void reg_rx_flr_mcst_flr_set(struct aq_hw *aq_hw, u32 rx_flr_mcst_flr,
                 u32 filter);

/* set rx filter rss control register 1 */
void reg_rx_flr_rss_control1set(struct aq_hw *aq_hw,
                u32 rx_flr_rss_control1);

/* Set RX Filter Control Register 2 */
void reg_rx_flr_control2_set(struct aq_hw *aq_hw, u32 rx_flr_control2);

/* Set RX Interrupt Moderation Control Register */
void reg_rx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
                u32 rx_intr_moderation_ctl,
                u32 queue);

/* set tx dma debug control */
void reg_tx_dma_debug_ctl_set(struct aq_hw *aq_hw, u32 tx_dma_debug_ctl);

/* set tx dma descriptor base address lsw */
void reg_tx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
                    u32 tx_dma_desc_base_addrlsw,
                    u32 descriptor);

/* set tx dma descriptor base address msw */
void reg_tx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
                    u32 tx_dma_desc_base_addrmsw,
                    u32 descriptor);

/* set tx dma descriptor tail pointer register */
void reg_tx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
                  u32 tx_dma_desc_tail_ptr,
                  u32 descriptor);

/* get tx dma descriptor tail pointer register */
u32 reg_tx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor);

/* Set TX Interrupt Moderation Control Register */
void reg_tx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
                u32 tx_intr_moderation_ctl,
                u32 queue);

/* get global microprocessor scratch pad */
u32 reg_glb_cpu_scratch_scp_get(struct aq_hw *hw, u32 glb_cpu_scratch_scp_idx);
/* set global microprocessor scratch pad */
void reg_glb_cpu_scratch_scp_set(struct aq_hw *aq_hw,
    u32 glb_cpu_scratch_scp, u32 scratch_scp);

/* get global microprocessor no reset scratch pad */
u32 reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw* hw, u32 index);
/* set global microprocessor no reset scratch pad */
void reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* aq_hw, u32 value,
    u32 index);

/* rpb */

/* set dma system loopback */
void rpb_dma_sys_lbk_set(struct aq_hw *aq_hw, u32 dma_sys_lbk);

/* set rx traffic class mode */
void rpb_rpf_rx_traf_class_mode_set(struct aq_hw *aq_hw,
                    u32 rx_traf_class_mode);

/* set rx buffer enable */
void rpb_rx_buff_en_set(struct aq_hw *aq_hw, u32 rx_buff_en);

/* set rx buffer high threshold (per tc) */
void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
                     u32 rx_buff_hi_threshold_per_tc,
                     u32 buffer);

/* set rx buffer low threshold (per tc) */
void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
                     u32 rx_buff_lo_threshold_per_tc,
                     u32 buffer);

/* set rx flow control mode */
void rpb_rx_flow_ctl_mode_set(struct aq_hw *aq_hw, u32 rx_flow_ctl_mode);

/* set rx packet buffer size (per tc) */
void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
                     u32 rx_pkt_buff_size_per_tc,
                     u32 buffer);

/* set rx xoff enable (per tc) */
void rpb_rx_xoff_en_per_tc_set(struct aq_hw *aq_hw, u32 rx_xoff_en_per_tc,
                   u32 buffer);

/* rpf */

/* set l2 broadcast count threshold */
void rpfl2broadcast_count_threshold_set(struct aq_hw *aq_hw,
                    u32 l2broadcast_count_threshold);

/* set l2 broadcast enable */
void rpfl2broadcast_en_set(struct aq_hw *aq_hw, u32 l2broadcast_en);

/* set l2 broadcast filter action */
void rpfl2broadcast_flr_act_set(struct aq_hw *aq_hw,
                u32 l2broadcast_flr_act);

/* set l2 multicast filter enable */
void rpfl2multicast_flr_en_set(struct aq_hw *aq_hw, u32 l2multicast_flr_en,
                   u32 filter);

/* set l2 promiscuous mode enable */
void rpfl2promiscuous_mode_en_set(struct aq_hw *aq_hw,
                  u32 l2promiscuous_mode_en);

/* set l2 unicast filter action */
void rpfl2unicast_flr_act_set(struct aq_hw *aq_hw, u32 l2unicast_flr_act,
                  u32 filter);

/* set l2 unicast filter enable */
void rpfl2_uc_flr_en_set(struct aq_hw *aq_hw, u32 l2unicast_flr_en,
             u32 filter);

/* set l2 unicast destination address lsw */
void rpfl2unicast_dest_addresslsw_set(struct aq_hw *aq_hw,
                      u32 l2unicast_dest_addresslsw,
                      u32 filter);

/* set l2 unicast destination address msw */
void rpfl2unicast_dest_addressmsw_set(struct aq_hw *aq_hw,
                      u32 l2unicast_dest_addressmsw,
                      u32 filter);

/* Set L2 Accept all Multicast packets */
void rpfl2_accept_all_mc_packets_set(struct aq_hw *aq_hw,
                     u32 l2_accept_all_mc_packets);

/* set user-priority tc mapping */
void rpf_rpb_user_priority_tc_map_set(struct aq_hw *aq_hw,
                      u32 user_priority_tc_map, u32 tc);

/* set rss key address */
void rpf_rss_key_addr_set(struct aq_hw *aq_hw, u32 rss_key_addr);

/* set rss key write data */
void rpf_rss_key_wr_data_set(struct aq_hw *aq_hw, u32 rss_key_wr_data);

/* get rss key read data */
u32 rpf_rss_key_rd_data_get(struct aq_hw *aq_hw);

/* get rss key write enable */
u32 rpf_rss_key_wr_en_get(struct aq_hw *aq_hw);

/* set rss key write enable */
void rpf_rss_key_wr_en_set(struct aq_hw *aq_hw, u32 rss_key_wr_en);

/* set rss redirection table address */
void rpf_rss_redir_tbl_addr_set(struct aq_hw *aq_hw,
                u32 rss_redir_tbl_addr);

/* set rss redirection table write data */
void rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw,
                   u32 rss_redir_tbl_wr_data);

/* get rss redirection write enable */
u32 rpf_rss_redir_wr_en_get(struct aq_hw *aq_hw);

/* set rss redirection write enable */
void rpf_rss_redir_wr_en_set(struct aq_hw *aq_hw, u32 rss_redir_wr_en);
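
/*
 * Illustrative sketch: the RSS key (and, analogously, the redirection
 * table) is programmed indirectly through data/address/write-enable
 * registers.  The sequence below - write data, select the address,
 * raise write-enable, wait for it to self-clear - is an assumed
 * example, not a contract stated by this header.
 *
 *	rpf_rss_key_wr_data_set(aq_hw, key_word);
 *	rpf_rss_key_addr_set(aq_hw, key_index);
 *	rpf_rss_key_wr_en_set(aq_hw, 1U);
 *	while (rpf_rss_key_wr_en_get(aq_hw) != 0U)
 *		;	// wait for the write to complete
 */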

/* set tpo to rpf system loopback */
void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw *aq_hw,
                u32 tpo_to_rpf_sys_lbk);

/* set vlan inner ethertype */
void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);

/* set vlan outer ethertype */
void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);

/* set vlan promiscuous mode enable */
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw,
				      u32 vlan_prom_mode_en);

/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw *aq_hw,
				      u32 vlan_untagged_act);

/* Set VLAN accept untagged packets */
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
						 u32 vlan_acc_untagged_packets);

/* Set VLAN filter enable */
void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en,
				u32 filter);

/* Set VLAN Filter Action */
void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
				 u32 filter);

/* Set VLAN ID Filter */
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr,
				u32 filter);

/* Set VLAN RX queue assignment enable */
void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq_en,
				u32 filter);

/* Set VLAN RX queue */
void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq,
				u32 filter);
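
/*
 * Illustrative sketch: accepting one VLAN ID through filter slot 0 and
 * steering it to RX queue 0.  The action value (1, assumed to mean
 * "deliver to host") is a placeholder; consult the register spec.
 *
 *	hw_atl_rpf_vlan_id_flr_set(aq_hw, vid, 0U);
 *	hw_atl_rpf_vlan_flr_act_set(aq_hw, 1U, 0U);
 *	hw_atl_rpf_vlan_rxq_flr_set(aq_hw, 0U, 0U);
 *	hw_atl_rpf_vlan_rxq_en_flr_set(aq_hw, 1U, 0U);
 *	hw_atl_rpf_vlan_flr_en_set(aq_hw, 1U, 0U);
 */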

/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en,
				u32 filter);

/* set ethertype user-priority enable */
void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
					  u32 etht_user_priority_en,
					  u32 filter);

/* set ethertype rx queue enable */
void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw,
				     u32 etht_rx_queue_en,
				     u32 filter);

/* set ethertype rx queue */
void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
				  u32 filter);

/* set ethertype user-priority */
void hw_atl_rpf_etht_user_priority_set(struct aq_hw *aq_hw,
				       u32 etht_user_priority,
				       u32 filter);

/* set ethertype management queue */
void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
				   u32 filter);

/* set ethertype filter action */
void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
				 u32 filter);

/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);

/* set L3/L4 filter enable */
void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3 IPv6 enable */
void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3 source address enable */
void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3 destination address enable */
void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 source port enable */
void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 destination port enable */
void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 protocol enable */
void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3 ARP filter enable */
void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3/L4 rx queue enable */
void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3/L4 management queue */
void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3/L4 filter action */
void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3/L4 rx queue */
void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 protocol value */
void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 source port */
void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 destination port */
void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
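
/*
 * Illustrative sketch: a minimal L3/L4 filter matching a TCP/UDP
 * destination port and steering it to a given RX queue.  The enable
 * and action values are assumptions for the example only.
 *
 *	hw_atl_rpf_l4_dpd_set(aq_hw, dst_port, flt);
 *	hw_atl_rpf_l4_dpf_en_set(aq_hw, 1U, flt);
 *	hw_atl_rpf_l3_l4_rxqf_set(aq_hw, rx_queue, flt);
 *	hw_atl_rpf_l3_l4_rxqf_en_set(aq_hw, 1U, flt);
 *	hw_atl_rpf_l3_l4_actf_set(aq_hw, 1U, flt);	// assumed "to host"
 *	hw_atl_rpf_l3_l4_enf_set(aq_hw, 1U, flt);
 */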

/* set vlan inner ethertype */
void rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);

/* set vlan outer ethertype */
void rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);

/* set vlan promiscuous mode enable */
void rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, u32 vlan_prom_mode_en);

/* Set VLAN untagged action */
void rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, u32 vlan_untagged_act);

/* Set VLAN accept untagged packets */
void rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
                      u32 vlan_accept_untagged_packets);

/* Set VLAN filter enable */
void rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en, u32 filter);

/* Set VLAN Filter Action */
void rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
              u32 filter);

/* Set VLAN ID Filter */
void rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr, u32 filter);

/* set ethertype filter enable */
void rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en, u32 filter);

/* set ethertype user-priority enable */
void rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
                   u32 etht_user_priority_en, u32 filter);

/* set ethertype rx queue enable */
void rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, u32 etht_rx_queue_en,
                  u32 filter);

/* set ethertype rx queue */
void rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
               u32 filter);

/* set ethertype user-priority */
void rpf_etht_user_priority_set(struct aq_hw *aq_hw, u32 etht_user_priority,
                u32 filter);

/* set ethertype management queue */
void rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
                u32 filter);

/* set ethertype filter action */
void rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
              u32 filter);

/* set ethertype filter */
void rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);

/* rpo */

/* set ipv4 header checksum offload enable */
void rpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
                       u32 ipv4header_crc_offload_en);

/* set rx descriptor vlan stripping */
void rpo_rx_desc_vlan_stripping_set(struct aq_hw *aq_hw,
                    u32 rx_desc_vlan_stripping,
                    u32 descriptor);

/* set tcp/udp checksum offload enable */
void rpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
                    u32 tcp_udp_crc_offload_en);

/* Set LRO Patch Optimization Enable. */
void rpo_lro_patch_optimization_en_set(struct aq_hw *aq_hw,
                       u32 lro_patch_optimization_en);

/* Set Large Receive Offload Enable */
void rpo_lro_en_set(struct aq_hw *aq_hw, u32 lro_en);

/* Set LRO Q Sessions Limit */
void rpo_lro_qsessions_lim_set(struct aq_hw *aq_hw, u32 lro_qsessions_lim);

/* Set LRO Total Descriptor Limit */
void rpo_lro_total_desc_lim_set(struct aq_hw *aq_hw, u32 lro_total_desc_lim);

/* Set LRO Min Payload of First Packet */
void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw *aq_hw,
                      u32 lro_min_pld_of_first_pkt);

/* Set LRO Packet Limit */
void rpo_lro_pkt_lim_set(struct aq_hw *aq_hw, u32 lro_packet_lim);

/* Set LRO Max Number of Descriptors */
void rpo_lro_max_num_of_descriptors_set(struct aq_hw *aq_hw,
                    u32 lro_max_desc_num, u32 lro);

/* Set LRO Time Base Divider */
void rpo_lro_time_base_divider_set(struct aq_hw *aq_hw,
                   u32 lro_time_base_divider);

/* Set LRO Inactive Interval */
void rpo_lro_inactive_interval_set(struct aq_hw *aq_hw,
                   u32 lro_inactive_interval);

/* Set LRO Max Coalescing Interval */
void rpo_lro_max_coalescing_interval_set(struct aq_hw *aq_hw,
                     u32 lro_max_coalescing_interval);

/* rx */

/* set rx register reset disable */
void rx_rx_reg_res_dis_set(struct aq_hw *aq_hw, u32 rx_reg_res_dis);

/* tdm */

/* set cpu id */
void tdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca);

/* set large send offload enable */
void tdm_large_send_offload_en_set(struct aq_hw *aq_hw,
                   u32 large_send_offload_en);

/* set tx descriptor enable */
void tdm_tx_desc_en_set(struct aq_hw *aq_hw, u32 tx_desc_en, u32 descriptor);

/* set tx dca enable */
void tdm_tx_dca_en_set(struct aq_hw *aq_hw, u32 tx_dca_en);

/* set tx dca mode */
void tdm_tx_dca_mode_set(struct aq_hw *aq_hw, u32 tx_dca_mode);

/* set tx descriptor dca enable */
void tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, u32 tx_desc_dca_en, u32 dca);

/* get tx descriptor head pointer */
u32 tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor);

/* set tx descriptor length */
void tdm_tx_desc_len_set(struct aq_hw *aq_hw, u32 tx_desc_len,
             u32 descriptor);

/* set tx descriptor write-back interrupt enable */
void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
                  u32 tx_desc_wr_wb_irq_en);

/* set tx descriptor write-back threshold */
void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw *aq_hw,
                     u32 tx_desc_wr_wb_threshold,
                     u32 descriptor);

/* Set TDM Interrupt Moderation Enable */
void tdm_tdm_intr_moder_en_set(struct aq_hw *aq_hw,
                   u32 tdm_irq_moderation_en);

/* thm */

/* set lso tcp flag of first packet */
void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw *aq_hw,
                       u32 lso_tcp_flag_of_first_pkt);

/* set lso tcp flag of last packet */
void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw *aq_hw,
                      u32 lso_tcp_flag_of_last_pkt);

/* set lso tcp flag of middle packet */
void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw *aq_hw,
                    u32 lso_tcp_flag_of_middle_pkt);

/* tpb */

/* set tx buffer enable */
void tpb_tx_buff_en_set(struct aq_hw *aq_hw, u32 tx_buff_en);

/* set tx tc mode */
void tpb_tx_tc_mode_set(struct aq_hw *aq_hw, u32 tc_mode);

/* set tx buffer high threshold (per tc) */
void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
                     u32 tx_buff_hi_threshold_per_tc,
                     u32 buffer);

/* set tx buffer low threshold (per tc) */
void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
                     u32 tx_buff_lo_threshold_per_tc,
                     u32 buffer);

/* set tx dma system loopback enable */
void tpb_tx_dma_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_dma_sys_lbk_en);

/* set tx packet buffer size (per tc) */
void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
                     u32 tx_pkt_buff_size_per_tc, u32 buffer);

/* toggle rdm rx dma descriptor cache init */
void rdm_rx_dma_desc_cache_init_tgl(struct aq_hw *aq_hw);

/* set tx path pad insert enable */
void tpb_tx_path_scp_ins_en_set(struct aq_hw *aq_hw, u32 tx_path_scp_ins_en);

/* tpo */

/* set ipv4 header checksum offload enable */
void tpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
                       u32 ipv4header_crc_offload_en);

/* set tcp/udp checksum offload enable */
void tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
                    u32 tcp_udp_crc_offload_en);

/* set tx pkt system loopback enable */
void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_pkt_sys_lbk_en);

/* tps */

/* set tx packet scheduler data arbitration mode */
void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw *aq_hw,
                       u32 tx_pkt_shed_data_arb_mode);

/* set tx packet scheduler descriptor rate current time reset */
void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw *aq_hw,
                         u32 curr_time_res);

/* set tx packet scheduler descriptor rate limit */
void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw *aq_hw,
                       u32 tx_pkt_shed_desc_rate_lim);

/* set tx packet scheduler descriptor tc arbitration mode */
void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw *aq_hw,
                      u32 tx_pkt_shed_desc_tc_arb_mode);

/* set tx packet scheduler descriptor tc max credit */
void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw *aq_hw,
                        u32 tx_pkt_shed_desc_tc_max_credit,
                        u32 tc);

/* set tx packet scheduler descriptor tc weight */
void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw *aq_hw,
                    u32 tx_pkt_shed_desc_tc_weight,
                    u32 tc);

/* set tx packet scheduler descriptor vm arbitration mode */
void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw *aq_hw,
                      u32 tx_pkt_shed_desc_vm_arb_mode);

/* set tx packet scheduler tc data max credit */
void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw *aq_hw,
                        u32 tx_pkt_shed_tc_data_max_credit,
                        u32 tc);

/* set tx packet scheduler tc data weight */
void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw *aq_hw,
                    u32 tx_pkt_shed_tc_data_weight,
                    u32 tc);

/* tx */

/* set tx register reset disable */
void tx_tx_reg_res_dis_set(struct aq_hw *aq_hw, u32 tx_reg_res_dis);

/* msm */

/* get register access status */
u32 msm_reg_access_status_get(struct aq_hw *aq_hw);

/* set register address for indirect address */
void msm_reg_addr_for_indirect_addr_set(struct aq_hw *aq_hw,
                    u32 reg_addr_for_indirect_addr);

/* set register read strobe */
void msm_reg_rd_strobe_set(struct aq_hw *aq_hw, u32 reg_rd_strobe);

/* get register read data */
u32 msm_reg_rd_data_get(struct aq_hw *aq_hw);

/* set register write data */
void msm_reg_wr_data_set(struct aq_hw *aq_hw, u32 reg_wr_data);

/* set register write strobe */
void msm_reg_wr_strobe_set(struct aq_hw *aq_hw, u32 reg_wr_strobe);
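
/*
 * Illustrative sketch: MSM registers are reached indirectly through the
 * address register, the read/write strobes and the access-status flag
 * above.  The assumed read sequence (set address, pulse the read
 * strobe, wait for the status to clear, fetch the data) is an example
 * only.
 *
 *	msm_reg_addr_for_indirect_addr_set(aq_hw, msm_addr);
 *	msm_reg_rd_strobe_set(aq_hw, 1U);
 *	while (msm_reg_access_status_get(aq_hw) != 0U)
 *		;	// wait for the access to finish
 *	u32 val = msm_reg_rd_data_get(aq_hw);
 */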

/* pci */

/* set pci register reset disable */
void pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, u32 pci_reg_res_dis);


/*
*  \brief Set MIF Power Gating Enable Control
*/
void reg_mif_power_gating_enable_control_set(struct aq_hw* hw, u32 value);
/*
*  \brief Get MIF Power Gating Enable Control
*  \return MifPowerGatingEnableControl
*/
u32 reg_mif_power_gating_enable_control_get(struct aq_hw* hw);

/* get mif up mailbox busy */
u32 mif_mcp_up_mailbox_busy_get(struct aq_hw *aq_hw);

/* set mif up mailbox execute operation */
void mif_mcp_up_mailbox_execute_operation_set(struct aq_hw* hw, u32 value);

/* get mif uP mailbox address */
u32 mif_mcp_up_mailbox_addr_get(struct aq_hw *aq_hw);
/* set mif uP mailbox address */
void mif_mcp_up_mailbox_addr_set(struct aq_hw *hw, u32 value);

/* get mif uP mailbox data */
u32 mif_mcp_up_mailbox_data_get(struct aq_hw *aq_hw);
1024*493d26c5SEd Maste 
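/*
 * Illustrative sketch only: reading one dword of firmware memory through the
 * uP mailbox accessors above.  The exact handshake (what the busy bit means,
 * and whether a global semaphore must be held around the sequence) is an
 * assumption, not a statement of the firmware interface; "hw", "fw_addr" and
 * "dword" are the caller's.
 *
 *     mif_mcp_up_mailbox_addr_set(hw, fw_addr);
 *     mif_mcp_up_mailbox_execute_operation_set(hw, 1U);
 *     while (mif_mcp_up_mailbox_busy_get(hw) != 0U)
 *         ;
 *     dword = mif_mcp_up_mailbox_data_get(hw);
 */
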
1025*493d26c5SEd Maste /* clear ipv4 filter destination address */
1026*493d26c5SEd Maste void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw *aq_hw, u8 location);
1027*493d26c5SEd Maste 
1028*493d26c5SEd Maste /* clear ipv4 filter source address */
1029*493d26c5SEd Maste void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw *aq_hw, u8 location);
1030*493d26c5SEd Maste 
1031*493d26c5SEd Maste /* clear command for filter l3-l4 */
1032*493d26c5SEd Maste void hw_atl_rpfl3l4_cmd_clear(struct aq_hw *aq_hw, u8 location);
1033*493d26c5SEd Maste 
1034*493d26c5SEd Maste /* clear ipv6 filter destination address */
1035*493d26c5SEd Maste void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw *aq_hw, u8 location);
1036*493d26c5SEd Maste 
1037*493d26c5SEd Maste /* clear ipv6 filter source address */
1038*493d26c5SEd Maste void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw *aq_hw, u8 location);
1039*493d26c5SEd Maste 
1040*493d26c5SEd Maste /* set ipv4 filter destination address */
1041*493d26c5SEd Maste void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw *aq_hw, u8 location,
1042*493d26c5SEd Maste 				       u32 ipv4_dest);
1043*493d26c5SEd Maste 
1044*493d26c5SEd Maste /* set ipv4 filter source address */
1045*493d26c5SEd Maste void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw *aq_hw, u8 location,
1046*493d26c5SEd Maste 				      u32 ipv4_src);
1047*493d26c5SEd Maste 
1048*493d26c5SEd Maste /* set command for filter l3-l4 */
1049*493d26c5SEd Maste void hw_atl_rpfl3l4_cmd_set(struct aq_hw *aq_hw, u8 location, u32 cmd);
1050*493d26c5SEd Maste 
1051*493d26c5SEd Maste /* set ipv6 filter source address */
1052*493d26c5SEd Maste void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw *aq_hw, u8 location,
1053*493d26c5SEd Maste 				      u32 *ipv6_src);
1054*493d26c5SEd Maste 
1055*493d26c5SEd Maste /* set ipv6 filter destination address */
1056*493d26c5SEd Maste void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw *aq_hw, u8 location,
1057*493d26c5SEd Maste 				       u32 *ipv6_dest);
1058*493d26c5SEd Maste 
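/*
 * Illustrative sketch only: programming an IPv4 L3/L4 filter at one location
 * with the accessors above.  Only the call order (clear, program addresses,
 * then write the command word) is being illustrated; the layout of "cmd" and
 * the valid range of "location" are assumptions, and "hw", "ipv4_src",
 * "ipv4_dst" and "cmd" are the caller's.
 *
 *     u8 location = 0;
 *
 *     hw_atl_rpfl3l4_cmd_clear(hw, location);
 *     hw_atl_rpfl3l4_ipv4_src_addr_set(hw, location, ipv4_src);
 *     hw_atl_rpfl3l4_ipv4_dest_addr_set(hw, location, ipv4_dst);
 *     hw_atl_rpfl3l4_cmd_set(hw, location, cmd);
 */
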
1059*493d26c5SEd Maste /* set vlan inner ethertype */
1060*493d26c5SEd Maste void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);
1061*493d26c5SEd Maste 
1062*493d26c5SEd Maste /* set vlan outer ethertype */
1063*493d26c5SEd Maste void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);
1064*493d26c5SEd Maste 
1065*493d26c5SEd Maste /* set vlan promiscuous mode enable */
1066*493d26c5SEd Maste void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw,
1067*493d26c5SEd Maste 				      u32 vlan_prom_mode_en);
1068*493d26c5SEd Maste 
1069*493d26c5SEd Maste /* Set VLAN untagged action */
1070*493d26c5SEd Maste void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw *aq_hw,
1071*493d26c5SEd Maste 				      u32 vlan_untagged_act);
1072*493d26c5SEd Maste 
1073*493d26c5SEd Maste /* Set VLAN accept untagged packets */
1074*493d26c5SEd Maste void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
1075*493d26c5SEd Maste 						 u32 vlan_acc_untagged_packets);
1076*493d26c5SEd Maste 
1077*493d26c5SEd Maste /* Set VLAN filter enable */
1078*493d26c5SEd Maste void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en,
1079*493d26c5SEd Maste 				u32 filter);
1080*493d26c5SEd Maste 
1081*493d26c5SEd Maste /* Set VLAN Filter Action */
1082*493d26c5SEd Maste void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
1083*493d26c5SEd Maste 				 u32 filter);
1084*493d26c5SEd Maste 
1085*493d26c5SEd Maste /* Set VLAN ID Filter */
1086*493d26c5SEd Maste void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr,
1087*493d26c5SEd Maste 				u32 filter);
1088*493d26c5SEd Maste 
1089*493d26c5SEd Maste /* Set VLAN RX queue assignment enable */
1090*493d26c5SEd Maste void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq_en,
1091*493d26c5SEd Maste 				u32 filter);
1092*493d26c5SEd Maste 
1093*493d26c5SEd Maste /* Set VLAN RX queue */
1094*493d26c5SEd Maste void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq,
1095*493d26c5SEd Maste 				u32 filter);
1096*493d26c5SEd Maste 
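/*
 * Illustrative sketch only: accepting one VLAN ID on a filter slot and
 * steering it to an RX queue with the accessors above.  The action encoding
 * (1U here) and the example VLAN ID, queue and slot numbers are placeholder
 * assumptions; "hw" is the caller's struct aq_hw pointer.
 *
 *     u32 filter = 0, vlan_id = 100, rxq = 2;
 *
 *     hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, filter);
 *     hw_atl_rpf_vlan_flr_act_set(hw, 1U, filter);
 *     hw_atl_rpf_vlan_rxq_flr_set(hw, rxq, filter);
 *     hw_atl_rpf_vlan_rxq_en_flr_set(hw, 1U, filter);
 *     hw_atl_rpf_vlan_flr_en_set(hw, 1U, filter);
 */
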
1097*493d26c5SEd Maste /* set ethertype filter enable */
1098*493d26c5SEd Maste void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en,
1099*493d26c5SEd Maste 				u32 filter);
1100*493d26c5SEd Maste 
1101*493d26c5SEd Maste /* set ethertype user-priority enable */
1102*493d26c5SEd Maste void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
1103*493d26c5SEd Maste 					  u32 etht_user_priority_en,
1104*493d26c5SEd Maste 					  u32 filter);
1105*493d26c5SEd Maste 
1106*493d26c5SEd Maste /* set ethertype rx queue enable */
1107*493d26c5SEd Maste void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw,
1108*493d26c5SEd Maste 				     u32 etht_rx_queue_en,
1109*493d26c5SEd Maste 				     u32 filter);
1110*493d26c5SEd Maste 
1111*493d26c5SEd Maste /* set ethertype rx queue */
1112*493d26c5SEd Maste void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
1113*493d26c5SEd Maste 				  u32 filter);
1114*493d26c5SEd Maste 
1115*493d26c5SEd Maste /* set ethertype user-priority */
1116*493d26c5SEd Maste void hw_atl_rpf_etht_user_priority_set(struct aq_hw *aq_hw,
1117*493d26c5SEd Maste 				       u32 etht_user_priority,
1118*493d26c5SEd Maste 				       u32 filter);
1119*493d26c5SEd Maste 
1120*493d26c5SEd Maste /* set ethertype management queue */
1121*493d26c5SEd Maste void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
1122*493d26c5SEd Maste 				   u32 filter);
1123*493d26c5SEd Maste 
1124*493d26c5SEd Maste /* set ethertype filter action */
1125*493d26c5SEd Maste void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
1126*493d26c5SEd Maste 				 u32 filter);
1127*493d26c5SEd Maste 
1128*493d26c5SEd Maste /* set ethertype filter */
1129*493d26c5SEd Maste void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);
1130*493d26c5SEd Maste 
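/*
 * Illustrative sketch only: matching one EtherType and steering it to an RX
 * queue with the accessors above.  The EtherType shown (0x88cc, LLDP), the
 * queue number and the action value are placeholder assumptions rather than
 * values taken from the register specification; "hw" is the caller's.
 *
 *     u32 filter = 0;
 *
 *     hw_atl_rpf_etht_flr_set(hw, 0x88ccU, filter);
 *     hw_atl_rpf_etht_rx_queue_set(hw, 0U, filter);
 *     hw_atl_rpf_etht_rx_queue_en_set(hw, 1U, filter);
 *     hw_atl_rpf_etht_flr_act_set(hw, 1U, filter);
 *     hw_atl_rpf_etht_flr_en_set(hw, 1U, filter);
 */
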
1131*493d26c5SEd Maste /* set L3/L4 filter enable */
1132*493d26c5SEd Maste void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1133*493d26c5SEd Maste 
1134*493d26c5SEd Maste /* set L3 IPv6 enable */
1135*493d26c5SEd Maste void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1136*493d26c5SEd Maste 
1137*493d26c5SEd Maste /* set L3 source address enable */
1138*493d26c5SEd Maste void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1139*493d26c5SEd Maste 
1140*493d26c5SEd Maste /* set L3 destination address enable */
1141*493d26c5SEd Maste void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1142*493d26c5SEd Maste 
1143*493d26c5SEd Maste /* set L4 source port enable */
1144*493d26c5SEd Maste void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1145*493d26c5SEd Maste 
1146*493d26c5SEd Maste /* set L4 destination port enable */
1147*493d26c5SEd Maste void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1148*493d26c5SEd Maste 
1149*493d26c5SEd Maste /* set L4 protocol enable */
1150*493d26c5SEd Maste void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1151*493d26c5SEd Maste 
1152*493d26c5SEd Maste /* set L3 ARP filter enable */
1153*493d26c5SEd Maste void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1154*493d26c5SEd Maste 
1155*493d26c5SEd Maste /* set L3/L4 rx queue enable */
1156*493d26c5SEd Maste void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1157*493d26c5SEd Maste 
1158*493d26c5SEd Maste /* set L3/L4 management queue */
1159*493d26c5SEd Maste void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1160*493d26c5SEd Maste 
1161*493d26c5SEd Maste /* set L3/L4 filter action */
1162*493d26c5SEd Maste void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1163*493d26c5SEd Maste 
1164*493d26c5SEd Maste /* set L3/L4 rx queue */
1165*493d26c5SEd Maste void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1166*493d26c5SEd Maste 
1167*493d26c5SEd Maste /* set L4 protocol value */
1168*493d26c5SEd Maste void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1169*493d26c5SEd Maste 
1170*493d26c5SEd Maste /* set L4 source port */
1171*493d26c5SEd Maste void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1172*493d26c5SEd Maste 
1173*493d26c5SEd Maste /* set L4 destination port */
1174*493d26c5SEd Maste void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1175*493d26c5SEd Maste 
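/*
 * Illustrative sketch only: a destination-port L4 filter expressed with the
 * per-field accessors above, steered to an RX queue.  The protocol selector
 * value (0U), the action value (1U), and the example port and queue numbers
 * are placeholder assumptions; "hw" is the caller's struct aq_hw pointer.
 *
 *     u32 filter = 0;
 *
 *     hw_atl_rpf_l4_dpd_set(hw, 80U, filter);
 *     hw_atl_rpf_l4_dpf_en_set(hw, 1U, filter);
 *     hw_atl_rpf_l4_protf_set(hw, 0U, filter);
 *     hw_atl_rpf_l4_protf_en_set(hw, 1U, filter);
 *     hw_atl_rpf_l3_l4_rxqf_set(hw, 3U, filter);
 *     hw_atl_rpf_l3_l4_rxqf_en_set(hw, 1U, filter);
 *     hw_atl_rpf_l3_l4_actf_set(hw, 1U, filter);
 *     hw_atl_rpf_l3_l4_enf_set(hw, 1U, filter);
 */
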
1176*493d26c5SEd Maste #endif /* HW_ATL_LLH_H */
1177