/* xref: /freebsd/sys/dev/aq/aq_hw_llh.h (revision 493d26c58e732dcfcdd87993ef71880adfe9d0cb) */
1 /*
2  * aQuantia Corporation Network Driver
3  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  *   (1) Redistributions of source code must retain the above
10  *   copyright notice, this list of conditions and the following
11  *   disclaimer.
12  *
13  *   (2) Redistributions in binary form must reproduce the above
14  *   copyright notice, this list of conditions and the following
15  *   disclaimer in the documentation and/or other materials provided
16  *   with the distribution.
17  *
18  *   (3)The name of the author may not be used to endorse or promote
19  *   products derived from this software without specific prior
20  *   written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
26  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
28  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /* File hw_atl_llh.h: Declarations of bitfield and register access functions for
36  * Atlantic registers.
37  */
38 
39 #ifndef HW_ATL_LLH_H
40 #define HW_ATL_LLH_H
41 
42 #include "aq_common.h"
43 
44 struct aq_hw;
45 
46 /* global */
47 
48 
/* set global firmware image id 1 register */
void reg_glb_fw_image_id1_set(struct aq_hw* hw, u32 value);
/* get global firmware image id 1 register */
u32 reg_glb_fw_image_id1_get(struct aq_hw* hw);
51 
52 /* set global microprocessor semaphore */
53 void reg_glb_cpu_sem_set(struct aq_hw *aq_hw, u32 sem_value, u32 sem_index);
54 
55 /* get global microprocessor semaphore */
56 u32 reg_glb_cpu_sem_get(struct aq_hw *aq_hw, u32 sem_index);
57 
58 /*
59 *  \brief Get Global Standard Control 1
60 *  \return GlobalStandardControl1
61 */
62 u32 reg_glb_standard_ctl1_get(struct aq_hw* hw);
63 /*
64 *  \brief Set Global Standard Control 1
65 */
66 void reg_glb_standard_ctl1_set(struct aq_hw* hw, u32 glb_standard_ctl1);
67 
68 /*
69 *  \brief Set Global Control 2
70 */
71 void reg_global_ctl2_set(struct aq_hw* hw, u32 global_ctl2);
72 /*
73 *  \brief Get Global Control 2
74 *  \return GlobalControl2
75 */
76 u32 reg_global_ctl2_get(struct aq_hw* hw);
77 
78 
79 /*
80 *  \brief Set Global Daisy Chain Status 1
81 */
82 void reg_glb_daisy_chain_status1_set(struct aq_hw* hw, u32 glb_daisy_chain_status1);
83 /*
84 *  \brief Get Global Daisy Chain Status 1
85 *  \return glb_daisy_chain_status1
86 */
87 u32 reg_glb_daisy_chain_status1_get(struct aq_hw* hw);
88 
89 
90 /*
91 *  \brief Set Global General Provisioning 9
92 */
93 void reg_glb_general_provisioning9_set(struct aq_hw* hw, u32 value);
94 /*
95 *  \brief Get Global General Provisioning 9
96 *  \return GlobalGeneralProvisioning9
97 */
98 u32 reg_glb_general_provisioning9_get(struct aq_hw* hw);
99 
100 /*
101 *  \brief Set Global NVR Provisioning 2
102 */
103 void reg_glb_nvr_provisioning2_set(struct aq_hw* hw, u32 value);
104 /*
105 *  \brief Get Global NVR Provisioning 2
106 *  \return GlobalNvrProvisioning2
107 */
108 u32 reg_glb_nvr_provisioning2_get(struct aq_hw* hw);
109 
110 /*
111 *  \brief Set Global NVR Interface 1
112 */
113 void reg_glb_nvr_interface1_set(struct aq_hw* hw, u32 value);
114 /*
115 *  \brief Get Global NVR Interface 1
116 *  \return GlobalNvrInterface1
117 */
118 u32 reg_glb_nvr_interface1_get(struct aq_hw* hw);
119 
120 
121 /* set global register reset disable */
122 void glb_glb_reg_res_dis_set(struct aq_hw *aq_hw, u32 glb_reg_res_dis);
123 
124 /* set soft reset */
125 void glb_soft_res_set(struct aq_hw *aq_hw, u32 soft_res);
126 
127 /* get soft reset */
128 u32 glb_soft_res_get(struct aq_hw *aq_hw);
129 
130 /* stats */
131 
/* get rx dma dropped packet counter */
u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw *aq_hw);
133 
134 /* get rx dma good octet counter lsw */
135 u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw);
136 
137 /* get rx dma good packet counter lsw */
138 u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw);
139 
140 /* get tx dma good octet counter lsw */
141 u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw *aq_hw);
142 
143 /* get tx dma good packet counter lsw */
144 u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw *aq_hw);
145 
146 /* get rx dma good octet counter msw */
147 u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw);
148 
149 /* get rx dma good packet counter msw */
150 u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw);
151 
152 /* get tx dma good octet counter msw */
153 u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw *aq_hw);
154 
155 /* get tx dma good packet counter msw */
156 u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw *aq_hw);
157 
158 /* get  rx lro coalesced packet count lsw */
159 u32 stats_rx_lro_coalesced_pkt_count0_get(struct aq_hw *aq_hw);
160 
161 /* get msm rx errors counter register */
162 u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw *aq_hw);
163 
164 /* get msm rx unicast frames counter register */
165 u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw *aq_hw);
166 
167 /* get msm rx multicast frames counter register */
168 u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw *aq_hw);
169 
170 /* get msm rx broadcast frames counter register */
171 u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw *aq_hw);
172 
173 /* get msm rx broadcast octets counter register 1 */
174 u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw *aq_hw);
175 
176 /* get msm rx unicast octets counter register 0 */
177 u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw *aq_hw);
178 
179 /* get rx dma statistics counter 7 */
180 u32 reg_rx_dma_stat_counter7get(struct aq_hw *aq_hw);
181 
182 /* get msm tx errors counter register */
183 u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw *aq_hw);
184 
185 /* get msm tx unicast frames counter register */
186 u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw *aq_hw);
187 
188 /* get msm tx multicast frames counter register */
189 u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw *aq_hw);
190 
191 /* get msm tx broadcast frames counter register */
192 u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw *aq_hw);
193 
194 /* get msm tx multicast octets counter register 1 */
195 u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw *aq_hw);
196 
197 /* get msm tx broadcast octets counter register 1 */
198 u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw *aq_hw);
199 
200 /* get msm tx unicast octets counter register 0 */
201 u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw *aq_hw);
202 
203 /* get global mif identification */
204 u32 reg_glb_mif_id_get(struct aq_hw *aq_hw);
205 
206 /** \brief Set Tx Register Reset Disable
207 *   \param txRegisterResetDisable 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers
208 *   \note Default value: 0x1
209 *   \note PORT="pif_mpi_reg_reset_dsbl_i"
210 */
211 void mpi_tx_reg_res_dis_set(struct aq_hw* hw, u32 mpi_tx_reg_res_dis);
212 /** \brief Get Tx Register Reset Disable
213 *   \return 1 = Disable the S/W reset to MAC-PHY registers, 0 = Enable the S/W reset to MAC-PHY registers
214 *   \note Default value: 0x1
215 *   \note PORT="pif_mpi_reg_reset_dsbl_i"
216 */
217 u32 mpi_tx_reg_res_dis_get(struct aq_hw* hw);
218 
219 
220 /* interrupt */
221 
222 /* set interrupt auto mask lsw */
223 void itr_irq_auto_masklsw_set(struct aq_hw *aq_hw, u32 irq_auto_masklsw);
224 
225 /* set interrupt mapping enable rx */
226 void itr_irq_map_en_rx_set(struct aq_hw *aq_hw, u32 irq_map_en_rx, u32 rx);
227 
228 /* set interrupt mapping enable tx */
229 void itr_irq_map_en_tx_set(struct aq_hw *aq_hw, u32 irq_map_en_tx, u32 tx);
230 
231 /* set interrupt mapping rx */
232 void itr_irq_map_rx_set(struct aq_hw *aq_hw, u32 irq_map_rx, u32 rx);
233 
234 /* set interrupt mapping tx */
235 void itr_irq_map_tx_set(struct aq_hw *aq_hw, u32 irq_map_tx, u32 tx);
236 
237 /* set interrupt mask clear lsw */
238 void itr_irq_msk_clearlsw_set(struct aq_hw *aq_hw, u32 irq_msk_clearlsw);
239 
240 /* set interrupt mask set lsw */
241 void itr_irq_msk_setlsw_set(struct aq_hw *aq_hw, u32 irq_msk_setlsw);
242 
243 /* set interrupt register reset disable */
244 void itr_irq_reg_res_dis_set(struct aq_hw *aq_hw, u32 irq_reg_res_dis);
245 
246 /* set interrupt status clear lsw */
247 void itr_irq_status_clearlsw_set(struct aq_hw *aq_hw,
248                  u32 irq_status_clearlsw);
249 
250 /* get interrupt status lsw */
251 u32 itr_irq_statuslsw_get(struct aq_hw *aq_hw);
252 
253 /* get reset interrupt */
254 u32 itr_res_irq_get(struct aq_hw *aq_hw);
255 
256 /* set reset interrupt */
257 void itr_res_irq_set(struct aq_hw *aq_hw, u32 res_irq);
258 
/* set interrupt mode register (NOTE(review): presumably selects legacy/MSI/MSI-X behavior — confirm against datasheet) */
void itr_irq_mode_set(struct aq_hw *aq_hw, u32 irq_mode);
260 
261 /* Set Link Interrupt Mapping Enable */
262 void itr_link_int_map_en_set(struct aq_hw *aq_hw, u32 link_int_en_map_en);
263 
264 /* Get Link Interrupt Mapping Enable */
265 u32 itr_link_int_map_en_get(struct aq_hw *aq_hw);
266 
267 /* Set Link Interrupt Mapping */
268 void itr_link_int_map_set(struct aq_hw *aq_hw, u32 link_int_map);
269 
270 /* Get Link Interrupt Mapping */
271 u32 itr_link_int_map_get(struct aq_hw *aq_hw);
272 
273 
274 /* Set MIF Interrupt Mapping Enable */
275 void itr_mif_int_map_en_set(struct aq_hw *aq_hw, u32 mif_int_map_en, u32 mif);
276 
277 /* Get MIF Interrupt Mapping Enable */
278 u32 itr_mif_int_map_en_get(struct aq_hw *aq_hw, u32 mif);
279 
280 /* Set MIF Interrupt Mapping */
281 void itr_mif_int_map_set(struct aq_hw *aq_hw, u32 mif_int_map, u32 mif);
282 
283 /* Get MIF Interrupt Mapping */
284 u32 itr_mif_int_map_get(struct aq_hw *aq_hw, u32 mif);
285 
/* set interrupt status clear-on-read ("cor") enable — name-based reading; confirm vs. datasheet */
void itr_irq_status_cor_en_set(struct aq_hw *aq_hw, u32 irq_status_cor_enable);

/* set interrupt auto-mask clear enable */
void itr_irq_auto_mask_clr_en_set(struct aq_hw *aq_hw, u32 irq_auto_mask_clr_en);
289 
290 /* rdm */
291 
292 /* set cpu id */
293 void rdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca);
294 
295 /* set rx dca enable */
296 void rdm_rx_dca_en_set(struct aq_hw *aq_hw, u32 rx_dca_en);
297 
298 /* set rx dca mode */
299 void rdm_rx_dca_mode_set(struct aq_hw *aq_hw, u32 rx_dca_mode);
300 
301 /* set rx descriptor data buffer size */
302 void rdm_rx_desc_data_buff_size_set(struct aq_hw *aq_hw,
303                     u32 rx_desc_data_buff_size,
304                     u32 descriptor);
305 
306 /* set rx descriptor dca enable */
307 void rdm_rx_desc_dca_en_set(struct aq_hw *aq_hw, u32 rx_desc_dca_en,
308                 u32 dca);
309 
310 /* set rx descriptor enable */
311 void rdm_rx_desc_en_set(struct aq_hw *aq_hw, u32 rx_desc_en,
312             u32 descriptor);
313 
314 /* set rx descriptor header splitting */
315 void rdm_rx_desc_head_splitting_set(struct aq_hw *aq_hw,
316                     u32 rx_desc_head_splitting,
317                     u32 descriptor);
318 
319 /* get rx descriptor head pointer */
320 u32 rdm_rx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor);
321 
322 /* set rx descriptor length */
323 void rdm_rx_desc_len_set(struct aq_hw *aq_hw, u32 rx_desc_len,
324              u32 descriptor);
325 
326 /* set rx descriptor write-back interrupt enable */
327 void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
328                   u32 rx_desc_wr_wb_irq_en);
329 
330 /* set rx header dca enable */
331 void rdm_rx_head_dca_en_set(struct aq_hw *aq_hw, u32 rx_head_dca_en,
332                 u32 dca);
333 
334 /* set rx payload dca enable */
335 void rdm_rx_pld_dca_en_set(struct aq_hw *aq_hw, u32 rx_pld_dca_en, u32 dca);
336 
337 /* set rx descriptor header buffer size */
338 void rdm_rx_desc_head_buff_size_set(struct aq_hw *aq_hw,
339                     u32 rx_desc_head_buff_size,
340                     u32 descriptor);
341 
342 /* set rx descriptor reset */
343 void rdm_rx_desc_res_set(struct aq_hw *aq_hw, u32 rx_desc_res,
344              u32 descriptor);
345 
346 /* Set RDM Interrupt Moderation Enable */
347 void rdm_rdm_intr_moder_en_set(struct aq_hw *aq_hw, u32 rdm_intr_moder_en);
348 
349 /* reg */
350 
351 /* set general interrupt mapping register */
352 void reg_gen_irq_map_set(struct aq_hw *aq_hw, u32 gen_intr_map, u32 regidx);
353 
354 /* get general interrupt status register */
355 u32 reg_gen_irq_status_get(struct aq_hw *aq_hw);
356 
357 /* set interrupt global control register */
358 void reg_irq_glb_ctl_set(struct aq_hw *aq_hw, u32 intr_glb_ctl);
359 
360 /* set interrupt throttle register */
361 void reg_irq_thr_set(struct aq_hw *aq_hw, u32 intr_thr, u32 throttle);
362 
363 /* set rx dma descriptor base address lsw */
364 void reg_rx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
365                     u32 rx_dma_desc_base_addrlsw,
366                     u32 descriptor);
367 
368 /* set rx dma descriptor base address msw */
369 void reg_rx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
370                     u32 rx_dma_desc_base_addrmsw,
371                     u32 descriptor);
372 
373 /* get rx dma descriptor status register */
374 u32 reg_rx_dma_desc_status_get(struct aq_hw *aq_hw, u32 descriptor);
375 
376 /* set rx dma descriptor tail pointer register */
377 void reg_rx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
378                   u32 rx_dma_desc_tail_ptr,
379                   u32 descriptor);
380 /* get rx dma descriptor tail pointer register */
381 u32 reg_rx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor);
382 
383 /* set rx filter multicast filter mask register */
384 void reg_rx_flr_mcst_flr_msk_set(struct aq_hw *aq_hw,
385                  u32 rx_flr_mcst_flr_msk);
386 
387 /* set rx filter multicast filter register */
388 void reg_rx_flr_mcst_flr_set(struct aq_hw *aq_hw, u32 rx_flr_mcst_flr,
389                  u32 filter);
390 
391 /* set rx filter rss control register 1 */
392 void reg_rx_flr_rss_control1set(struct aq_hw *aq_hw,
393                 u32 rx_flr_rss_control1);
394 
395 /* Set RX Filter Control Register 2 */
396 void reg_rx_flr_control2_set(struct aq_hw *aq_hw, u32 rx_flr_control2);
397 
398 /* Set RX Interrupt Moderation Control Register */
399 void reg_rx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
400                 u32 rx_intr_moderation_ctl,
401                 u32 queue);
402 
403 /* set tx dma debug control */
404 void reg_tx_dma_debug_ctl_set(struct aq_hw *aq_hw, u32 tx_dma_debug_ctl);
405 
406 /* set tx dma descriptor base address lsw */
407 void reg_tx_dma_desc_base_addresslswset(struct aq_hw *aq_hw,
408                     u32 tx_dma_desc_base_addrlsw,
409                     u32 descriptor);
410 
411 /* set tx dma descriptor base address msw */
412 void reg_tx_dma_desc_base_addressmswset(struct aq_hw *aq_hw,
413                     u32 tx_dma_desc_base_addrmsw,
414                     u32 descriptor);
415 
416 /* set tx dma descriptor tail pointer register */
417 void reg_tx_dma_desc_tail_ptr_set(struct aq_hw *aq_hw,
418                   u32 tx_dma_desc_tail_ptr,
419                   u32 descriptor);
420 
421 /* get tx dma descriptor tail pointer register */
422 u32 reg_tx_dma_desc_tail_ptr_get(struct aq_hw *aq_hw, u32 descriptor);
423 
424 /* Set TX Interrupt Moderation Control Register */
425 void reg_tx_intr_moder_ctrl_set(struct aq_hw *aq_hw,
426                 u32 tx_intr_moderation_ctl,
427                 u32 queue);
428 
429 /* get global microprocessor scratch pad */
430 u32 reg_glb_cpu_scratch_scp_get(struct aq_hw *hw, u32 glb_cpu_scratch_scp_idx);
431 /* set global microprocessor scratch pad */
432 void reg_glb_cpu_scratch_scp_set(struct aq_hw *aq_hw,
433     u32 glb_cpu_scratch_scp, u32 scratch_scp);
434 
435 /* get global microprocessor no reset scratch pad */
436 u32 reg_glb_cpu_no_reset_scratchpad_get(struct aq_hw* hw, u32 index);
437 /* set global microprocessor no reset scratch pad */
438 void reg_glb_cpu_no_reset_scratchpad_set(struct aq_hw* aq_hw, u32 value,
439     u32 index);
440 
441 /* rpb */
442 
443 /* set dma system loopback */
444 void rpb_dma_sys_lbk_set(struct aq_hw *aq_hw, u32 dma_sys_lbk);
445 
446 /* set rx traffic class mode */
447 void rpb_rpf_rx_traf_class_mode_set(struct aq_hw *aq_hw,
448                     u32 rx_traf_class_mode);
449 
450 /* set rx buffer enable */
451 void rpb_rx_buff_en_set(struct aq_hw *aq_hw, u32 rx_buff_en);
452 
453 /* set rx buffer high threshold (per tc) */
454 void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
455                      u32 rx_buff_hi_threshold_per_tc,
456                      u32 buffer);
457 
458 /* set rx buffer low threshold (per tc) */
459 void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
460                      u32 rx_buff_lo_threshold_per_tc,
461                      u32 buffer);
462 
463 /* set rx flow control mode */
464 void rpb_rx_flow_ctl_mode_set(struct aq_hw *aq_hw, u32 rx_flow_ctl_mode);
465 
466 /* set rx packet buffer size (per tc) */
467 void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
468                      u32 rx_pkt_buff_size_per_tc,
469                      u32 buffer);
470 
471 /* set rx xoff enable (per tc) */
472 void rpb_rx_xoff_en_per_tc_set(struct aq_hw *aq_hw, u32 rx_xoff_en_per_tc,
473                    u32 buffer);
474 
475 /* rpf */
476 
477 /* set l2 broadcast count threshold */
478 void rpfl2broadcast_count_threshold_set(struct aq_hw *aq_hw,
479                     u32 l2broadcast_count_threshold);
480 
481 /* set l2 broadcast enable */
482 void rpfl2broadcast_en_set(struct aq_hw *aq_hw, u32 l2broadcast_en);
483 
484 /* set l2 broadcast filter action */
485 void rpfl2broadcast_flr_act_set(struct aq_hw *aq_hw,
486                 u32 l2broadcast_flr_act);
487 
488 /* set l2 multicast filter enable */
489 void rpfl2multicast_flr_en_set(struct aq_hw *aq_hw, u32 l2multicast_flr_en,
490                    u32 filter);
491 
492 /* set l2 promiscuous mode enable */
493 void rpfl2promiscuous_mode_en_set(struct aq_hw *aq_hw,
494                   u32 l2promiscuous_mode_en);
495 
496 /* set l2 unicast filter action */
497 void rpfl2unicast_flr_act_set(struct aq_hw *aq_hw, u32 l2unicast_flr_act,
498                   u32 filter);
499 
500 /* set l2 unicast filter enable */
501 void rpfl2_uc_flr_en_set(struct aq_hw *aq_hw, u32 l2unicast_flr_en,
502              u32 filter);
503 
504 /* set l2 unicast destination address lsw */
505 void rpfl2unicast_dest_addresslsw_set(struct aq_hw *aq_hw,
506                       u32 l2unicast_dest_addresslsw,
507                       u32 filter);
508 
509 /* set l2 unicast destination address msw */
510 void rpfl2unicast_dest_addressmsw_set(struct aq_hw *aq_hw,
511                       u32 l2unicast_dest_addressmsw,
512                       u32 filter);
513 
514 /* Set L2 Accept all Multicast packets */
515 void rpfl2_accept_all_mc_packets_set(struct aq_hw *aq_hw,
516                      u32 l2_accept_all_mc_packets);
517 
518 /* set user-priority tc mapping */
519 void rpf_rpb_user_priority_tc_map_set(struct aq_hw *aq_hw,
520                       u32 user_priority_tc_map, u32 tc);
521 
522 /* set rss key address */
523 void rpf_rss_key_addr_set(struct aq_hw *aq_hw, u32 rss_key_addr);
524 
525 /* set rss key write data */
526 void rpf_rss_key_wr_data_set(struct aq_hw *aq_hw, u32 rss_key_wr_data);
527 
528 /* get rss key read data */
529 u32 rpf_rss_key_rd_data_get(struct aq_hw *aq_hw);
530 
531 /* get rss key write enable */
532 u32 rpf_rss_key_wr_en_get(struct aq_hw *aq_hw);
533 
534 /* set rss key write enable */
535 void rpf_rss_key_wr_en_set(struct aq_hw *aq_hw, u32 rss_key_wr_en);
536 
537 /* set rss redirection table address */
538 void rpf_rss_redir_tbl_addr_set(struct aq_hw *aq_hw,
539                 u32 rss_redir_tbl_addr);
540 
541 /* set rss redirection table write data */
542 void rpf_rss_redir_tbl_wr_data_set(struct aq_hw *aq_hw,
543                    u32 rss_redir_tbl_wr_data);
544 
545 /* get rss redirection write enable */
546 u32 rpf_rss_redir_wr_en_get(struct aq_hw *aq_hw);
547 
548 /* set rss redirection write enable */
549 void rpf_rss_redir_wr_en_set(struct aq_hw *aq_hw, u32 rss_redir_wr_en);
550 
551 /* set tpo to rpf system loopback */
552 void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw *aq_hw,
553                 u32 tpo_to_rpf_sys_lbk);
554 
555 /* set vlan inner ethertype */
556 void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);
557 
558 /* set vlan outer ethertype */
559 void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);
560 
561 /* set vlan promiscuous mode enable */
562 void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw,
563 				      u32 vlan_prom_mode_en);
564 
565 /* Set VLAN untagged action */
566 void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw *aq_hw,
567 				      u32 vlan_untagged_act);
568 
569 /* Set VLAN accept untagged packets */
570 void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
571 						 u32 vlan_acc_untagged_packets);
572 
573 /* Set VLAN filter enable */
574 void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en,
575 				u32 filter);
576 
577 /* Set VLAN Filter Action */
578 void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
579 				 u32 filter);
580 
581 /* Set VLAN ID Filter */
582 void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr,
583 				u32 filter);
584 
585 /* Set VLAN RX queue assignment enable */
586 void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq_en,
587 				u32 filter);
588 
589 /* Set VLAN RX queue */
590 void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq,
591 				u32 filter);
592 
593 /* set ethertype filter enable */
594 void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en,
595 				u32 filter);
596 
597 /* set  ethertype user-priority enable */
598 void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
599 					  u32 etht_user_priority_en,
600 					  u32 filter);
601 
602 /* set  ethertype rx queue enable */
603 void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw,
604 				     u32 etht_rx_queue_en,
605 				     u32 filter);
606 
607 /* set ethertype rx queue */
608 void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
609 				  u32 filter);
610 
611 /* set ethertype user-priority */
612 void hw_atl_rpf_etht_user_priority_set(struct aq_hw *aq_hw,
613 				       u32 etht_user_priority,
614 				       u32 filter);
615 
616 /* set ethertype management queue */
617 void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
618 				   u32 filter);
619 
620 /* set ethertype filter action */
621 void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
622 				 u32 filter);
623 
624 /* set ethertype filter */
625 void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);
626 
627 /* set L3/L4 filter enable */
628 void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
629 
630 /* set L3 IPv6 enable */
631 void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
632 
633 /* set L3 source address enable */
634 void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
635 
636 /* set L3 destination address enable */
637 void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
638 
639 /* set L4 source port enable */
640 void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
641 
642 /* set L4 destination port enable */
643 void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
644 
645 /* set L4 protocol enable */
646 void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
647 
648 /* set L3 ARP filter enable */
649 void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
650 
651 /* set L3/L4 rx queue enable */
652 void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);
653 
654 /* set L3/L4 management queue */
655 void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
656 
657 /* set L3/L4 filter action */
658 void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
659 
660 /* set L3/L4 rx queue */
661 void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
662 
663 /* set L4 protocol value */
664 void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter);
665 
666 /* set L4 source port */
667 void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
668 
669 /* set L4 destination port */
670 void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
671 
672 /* set vlan inner ethertype */
673 void rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);
674 
675 /* set vlan outer ethertype */
676 void rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);
677 
678 /* set vlan promiscuous mode enable */
679 void rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw, u32 vlan_prom_mode_en);
680 
681 /* Set VLAN untagged action */
682 void rpf_vlan_untagged_act_set(struct aq_hw *aq_hw, u32 vlan_untagged_act);
683 
684 /* Set VLAN accept untagged packets */
685 void rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
686                       u32 vlan_accept_untagged_packets);
687 
688 /* Set VLAN filter enable */
689 void rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en, u32 filter);
690 
691 /* Set VLAN Filter Action */
692 void rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
693               u32 filter);
694 
695 /* Set VLAN ID Filter */
696 void rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr, u32 filter);
697 
698 /* set ethertype filter enable */
699 void rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en, u32 filter);
700 
701 /* set  ethertype user-priority enable */
702 void rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
703                    u32 etht_user_priority_en, u32 filter);
704 
705 /* set  ethertype rx queue enable */
706 void rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw, u32 etht_rx_queue_en,
707                   u32 filter);
708 
709 /* set ethertype rx queue */
710 void rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
711                u32 filter);
712 
713 /* set ethertype user-priority */
714 void rpf_etht_user_priority_set(struct aq_hw *aq_hw, u32 etht_user_priority,
715                 u32 filter);
716 
717 /* set ethertype management queue */
718 void rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
719                 u32 filter);
720 
721 /* set ethertype filter action */
722 void rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
723               u32 filter);
724 
725 /* set ethertype filter */
726 void rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);
727 
/*
 * NOTE: a second, byte-identical copy of the fifteen hw_atl_rpf L3/L4
 * filter prototypes (hw_atl_rpf_l3_l4_enf_set ... hw_atl_rpf_l4_dpd_set)
 * used to be declared here.  The duplicates added nothing — C permits
 * redeclaring identical prototypes, but keeping two copies in one header
 * invites divergence — so they have been removed.  The canonical
 * declarations remain in the "rpf" section above.
 */
772 
773 /* rpo */
774 
775 /* set ipv4 header checksum offload enable */
776 void rpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
777                        u32 ipv4header_crc_offload_en);
778 
779 /* set rx descriptor vlan stripping */
780 void rpo_rx_desc_vlan_stripping_set(struct aq_hw *aq_hw,
781                     u32 rx_desc_vlan_stripping,
782                     u32 descriptor);
783 
784 /* set tcp/udp checksum offload enable */
785 void rpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
786                     u32 tcp_udp_crc_offload_en);
787 
788 /* Set LRO Patch Optimization Enable. */
789 void rpo_lro_patch_optimization_en_set(struct aq_hw *aq_hw,
790                        u32 lro_patch_optimization_en);
791 
792 /* Set Large Receive Offload Enable */
793 void rpo_lro_en_set(struct aq_hw *aq_hw, u32 lro_en);
794 
795 /* Set LRO Q Sessions Limit */
796 void rpo_lro_qsessions_lim_set(struct aq_hw *aq_hw, u32 lro_qsessions_lim);
797 
798 /* Set LRO Total Descriptor Limit */
799 void rpo_lro_total_desc_lim_set(struct aq_hw *aq_hw, u32 lro_total_desc_lim);
800 
801 /* Set LRO Min Payload of First Packet */
802 void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw *aq_hw,
803                       u32 lro_min_pld_of_first_pkt);
804 
805 /* Set LRO Packet Limit */
806 void rpo_lro_pkt_lim_set(struct aq_hw *aq_hw, u32 lro_packet_lim);
807 
808 /* Set LRO Max Number of Descriptors */
809 void rpo_lro_max_num_of_descriptors_set(struct aq_hw *aq_hw,
810                     u32 lro_max_desc_num, u32 lro);
811 
812 /* Set LRO Time Base Divider */
813 void rpo_lro_time_base_divider_set(struct aq_hw *aq_hw,
814                    u32 lro_time_base_divider);
815 
816 /*Set LRO Inactive Interval */
817 void rpo_lro_inactive_interval_set(struct aq_hw *aq_hw,
818                    u32 lro_inactive_interval);
819 
820 /*Set LRO Max Coalescing Interval */
821 void rpo_lro_max_coalescing_interval_set(struct aq_hw *aq_hw,
822                      u32 lro_max_coalescing_interval);
823 
824 /* rx */
825 
826 /* set rx register reset disable */
827 void rx_rx_reg_res_dis_set(struct aq_hw *aq_hw, u32 rx_reg_res_dis);
828 
/* tdm */

/* set cpu id for the given dca channel */
void tdm_cpu_id_set(struct aq_hw *aq_hw, u32 cpuid, u32 dca);

/* set large send offload enable */
void tdm_large_send_offload_en_set(struct aq_hw *aq_hw,
                   u32 large_send_offload_en);

/* set tx descriptor enable */
void tdm_tx_desc_en_set(struct aq_hw *aq_hw, u32 tx_desc_en, u32 descriptor);

/* set tx dca enable */
void tdm_tx_dca_en_set(struct aq_hw *aq_hw, u32 tx_dca_en);

/* set tx dca mode */
void tdm_tx_dca_mode_set(struct aq_hw *aq_hw, u32 tx_dca_mode);

/* set tx descriptor dca enable */
void tdm_tx_desc_dca_en_set(struct aq_hw *aq_hw, u32 tx_desc_dca_en, u32 dca);

/* get tx descriptor head pointer */
u32 tdm_tx_desc_head_ptr_get(struct aq_hw *aq_hw, u32 descriptor);

/* set tx descriptor length */
void tdm_tx_desc_len_set(struct aq_hw *aq_hw, u32 tx_desc_len,
             u32 descriptor);

/* set tx descriptor write-back interrupt enable */
void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw *aq_hw,
                  u32 tx_desc_wr_wb_irq_en);

/* set tx descriptor write-back threshold */
void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw *aq_hw,
                     u32 tx_desc_wr_wb_threshold,
                     u32 descriptor);

/* Set TDM Interrupt Moderation Enable */
void tdm_tdm_intr_moder_en_set(struct aq_hw *aq_hw,
                   u32 tdm_irq_moderation_en);
/* thm */

/* set lso tcp flag of first packet */
void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw *aq_hw,
                       u32 lso_tcp_flag_of_first_pkt);

/* set lso tcp flag of last packet */
void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw *aq_hw,
                      u32 lso_tcp_flag_of_last_pkt);

/* set lso tcp flag of middle packet */
void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw *aq_hw,
                    u32 lso_tcp_flag_of_middle_pkt);

/* tpb */

/* set tx buffer enable */
void tpb_tx_buff_en_set(struct aq_hw *aq_hw, u32 tx_buff_en);

/* set tx tc mode */
void tpb_tx_tc_mode_set(struct aq_hw *aq_hw, u32 tc_mode);

/* set tx buffer high threshold (per tc) */
void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw *aq_hw,
                     u32 tx_buff_hi_threshold_per_tc,
                     u32 buffer);

/* set tx buffer low threshold (per tc) */
void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw *aq_hw,
                     u32 tx_buff_lo_threshold_per_tc,
                     u32 buffer);

/* set tx dma system loopback enable */
void tpb_tx_dma_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_dma_sys_lbk_en);

/* set tx packet buffer size (per tc) */
void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw *aq_hw,
                     u32 tx_pkt_buff_size_per_tc, u32 buffer);

/* toggle rdm rx dma descriptor cache init (rdm unit, declared here) */
void rdm_rx_dma_desc_cache_init_tgl(struct aq_hw *aq_hw);

/* set tx path pad insert enable */
void tpb_tx_path_scp_ins_en_set(struct aq_hw *aq_hw, u32 tx_path_scp_ins_en);

/* tpo */

/* set ipv4 header checksum offload enable */
void tpo_ipv4header_crc_offload_en_set(struct aq_hw *aq_hw,
                       u32 ipv4header_crc_offload_en);

/* set tcp/udp checksum offload enable */
void tpo_tcp_udp_crc_offload_en_set(struct aq_hw *aq_hw,
                    u32 tcp_udp_crc_offload_en);

/* set tx pkt system loopback enable */
void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw *aq_hw, u32 tx_pkt_sys_lbk_en);
926 
/* tps */

/* set tx packet scheduler data arbitration mode */
void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw *aq_hw,
                       u32 tx_pkt_shed_data_arb_mode);

/* set tx packet scheduler descriptor rate current time reset */
void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw *aq_hw,
                         u32 curr_time_res);

/* set tx packet scheduler descriptor rate limit */
void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw *aq_hw,
                       u32 tx_pkt_shed_desc_rate_lim);

/* set tx packet scheduler descriptor tc arbitration mode */
void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw *aq_hw,
                      u32 tx_pkt_shed_desc_tc_arb_mode);

/* set tx packet scheduler descriptor tc max credit */
void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw *aq_hw,
                        u32 tx_pkt_shed_desc_tc_max_credit,
                        u32 tc);

/* set tx packet scheduler descriptor tc weight */
void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw *aq_hw,
                    u32 tx_pkt_shed_desc_tc_weight,
                    u32 tc);

/* set tx packet scheduler descriptor vm arbitration mode */
void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw *aq_hw,
                      u32 tx_pkt_shed_desc_vm_arb_mode);

/* set tx packet scheduler tc data max credit */
void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw *aq_hw,
                        u32 tx_pkt_shed_tc_data_max_credit,
                        u32 tc);

/* set tx packet scheduler tc data weight */
void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw *aq_hw,
                    u32 tx_pkt_shed_tc_data_weight,
                    u32 tc);

/* tx */

/* set tx register reset disable */
void tx_tx_reg_res_dis_set(struct aq_hw *aq_hw, u32 tx_reg_res_dis);

/* msm */

/* get register access status */
u32 msm_reg_access_status_get(struct aq_hw *aq_hw);

/* set register address for indirect address */
void msm_reg_addr_for_indirect_addr_set(struct aq_hw *aq_hw,
                    u32 reg_addr_for_indirect_addr);

/* set register read strobe */
void msm_reg_rd_strobe_set(struct aq_hw *aq_hw, u32 reg_rd_strobe);

/* get register read data */
u32 msm_reg_rd_data_get(struct aq_hw *aq_hw);

/* set register write data */
void msm_reg_wr_data_set(struct aq_hw *aq_hw, u32 reg_wr_data);

/* set register write strobe */
void msm_reg_wr_strobe_set(struct aq_hw *aq_hw, u32 reg_wr_strobe);

/* pci */

/* set pci register reset disable */
void pci_pci_reg_res_dis_set(struct aq_hw *aq_hw, u32 pci_reg_res_dis);
999 
1000 
1001 /*
1002 *  \brief Set MIF Power Gating Enable Control
1003 */
1004 void reg_mif_power_gating_enable_control_set(struct aq_hw* hw, u32 value);
1005 /*
1006 *  \brief Get MIF Power Gating Enable Control
1007 *  \return MifPowerGatingEnableControl
1008 */
1009 u32 reg_mif_power_gating_enable_control_get(struct aq_hw* hw);
1010 
1011 /* get mif up mailbox busy */
1012 u32 mif_mcp_up_mailbox_busy_get(struct aq_hw *aq_hw);
1013 
1014 /* set mif up mailbox execute operation */
1015 void mif_mcp_up_mailbox_execute_operation_set(struct aq_hw* hw, u32 value);
1016 
1017 /* get mif uP mailbox address */
1018 u32 mif_mcp_up_mailbox_addr_get(struct aq_hw *aq_hw);
1019 /* set mif uP mailbox address */
1020 void mif_mcp_up_mailbox_addr_set(struct aq_hw *hw, u32 value);
1021 
1022 /* get mif uP mailbox data */
1023 u32 mif_mcp_up_mailbox_data_get(struct aq_hw *aq_hw);
1024 
/* clear ipv4 filter destination address */
void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw *aq_hw, u8 location);

/* clear ipv4 filter source address */
void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw *aq_hw, u8 location);

/* clear command for filter l3-l4 */
void hw_atl_rpfl3l4_cmd_clear(struct aq_hw *aq_hw, u8 location);

/* clear ipv6 filter destination address */
void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw *aq_hw, u8 location);

/* clear ipv6 filter source address */
void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw *aq_hw, u8 location);

/* set ipv4 filter destination address */
void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw *aq_hw, u8 location,
				       u32 ipv4_dest);

/* set ipv4 filter source address */
void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw *aq_hw, u8 location,
				      u32 ipv4_src);

/* set command for filter l3-l4 */
void hw_atl_rpfl3l4_cmd_set(struct aq_hw *aq_hw, u8 location, u32 cmd);

/* set ipv6 filter source address */
void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw *aq_hw, u8 location,
				      u32 *ipv6_src);

/* set ipv6 filter destination address */
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw *aq_hw, u8 location,
				       u32 *ipv6_dest);

/* set vlan inner ethertype */
void hw_atl_rpf_vlan_inner_etht_set(struct aq_hw *aq_hw, u32 vlan_inner_etht);

/* set vlan outer ethertype */
void hw_atl_rpf_vlan_outer_etht_set(struct aq_hw *aq_hw, u32 vlan_outer_etht);

/* set vlan promiscuous mode enable */
void hw_atl_rpf_vlan_prom_mode_en_set(struct aq_hw *aq_hw,
				      u32 vlan_prom_mode_en);

/* Set VLAN untagged action */
void hw_atl_rpf_vlan_untagged_act_set(struct aq_hw *aq_hw,
				      u32 vlan_untagged_act);

/* Set VLAN accept untagged packets */
void hw_atl_rpf_vlan_accept_untagged_packets_set(struct aq_hw *aq_hw,
						 u32 vlan_acc_untagged_packets);

/* Set VLAN filter enable */
void hw_atl_rpf_vlan_flr_en_set(struct aq_hw *aq_hw, u32 vlan_flr_en,
				u32 filter);

/* Set VLAN Filter Action */
void hw_atl_rpf_vlan_flr_act_set(struct aq_hw *aq_hw, u32 vlan_filter_act,
				 u32 filter);

/* Set VLAN ID Filter */
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw *aq_hw, u32 vlan_id_flr,
				u32 filter);

/* Set VLAN RX queue assignment enable */
void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq_en,
				u32 filter);

/* Set VLAN RX queue */
void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw *aq_hw, u32 vlan_rxq,
				u32 filter);

/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw *aq_hw, u32 etht_flr_en,
				u32 filter);

/* set ethertype user-priority enable */
void hw_atl_rpf_etht_user_priority_en_set(struct aq_hw *aq_hw,
					  u32 etht_user_priority_en,
					  u32 filter);

/* set ethertype rx queue enable */
void hw_atl_rpf_etht_rx_queue_en_set(struct aq_hw *aq_hw,
				     u32 etht_rx_queue_en,
				     u32 filter);

/* set ethertype rx queue */
void hw_atl_rpf_etht_rx_queue_set(struct aq_hw *aq_hw, u32 etht_rx_queue,
				  u32 filter);

/* set ethertype user-priority */
void hw_atl_rpf_etht_user_priority_set(struct aq_hw *aq_hw,
				       u32 etht_user_priority,
				       u32 filter);

/* set ethertype management queue */
void hw_atl_rpf_etht_mgt_queue_set(struct aq_hw *aq_hw, u32 etht_mgt_queue,
				   u32 filter);

/* set ethertype filter action */
void hw_atl_rpf_etht_flr_act_set(struct aq_hw *aq_hw, u32 etht_flr_act,
				 u32 filter);

/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw *aq_hw, u32 etht_flr, u32 filter);

/* set L3/L4 filter enable */
void hw_atl_rpf_l3_l4_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3 IPv6 enable */
void hw_atl_rpf_l3_v6_enf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3 source address enable */
void hw_atl_rpf_l3_saf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3 destination address enable */
void hw_atl_rpf_l3_daf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 source port enable */
void hw_atl_rpf_l4_spf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 destination port enable */
void hw_atl_rpf_l4_dpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 protocol enable */
void hw_atl_rpf_l4_protf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3 ARP filter enable */
void hw_atl_rpf_l3_arpf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3/L4 rx queue enable */
void hw_atl_rpf_l3_l4_rxqf_en_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3/L4 management queue */
void hw_atl_rpf_l3_l4_mng_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3/L4 filter action */
void hw_atl_rpf_l3_l4_actf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L3/L4 rx queue */
void hw_atl_rpf_l3_l4_rxqf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 protocol value */
void hw_atl_rpf_l4_protf_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 source port */
void hw_atl_rpf_l4_spd_set(struct aq_hw *aq_hw, u32 val, u32 filter);

/* set L4 destination port */
void hw_atl_rpf_l4_dpd_set(struct aq_hw *aq_hw, u32 val, u32 filter);
1175 
1176 #endif /* HW_ATL_LLH_H */
1177