xref: /freebsd/sys/dev/aq/aq_hw.c (revision 493d26c58e732dcfcdd87993ef71880adfe9d0cb)
1*493d26c5SEd Maste /*
2*493d26c5SEd Maste  * aQuantia Corporation Network Driver
3*493d26c5SEd Maste  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
4*493d26c5SEd Maste  *
5*493d26c5SEd Maste  * Redistribution and use in source and binary forms, with or without
6*493d26c5SEd Maste  * modification, are permitted provided that the following conditions
7*493d26c5SEd Maste  * are met:
8*493d26c5SEd Maste  *
9*493d26c5SEd Maste  *   (1) Redistributions of source code must retain the above
10*493d26c5SEd Maste  *   copyright notice, this list of conditions and the following
11*493d26c5SEd Maste  *   disclaimer.
12*493d26c5SEd Maste  *
13*493d26c5SEd Maste  *   (2) Redistributions in binary form must reproduce the above
14*493d26c5SEd Maste  *   copyright notice, this list of conditions and the following
15*493d26c5SEd Maste  *   disclaimer in the documentation and/or other materials provided
16*493d26c5SEd Maste  *   with the distribution.
17*493d26c5SEd Maste  *
18*493d26c5SEd Maste  *   (3)The name of the author may not be used to endorse or promote
19*493d26c5SEd Maste  *   products derived from this software without specific prior
20*493d26c5SEd Maste  *   written permission.
21*493d26c5SEd Maste  *
22*493d26c5SEd Maste  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23*493d26c5SEd Maste  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24*493d26c5SEd Maste  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25*493d26c5SEd Maste  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
26*493d26c5SEd Maste  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27*493d26c5SEd Maste  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
28*493d26c5SEd Maste  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29*493d26c5SEd Maste  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30*493d26c5SEd Maste  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31*493d26c5SEd Maste  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32*493d26c5SEd Maste  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33*493d26c5SEd Maste  */
34*493d26c5SEd Maste 
35*493d26c5SEd Maste #include <unistd.h>
36*493d26c5SEd Maste #include <sys/endian.h>
37*493d26c5SEd Maste #include <sys/param.h>
38*493d26c5SEd Maste #include <sys/systm.h>
39*493d26c5SEd Maste #include <machine/cpu.h>
40*493d26c5SEd Maste #include <sys/socket.h>
41*493d26c5SEd Maste #include <net/if.h>
42*493d26c5SEd Maste 
43*493d26c5SEd Maste #include "aq_hw.h"
44*493d26c5SEd Maste #include "aq_dbg.h"
45*493d26c5SEd Maste #include "aq_hw_llh.h"
46*493d26c5SEd Maste #include "aq_fw.h"
47*493d26c5SEd Maste 
48*493d26c5SEd Maste #define AQ_HW_FW_SM_RAM        0x2U
49*493d26c5SEd Maste #define AQ_CFG_FW_MIN_VER_EXPECTED 0x01050006U
50*493d26c5SEd Maste 
51*493d26c5SEd Maste 
/*
 * Collect pending hardware error state.  No error flags are currently
 * tracked, so this always reports success (0).
 */
int aq_hw_err_from_flags(struct aq_hw *hw)
{
    int err = 0;

    return (err);
}
56*493d26c5SEd Maste 
57*493d26c5SEd Maste static void aq_hw_chip_features_init(struct aq_hw *hw, u32 *p)
58*493d26c5SEd Maste {
59*493d26c5SEd Maste     u32 chip_features = 0U;
60*493d26c5SEd Maste     u32 val = reg_glb_mif_id_get(hw);
61*493d26c5SEd Maste     u32 mif_rev = val & 0xFFU;
62*493d26c5SEd Maste 
63*493d26c5SEd Maste     if ((0xFU & mif_rev) == 1U) {
64*493d26c5SEd Maste         chip_features |= AQ_HW_CHIP_REVISION_A0 |
65*493d26c5SEd Maste                  AQ_HW_CHIP_MPI_AQ |
66*493d26c5SEd Maste                  AQ_HW_CHIP_MIPS;
67*493d26c5SEd Maste     } else if ((0xFU & mif_rev) == 2U) {
68*493d26c5SEd Maste         chip_features |= AQ_HW_CHIP_REVISION_B0 |
69*493d26c5SEd Maste                  AQ_HW_CHIP_MPI_AQ |
70*493d26c5SEd Maste                  AQ_HW_CHIP_MIPS |
71*493d26c5SEd Maste                  AQ_HW_CHIP_TPO2 |
72*493d26c5SEd Maste                  AQ_HW_CHIP_RPF2;
73*493d26c5SEd Maste     } else if ((0xFU & mif_rev) == 0xAU) {
74*493d26c5SEd Maste         chip_features |= AQ_HW_CHIP_REVISION_B1 |
75*493d26c5SEd Maste                  AQ_HW_CHIP_MPI_AQ |
76*493d26c5SEd Maste                  AQ_HW_CHIP_MIPS |
77*493d26c5SEd Maste                  AQ_HW_CHIP_TPO2 |
78*493d26c5SEd Maste                  AQ_HW_CHIP_RPF2;
79*493d26c5SEd Maste     }
80*493d26c5SEd Maste 
81*493d26c5SEd Maste     *p = chip_features;
82*493d26c5SEd Maste }
83*493d26c5SEd Maste 
/*
 * Read 'cnt' dwords from firmware shared memory at address 'a' into 'p'
 * through the MCP "up" mailbox.  Returns 0 on success or a negative
 * errno-style value.
 *
 * NOTE(review): 'err' is apparently assigned by the AQ_HW_WAIT_FOR()
 * macro on timeout (the `if (err < 0)` below implies so) — confirm
 * against the macro definition in aq_hw.h.
 */
int aq_hw_fw_downld_dwords(struct aq_hw *hw, u32 a, u32 *p, u32 cnt)
{
    int err = 0;

//    AQ_DBG_ENTER();
    /* Acquire the F/W RAM semaphore before touching the mailbox. */
    AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(hw,
                       AQ_HW_FW_SM_RAM) == 1U,
                       1U, 10000U);

    if (err < 0) {
        bool is_locked;

        /* Wait timed out: force-release the semaphore and try to
         * re-acquire it once before giving up. */
        reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);
        is_locked = reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM);
        if (!is_locked) {
            err = -ETIME;
            goto err_exit;
        }
    }

    mif_mcp_up_mailbox_addr_set(hw, a);

    /* One mailbox transaction per dword; stops early if err goes
     * negative inside the loop. */
    for (++cnt; --cnt && !err;) {
        mif_mcp_up_mailbox_execute_operation_set(hw, 1);

        /* B1 silicon signals completion by advancing the mailbox
         * address; earlier revisions expose an explicit busy bit. */
        if (IS_CHIP_FEATURE(hw, REVISION_B1))
            AQ_HW_WAIT_FOR(a != mif_mcp_up_mailbox_addr_get(hw), 1U, 1000U);
        else
            AQ_HW_WAIT_FOR(!mif_mcp_up_mailbox_busy_get(hw), 1, 1000U);

        *(p++) = mif_mcp_up_mailbox_data_get(hw);
    }

    /* Release the RAM semaphore. */
    reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);

err_exit:
//    AQ_DBG_EXIT(err);
    return (err);
}
123*493d26c5SEd Maste 
124*493d26c5SEd Maste int aq_hw_ver_match(const aq_hw_fw_version* ver_expected, const aq_hw_fw_version* ver_actual)
125*493d26c5SEd Maste {
126*493d26c5SEd Maste     AQ_DBG_ENTER();
127*493d26c5SEd Maste 
128*493d26c5SEd Maste     if (ver_actual->major_version >= ver_expected->major_version)
129*493d26c5SEd Maste         return (true);
130*493d26c5SEd Maste     if (ver_actual->minor_version >= ver_expected->minor_version)
131*493d26c5SEd Maste         return (true);
132*493d26c5SEd Maste     if (ver_actual->build_number >= ver_expected->build_number)
133*493d26c5SEd Maste         return (true);
134*493d26c5SEd Maste 
135*493d26c5SEd Maste     return (false);
136*493d26c5SEd Maste }
137*493d26c5SEd Maste 
/*
 * Initialize the firmware/driver shared state: reset F/W, detect chip
 * features, bind the F/W ops table, seed the scratchpad for 1.x
 * firmware, then wait for the F/W mailbox address and sanity-check the
 * firmware version.
 */
static int aq_hw_init_ucp(struct aq_hw *hw)
{
    int err = 0;
    AQ_DBG_ENTER();

    hw->fw_version.raw = 0;

    err = aq_fw_reset(hw);
    if (err != EOK) {
        aq_log_error("aq_hw_init_ucp(): F/W reset failed, err %d", err);
        return (err);
    }

    aq_hw_chip_features_init(hw, &hw->chip_features);
    err = aq_fw_ops_init(hw);
    if (err < 0) {
        aq_log_error("could not initialize F/W ops, err %d", err);
        return (-1);
    }

    if (hw->fw_version.major_version == 1) {
        /* 1.x firmware expects a random seed in register 0x370;
         * only write it if it has not been set yet. */
        if (!AQ_READ_REG(hw, 0x370)) {
            unsigned int rnd = 0;
            unsigned int ucp_0x370 = 0;

            rnd = arc4random();

            /* Force bit 1 set and bit 0 clear in every byte so no
             * byte of the seed can be zero. */
            ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
            AQ_WRITE_REG(hw, AQ_HW_UCP_0X370_REG, ucp_0x370);
        }

        reg_glb_cpu_scratch_scp_set(hw, 0, 25);
    }

    /* check 10 times by 1ms
     * NOTE(review): the comment and the arguments (400U, 20) disagree;
     * confirm the units against the AQ_HW_WAIT_FOR() definition. */
    AQ_HW_WAIT_FOR((hw->mbox_addr = AQ_READ_REG(hw, 0x360)) != 0, 400U, 20);

    /* Version mismatch is logged but deliberately not fatal. */
    aq_hw_fw_version ver_expected = { .raw = AQ_CFG_FW_MIN_VER_EXPECTED };
    if (!aq_hw_ver_match(&ver_expected, &hw->fw_version))
        aq_log_error("atlantic: aq_hw_init_ucp(), wrong FW version: expected:%x actual:%x",
              AQ_CFG_FW_MIN_VER_EXPECTED, hw->fw_version.raw);

    AQ_DBG_EXIT(err);
    return (err);
}
183*493d26c5SEd Maste 
/*
 * Create the MPI (message-passing interface) state by initializing the
 * firmware/driver shared area.  Returns the aq_hw_init_ucp() result.
 */
int aq_hw_mpi_create(struct aq_hw *hw)
{
    int err;

    AQ_DBG_ENTER();
    err = aq_hw_init_ucp(hw);
    AQ_DBG_EXIT(err);
    return (err);
}
197*493d26c5SEd Maste 
198*493d26c5SEd Maste int aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox)
199*493d26c5SEd Maste {
200*493d26c5SEd Maste     int err = 0;
201*493d26c5SEd Maste //    AQ_DBG_ENTER();
202*493d26c5SEd Maste 
203*493d26c5SEd Maste     if (hw->fw_ops && hw->fw_ops->get_stats) {
204*493d26c5SEd Maste         err = hw->fw_ops->get_stats(hw, &pmbox->stats);
205*493d26c5SEd Maste     } else {
206*493d26c5SEd Maste         err = -ENOTSUP;
207*493d26c5SEd Maste         aq_log_error("get_stats() not supported by F/W");
208*493d26c5SEd Maste     }
209*493d26c5SEd Maste 
210*493d26c5SEd Maste     if (err == EOK) {
211*493d26c5SEd Maste         pmbox->stats.dpc = reg_rx_dma_stat_counter7get(hw);
212*493d26c5SEd Maste         pmbox->stats.cprc = stats_rx_lro_coalesced_pkt_count0_get(hw);
213*493d26c5SEd Maste     }
214*493d26c5SEd Maste 
215*493d26c5SEd Maste //    AQ_DBG_EXIT(err);
216*493d26c5SEd Maste     return (err);
217*493d26c5SEd Maste }
218*493d26c5SEd Maste 
219*493d26c5SEd Maste static int aq_hw_mpi_set(struct aq_hw *hw,
220*493d26c5SEd Maste               enum aq_hw_fw_mpi_state_e state, u32 speed)
221*493d26c5SEd Maste {
222*493d26c5SEd Maste     int err = -ENOTSUP;
223*493d26c5SEd Maste     AQ_DBG_ENTERA("speed %d", speed);
224*493d26c5SEd Maste 
225*493d26c5SEd Maste     if (hw->fw_ops && hw->fw_ops->set_mode) {
226*493d26c5SEd Maste         err = hw->fw_ops->set_mode(hw, state, speed);
227*493d26c5SEd Maste     } else {
228*493d26c5SEd Maste         aq_log_error("set_mode() not supported by F/W");
229*493d26c5SEd Maste     }
230*493d26c5SEd Maste 
231*493d26c5SEd Maste     AQ_DBG_EXIT(err);
232*493d26c5SEd Maste     return (err);
233*493d26c5SEd Maste }
234*493d26c5SEd Maste 
235*493d26c5SEd Maste int aq_hw_set_link_speed(struct aq_hw *hw, u32 speed)
236*493d26c5SEd Maste {
237*493d26c5SEd Maste     return aq_hw_mpi_set(hw, MPI_INIT, speed);
238*493d26c5SEd Maste }
239*493d26c5SEd Maste 
240*493d26c5SEd Maste int aq_hw_get_link_state(struct aq_hw *hw, u32 *link_speed, struct aq_hw_fc_info *fc_neg)
241*493d26c5SEd Maste {
242*493d26c5SEd Maste     int err = EOK;
243*493d26c5SEd Maste 
244*493d26c5SEd Maste  //   AQ_DBG_ENTER();
245*493d26c5SEd Maste 
246*493d26c5SEd Maste     enum aq_hw_fw_mpi_state_e mode;
247*493d26c5SEd Maste     aq_fw_link_speed_t speed = aq_fw_none;
248*493d26c5SEd Maste     aq_fw_link_fc_t fc;
249*493d26c5SEd Maste 
250*493d26c5SEd Maste     if (hw->fw_ops && hw->fw_ops->get_mode) {
251*493d26c5SEd Maste         err = hw->fw_ops->get_mode(hw, &mode, &speed, &fc);
252*493d26c5SEd Maste     } else {
253*493d26c5SEd Maste         aq_log_error("get_mode() not supported by F/W");
254*493d26c5SEd Maste 		AQ_DBG_EXIT(-ENOTSUP);
255*493d26c5SEd Maste         return (-ENOTSUP);
256*493d26c5SEd Maste     }
257*493d26c5SEd Maste 
258*493d26c5SEd Maste     if (err < 0) {
259*493d26c5SEd Maste         aq_log_error("get_mode() failed, err %d", err);
260*493d26c5SEd Maste 		AQ_DBG_EXIT(err);
261*493d26c5SEd Maste         return (err);
262*493d26c5SEd Maste     }
263*493d26c5SEd Maste 	*link_speed = 0;
264*493d26c5SEd Maste     if (mode != MPI_INIT)
265*493d26c5SEd Maste         return (0);
266*493d26c5SEd Maste 
267*493d26c5SEd Maste     switch (speed) {
268*493d26c5SEd Maste     case aq_fw_10G:
269*493d26c5SEd Maste         *link_speed = 10000U;
270*493d26c5SEd Maste         break;
271*493d26c5SEd Maste 
272*493d26c5SEd Maste     case aq_fw_5G:
273*493d26c5SEd Maste         *link_speed = 5000U;
274*493d26c5SEd Maste         break;
275*493d26c5SEd Maste 
276*493d26c5SEd Maste     case aq_fw_2G5:
277*493d26c5SEd Maste         *link_speed = 2500U;
278*493d26c5SEd Maste         break;
279*493d26c5SEd Maste 
280*493d26c5SEd Maste     case aq_fw_1G:
281*493d26c5SEd Maste         *link_speed = 1000U;
282*493d26c5SEd Maste         break;
283*493d26c5SEd Maste 
284*493d26c5SEd Maste     case aq_fw_100M:
285*493d26c5SEd Maste         *link_speed = 100U;
286*493d26c5SEd Maste         break;
287*493d26c5SEd Maste 
288*493d26c5SEd Maste     default:
289*493d26c5SEd Maste         *link_speed = 0U;
290*493d26c5SEd Maste         break;
291*493d26c5SEd Maste     }
292*493d26c5SEd Maste 
293*493d26c5SEd Maste     fc_neg->fc_rx = !!(fc & aq_fw_fc_ENABLE_RX);
294*493d26c5SEd Maste     fc_neg->fc_tx = !!(fc & aq_fw_fc_ENABLE_TX);
295*493d26c5SEd Maste 
296*493d26c5SEd Maste  //   AQ_DBG_EXIT(0);
297*493d26c5SEd Maste     return (0);
298*493d26c5SEd Maste }
299*493d26c5SEd Maste 
300*493d26c5SEd Maste int aq_hw_get_mac_permanent(struct aq_hw *hw,  u8 *mac)
301*493d26c5SEd Maste {
302*493d26c5SEd Maste     int err = -ENOTSUP;
303*493d26c5SEd Maste     AQ_DBG_ENTER();
304*493d26c5SEd Maste 
305*493d26c5SEd Maste     if (hw->fw_ops && hw->fw_ops->get_mac_addr)
306*493d26c5SEd Maste         err = hw->fw_ops->get_mac_addr(hw, mac);
307*493d26c5SEd Maste 
308*493d26c5SEd Maste     /* Couldn't get MAC address from HW. Use auto-generated one. */
309*493d26c5SEd Maste     if ((mac[0] & 1) || ((mac[0] | mac[1] | mac[2]) == 0)) {
310*493d26c5SEd Maste         u16 rnd;
311*493d26c5SEd Maste         u32 h = 0;
312*493d26c5SEd Maste         u32 l = 0;
313*493d26c5SEd Maste 
314*493d26c5SEd Maste         printf("atlantic: HW MAC address %x:%x:%x:%x:%x:%x is multicast or empty MAC", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
315*493d26c5SEd Maste         printf("atlantic: Use random MAC address");
316*493d26c5SEd Maste 
317*493d26c5SEd Maste         rnd = arc4random();
318*493d26c5SEd Maste 
319*493d26c5SEd Maste         /* chip revision */
320*493d26c5SEd Maste         l = 0xE3000000U
321*493d26c5SEd Maste             | (0xFFFFU & rnd)
322*493d26c5SEd Maste             | (0x00 << 16);
323*493d26c5SEd Maste         h = 0x8001300EU;
324*493d26c5SEd Maste 
325*493d26c5SEd Maste         mac[5] = (u8)(0xFFU & l);
326*493d26c5SEd Maste         l >>= 8;
327*493d26c5SEd Maste         mac[4] = (u8)(0xFFU & l);
328*493d26c5SEd Maste         l >>= 8;
329*493d26c5SEd Maste         mac[3] = (u8)(0xFFU & l);
330*493d26c5SEd Maste         l >>= 8;
331*493d26c5SEd Maste         mac[2] = (u8)(0xFFU & l);
332*493d26c5SEd Maste         mac[1] = (u8)(0xFFU & h);
333*493d26c5SEd Maste         h >>= 8;
334*493d26c5SEd Maste         mac[0] = (u8)(0xFFU & h);
335*493d26c5SEd Maste 
336*493d26c5SEd Maste         err = EOK;
337*493d26c5SEd Maste     }
338*493d26c5SEd Maste 
339*493d26c5SEd Maste     AQ_DBG_EXIT(err);
340*493d26c5SEd Maste     return (err);
341*493d26c5SEd Maste }
342*493d26c5SEd Maste 
343*493d26c5SEd Maste int aq_hw_deinit(struct aq_hw *hw)
344*493d26c5SEd Maste {
345*493d26c5SEd Maste     AQ_DBG_ENTER();
346*493d26c5SEd Maste     aq_hw_mpi_set(hw, MPI_DEINIT, 0);
347*493d26c5SEd Maste     AQ_DBG_EXIT(0);
348*493d26c5SEd Maste     return (0);
349*493d26c5SEd Maste }
350*493d26c5SEd Maste 
351*493d26c5SEd Maste int aq_hw_set_power(struct aq_hw *hw, unsigned int power_state)
352*493d26c5SEd Maste {
353*493d26c5SEd Maste     AQ_DBG_ENTER();
354*493d26c5SEd Maste     aq_hw_mpi_set(hw, MPI_POWER, 0);
355*493d26c5SEd Maste     AQ_DBG_EXIT(0);
356*493d26c5SEd Maste     return (0);
357*493d26c5SEd Maste }
358*493d26c5SEd Maste 
359*493d26c5SEd Maste 
360*493d26c5SEd Maste /* HW NIC functions */
361*493d26c5SEd Maste 
/*
 * Full NIC reset: reset the firmware, reset the interrupt block and
 * wait for it to come out of reset, then invoke the F/W-specific
 * reset hook.  Returns 0 or a negative errno-style value.
 */
int aq_hw_reset(struct aq_hw *hw)
{
    int err = 0;

    AQ_DBG_ENTER();

    err = aq_fw_reset(hw);
    if (err < 0)
        goto err_exit;

    itr_irq_reg_res_dis_set(hw, 0);
    itr_res_irq_set(hw, 1);

    /* check 10 times by 1ms
     * NOTE(review): 'err' is apparently assigned by AQ_HW_WAIT_FOR()
     * on timeout — confirm against the macro definition. */
    AQ_HW_WAIT_FOR(itr_res_irq_get(hw) == 0, 1000, 10);
    if (err < 0) {
        printf("atlantic: IRQ reset failed: %d", err);
        goto err_exit;
    }

    if (hw->fw_ops && hw->fw_ops->reset)
        hw->fw_ops->reset(hw);

    err = aq_hw_err_from_flags(hw);

err_exit:
    AQ_DBG_EXIT(err);
    return (err);
}
391*493d26c5SEd Maste 
/*
 * Program the QoS blocks: TX packet scheduler (descriptor rate, VM and
 * TC arbitration, credits/weights for TC 0), TX/RX packet buffer sizes
 * with high/low watermarks, and the 802.1p priority -> TC map (all
 * priorities mapped to TC 0).
 */
static int aq_hw_qos_set(struct aq_hw *hw)
{
    u32 tc = 0U;
    u32 buff_size = 0U;
    unsigned int i_priority = 0U;
    int err = 0;

    AQ_DBG_ENTER();
    /* TPS Descriptor rate init */
    tps_tx_pkt_shed_desc_rate_curr_time_res_set(hw, 0x0U);
    tps_tx_pkt_shed_desc_rate_lim_set(hw, 0xA);

    /* TPS VM init */
    tps_tx_pkt_shed_desc_vm_arb_mode_set(hw, 0U);

    /* TPS TC credits init */
    tps_tx_pkt_shed_desc_tc_arb_mode_set(hw, 0U);
    tps_tx_pkt_shed_data_arb_mode_set(hw, 0U);

    tps_tx_pkt_shed_tc_data_max_credit_set(hw, 0xFFF, 0U);
    tps_tx_pkt_shed_tc_data_weight_set(hw, 0x64, 0U);
    tps_tx_pkt_shed_desc_tc_max_credit_set(hw, 0x50, 0U);
    tps_tx_pkt_shed_desc_tc_weight_set(hw, 0x1E, 0U);

    /* Tx buf size */
    buff_size = AQ_HW_TXBUF_MAX;

    /* Watermarks at 66% (high) and 50% (low) of the buffer size;
     * the (1024 / 32U) factor converts buff_size units to register
     * units — presumably KiB to 32-byte cells, TODO confirm. */
    tpb_tx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
    tpb_tx_buff_hi_threshold_per_tc_set(hw,
                        (buff_size * (1024 / 32U) * 66U) /
                        100U, tc);
    tpb_tx_buff_lo_threshold_per_tc_set(hw,
                        (buff_size * (1024 / 32U) * 50U) /
                        100U, tc);

    /* QoS Rx buf size per TC */
    tc = 0;
    buff_size = AQ_HW_RXBUF_MAX;

    rpb_rx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
    rpb_rx_buff_hi_threshold_per_tc_set(hw,
                        (buff_size *
                        (1024U / 32U) * 66U) /
                        100U, tc);
    rpb_rx_buff_lo_threshold_per_tc_set(hw,
                        (buff_size *
                        (1024U / 32U) * 50U) /
                        100U, tc);

    /* QoS 802.1p priority -> TC mapping */
    for (i_priority = 8U; i_priority--;)
        rpf_rpb_user_priority_tc_map_set(hw, i_priority, 0U);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}
449*493d26c5SEd Maste 
/*
 * Enable hardware offloads: TX/RX IPv4 and TCP/UDP checksums, LSO on
 * all rings, and LRO (ring mask depends on hw->lro_enabled).
 *
 * NOTE(review): 'err' is never assigned between its initialization and
 * the three `if (err < 0)` checks below, so those checks can never
 * fire — they look like leftovers from a checked-call pattern.
 */
static int aq_hw_offload_set(struct aq_hw *hw)
{
    int err = 0;

    AQ_DBG_ENTER();
    /* TX checksums offloads*/
    tpo_ipv4header_crc_offload_en_set(hw, 1);
    tpo_tcp_udp_crc_offload_en_set(hw, 1);
    if (err < 0)
        goto err_exit;

    /* RX checksums offloads*/
    rpo_ipv4header_crc_offload_en_set(hw, 1);
    rpo_tcp_udp_crc_offload_en_set(hw, 1);
    if (err < 0)
        goto err_exit;

    /* LSO offloads*/
    tdm_large_send_offload_en_set(hw, 0xFFFFFFFFU);
    if (err < 0)
        goto err_exit;

/* LRO offloads */
    {
        u32 i = 0;
        /* Encode the max LRO descriptor count (8/4/2/1) as the 2-bit
         * register value 3/2/1/0. */
        u32 val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
            ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
            ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

        for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
            rpo_lro_max_num_of_descriptors_set(hw, val, i);

        rpo_lro_time_base_divider_set(hw, 0x61AU);
        rpo_lro_inactive_interval_set(hw, 0);
        /* the LRO timebase divider is 5 uS (0x61a),
         * to get a maximum coalescing interval of 250 uS,
         * we need to multiply by 50(0x32) to get
         * the default value 250 uS
         */
        rpo_lro_max_coalescing_interval_set(hw, 50);

        rpo_lro_qsessions_lim_set(hw, 1U);

        rpo_lro_total_desc_lim_set(hw, 2U);

        rpo_lro_patch_optimization_en_set(hw, 0U);

        rpo_lro_min_pay_of_first_pkt_set(hw, 10U);

        rpo_lro_pkt_lim_set(hw, 1U);

        /* LRO is enabled/disabled for all rings at once. */
        rpo_lro_en_set(hw, (hw->lro_enabled ? 0xFFFFFFFFU : 0U));
    }


    err = aq_hw_err_from_flags(hw);

err_exit:
    AQ_DBG_EXIT(err);
    return (err);
}
511*493d26c5SEd Maste 
/*
 * One-time TX datapath setup: TC mode, LSO TCP-flag masks for
 * first/middle/last segments, write-back interrupts, DCA off, and
 * scatter-path insertion enable.
 */
static int aq_hw_init_tx_path(struct aq_hw *hw)
{
    int err = 0;

    AQ_DBG_ENTER();

    /* Tx TC/RSS number config */
    tpb_tx_tc_mode_set(hw, 1U);

    /* TCP flags preserved in generated LSO segments: first and middle
     * segments mask out FIN/PSH-style end flags, the last segment
     * masks out SYN-style start flags — TODO confirm bit meanings
     * against the datasheet. */
    thm_lso_tcp_flag_of_first_pkt_set(hw, 0x0FF6U);
    thm_lso_tcp_flag_of_middle_pkt_set(hw, 0x0FF6U);
    thm_lso_tcp_flag_of_last_pkt_set(hw, 0x0F7FU);

    /* Tx interrupts */
    tdm_tx_desc_wr_wb_irq_en_set(hw, 1U);

    /* misc */
    AQ_WRITE_REG(hw, 0x00007040U, 0x00010000U);//IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U);
    tdm_tx_dca_en_set(hw, 0U);
    tdm_tx_dca_mode_set(hw, 0U);

    tpb_tx_path_scp_ins_en_set(hw, 1U);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}
539*493d26c5SEd Maste 
/*
 * One-time RX datapath setup: TC mode, flow control, RSS ring
 * selection, unicast/multicast/broadcast L2 filters, VLAN filtering
 * (promiscuous for VLANs, untagged accepted), write-back interrupts
 * and RSS hash configuration.
 */
static int aq_hw_init_rx_path(struct aq_hw *hw)
{
    //struct aq_nic_cfg_s *cfg = hw->aq_nic_cfg;
    unsigned int control_reg_val = 0U;
    int i;
    int err;

    AQ_DBG_ENTER();
    /* Rx TC/RSS number config */
    rpb_rpf_rx_traf_class_mode_set(hw, 1U);

    /* Rx flow control */
    rpb_rx_flow_ctl_mode_set(hw, 1U);

    /* RSS Ring selection */
    reg_rx_flr_rss_control1set(hw, 0xB3333333U);

    /* Multicast filters: only unicast filter 0 enabled, all set to
     * host action. */
    for (i = AQ_HW_MAC_MAX; i--;) {
        rpfl2_uc_flr_en_set(hw, (i == 0U) ? 1U : 0U, i);
        rpfl2unicast_flr_act_set(hw, 1U, i);
    }

    reg_rx_flr_mcst_flr_msk_set(hw, 0x00000000U);
    reg_rx_flr_mcst_flr_set(hw, 0x00010FFFU, 0U);

    /* Vlan filters: 802.1ad outer tag, 802.1Q inner tag. */
    rpf_vlan_outer_etht_set(hw, 0x88A8U);
    rpf_vlan_inner_etht_set(hw, 0x8100U);
	rpf_vlan_accept_untagged_packets_set(hw, true);
	rpf_vlan_untagged_act_set(hw, HW_ATL_RX_HOST);

    rpf_vlan_prom_mode_en_set(hw, 1);

    /* Rx Interrupts */
    rdm_rx_desc_wr_wb_irq_en_set(hw, 1U);

    /* misc */
    control_reg_val = 0x000F0000U; //RPF2

    /* RSS hash type set for IP/TCP */
    control_reg_val |= 0x1EU;

    AQ_WRITE_REG(hw, 0x00005040U, control_reg_val);

    rpfl2broadcast_en_set(hw, 1U);
    rpfl2broadcast_flr_act_set(hw, 1U);
    rpfl2broadcast_count_threshold_set(hw, 0xFFFFU & (~0U / 256U));

    rdm_rx_dca_en_set(hw, 0U);
    rdm_rx_dca_mode_set(hw, 0U);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}
596*493d26c5SEd Maste 
597*493d26c5SEd Maste int aq_hw_mac_addr_set(struct aq_hw *hw, u8 *mac_addr, u8 index)
598*493d26c5SEd Maste {
599*493d26c5SEd Maste     int err = 0;
600*493d26c5SEd Maste     unsigned int h = 0U;
601*493d26c5SEd Maste     unsigned int l = 0U;
602*493d26c5SEd Maste 
603*493d26c5SEd Maste     AQ_DBG_ENTER();
604*493d26c5SEd Maste     if (!mac_addr) {
605*493d26c5SEd Maste         err = -EINVAL;
606*493d26c5SEd Maste         goto err_exit;
607*493d26c5SEd Maste     }
608*493d26c5SEd Maste     h = (mac_addr[0] << 8) | (mac_addr[1]);
609*493d26c5SEd Maste     l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
610*493d26c5SEd Maste         (mac_addr[4] << 8) | mac_addr[5];
611*493d26c5SEd Maste 
612*493d26c5SEd Maste     rpfl2_uc_flr_en_set(hw, 0U, index);
613*493d26c5SEd Maste     rpfl2unicast_dest_addresslsw_set(hw, l, index);
614*493d26c5SEd Maste     rpfl2unicast_dest_addressmsw_set(hw, h, index);
615*493d26c5SEd Maste     rpfl2_uc_flr_en_set(hw, 1U, index);
616*493d26c5SEd Maste 
617*493d26c5SEd Maste     err = aq_hw_err_from_flags(hw);
618*493d26c5SEd Maste 
619*493d26c5SEd Maste err_exit:
620*493d26c5SEd Maste     AQ_DBG_EXIT(err);
621*493d26c5SEd Maste     return (err);
622*493d26c5SEd Maste }
623*493d26c5SEd Maste 
/*
 * Top-level hardware init: PCI request-size limits, TX/RX datapath
 * setup, MAC filter programming, link bring-up, QoS, interrupt mode
 * (MSI or MSI-X) and offloads.
 *
 * mac_addr: 6-byte MAC to program into the primary unicast filter.
 * adm_irq:  interrupt vector for the admin/link events map.
 * msix:     true to configure MSI-X multi-vector mode, else MSI.
 */
int aq_hw_init(struct aq_hw *hw, u8 *mac_addr, u8 adm_irq, bool msix)
{

    int err = 0;
    u32 val = 0;

    AQ_DBG_ENTER();

    /* Force limit MRRS on RDM/TDM to 2K */
    val = AQ_READ_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR);
    AQ_WRITE_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR, (val & ~0x707) | 0x404);

    /* TX DMA total request limit. B0 hardware is not capable to
    * handle more than (8K-MRRS) incoming DMA data.
    * Value 24 in 256byte units
    */
    AQ_WRITE_REG(hw, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

    aq_hw_init_tx_path(hw);
    aq_hw_init_rx_path(hw);

    aq_hw_mac_addr_set(hw, mac_addr, AQ_HW_MAC);

    aq_hw_mpi_set(hw, MPI_INIT, hw->link_rate);

    aq_hw_qos_set(hw);

    err = aq_hw_err_from_flags(hw);
    if (err < 0)
        goto err_exit;

    /* Interrupts */
    //Enable interrupt
    itr_irq_status_cor_en_set(hw, 0); //Disable clear-on-read for status
    itr_irq_auto_mask_clr_en_set(hw, 1); // Enable auto-mask clear.
	if (msix)
		itr_irq_mode_set(hw, 0x6); //MSIX + multi vector
	else
		itr_irq_mode_set(hw, 0x5); //MSI + multi vector

    /* Route the admin interrupt (link etc.) with the enable bit 0x80
     * set — TODO confirm the 0x80 semantics against reg_gen_irq_map. */
    reg_gen_irq_map_set(hw, 0x80 | adm_irq, 3);

    aq_hw_offload_set(hw);

err_exit:
    AQ_DBG_EXIT(err);
    return (err);
}
672*493d26c5SEd Maste 
673*493d26c5SEd Maste 
/*
 * aq_hw_start() - open the datapath by enabling the TX and RX packet
 * buffers.
 *
 * Returns 0 on success or a negative error code derived from the
 * accumulated hardware error flags.
 */
int aq_hw_start(struct aq_hw *hw)
{
    int rc;

    AQ_DBG_ENTER();

    /* Enable both directions of the packet buffer. */
    tpb_tx_buff_en_set(hw, 1U);
    rpb_rx_buff_en_set(hw, 1U);

    rc = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(rc);
    return (rc);
}
685*493d26c5SEd Maste 
686*493d26c5SEd Maste 
687*493d26c5SEd Maste int aq_hw_interrupt_moderation_set(struct aq_hw *hw)
688*493d26c5SEd Maste {
689*493d26c5SEd Maste     static unsigned int AQ_HW_NIC_timers_table_rx_[][2] = {
690*493d26c5SEd Maste         {80, 120},//{0x6U, 0x38U},/* 10Gbit */
691*493d26c5SEd Maste         {0xCU, 0x70U},/* 5Gbit */
692*493d26c5SEd Maste         {0xCU, 0x70U},/* 5Gbit 5GS */
693*493d26c5SEd Maste         {0x18U, 0xE0U},/* 2.5Gbit */
694*493d26c5SEd Maste         {0x30U, 0x80U},/* 1Gbit */
695*493d26c5SEd Maste         {0x4U, 0x50U},/* 100Mbit */
696*493d26c5SEd Maste     };
697*493d26c5SEd Maste     static unsigned int AQ_HW_NIC_timers_table_tx_[][2] = {
698*493d26c5SEd Maste         {0x4fU, 0x1ff},//{0xffU, 0xffU}, /* 10Gbit */
699*493d26c5SEd Maste         {0x4fU, 0xffU}, /* 5Gbit */
700*493d26c5SEd Maste         {0x4fU, 0xffU}, /* 5Gbit 5GS */
701*493d26c5SEd Maste         {0x4fU, 0xffU}, /* 2.5Gbit */
702*493d26c5SEd Maste         {0x4fU, 0xffU}, /* 1Gbit */
703*493d26c5SEd Maste         {0x4fU, 0xffU}, /* 100Mbit */
704*493d26c5SEd Maste     };
705*493d26c5SEd Maste 
706*493d26c5SEd Maste     u32 speed_index = 0U; //itr settings for 10 g
707*493d26c5SEd Maste     u32 itr_rx = 2U;
708*493d26c5SEd Maste     u32 itr_tx = 2U;
709*493d26c5SEd Maste     int custom_itr = hw->itr;
710*493d26c5SEd Maste     int active = custom_itr != 0;
711*493d26c5SEd Maste     int err;
712*493d26c5SEd Maste 
713*493d26c5SEd Maste 
714*493d26c5SEd Maste     AQ_DBG_ENTER();
715*493d26c5SEd Maste 
716*493d26c5SEd Maste     if (custom_itr == -1) {
717*493d26c5SEd Maste 	    itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][0] << 0x8U; /* set min timer value */
718*493d26c5SEd Maste 	    itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][1] << 0x10U; /* set max timer value */
719*493d26c5SEd Maste 
720*493d26c5SEd Maste 	    itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][0] << 0x8U; /* set min timer value */
721*493d26c5SEd Maste 	    itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][1] << 0x10U; /* set max timer value */
722*493d26c5SEd Maste     }else{
723*493d26c5SEd Maste 	    if (custom_itr > 0x1FF)
724*493d26c5SEd Maste 		    custom_itr = 0x1FF;
725*493d26c5SEd Maste 
726*493d26c5SEd Maste 	    itr_rx |= (custom_itr/2) << 0x8U; /* set min timer value */
727*493d26c5SEd Maste 	    itr_rx |= custom_itr << 0x10U; /* set max timer value */
728*493d26c5SEd Maste 
729*493d26c5SEd Maste 	    itr_tx |= (custom_itr/2) << 0x8U; /* set min timer value */
730*493d26c5SEd Maste 	    itr_tx |= custom_itr << 0x10U; /* set max timer value */
731*493d26c5SEd Maste     }
732*493d26c5SEd Maste 
733*493d26c5SEd Maste     tdm_tx_desc_wr_wb_irq_en_set(hw, !active);
734*493d26c5SEd Maste     tdm_tdm_intr_moder_en_set(hw, active);
735*493d26c5SEd Maste     rdm_rx_desc_wr_wb_irq_en_set(hw, !active);
736*493d26c5SEd Maste     rdm_rdm_intr_moder_en_set(hw, active);
737*493d26c5SEd Maste 
738*493d26c5SEd Maste     for (int i = HW_ATL_B0_RINGS_MAX; i--;) {
739*493d26c5SEd Maste         reg_tx_intr_moder_ctrl_set(hw,  itr_tx, i);
740*493d26c5SEd Maste         reg_rx_intr_moder_ctrl_set(hw,  itr_rx, i);
741*493d26c5SEd Maste     }
742*493d26c5SEd Maste 
743*493d26c5SEd Maste     err = aq_hw_err_from_flags(hw);
744*493d26c5SEd Maste     AQ_DBG_EXIT(err);
745*493d26c5SEd Maste     return (err);
746*493d26c5SEd Maste }
747*493d26c5SEd Maste 
748*493d26c5SEd Maste /**
749*493d26c5SEd Maste  * @brief Set VLAN filter table
750*493d26c5SEd Maste  * @details Configure VLAN filter table to accept (and assign the queue) traffic
751*493d26c5SEd Maste  *  for the particular vlan ids.
 * Note: use this function under VLAN promiscuous mode so as not to lose traffic
753*493d26c5SEd Maste  *
754*493d26c5SEd Maste  * @param aq_hw_s
755*493d26c5SEd Maste  * @param aq_rx_filter_vlan VLAN filter configuration
756*493d26c5SEd Maste  * @return 0 - OK, <0 - error
757*493d26c5SEd Maste  */
758*493d26c5SEd Maste int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
759*493d26c5SEd Maste 				  struct aq_rx_filter_vlan *aq_vlans)
760*493d26c5SEd Maste {
761*493d26c5SEd Maste 	int i;
762*493d26c5SEd Maste 
763*493d26c5SEd Maste 	for (i = 0; i < AQ_HW_VLAN_MAX_FILTERS; i++) {
764*493d26c5SEd Maste 		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
765*493d26c5SEd Maste 		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
766*493d26c5SEd Maste 		if (aq_vlans[i].enable) {
767*493d26c5SEd Maste 			hw_atl_rpf_vlan_id_flr_set(self,
768*493d26c5SEd Maste 						   aq_vlans[i].vlan_id,
769*493d26c5SEd Maste 						   i);
770*493d26c5SEd Maste 			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
771*493d26c5SEd Maste 			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
772*493d26c5SEd Maste 			if (aq_vlans[i].queue != 0xFF) {
773*493d26c5SEd Maste 				hw_atl_rpf_vlan_rxq_flr_set(self,
774*493d26c5SEd Maste 							    aq_vlans[i].queue,
775*493d26c5SEd Maste 							    i);
776*493d26c5SEd Maste 				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
777*493d26c5SEd Maste 			}
778*493d26c5SEd Maste 		}
779*493d26c5SEd Maste 	}
780*493d26c5SEd Maste 
781*493d26c5SEd Maste 	return aq_hw_err_from_flags(self);
782*493d26c5SEd Maste }
783*493d26c5SEd Maste 
784*493d26c5SEd Maste int hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc)
785*493d26c5SEd Maste {
786*493d26c5SEd Maste 	hw_atl_rpf_vlan_prom_mode_en_set(self, promisc);
787*493d26c5SEd Maste 	return aq_hw_err_from_flags(self);
788*493d26c5SEd Maste }
789*493d26c5SEd Maste 
790*493d26c5SEd Maste 
791*493d26c5SEd Maste void aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc)
792*493d26c5SEd Maste {
793*493d26c5SEd Maste 	AQ_DBG_ENTERA("promisc %d, vlan_promisc %d, allmulti %d", l2_promisc, vlan_promisc, mc_promisc);
794*493d26c5SEd Maste 
795*493d26c5SEd Maste 	rpfl2promiscuous_mode_en_set(self, l2_promisc);
796*493d26c5SEd Maste 
797*493d26c5SEd Maste 	hw_atl_b0_hw_vlan_promisc_set(self, l2_promisc | vlan_promisc);
798*493d26c5SEd Maste 
799*493d26c5SEd Maste 	rpfl2_accept_all_mc_packets_set(self, mc_promisc);
800*493d26c5SEd Maste 	rpfl2multicast_flr_en_set(self, mc_promisc, 0);
801*493d26c5SEd Maste 
802*493d26c5SEd Maste 	AQ_DBG_EXIT(0);
803*493d26c5SEd Maste }
804*493d26c5SEd Maste 
805*493d26c5SEd Maste int aq_hw_rss_hash_set(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE])
806*493d26c5SEd Maste {
807*493d26c5SEd Maste 	u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
808*493d26c5SEd Maste 	u32 addr = 0U;
809*493d26c5SEd Maste 	u32 i = 0U;
810*493d26c5SEd Maste 	int err = 0;
811*493d26c5SEd Maste 
812*493d26c5SEd Maste 	AQ_DBG_ENTER();
813*493d26c5SEd Maste 
814*493d26c5SEd Maste 	memcpy(rss_key_dw, rss_key, HW_ATL_RSS_HASHKEY_SIZE);
815*493d26c5SEd Maste 
816*493d26c5SEd Maste 	for (i = 10, addr = 0U; i--; ++addr) {
817*493d26c5SEd Maste 		u32 key_data = bswap32(rss_key_dw[i]);
818*493d26c5SEd Maste 		rpf_rss_key_wr_data_set(self, key_data);
819*493d26c5SEd Maste 		rpf_rss_key_addr_set(self, addr);
820*493d26c5SEd Maste 		rpf_rss_key_wr_en_set(self, 1U);
821*493d26c5SEd Maste 		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0,
822*493d26c5SEd Maste 			       1000U, 10U);
823*493d26c5SEd Maste 		if (err < 0)
824*493d26c5SEd Maste 			goto err_exit;
825*493d26c5SEd Maste 	}
826*493d26c5SEd Maste 
827*493d26c5SEd Maste 	err = aq_hw_err_from_flags(self);
828*493d26c5SEd Maste 
829*493d26c5SEd Maste err_exit:
830*493d26c5SEd Maste 	AQ_DBG_EXIT(err);
831*493d26c5SEd Maste 	return (err);
832*493d26c5SEd Maste }
833*493d26c5SEd Maste 
834*493d26c5SEd Maste int aq_hw_rss_hash_get(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE])
835*493d26c5SEd Maste {
836*493d26c5SEd Maste 	u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
837*493d26c5SEd Maste 	u32 addr = 0U;
838*493d26c5SEd Maste 	u32 i = 0U;
839*493d26c5SEd Maste 	int err = 0;
840*493d26c5SEd Maste 
841*493d26c5SEd Maste 	AQ_DBG_ENTER();
842*493d26c5SEd Maste 
843*493d26c5SEd Maste 	for (i = 10, addr = 0U; i--; ++addr) {
844*493d26c5SEd Maste 		rpf_rss_key_addr_set(self, addr);
845*493d26c5SEd Maste 		rss_key_dw[i] = bswap32(rpf_rss_key_rd_data_get(self));
846*493d26c5SEd Maste 	}
847*493d26c5SEd Maste 	memcpy(rss_key, rss_key_dw, HW_ATL_RSS_HASHKEY_SIZE);
848*493d26c5SEd Maste 
849*493d26c5SEd Maste 	err = aq_hw_err_from_flags(self);
850*493d26c5SEd Maste 
851*493d26c5SEd Maste 	AQ_DBG_EXIT(err);
852*493d26c5SEd Maste 	return (err);
853*493d26c5SEd Maste }
854*493d26c5SEd Maste 
855*493d26c5SEd Maste int aq_hw_rss_set(struct aq_hw_s *self, u8 rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])
856*493d26c5SEd Maste {
857*493d26c5SEd Maste 	u16 bitary[(HW_ATL_RSS_INDIRECTION_TABLE_MAX *
858*493d26c5SEd Maste 					3 / 16U)];
859*493d26c5SEd Maste 	int err = 0;
860*493d26c5SEd Maste 	u32 i = 0U;
861*493d26c5SEd Maste 
862*493d26c5SEd Maste 	memset(bitary, 0, sizeof(bitary));
863*493d26c5SEd Maste 
864*493d26c5SEd Maste 	for (i = HW_ATL_RSS_INDIRECTION_TABLE_MAX; i--;) {
865*493d26c5SEd Maste 		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
866*493d26c5SEd Maste 			((rss_table[i]) << ((i * 3U) & 0xFU));
867*493d26c5SEd Maste 	}
868*493d26c5SEd Maste 
869*493d26c5SEd Maste 	for (i = ARRAY_SIZE(bitary); i--;) {
870*493d26c5SEd Maste 		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
871*493d26c5SEd Maste 		rpf_rss_redir_tbl_addr_set(self, i);
872*493d26c5SEd Maste 		rpf_rss_redir_wr_en_set(self, 1U);
873*493d26c5SEd Maste 		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0,
874*493d26c5SEd Maste 			       1000U, 10U);
875*493d26c5SEd Maste 		if (err < 0)
876*493d26c5SEd Maste 			goto err_exit;
877*493d26c5SEd Maste 	}
878*493d26c5SEd Maste 
879*493d26c5SEd Maste 	err = aq_hw_err_from_flags(self);
880*493d26c5SEd Maste 
881*493d26c5SEd Maste err_exit:
882*493d26c5SEd Maste 	return (err);
883*493d26c5SEd Maste }
884*493d26c5SEd Maste 
885*493d26c5SEd Maste int aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable)
886*493d26c5SEd Maste {
887*493d26c5SEd Maste 	int err = 0;
888*493d26c5SEd Maste 	if(!enable) {
889*493d26c5SEd Maste 		/* HW bug workaround:
890*493d26c5SEd Maste 		 * Disable RSS for UDP using rx flow filter 0.
891*493d26c5SEd Maste 		 * HW does not track RSS stream for fragmenged UDP,
892*493d26c5SEd Maste 		 * 0x5040 control reg does not work.
893*493d26c5SEd Maste 		 */
894*493d26c5SEd Maste 		hw_atl_rpf_l3_l4_enf_set(self, true, 0);
895*493d26c5SEd Maste 		hw_atl_rpf_l4_protf_en_set(self, true, 0);
896*493d26c5SEd Maste 		hw_atl_rpf_l3_l4_rxqf_en_set(self, true, 0);
897*493d26c5SEd Maste 		hw_atl_rpf_l3_l4_actf_set(self, L2_FILTER_ACTION_HOST, 0);
898*493d26c5SEd Maste 		hw_atl_rpf_l3_l4_rxqf_set(self, 0, 0);
899*493d26c5SEd Maste 		hw_atl_rpf_l4_protf_set(self, HW_ATL_RX_UDP, 0);
900*493d26c5SEd Maste 	} else {
901*493d26c5SEd Maste 		hw_atl_rpf_l3_l4_enf_set(self, false, 0);
902*493d26c5SEd Maste 	}
903*493d26c5SEd Maste 
904*493d26c5SEd Maste 	err = aq_hw_err_from_flags(self);
905*493d26c5SEd Maste 	return (err);
906*493d26c5SEd Maste 
907*493d26c5SEd Maste }
908