1*493d26c5SEd Maste /*
2*493d26c5SEd Maste * aQuantia Corporation Network Driver
3*493d26c5SEd Maste * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
4*493d26c5SEd Maste *
5*493d26c5SEd Maste * Redistribution and use in source and binary forms, with or without
6*493d26c5SEd Maste * modification, are permitted provided that the following conditions
7*493d26c5SEd Maste * are met:
8*493d26c5SEd Maste *
9*493d26c5SEd Maste * (1) Redistributions of source code must retain the above
10*493d26c5SEd Maste * copyright notice, this list of conditions and the following
11*493d26c5SEd Maste * disclaimer.
12*493d26c5SEd Maste *
13*493d26c5SEd Maste * (2) Redistributions in binary form must reproduce the above
14*493d26c5SEd Maste * copyright notice, this list of conditions and the following
15*493d26c5SEd Maste * disclaimer in the documentation and/or other materials provided
16*493d26c5SEd Maste * with the distribution.
17*493d26c5SEd Maste *
18*493d26c5SEd Maste * (3)The name of the author may not be used to endorse or promote
19*493d26c5SEd Maste * products derived from this software without specific prior
20*493d26c5SEd Maste * written permission.
21*493d26c5SEd Maste *
22*493d26c5SEd Maste * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
23*493d26c5SEd Maste * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24*493d26c5SEd Maste * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25*493d26c5SEd Maste * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
26*493d26c5SEd Maste * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27*493d26c5SEd Maste * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
28*493d26c5SEd Maste * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29*493d26c5SEd Maste * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30*493d26c5SEd Maste * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
31*493d26c5SEd Maste * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32*493d26c5SEd Maste * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33*493d26c5SEd Maste */
34*493d26c5SEd Maste
35*493d26c5SEd Maste #include <sys/endian.h>
36*493d26c5SEd Maste #include <sys/param.h>
37*493d26c5SEd Maste #include <sys/systm.h>
38*493d26c5SEd Maste #include <machine/cpu.h>
39*493d26c5SEd Maste #include <sys/socket.h>
40*493d26c5SEd Maste #include <net/if.h>
41*493d26c5SEd Maste
42*493d26c5SEd Maste #include "aq_hw.h"
43*493d26c5SEd Maste #include "aq_dbg.h"
44*493d26c5SEd Maste #include "aq_hw_llh.h"
45*493d26c5SEd Maste #include "aq_fw.h"
46*493d26c5SEd Maste
47*493d26c5SEd Maste #define AQ_HW_FW_SM_RAM 0x2U
48*493d26c5SEd Maste #define AQ_CFG_FW_MIN_VER_EXPECTED 0x01050006U
49*493d26c5SEd Maste
50*493d26c5SEd Maste
/*
 * Collect error state latched by the hardware access layer.
 * Currently a stub: no error flags are tracked, so this always
 * reports success (0).
 */
int aq_hw_err_from_flags(struct aq_hw *hw)
{
	return (0);
}
55*493d26c5SEd Maste
/*
 * Decode the silicon revision from the global MIF ID register and report
 * the corresponding chip feature mask through *p.  Unknown revisions get
 * an empty feature mask.
 */
static void aq_hw_chip_features_init(struct aq_hw *hw, u32 *p)
{
	u32 features = 0U;
	u32 mif_rev = reg_glb_mif_id_get(hw) & 0xFFU;

	switch (mif_rev & 0xFU) {
	case 1U:		/* A0 silicon */
		features = AQ_HW_CHIP_REVISION_A0 |
		    AQ_HW_CHIP_MPI_AQ |
		    AQ_HW_CHIP_MIPS;
		break;
	case 2U:		/* B0 silicon */
		features = AQ_HW_CHIP_REVISION_B0 |
		    AQ_HW_CHIP_MPI_AQ |
		    AQ_HW_CHIP_MIPS |
		    AQ_HW_CHIP_TPO2 |
		    AQ_HW_CHIP_RPF2;
		break;
	case 0xAU:		/* B1 silicon */
		features = AQ_HW_CHIP_REVISION_B1 |
		    AQ_HW_CHIP_MPI_AQ |
		    AQ_HW_CHIP_MIPS |
		    AQ_HW_CHIP_TPO2 |
		    AQ_HW_CHIP_RPF2;
		break;
	default:
		/* unrecognized revision: leave the mask empty */
		break;
	}

	*p = features;
}
82*493d26c5SEd Maste
/*
 * Read `cnt` dwords of firmware mailbox RAM starting at address `a` into `p`.
 *
 * Takes the FW SM_RAM hardware semaphore, points the mailbox at `a`, then
 * pulls one dword per "execute" cycle.  NOTE: AQ_HW_WAIT_FOR is a macro
 * that assigns the local `err` on timeout, which is why `err` appears to
 * be tested without being written.
 *
 * Returns 0 on success or -ETIME if the semaphore cannot be obtained.
 */
int aq_hw_fw_downld_dwords(struct aq_hw *hw, u32 a, u32 *p, u32 cnt)
{
	int err = 0;

	// AQ_DBG_ENTER();
	/* acquire the FW SM_RAM semaphore (poll up to 10000 x 1us) */
	AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(hw,
	    AQ_HW_FW_SM_RAM) == 1U,
	    1U, 10000U);

	if (err < 0) {
		bool is_locked;

		/* timed out: force-release the semaphore and retry once */
		reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);
		is_locked = reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM);
		if (!is_locked) {
			err = -ETIME;
			goto err_exit;
		}
	}

	mif_mcp_up_mailbox_addr_set(hw, a);

	/* pull cnt dwords, one mailbox operation each; stops early if a
	 * wait below times out and sets err */
	for (++cnt; --cnt && !err;) {
		mif_mcp_up_mailbox_execute_operation_set(hw, 1);

		/* B1 silicon signals completion by advancing the mailbox
		 * address (presumably auto-increment — TODO confirm);
		 * earlier revisions expose a busy flag instead */
		if (IS_CHIP_FEATURE(hw, REVISION_B1))
			AQ_HW_WAIT_FOR(a != mif_mcp_up_mailbox_addr_get(hw), 1U, 1000U);
		else
			AQ_HW_WAIT_FOR(!mif_mcp_up_mailbox_busy_get(hw), 1, 1000U);

		*(p++) = mif_mcp_up_mailbox_data_get(hw);
	}

	/* release the semaphore */
	reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);

err_exit:
	// AQ_DBG_EXIT(err);
	return (err);
}
122*493d26c5SEd Maste
aq_hw_ver_match(const aq_hw_fw_version * ver_expected,const aq_hw_fw_version * ver_actual)123*493d26c5SEd Maste int aq_hw_ver_match(const aq_hw_fw_version* ver_expected, const aq_hw_fw_version* ver_actual)
124*493d26c5SEd Maste {
125*493d26c5SEd Maste AQ_DBG_ENTER();
126*493d26c5SEd Maste
127*493d26c5SEd Maste if (ver_actual->major_version >= ver_expected->major_version)
128*493d26c5SEd Maste return (true);
129*493d26c5SEd Maste if (ver_actual->minor_version >= ver_expected->minor_version)
130*493d26c5SEd Maste return (true);
131*493d26c5SEd Maste if (ver_actual->build_number >= ver_expected->build_number)
132*493d26c5SEd Maste return (true);
133*493d26c5SEd Maste
134*493d26c5SEd Maste return (false);
135*493d26c5SEd Maste }
136*493d26c5SEd Maste
/*
 * Bring up the microprocessor/firmware interface: reset the firmware,
 * detect chip features, bind the firmware ops table, seed the FW scratch
 * register (1.x firmware only), and wait for the firmware to publish its
 * mailbox address.  NOTE: AQ_HW_WAIT_FOR assigns the local `err` on
 * timeout.
 *
 * Returns 0 on success, a positive/negative error from aq_fw_reset(),
 * or -1 if the F/W ops table could not be initialized.
 */
static int aq_hw_init_ucp(struct aq_hw *hw)
{
	int err = 0;
	AQ_DBG_ENTER();

	hw->fw_version.raw = 0;

	err = aq_fw_reset(hw);
	if (err != EOK) {
		aq_log_error("aq_hw_init_ucp(): F/W reset failed, err %d", err);
		return (err);
	}

	aq_hw_chip_features_init(hw, &hw->chip_features);
	err = aq_fw_ops_init(hw);
	if (err < 0) {
		aq_log_error("could not initialize F/W ops, err %d", err);
		return (-1);
	}

	if (hw->fw_version.major_version == 1) {
		/* 1.x F/W: if scratch register 0x370 is still clear, seed it
		 * with a random pattern whose bytes have bit0 cleared and
		 * bit1 set (0x02020202 | (0xFEFEFEFE & rnd)) — presumably a
		 * per-host signature consumed by firmware; TODO confirm */
		if (!AQ_READ_REG(hw, 0x370)) {
			unsigned int rnd = 0;
			unsigned int ucp_0x370 = 0;

			rnd = arc4random();

			ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
			AQ_WRITE_REG(hw, AQ_HW_UCP_0X370_REG, ucp_0x370);
		}

		reg_glb_cpu_scratch_scp_set(hw, 0, 25);
	}

	/* wait for F/W to publish its mailbox address in register 0x360.
	 * NOTE(review): the original comment said "check 10 times by 1ms"
	 * but the macro is invoked with 400 x 20 — confirm intended units */
	AQ_HW_WAIT_FOR((hw->mbox_addr = AQ_READ_REG(hw, 0x360)) != 0, 400U, 20);

	/* warn (but do not fail) if the firmware is older than expected */
	aq_hw_fw_version ver_expected = { .raw = AQ_CFG_FW_MIN_VER_EXPECTED };
	if (!aq_hw_ver_match(&ver_expected, &hw->fw_version))
		aq_log_error("atlantic: aq_hw_init_ucp(), wrong FW version: expected:%x actual:%x",
		    AQ_CFG_FW_MIN_VER_EXPECTED, hw->fw_version.raw);

	AQ_DBG_EXIT(err);
	return (err);
}
182*493d26c5SEd Maste
/*
 * Create the management-processor interface: simply runs the firmware
 * bring-up sequence and reports its result.
 */
int aq_hw_mpi_create(struct aq_hw *hw)
{
	int err;

	AQ_DBG_ENTER();
	err = aq_hw_init_ucp(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
196*493d26c5SEd Maste
aq_hw_mpi_read_stats(struct aq_hw * hw,struct aq_hw_fw_mbox * pmbox)197*493d26c5SEd Maste int aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox)
198*493d26c5SEd Maste {
199*493d26c5SEd Maste int err = 0;
200*493d26c5SEd Maste // AQ_DBG_ENTER();
201*493d26c5SEd Maste
202*493d26c5SEd Maste if (hw->fw_ops && hw->fw_ops->get_stats) {
203*493d26c5SEd Maste err = hw->fw_ops->get_stats(hw, &pmbox->stats);
204*493d26c5SEd Maste } else {
205*493d26c5SEd Maste err = -ENOTSUP;
206*493d26c5SEd Maste aq_log_error("get_stats() not supported by F/W");
207*493d26c5SEd Maste }
208*493d26c5SEd Maste
209*493d26c5SEd Maste if (err == EOK) {
210*493d26c5SEd Maste pmbox->stats.dpc = reg_rx_dma_stat_counter7get(hw);
211*493d26c5SEd Maste pmbox->stats.cprc = stats_rx_lro_coalesced_pkt_count0_get(hw);
212*493d26c5SEd Maste }
213*493d26c5SEd Maste
214*493d26c5SEd Maste // AQ_DBG_EXIT(err);
215*493d26c5SEd Maste return (err);
216*493d26c5SEd Maste }
217*493d26c5SEd Maste
/*
 * Ask firmware to move the MPI to `state` with the given link speed mask.
 * Returns -ENOTSUP when the bound firmware ops table lacks set_mode().
 */
static int aq_hw_mpi_set(struct aq_hw *hw,
    enum aq_hw_fw_mpi_state_e state, u32 speed)
{
	int err;

	AQ_DBG_ENTERA("speed %d", speed);

	if (hw->fw_ops != NULL && hw->fw_ops->set_mode != NULL) {
		err = hw->fw_ops->set_mode(hw, state, speed);
	} else {
		aq_log_error("set_mode() not supported by F/W");
		err = -ENOTSUP;
	}

	AQ_DBG_EXIT(err);
	return (err);
}
233*493d26c5SEd Maste
/*
 * Request the given link speed mask from firmware (MPI kept in INIT state).
 * Thin wrapper over aq_hw_mpi_set().
 */
int aq_hw_set_link_speed(struct aq_hw *hw, u32 speed)
{
	return aq_hw_mpi_set(hw, MPI_INIT, speed);
}
238*493d26c5SEd Maste
/*
 * Query firmware for the current link state.  On success, *link_speed is
 * set to the negotiated speed in Mbit/s (0 = no link or MPI not in INIT)
 * and fc_neg receives the negotiated RX/TX flow-control flags.
 */
int aq_hw_get_link_state(struct aq_hw *hw, u32 *link_speed,
    struct aq_hw_fc_info *fc_neg)
{
	enum aq_hw_fw_mpi_state_e mode;
	aq_fw_link_speed_t speed = aq_fw_none;
	aq_fw_link_fc_t fc;
	int err;

	// AQ_DBG_ENTER();

	if (hw->fw_ops == NULL || hw->fw_ops->get_mode == NULL) {
		aq_log_error("get_mode() not supported by F/W");
		AQ_DBG_EXIT(-ENOTSUP);
		return (-ENOTSUP);
	}

	err = hw->fw_ops->get_mode(hw, &mode, &speed, &fc);
	if (err < 0) {
		aq_log_error("get_mode() failed, err %d", err);
		AQ_DBG_EXIT(err);
		return (err);
	}

	*link_speed = 0;
	if (mode != MPI_INIT)
		return (0);

	/* translate the F/W speed enum into Mbit/s */
	switch (speed) {
	case aq_fw_10G:
		*link_speed = 10000U;
		break;
	case aq_fw_5G:
		*link_speed = 5000U;
		break;
	case aq_fw_2G5:
		*link_speed = 2500U;
		break;
	case aq_fw_1G:
		*link_speed = 1000U;
		break;
	case aq_fw_100M:
		*link_speed = 100U;
		break;
	default:
		*link_speed = 0U;
		break;
	}

	/* negotiated flow-control state */
	fc_neg->fc_rx = !!(fc & aq_fw_fc_ENABLE_RX);
	fc_neg->fc_tx = !!(fc & aq_fw_fc_ENABLE_TX);

	// AQ_DBG_EXIT(0);
	return (0);
}
298*493d26c5SEd Maste
/*
 * Retrieve the permanent MAC address from firmware into mac[0..5].
 *
 * If firmware cannot provide one, or provides a multicast (bit 0 of the
 * first octet set) or zero-OUI address, fabricate a locally generated
 * address: fixed prefix bytes with 16 random low bits.
 *
 * Returns EOK on success (including the fabricated-address path) or the
 * error from the F/W get_mac_addr op.
 */
int aq_hw_get_mac_permanent(struct aq_hw *hw, u8 *mac)
{
	int err = -ENOTSUP;
	AQ_DBG_ENTER();

	if (hw->fw_ops && hw->fw_ops->get_mac_addr)
		err = hw->fw_ops->get_mac_addr(hw, mac);

	/* Couldn't get MAC address from HW. Use auto-generated one. */
	if ((mac[0] & 1) || ((mac[0] | mac[1] | mac[2]) == 0)) {
		u16 rnd;	/* only 16 random bits are consumed below */
		u32 h = 0;
		u32 l = 0;

		printf("atlantic: HW MAC address %x:%x:%x:%x:%x:%x is multicast or empty MAC", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		printf("atlantic: Use random MAC address");

		rnd = arc4random();

		/* chip revision */
		/* build the address as two words: h holds the top two
		 * octets (0x80 0x01), l the remaining four, with the low
		 * 16 bits randomized; the (0x00 << 16) term is a no-op
		 * placeholder — presumably for a revision byte, TODO confirm */
		l = 0xE3000000U
		    | (0xFFFFU & rnd)
		    | (0x00 << 16);
		h = 0x8001300EU;

		/* unpack h:l into the six address bytes, low byte last */
		mac[5] = (u8)(0xFFU & l);
		l >>= 8;
		mac[4] = (u8)(0xFFU & l);
		l >>= 8;
		mac[3] = (u8)(0xFFU & l);
		l >>= 8;
		mac[2] = (u8)(0xFFU & l);
		mac[1] = (u8)(0xFFU & h);
		h >>= 8;
		mac[0] = (u8)(0xFFU & h);

		err = EOK;
	}

	AQ_DBG_EXIT(err);
	return (err);
}
341*493d26c5SEd Maste
aq_hw_deinit(struct aq_hw * hw)342*493d26c5SEd Maste int aq_hw_deinit(struct aq_hw *hw)
343*493d26c5SEd Maste {
344*493d26c5SEd Maste AQ_DBG_ENTER();
345*493d26c5SEd Maste aq_hw_mpi_set(hw, MPI_DEINIT, 0);
346*493d26c5SEd Maste AQ_DBG_EXIT(0);
347*493d26c5SEd Maste return (0);
348*493d26c5SEd Maste }
349*493d26c5SEd Maste
/*
 * Move the firmware MPI into the low-power state.
 * NOTE(review): the power_state argument is currently ignored and 0 is
 * always passed as the speed — confirm whether distinct power levels
 * were ever intended here.
 */
int aq_hw_set_power(struct aq_hw *hw, unsigned int power_state)
{
	AQ_DBG_ENTER();
	aq_hw_mpi_set(hw, MPI_POWER, 0);
	AQ_DBG_EXIT(0);
	return (0);
}
357*493d26c5SEd Maste
358*493d26c5SEd Maste
359*493d26c5SEd Maste /* HW NIC functions */
360*493d26c5SEd Maste
/*
 * Full NIC reset: reset firmware, then pulse the interrupt-block soft
 * reset and wait for it to self-clear, then let firmware run its own
 * post-reset hook.  NOTE: AQ_HW_WAIT_FOR assigns the local `err` on
 * timeout.
 */
int aq_hw_reset(struct aq_hw *hw)
{
	int err = 0;

	AQ_DBG_ENTER();

	err = aq_fw_reset(hw);
	if (err < 0)
		goto err_exit;

	/* un-gate the reset register, then assert the ITR soft reset */
	itr_irq_reg_res_dis_set(hw, 0);
	itr_res_irq_set(hw, 1);

	/* wait for the reset bit to self-clear (1000 x 10 poll loop) */
	AQ_HW_WAIT_FOR(itr_res_irq_get(hw) == 0, 1000, 10);
	if (err < 0) {
		printf("atlantic: IRQ reset failed: %d", err);
		goto err_exit;
	}

	/* firmware-specific post-reset work, if the ops table provides it */
	if (hw->fw_ops && hw->fw_ops->reset)
		hw->fw_ops->reset(hw);

	err = aq_hw_err_from_flags(hw);

err_exit:
	AQ_DBG_EXIT(err);
	return (err);
}
390*493d26c5SEd Maste
/*
 * Static QoS setup for a single traffic class (TC 0): Tx packet-scheduler
 * rate and arbitration credits, Tx/Rx packet-buffer sizes with high/low
 * flow-control thresholds at 66% / 50% of the buffer, and an 802.1p
 * priority map that sends every priority to TC 0.
 */
static int aq_hw_qos_set(struct aq_hw *hw)
{
	u32 tc = 0U;
	u32 buff_size = 0U;
	unsigned int i_priority = 0U;
	int err = 0;

	AQ_DBG_ENTER();
	/* TPS Descriptor rate init */
	tps_tx_pkt_shed_desc_rate_curr_time_res_set(hw, 0x0U);
	tps_tx_pkt_shed_desc_rate_lim_set(hw, 0xA);

	/* TPS VM init */
	tps_tx_pkt_shed_desc_vm_arb_mode_set(hw, 0U);

	/* TPS TC credits init */
	tps_tx_pkt_shed_desc_tc_arb_mode_set(hw, 0U);
	tps_tx_pkt_shed_data_arb_mode_set(hw, 0U);

	tps_tx_pkt_shed_tc_data_max_credit_set(hw, 0xFFF, 0U);
	tps_tx_pkt_shed_tc_data_weight_set(hw, 0x64, 0U);
	tps_tx_pkt_shed_desc_tc_max_credit_set(hw, 0x50, 0U);
	tps_tx_pkt_shed_desc_tc_weight_set(hw, 0x1E, 0U);

	/* Tx buf size: the whole Tx buffer is given to TC 0 */
	buff_size = AQ_HW_TXBUF_MAX;

	tpb_tx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
	/* thresholds scale buff_size by 1024/32 — presumably the buffer
	 * size is in KB and thresholds in 32-byte units; TODO confirm */
	tpb_tx_buff_hi_threshold_per_tc_set(hw,
	    (buff_size * (1024 / 32U) * 66U) /
	    100U, tc);
	tpb_tx_buff_lo_threshold_per_tc_set(hw,
	    (buff_size * (1024 / 32U) * 50U) /
	    100U, tc);

	/* QoS Rx buf size per TC */
	tc = 0;
	buff_size = AQ_HW_RXBUF_MAX;

	rpb_rx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
	rpb_rx_buff_hi_threshold_per_tc_set(hw,
	    (buff_size *
	    (1024U / 32U) * 66U) /
	    100U, tc);
	rpb_rx_buff_lo_threshold_per_tc_set(hw,
	    (buff_size *
	    (1024U / 32U) * 50U) /
	    100U, tc);

	/* QoS 802.1p priority -> TC mapping: all 8 priorities to TC 0 */
	for (i_priority = 8U; i_priority--;)
		rpf_rpb_user_priority_tc_map_set(hw, i_priority, 0U);

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
448*493d26c5SEd Maste
aq_hw_offload_set(struct aq_hw * hw)449*493d26c5SEd Maste static int aq_hw_offload_set(struct aq_hw *hw)
450*493d26c5SEd Maste {
451*493d26c5SEd Maste int err = 0;
452*493d26c5SEd Maste
453*493d26c5SEd Maste AQ_DBG_ENTER();
454*493d26c5SEd Maste /* TX checksums offloads*/
455*493d26c5SEd Maste tpo_ipv4header_crc_offload_en_set(hw, 1);
456*493d26c5SEd Maste tpo_tcp_udp_crc_offload_en_set(hw, 1);
457*493d26c5SEd Maste if (err < 0)
458*493d26c5SEd Maste goto err_exit;
459*493d26c5SEd Maste
460*493d26c5SEd Maste /* RX checksums offloads*/
461*493d26c5SEd Maste rpo_ipv4header_crc_offload_en_set(hw, 1);
462*493d26c5SEd Maste rpo_tcp_udp_crc_offload_en_set(hw, 1);
463*493d26c5SEd Maste if (err < 0)
464*493d26c5SEd Maste goto err_exit;
465*493d26c5SEd Maste
466*493d26c5SEd Maste /* LSO offloads*/
467*493d26c5SEd Maste tdm_large_send_offload_en_set(hw, 0xFFFFFFFFU);
468*493d26c5SEd Maste if (err < 0)
469*493d26c5SEd Maste goto err_exit;
470*493d26c5SEd Maste
471*493d26c5SEd Maste /* LRO offloads */
472*493d26c5SEd Maste {
473*493d26c5SEd Maste u32 i = 0;
474*493d26c5SEd Maste u32 val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
475*493d26c5SEd Maste ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
476*493d26c5SEd Maste ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
477*493d26c5SEd Maste
478*493d26c5SEd Maste for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
479*493d26c5SEd Maste rpo_lro_max_num_of_descriptors_set(hw, val, i);
480*493d26c5SEd Maste
481*493d26c5SEd Maste rpo_lro_time_base_divider_set(hw, 0x61AU);
482*493d26c5SEd Maste rpo_lro_inactive_interval_set(hw, 0);
483*493d26c5SEd Maste /* the LRO timebase divider is 5 uS (0x61a),
484*493d26c5SEd Maste * to get a maximum coalescing interval of 250 uS,
485*493d26c5SEd Maste * we need to multiply by 50(0x32) to get
486*493d26c5SEd Maste * the default value 250 uS
487*493d26c5SEd Maste */
488*493d26c5SEd Maste rpo_lro_max_coalescing_interval_set(hw, 50);
489*493d26c5SEd Maste
490*493d26c5SEd Maste rpo_lro_qsessions_lim_set(hw, 1U);
491*493d26c5SEd Maste
492*493d26c5SEd Maste rpo_lro_total_desc_lim_set(hw, 2U);
493*493d26c5SEd Maste
494*493d26c5SEd Maste rpo_lro_patch_optimization_en_set(hw, 0U);
495*493d26c5SEd Maste
496*493d26c5SEd Maste rpo_lro_min_pay_of_first_pkt_set(hw, 10U);
497*493d26c5SEd Maste
498*493d26c5SEd Maste rpo_lro_pkt_lim_set(hw, 1U);
499*493d26c5SEd Maste
500*493d26c5SEd Maste rpo_lro_en_set(hw, (hw->lro_enabled ? 0xFFFFFFFFU : 0U));
501*493d26c5SEd Maste }
502*493d26c5SEd Maste
503*493d26c5SEd Maste
504*493d26c5SEd Maste err = aq_hw_err_from_flags(hw);
505*493d26c5SEd Maste
506*493d26c5SEd Maste err_exit:
507*493d26c5SEd Maste AQ_DBG_EXIT(err);
508*493d26c5SEd Maste return (err);
509*493d26c5SEd Maste }
510*493d26c5SEd Maste
/*
 * One-time Tx datapath initialization: TC mode, LSO TCP flag masks for
 * first/middle/last segments, write-back interrupts, DCA off, and
 * scheduler packet insertion.
 */
static int aq_hw_init_tx_path(struct aq_hw *hw)
{
	int err = 0;

	AQ_DBG_ENTER();

	/* Tx TC/RSS number config */
	tpb_tx_tc_mode_set(hw, 1U);

	/* TCP flags preserved in generated LSO segments: first, middle and
	 * last segments get different masks (FIN/PSH handling) */
	thm_lso_tcp_flag_of_first_pkt_set(hw, 0x0FF6U);
	thm_lso_tcp_flag_of_middle_pkt_set(hw, 0x0FF6U);
	thm_lso_tcp_flag_of_last_pkt_set(hw, 0x0F7FU);

	/* Tx interrupts */
	tdm_tx_desc_wr_wb_irq_en_set(hw, 1U);

	/* misc: undocumented register 0x7040 — original note suggests the
	 * value should depend on the TPO2 chip feature; TODO confirm */
	AQ_WRITE_REG(hw, 0x00007040U, 0x00010000U);//IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U);
	tdm_tx_dca_en_set(hw, 0U);
	tdm_tx_dca_mode_set(hw, 0U);

	tpb_tx_path_scp_ins_en_set(hw, 1U);

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
538*493d26c5SEd Maste
/*
 * One-time Rx datapath initialization: TC mode, flow control, RSS ring
 * selection, unicast/multicast/VLAN filters, write-back interrupts, the
 * RPF control register, broadcast handling and DCA off.
 */
static int aq_hw_init_rx_path(struct aq_hw *hw)
{
	//struct aq_nic_cfg_s *cfg = hw->aq_nic_cfg;
	unsigned int control_reg_val = 0U;
	int i;
	int err;

	AQ_DBG_ENTER();
	/* Rx TC/RSS number config */
	rpb_rpf_rx_traf_class_mode_set(hw, 1U);

	/* Rx flow control */
	rpb_rx_flow_ctl_mode_set(hw, 1U);

	/* RSS Ring selection */
	reg_rx_flr_rss_control1set(hw, 0xB3333333U);

	/* Multicast filters: enable only unicast filter slot 0, set all
	 * slots' action to "host" (1) */
	for (i = AQ_HW_MAC_MAX; i--;) {
		rpfl2_uc_flr_en_set(hw, (i == 0U) ? 1U : 0U, i);
		rpfl2unicast_flr_act_set(hw, 1U, i);
	}

	reg_rx_flr_mcst_flr_msk_set(hw, 0x00000000U);
	reg_rx_flr_mcst_flr_set(hw, 0x00010FFFU, 0U);

	/* Vlan filters: outer 802.1ad (0x88A8), inner 802.1Q (0x8100) */
	rpf_vlan_outer_etht_set(hw, 0x88A8U);
	rpf_vlan_inner_etht_set(hw, 0x8100U);
	rpf_vlan_accept_untagged_packets_set(hw, true);
	rpf_vlan_untagged_act_set(hw, HW_ATL_RX_HOST);

	/* accept all tagged traffic (VLAN promiscuous) */
	rpf_vlan_prom_mode_en_set(hw, 1);

	/* Rx Interrupts */
	rdm_rx_desc_wr_wb_irq_en_set(hw, 1U);

	/* misc */
	control_reg_val = 0x000F0000U; //RPF2

	/* RSS hash type set for IP/TCP */
	control_reg_val |= 0x1EU;

	AQ_WRITE_REG(hw, 0x00005040U, control_reg_val);

	rpfl2broadcast_en_set(hw, 1U);
	rpfl2broadcast_flr_act_set(hw, 1U);
	/* broadcast rate threshold: 0xFFFF & (2^32-1)/256 == 0xFFFF */
	rpfl2broadcast_count_threshold_set(hw, 0xFFFFU & (~0U / 256U));

	rdm_rx_dca_en_set(hw, 0U);
	rdm_rx_dca_mode_set(hw, 0U);

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
595*493d26c5SEd Maste
/*
 * Program unicast L2 filter slot `index` with `mac_addr` and enable it.
 * Returns -EINVAL if mac_addr is NULL, otherwise the latched HW error
 * state (currently always 0).
 */
int aq_hw_mac_addr_set(struct aq_hw *hw, u8 *mac_addr, u8 index)
{
	unsigned int msw, lsw;
	int err;

	AQ_DBG_ENTER();

	if (mac_addr == NULL) {
		AQ_DBG_EXIT(-EINVAL);
		return (-EINVAL);
	}

	/* pack the six bytes into the 16-bit MSW / 32-bit LSW register pair */
	msw = (mac_addr[0] << 8) | (mac_addr[1]);
	lsw = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
	    (mac_addr[4] << 8) | mac_addr[5];

	/* disable the filter while it is being rewritten */
	rpfl2_uc_flr_en_set(hw, 0U, index);
	rpfl2unicast_dest_addresslsw_set(hw, lsw, index);
	rpfl2unicast_dest_addressmsw_set(hw, msw, index);
	rpfl2_uc_flr_en_set(hw, 1U, index);

	err = aq_hw_err_from_flags(hw);

	AQ_DBG_EXIT(err);
	return (err);
}
622*493d26c5SEd Maste
/*
 * Top-level hardware initialization: PCIe request sizing, Tx/Rx datapath
 * setup, MAC filter programming, MPI link start, QoS, interrupt mode
 * (MSI or MSI-X multi-vector) and offload configuration.
 *
 * adm_irq is the vector used for the "general"/admin interrupt map slot;
 * msix selects MSI-X multi-vector mode over MSI.
 */
int aq_hw_init(struct aq_hw *hw, u8 *mac_addr, u8 adm_irq, bool msix)
{

	int err = 0;
	u32 val = 0;

	AQ_DBG_ENTER();

	/* Force limit MRRS on RDM/TDM to 2K */
	val = AQ_READ_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR);
	AQ_WRITE_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR, (val & ~0x707) | 0x404);

	/* TX DMA total request limit. B0 hardware is not capable to
	 * handle more than (8K-MRRS) incoming DMA data.
	 * Value 24 in 256byte units
	 */
	AQ_WRITE_REG(hw, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

	aq_hw_init_tx_path(hw);
	aq_hw_init_rx_path(hw);

	/* program the primary MAC into filter slot AQ_HW_MAC */
	aq_hw_mac_addr_set(hw, mac_addr, AQ_HW_MAC);

	/* bring the link up at the configured rate */
	aq_hw_mpi_set(hw, MPI_INIT, hw->link_rate);

	aq_hw_qos_set(hw);

	err = aq_hw_err_from_flags(hw);
	if (err < 0)
		goto err_exit;

	/* Interrupts */
	//Enable interrupt
	itr_irq_status_cor_en_set(hw, 0); //Disable clear-on-read for status
	itr_irq_auto_mask_clr_en_set(hw, 1); // Enable auto-mask clear.
	if (msix)
		itr_irq_mode_set(hw, 0x6); //MSIX + multi vector
	else
		itr_irq_mode_set(hw, 0x5); //MSI + multi vector

	/* map the admin interrupt (bit 0x80 = enable) into map slot 3 */
	reg_gen_irq_map_set(hw, 0x80 | adm_irq, 3);

	aq_hw_offload_set(hw);

err_exit:
	AQ_DBG_EXIT(err);
	return (err);
}
671*493d26c5SEd Maste
672*493d26c5SEd Maste
/*
 * Start traffic flow by enabling the Tx and Rx packet buffers.
 */
int aq_hw_start(struct aq_hw *hw)
{
	int rc;

	AQ_DBG_ENTER();
	tpb_tx_buff_en_set(hw, 1U);
	rpb_rx_buff_en_set(hw, 1U);
	rc = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(rc);
	return (rc);
}
684*493d26c5SEd Maste
685*493d26c5SEd Maste
/*
 * Program interrupt moderation for every ring.
 *
 * hw->itr selects the policy: 0 disables moderation (write-back IRQs
 * re-enabled), -1 uses the built-in per-speed {min,max} timer tables,
 * any other value is a custom max timer (clamped to 0x1FF) with min set
 * to half of it.  Timer values are packed into bits [15:8] (min) and
 * [24:16] (max) of the per-ring moderation control registers.
 *
 * NOTE(review): speed_index is hard-coded to 0, so the 10G table row is
 * always used regardless of the actual link speed — confirm intent.
 */
int aq_hw_interrupt_moderation_set(struct aq_hw *hw)
{
	/* per-speed {min, max} RX timer values; row order:
	 * 10G, 5G, 5G(5GS), 2.5G, 1G, 100M */
	static unsigned int AQ_HW_NIC_timers_table_rx_[][2] = {
		{80, 120},//{0x6U, 0x38U},/* 10Gbit */
		{0xCU, 0x70U},/* 5Gbit */
		{0xCU, 0x70U},/* 5Gbit 5GS */
		{0x18U, 0xE0U},/* 2.5Gbit */
		{0x30U, 0x80U},/* 1Gbit */
		{0x4U, 0x50U},/* 100Mbit */
	};
	/* per-speed {min, max} TX timer values, same row order */
	static unsigned int AQ_HW_NIC_timers_table_tx_[][2] = {
		{0x4fU, 0x1ff},//{0xffU, 0xffU}, /* 10Gbit */
		{0x4fU, 0xffU}, /* 5Gbit */
		{0x4fU, 0xffU}, /* 5Gbit 5GS */
		{0x4fU, 0xffU}, /* 2.5Gbit */
		{0x4fU, 0xffU}, /* 1Gbit */
		{0x4fU, 0xffU}, /* 100Mbit */
	};

	u32 speed_index = 0U; //itr settings for 10 g
	u32 itr_rx = 2U;	/* bit 1 = moderation enable in the ctrl reg — TODO confirm */
	u32 itr_tx = 2U;
	int custom_itr = hw->itr;
	int active = custom_itr != 0;
	int err;


	AQ_DBG_ENTER();

	if (custom_itr == -1) {
		/* -1: take min/max from the per-speed tables */
		itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][0] << 0x8U; /* set min timer value */
		itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][1] << 0x10U; /* set max timer value */

		itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][0] << 0x8U; /* set min timer value */
		itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][1] << 0x10U; /* set max timer value */
	}else{
		/* explicit value: clamp to the 9-bit register field */
		if (custom_itr > 0x1FF)
			custom_itr = 0x1FF;

		itr_rx |= (custom_itr/2) << 0x8U; /* set min timer value */
		itr_rx |= custom_itr << 0x10U; /* set max timer value */

		itr_tx |= (custom_itr/2) << 0x8U; /* set min timer value */
		itr_tx |= custom_itr << 0x10U; /* set max timer value */
	}

	/* moderation and per-descriptor write-back IRQs are mutually
	 * exclusive: enable one, disable the other */
	tdm_tx_desc_wr_wb_irq_en_set(hw, !active);
	tdm_tdm_intr_moder_en_set(hw, active);
	rdm_rx_desc_wr_wb_irq_en_set(hw, !active);
	rdm_rdm_intr_moder_en_set(hw, active);

	/* apply the packed timer words to every ring */
	for (int i = HW_ATL_B0_RINGS_MAX; i--;) {
		reg_tx_intr_moder_ctrl_set(hw, itr_tx, i);
		reg_rx_intr_moder_ctrl_set(hw, itr_rx, i);
	}

	err = aq_hw_err_from_flags(hw);
	AQ_DBG_EXIT(err);
	return (err);
}
746*493d26c5SEd Maste
747*493d26c5SEd Maste /**
748*493d26c5SEd Maste * @brief Set VLAN filter table
749*493d26c5SEd Maste * @details Configure VLAN filter table to accept (and assign the queue) traffic
750*493d26c5SEd Maste * for the particular vlan ids.
751*493d26c5SEd Maste * Note: use this function under vlan promisc mode not to lost the traffic
752*493d26c5SEd Maste *
753*493d26c5SEd Maste * @param aq_hw_s
754*493d26c5SEd Maste * @param aq_rx_filter_vlan VLAN filter configuration
755*493d26c5SEd Maste * @return 0 - OK, <0 - error
756*493d26c5SEd Maste */
hw_atl_b0_hw_vlan_set(struct aq_hw_s * self,struct aq_rx_filter_vlan * aq_vlans)757*493d26c5SEd Maste int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
758*493d26c5SEd Maste struct aq_rx_filter_vlan *aq_vlans)
759*493d26c5SEd Maste {
760*493d26c5SEd Maste int i;
761*493d26c5SEd Maste
762*493d26c5SEd Maste for (i = 0; i < AQ_HW_VLAN_MAX_FILTERS; i++) {
763*493d26c5SEd Maste hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
764*493d26c5SEd Maste hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
765*493d26c5SEd Maste if (aq_vlans[i].enable) {
766*493d26c5SEd Maste hw_atl_rpf_vlan_id_flr_set(self,
767*493d26c5SEd Maste aq_vlans[i].vlan_id,
768*493d26c5SEd Maste i);
769*493d26c5SEd Maste hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
770*493d26c5SEd Maste hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
771*493d26c5SEd Maste if (aq_vlans[i].queue != 0xFF) {
772*493d26c5SEd Maste hw_atl_rpf_vlan_rxq_flr_set(self,
773*493d26c5SEd Maste aq_vlans[i].queue,
774*493d26c5SEd Maste i);
775*493d26c5SEd Maste hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
776*493d26c5SEd Maste }
777*493d26c5SEd Maste }
778*493d26c5SEd Maste }
779*493d26c5SEd Maste
780*493d26c5SEd Maste return aq_hw_err_from_flags(self);
781*493d26c5SEd Maste }
782*493d26c5SEd Maste
/* Enable or disable VLAN promiscuous mode in the receive packet filter. */
int hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc)
{
	hw_atl_rpf_vlan_prom_mode_en_set(self, promisc);

	return aq_hw_err_from_flags(self);
}
788*493d26c5SEd Maste
789*493d26c5SEd Maste
/*
 * Apply the interface promiscuity settings to the receive packet filter.
 *
 * @param self         hardware context
 * @param l2_promisc   accept all L2 unicast frames
 * @param vlan_promisc accept frames regardless of VLAN tag
 * @param mc_promisc   accept all multicast frames
 *
 * L2 promiscuity implies VLAN promiscuity: with the L2 filter wide open,
 * the VLAN filter must not drop tagged traffic.
 */
void aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc)
{
	AQ_DBG_ENTERA("promisc %d, vlan_promisc %d, allmulti %d", l2_promisc, vlan_promisc, mc_promisc);

	rpfl2promiscuous_mode_en_set(self, l2_promisc);

	/* Use logical OR: these are booleans, not bit masks (the previous
	 * bitwise '|' happened to work but obscured the intent). */
	hw_atl_b0_hw_vlan_promisc_set(self, l2_promisc || vlan_promisc);

	rpfl2_accept_all_mc_packets_set(self, mc_promisc);
	rpfl2multicast_flr_en_set(self, mc_promisc, 0);

	AQ_DBG_EXIT(0);
}
803*493d26c5SEd Maste
/*
 * Write the RSS hash key into hardware.
 *
 * @param self    hardware context
 * @param rss_key HW_ATL_RSS_HASHKEY_SIZE bytes of key material
 * @return 0 - OK, <0 - error (write strobe timed out or HW flagged an error)
 */
int aq_hw_rss_hash_set(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
	u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
	u32 addr = 0U;
	u32 i = 0U;
	int err = 0;

	AQ_DBG_ENTER();

	memcpy(rss_key_dw, rss_key, HW_ATL_RSS_HASHKEY_SIZE);

	/* Write the key one dword at a time, highest dword first.  The loop
	 * count is derived from the key size (previously a magic "10") so it
	 * always matches the rss_key_dw array above. */
	for (i = HW_ATL_RSS_HASHKEY_SIZE / 4, addr = 0U; i--; ++addr) {
		u32 key_data = bswap32(rss_key_dw[i]);
		rpf_rss_key_wr_data_set(self, key_data);
		rpf_rss_key_addr_set(self, addr);
		rpf_rss_key_wr_en_set(self, 1U);
		/* Poll until hardware clears the write-enable strobe;
		 * the macro sets err on timeout. */
		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	AQ_DBG_EXIT(err);
	return (err);
}
832*493d26c5SEd Maste
/*
 * Read the RSS hash key back from hardware.
 *
 * @param self    hardware context
 * @param rss_key out: HW_ATL_RSS_HASHKEY_SIZE bytes of key material
 * @return 0 - OK, <0 - error
 */
int aq_hw_rss_hash_get(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
	u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
	u32 addr = 0U;
	u32 i = 0U;
	int err = 0;

	AQ_DBG_ENTER();

	/* Read back in the same (reversed) dword order used by the setter.
	 * The loop count is derived from the key size (previously a magic
	 * "10") so it always matches the rss_key_dw array above. */
	for (i = HW_ATL_RSS_HASHKEY_SIZE / 4, addr = 0U; i--; ++addr) {
		rpf_rss_key_addr_set(self, addr);
		rss_key_dw[i] = bswap32(rpf_rss_key_rd_data_get(self));
	}
	memcpy(rss_key, rss_key_dw, HW_ATL_RSS_HASHKEY_SIZE);

	err = aq_hw_err_from_flags(self);

	AQ_DBG_EXIT(err);
	return (err);
}
853*493d26c5SEd Maste
/*
 * Program the RSS indirection table: rss_table[i] is the 3-bit RX queue
 * index for hash bucket i.  The entries are packed into a dense bit array
 * and written to hardware 16 bits at a time.
 *
 * @param self      hardware context
 * @param rss_table HW_ATL_RSS_INDIRECTION_TABLE_MAX queue indices
 * @return 0 - OK, <0 - error
 */
int aq_hw_rss_set(struct aq_hw_s *self, u8 rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])
{
	u16 bitary[(HW_ATL_RSS_INDIRECTION_TABLE_MAX *
					3 / 16U)];
	int err = 0;
	u32 i = 0U;

	memset(bitary, 0, sizeof(bitary));

	/* Pack the 3-bit entries little-endian into bitary.  The previous
	 * code OR-ed through a misaligned (u32 *) cast, which violated
	 * strict aliasing and, for the final word index, wrote two bytes
	 * past the end of bitary.  Split any value that straddles a 16-bit
	 * boundary across the two adjacent u16 words instead; the carry
	 * word index never exceeds the array bounds because a spill only
	 * occurs for shifts > 13, which happen well before the last word. */
	for (i = HW_ATL_RSS_INDIRECTION_TABLE_MAX; i--;) {
		u32 word = (i * 3U) / 16U;
		u32 shift = (i * 3U) & 0xFU;
		u32 val = (u32)rss_table[i] << shift;

		bitary[word] |= (u16)val;
		if (val >> 16)
			bitary[word + 1] |= (u16)(val >> 16);
	}

	for (i = ARRAY_SIZE(bitary); i--;) {
		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		rpf_rss_redir_tbl_addr_set(self, i);
		rpf_rss_redir_wr_en_set(self, 1U);
		/* Poll until hardware clears the write-enable strobe;
		 * the macro sets err on timeout. */
		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return (err);
}
883*493d26c5SEd Maste
/*
 * Toggle RSS for UDP traffic.  Disabling installs a flow filter that pins
 * all UDP to queue 0 (hardware workaround); enabling removes that filter.
 */
int aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable)
{
	if (enable) {
		/* Drop the workaround filter so UDP hashes normally. */
		hw_atl_rpf_l3_l4_enf_set(self, false, 0);
	} else {
		/* HW bug workaround:
		 * Disable RSS for UDP using rx flow filter 0.
		 * HW does not track RSS stream for fragmenged UDP,
		 * 0x5040 control reg does not work.
		 */
		hw_atl_rpf_l3_l4_enf_set(self, true, 0);
		hw_atl_rpf_l4_protf_en_set(self, true, 0);
		hw_atl_rpf_l3_l4_rxqf_en_set(self, true, 0);
		hw_atl_rpf_l3_l4_actf_set(self, L2_FILTER_ACTION_HOST, 0);
		hw_atl_rpf_l3_l4_rxqf_set(self, 0, 0);
		hw_atl_rpf_l4_protf_set(self, HW_ATL_RX_UDP, 0);
	}

	return aq_hw_err_from_flags(self);
}
907