/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   (1) Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 *   (2) Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 *   (3) The name of the author may not be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/endian.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <sys/socket.h>
#include <net/if.h>

#include "aq_hw.h"
#include "aq_dbg.h"
#include "aq_hw_llh.h"
#include "aq_fw.h"

#define AQ_HW_FW_SM_RAM            0x2U
#define AQ_CFG_FW_MIN_VER_EXPECTED 0x01050006U

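/*
 * AQ_HW_WAIT_FOR(cond, delay_us, tries) is defined in aq_hw.h; as used
 * here it polls "cond" up to "tries" times with a "delay_us" microsecond
 * pause between attempts, and stores -ETIME into the local "err" on
 * timeout.  (Semantics inferred from the call sites below.)
 */
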
int aq_hw_err_from_flags(struct aq_hw *hw)
{
    /* No hardware error flags are decoded yet; always report success. */
    return (0);
}

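/*
 * Derive the chip feature mask from the MIF revision register: revision
 * 0x01 denotes an A0 part, 0x02 a B0 part and 0x0A a B1 part.
 */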
static void aq_hw_chip_features_init(struct aq_hw *hw, u32 *p)
{
    u32 chip_features = 0U;
    u32 val = reg_glb_mif_id_get(hw);
    u32 mif_rev = val & 0xFFU;

    if ((0xFU & mif_rev) == 1U) {
        chip_features |= AQ_HW_CHIP_REVISION_A0 |
                 AQ_HW_CHIP_MPI_AQ |
                 AQ_HW_CHIP_MIPS;
    } else if ((0xFU & mif_rev) == 2U) {
        chip_features |= AQ_HW_CHIP_REVISION_B0 |
                 AQ_HW_CHIP_MPI_AQ |
                 AQ_HW_CHIP_MIPS |
                 AQ_HW_CHIP_TPO2 |
                 AQ_HW_CHIP_RPF2;
    } else if ((0xFU & mif_rev) == 0xAU) {
        chip_features |= AQ_HW_CHIP_REVISION_B1 |
                 AQ_HW_CHIP_MPI_AQ |
                 AQ_HW_CHIP_MIPS |
                 AQ_HW_CHIP_TPO2 |
                 AQ_HW_CHIP_RPF2;
    }

    *p = chip_features;
}

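/*
 * Read "cnt" dwords from firmware RAM at address "a" into "p".  The host
 * first takes the RAM semaphore, then walks the MCP mailbox: each
 * "execute" strobe fetches one dword (on B1 the mailbox address register
 * is polled for the post-fetch increment; on earlier revisions the busy
 * bit is polled instead).
 */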
int aq_hw_fw_downld_dwords(struct aq_hw *hw, u32 a, u32 *p, u32 cnt)
{
    int err = 0;

//    AQ_DBG_ENTER();
    AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(hw,
                       AQ_HW_FW_SM_RAM) == 1U,
                       1U, 10000U);

    if (err < 0) {
        bool is_locked;

        reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);
        is_locked = reg_glb_cpu_sem_get(hw, AQ_HW_FW_SM_RAM);
        if (!is_locked) {
            err = -ETIME;
            goto err_exit;
        }
    }

    mif_mcp_up_mailbox_addr_set(hw, a);

    for (++cnt; --cnt && !err;) {
        mif_mcp_up_mailbox_execute_operation_set(hw, 1);

        if (IS_CHIP_FEATURE(hw, REVISION_B1))
            AQ_HW_WAIT_FOR(a != mif_mcp_up_mailbox_addr_get(hw), 1U, 1000U);
        else
            AQ_HW_WAIT_FOR(!mif_mcp_up_mailbox_busy_get(hw), 1, 1000U);

        *(p++) = mif_mcp_up_mailbox_data_get(hw);
    }

    reg_glb_cpu_sem_set(hw, 1U, AQ_HW_FW_SM_RAM);

err_exit:
//    AQ_DBG_EXIT(err);
    return (err);
}

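/*
 * Compare a firmware version against the minimum supported one, most
 * significant field first; returns true when ver_actual >= ver_expected.
 */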
int aq_hw_ver_match(const aq_hw_fw_version *ver_expected, const aq_hw_fw_version *ver_actual)
{
    AQ_DBG_ENTER();

    if (ver_actual->major_version != ver_expected->major_version)
        return (ver_actual->major_version > ver_expected->major_version);
    if (ver_actual->minor_version != ver_expected->minor_version)
        return (ver_actual->minor_version > ver_expected->minor_version);

    return (ver_actual->build_number >= ver_expected->build_number);
}

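/*
 * Bring up the F/W microprocessor (uCP): reset the firmware, detect the
 * chip features, bind the firmware ops table, then wait for the firmware
 * to publish its mailbox address and verify the minimum supported version.
 */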
static int aq_hw_init_ucp(struct aq_hw *hw)
{
    int err = 0;
    AQ_DBG_ENTER();

    hw->fw_version.raw = 0;

    err = aq_fw_reset(hw);
    if (err != EOK) {
        aq_log_error("aq_hw_init_ucp(): F/W reset failed, err %d", err);
        return (err);
    }

    aq_hw_chip_features_init(hw, &hw->chip_features);
    err = aq_fw_ops_init(hw);
    if (err < 0) {
        aq_log_error("could not initialize F/W ops, err %d", err);
        return (-1);
    }

    if (hw->fw_version.major_version == 1) {
        if (!AQ_READ_REG(hw, 0x370)) {
            unsigned int rnd = 0;
            unsigned int ucp_0x370 = 0;

            rnd = arc4random();

            /* Seed scratchpad 0x370 with a random pattern; every byte
             * is forced non-zero (bit 1 set, bit 0 cleared). */
            ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
            AQ_WRITE_REG(hw, AQ_HW_UCP_0X370_REG, ucp_0x370);
        }

        reg_glb_cpu_scratch_scp_set(hw, 0, 25);
    }

    /* poll up to 20 times at 400us intervals */
    AQ_HW_WAIT_FOR((hw->mbox_addr = AQ_READ_REG(hw, 0x360)) != 0, 400U, 20);

    aq_hw_fw_version ver_expected = { .raw = AQ_CFG_FW_MIN_VER_EXPECTED };
    if (!aq_hw_ver_match(&ver_expected, &hw->fw_version))
        aq_log_error("atlantic: aq_hw_init_ucp(), wrong FW version: expected:%x actual:%x",
              AQ_CFG_FW_MIN_VER_EXPECTED, hw->fw_version.raw);

    AQ_DBG_EXIT(err);
    return (err);
}

int aq_hw_mpi_create(struct aq_hw *hw)
{
    int err = 0;

    AQ_DBG_ENTER();
    err = aq_hw_init_ucp(hw);
    if (err < 0)
        goto err_exit;

err_exit:
    AQ_DBG_EXIT(err);
    return (err);
}

int aq_hw_mpi_read_stats(struct aq_hw *hw, struct aq_hw_fw_mbox *pmbox)
{
    int err = 0;
//    AQ_DBG_ENTER();

    if (hw->fw_ops && hw->fw_ops->get_stats) {
        err = hw->fw_ops->get_stats(hw, &pmbox->stats);
    } else {
        err = -ENOTSUP;
        aq_log_error("get_stats() not supported by F/W");
    }

    if (err == EOK) {
        /* Supplement the F/W statistics with counters read directly
         * from hardware. */
        pmbox->stats.dpc = reg_rx_dma_stat_counter7get(hw);
        pmbox->stats.cprc = stats_rx_lro_coalesced_pkt_count0_get(hw);
    }

//    AQ_DBG_EXIT(err);
    return (err);
}

static int aq_hw_mpi_set(struct aq_hw *hw,
              enum aq_hw_fw_mpi_state_e state, u32 speed)
{
    int err = -ENOTSUP;
    AQ_DBG_ENTERA("speed %d", speed);

    if (hw->fw_ops && hw->fw_ops->set_mode) {
        err = hw->fw_ops->set_mode(hw, state, speed);
    } else {
        aq_log_error("set_mode() not supported by F/W");
    }

    AQ_DBG_EXIT(err);
    return (err);
}

int aq_hw_set_link_speed(struct aq_hw *hw, u32 speed)
{
    return aq_hw_mpi_set(hw, MPI_INIT, speed);
}

int aq_hw_get_link_state(struct aq_hw *hw, u32 *link_speed, struct aq_hw_fc_info *fc_neg)
{
    int err = EOK;

 //   AQ_DBG_ENTER();

    enum aq_hw_fw_mpi_state_e mode;
    aq_fw_link_speed_t speed = aq_fw_none;
    aq_fw_link_fc_t fc;

    if (hw->fw_ops && hw->fw_ops->get_mode) {
        err = hw->fw_ops->get_mode(hw, &mode, &speed, &fc);
    } else {
        aq_log_error("get_mode() not supported by F/W");
        AQ_DBG_EXIT(-ENOTSUP);
        return (-ENOTSUP);
    }

    if (err < 0) {
        aq_log_error("get_mode() failed, err %d", err);
        AQ_DBG_EXIT(err);
        return (err);
    }

    *link_speed = 0;
    if (mode != MPI_INIT)
        return (0);

    switch (speed) {
    case aq_fw_10G:
        *link_speed = 10000U;
        break;

    case aq_fw_5G:
        *link_speed = 5000U;
        break;

    case aq_fw_2G5:
        *link_speed = 2500U;
        break;

    case aq_fw_1G:
        *link_speed = 1000U;
        break;

    case aq_fw_100M:
        *link_speed = 100U;
        break;

    default:
        *link_speed = 0U;
        break;
    }

    fc_neg->fc_rx = !!(fc & aq_fw_fc_ENABLE_RX);
    fc_neg->fc_tx = !!(fc & aq_fw_fc_ENABLE_TX);

 //   AQ_DBG_EXIT(0);
    return (0);
}

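/*
 * Fetch the permanent MAC address from firmware.  If the firmware returns
 * an empty or multicast address, fall back to a fixed-prefix address with
 * randomized low bytes so the interface remains usable.
 */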
int aq_hw_get_mac_permanent(struct aq_hw *hw, u8 *mac)
{
    int err = -ENOTSUP;
    AQ_DBG_ENTER();

    if (hw->fw_ops && hw->fw_ops->get_mac_addr)
        err = hw->fw_ops->get_mac_addr(hw, mac);

    /* Couldn't get MAC address from HW. Use auto-generated one. */
    if ((mac[0] & 1) || ((mac[0] | mac[1] | mac[2]) == 0)) {
        u16 rnd;
        u32 h = 0;
        u32 l = 0;

        printf("atlantic: HW MAC address %x:%x:%x:%x:%x:%x is multicast or empty MAC\n",
            mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        printf("atlantic: Use random MAC address\n");

        rnd = arc4random();

        /* chip revision */
        l = 0xE3000000U
            | (0xFFFFU & rnd)
            | (0x00 << 16);
        h = 0x8001300EU;

        mac[5] = (u8)(0xFFU & l);
        l >>= 8;
        mac[4] = (u8)(0xFFU & l);
        l >>= 8;
        mac[3] = (u8)(0xFFU & l);
        l >>= 8;
        mac[2] = (u8)(0xFFU & l);
        mac[1] = (u8)(0xFFU & h);
        h >>= 8;
        mac[0] = (u8)(0xFFU & h);

        err = EOK;
    }

    AQ_DBG_EXIT(err);
    return (err);
}

int aq_hw_deinit(struct aq_hw *hw)
{
    AQ_DBG_ENTER();
    aq_hw_mpi_set(hw, MPI_DEINIT, 0);
    AQ_DBG_EXIT(0);
    return (0);
}

int aq_hw_set_power(struct aq_hw *hw, unsigned int power_state)
{
    AQ_DBG_ENTER();
    aq_hw_mpi_set(hw, MPI_POWER, 0);
    AQ_DBG_EXIT(0);
    return (0);
}


/* HW NIC functions */

int aq_hw_reset(struct aq_hw *hw)
{
    int err = 0;

    AQ_DBG_ENTER();

    err = aq_fw_reset(hw);
    if (err < 0)
        goto err_exit;

    itr_irq_reg_res_dis_set(hw, 0);
    itr_res_irq_set(hw, 1);

    /* check 10 times by 1ms */
    AQ_HW_WAIT_FOR(itr_res_irq_get(hw) == 0, 1000, 10);
    if (err < 0) {
        printf("atlantic: IRQ reset failed: %d\n", err);
        goto err_exit;
    }

    if (hw->fw_ops && hw->fw_ops->reset)
        hw->fw_ops->reset(hw);

    err = aq_hw_err_from_flags(hw);

err_exit:
    AQ_DBG_EXIT(err);
    return (err);
}

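/*
 * Program the default QoS configuration: a single traffic class owning
 * the whole Tx/Rx packet buffer, with high/low watermarks at 66%/50% of
 * the buffer, and all eight 802.1p priorities mapped to TC 0.
 */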
static int aq_hw_qos_set(struct aq_hw *hw)
{
    u32 tc = 0U;
    u32 buff_size = 0U;
    unsigned int i_priority = 0U;
    int err = 0;

    AQ_DBG_ENTER();
    /* TPS Descriptor rate init */
    tps_tx_pkt_shed_desc_rate_curr_time_res_set(hw, 0x0U);
    tps_tx_pkt_shed_desc_rate_lim_set(hw, 0xA);

    /* TPS VM init */
    tps_tx_pkt_shed_desc_vm_arb_mode_set(hw, 0U);

    /* TPS TC credits init */
    tps_tx_pkt_shed_desc_tc_arb_mode_set(hw, 0U);
    tps_tx_pkt_shed_data_arb_mode_set(hw, 0U);

    tps_tx_pkt_shed_tc_data_max_credit_set(hw, 0xFFF, 0U);
    tps_tx_pkt_shed_tc_data_weight_set(hw, 0x64, 0U);
    tps_tx_pkt_shed_desc_tc_max_credit_set(hw, 0x50, 0U);
    tps_tx_pkt_shed_desc_tc_weight_set(hw, 0x1E, 0U);

    /* Tx buf size */
    buff_size = AQ_HW_TXBUF_MAX;

    tpb_tx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
    tpb_tx_buff_hi_threshold_per_tc_set(hw,
                        (buff_size * (1024 / 32U) * 66U) /
                        100U, tc);
    tpb_tx_buff_lo_threshold_per_tc_set(hw,
                        (buff_size * (1024 / 32U) * 50U) /
                        100U, tc);

    /* QoS Rx buf size per TC */
    tc = 0;
    buff_size = AQ_HW_RXBUF_MAX;

    rpb_rx_pkt_buff_size_per_tc_set(hw, buff_size, tc);
    rpb_rx_buff_hi_threshold_per_tc_set(hw,
                        (buff_size *
                        (1024U / 32U) * 66U) /
                        100U, tc);
    rpb_rx_buff_lo_threshold_per_tc_set(hw,
                        (buff_size *
                        (1024U / 32U) * 50U) /
                        100U, tc);

    /* QoS 802.1p priority -> TC mapping */
    for (i_priority = 8U; i_priority--;)
        rpf_rpb_user_priority_tc_map_set(hw, i_priority, 0U);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

static int aq_hw_offload_set(struct aq_hw *hw)
{
    int err = 0;

    AQ_DBG_ENTER();
    /* TX checksum offloads */
    tpo_ipv4header_crc_offload_en_set(hw, 1);
    tpo_tcp_udp_crc_offload_en_set(hw, 1);
    if (err < 0)
        goto err_exit;

    /* RX checksum offloads */
    rpo_ipv4header_crc_offload_en_set(hw, 1);
    rpo_tcp_udp_crc_offload_en_set(hw, 1);
    if (err < 0)
        goto err_exit;

    /* LSO offloads */
    tdm_large_send_offload_en_set(hw, 0xFFFFFFFFU);
    if (err < 0)
        goto err_exit;

    /* LRO offloads */
    {
        u32 i = 0;
        u32 val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
            ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
            ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));

        for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
            rpo_lro_max_num_of_descriptors_set(hw, val, i);

        rpo_lro_time_base_divider_set(hw, 0x61AU);
        rpo_lro_inactive_interval_set(hw, 0);
        /* The LRO timebase divider is 5 us (0x61A); multiplying by
         * 50 (0x32) yields the default maximum coalescing interval
         * of 250 us.
         */
        rpo_lro_max_coalescing_interval_set(hw, 50);

        rpo_lro_qsessions_lim_set(hw, 1U);

        rpo_lro_total_desc_lim_set(hw, 2U);

        rpo_lro_patch_optimization_en_set(hw, 0U);

        rpo_lro_min_pay_of_first_pkt_set(hw, 10U);

        rpo_lro_pkt_lim_set(hw, 1U);

        rpo_lro_en_set(hw, (hw->lro_enabled ? 0xFFFFFFFFU : 0U));
    }

    err = aq_hw_err_from_flags(hw);

err_exit:
    AQ_DBG_EXIT(err);
    return (err);
}

static int aq_hw_init_tx_path(struct aq_hw *hw)
{
    int err = 0;

    AQ_DBG_ENTER();

    /* Tx TC/RSS number config */
    tpb_tx_tc_mode_set(hw, 1U);

    thm_lso_tcp_flag_of_first_pkt_set(hw, 0x0FF6U);
    thm_lso_tcp_flag_of_middle_pkt_set(hw, 0x0FF6U);
    thm_lso_tcp_flag_of_last_pkt_set(hw, 0x0F7FU);

    /* Tx interrupts */
    tdm_tx_desc_wr_wb_irq_en_set(hw, 1U);

    /* misc */
    AQ_WRITE_REG(hw, 0x00007040U, 0x00010000U); //IS_CHIP_FEATURE(TPO2) ? 0x00010000U : 0x00000000U
    tdm_tx_dca_en_set(hw, 0U);
    tdm_tx_dca_mode_set(hw, 0U);

    tpb_tx_path_scp_ins_en_set(hw, 1U);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

static int aq_hw_init_rx_path(struct aq_hw *hw)
{
    //struct aq_nic_cfg_s *cfg = hw->aq_nic_cfg;
    unsigned int control_reg_val = 0U;
    int i;
    int err;

    AQ_DBG_ENTER();
    /* Rx TC/RSS number config */
    rpb_rpf_rx_traf_class_mode_set(hw, 1U);

    /* Rx flow control */
    rpb_rx_flow_ctl_mode_set(hw, 1U);

    /* RSS Ring selection */
    reg_rx_flr_rss_control1set(hw, 0xB3333333U);

    /* Multicast filters */
    for (i = AQ_HW_MAC_MAX; i--;) {
        rpfl2_uc_flr_en_set(hw, (i == 0U) ? 1U : 0U, i);
        rpfl2unicast_flr_act_set(hw, 1U, i);
    }

    reg_rx_flr_mcst_flr_msk_set(hw, 0x00000000U);
    reg_rx_flr_mcst_flr_set(hw, 0x00010FFFU, 0U);

    /* Vlan filters */
    rpf_vlan_outer_etht_set(hw, 0x88A8U);
    rpf_vlan_inner_etht_set(hw, 0x8100U);
    rpf_vlan_accept_untagged_packets_set(hw, true);
    rpf_vlan_untagged_act_set(hw, HW_ATL_RX_HOST);

    rpf_vlan_prom_mode_en_set(hw, 1);

    /* Rx Interrupts */
    rdm_rx_desc_wr_wb_irq_en_set(hw, 1U);

    /* misc */
    control_reg_val = 0x000F0000U; //RPF2

    /* RSS hash type set for IP/TCP */
    control_reg_val |= 0x1EU;

    AQ_WRITE_REG(hw, 0x00005040U, control_reg_val);

    rpfl2broadcast_en_set(hw, 1U);
    rpfl2broadcast_flr_act_set(hw, 1U);
    rpfl2broadcast_count_threshold_set(hw, 0xFFFFU & (~0U / 256U));

    rdm_rx_dca_en_set(hw, 0U);
    rdm_rx_dca_mode_set(hw, 0U);

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

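/*
 * Program unicast filter "index" with "mac_addr".  The address is split
 * into a 16-bit MSW (bytes 0-1) and a 32-bit LSW (bytes 2-5); the filter
 * is disabled around the update so a partially written address is never
 * matched.
 */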
int aq_hw_mac_addr_set(struct aq_hw *hw, u8 *mac_addr, u8 index)
{
    int err = 0;
    unsigned int h = 0U;
    unsigned int l = 0U;

    AQ_DBG_ENTER();
    if (!mac_addr) {
        err = -EINVAL;
        goto err_exit;
    }
    h = (mac_addr[0] << 8) | (mac_addr[1]);
    l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
        (mac_addr[4] << 8) | mac_addr[5];

    rpfl2_uc_flr_en_set(hw, 0U, index);
    rpfl2unicast_dest_addresslsw_set(hw, l, index);
    rpfl2unicast_dest_addressmsw_set(hw, h, index);
    rpfl2_uc_flr_en_set(hw, 1U, index);

    err = aq_hw_err_from_flags(hw);

err_exit:
    AQ_DBG_EXIT(err);
    return (err);
}

int aq_hw_init(struct aq_hw *hw, u8 *mac_addr, u8 adm_irq, bool msix)
{
    int err = 0;
    u32 val = 0;

    AQ_DBG_ENTER();

    /* Force limit MRRS on RDM/TDM to 2K */
    val = AQ_READ_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR);
    AQ_WRITE_REG(hw, AQ_HW_PCI_REG_CONTROL_6_ADR, (val & ~0x707) | 0x404);

    /* TX DMA total request limit. B0 hardware cannot handle more than
     * (8K-MRRS) of incoming DMA data. The value is in 256-byte units,
     * so 24 means 6K.
     */
    AQ_WRITE_REG(hw, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_ADR, 24);

    aq_hw_init_tx_path(hw);
    aq_hw_init_rx_path(hw);

    aq_hw_mac_addr_set(hw, mac_addr, AQ_HW_MAC);

    aq_hw_mpi_set(hw, MPI_INIT, hw->link_rate);

    aq_hw_qos_set(hw);

    err = aq_hw_err_from_flags(hw);
    if (err < 0)
        goto err_exit;

    /* Interrupts */
    itr_irq_status_cor_en_set(hw, 0); // Disable clear-on-read for status
    itr_irq_auto_mask_clr_en_set(hw, 1); // Enable auto-mask clear
    if (msix)
        itr_irq_mode_set(hw, 0x6); // MSI-X + multi vector
    else
        itr_irq_mode_set(hw, 0x5); // MSI + multi vector

    reg_gen_irq_map_set(hw, 0x80 | adm_irq, 3);

    aq_hw_offload_set(hw);

err_exit:
    AQ_DBG_EXIT(err);
    return (err);
}


int aq_hw_start(struct aq_hw *hw)
{
    int err;

    AQ_DBG_ENTER();
    tpb_tx_buff_en_set(hw, 1U);
    rpb_rx_buff_en_set(hw, 1U);
    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

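/*
 * Configure interrupt moderation.  Each per-ring moderation control
 * register appears to carry the minimum timer in bits 15:8 and the
 * maximum timer in bits 24:16 (see the shifts below).  hw->itr selects
 * the mode: 0 disables moderation, -1 applies the per-speed defaults
 * from the tables below, and any other value is clamped to 0x1FF and
 * used as the maximum timer (with half of it as the minimum).
 */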
int aq_hw_interrupt_moderation_set(struct aq_hw *hw)
{
    static unsigned int AQ_HW_NIC_timers_table_rx_[][2] = {
        {80, 120},      //{0x6U, 0x38U},/* 10Gbit */
        {0xCU, 0x70U},  /* 5Gbit */
        {0xCU, 0x70U},  /* 5Gbit 5GS */
        {0x18U, 0xE0U}, /* 2.5Gbit */
        {0x30U, 0x80U}, /* 1Gbit */
        {0x4U, 0x50U},  /* 100Mbit */
    };
    static unsigned int AQ_HW_NIC_timers_table_tx_[][2] = {
        {0x4fU, 0x1ff}, //{0xffU, 0xffU}, /* 10Gbit */
        {0x4fU, 0xffU}, /* 5Gbit */
        {0x4fU, 0xffU}, /* 5Gbit 5GS */
        {0x4fU, 0xffU}, /* 2.5Gbit */
        {0x4fU, 0xffU}, /* 1Gbit */
        {0x4fU, 0xffU}, /* 100Mbit */
    };

    u32 speed_index = 0U; /* ITR settings for 10G */
    u32 itr_rx = 2U;
    u32 itr_tx = 2U;
    int custom_itr = hw->itr;
    int active = custom_itr != 0;
    int err;

    AQ_DBG_ENTER();

    if (custom_itr == -1) {
        itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][0] << 0x8U; /* set min timer value */
        itr_rx |= AQ_HW_NIC_timers_table_rx_[speed_index][1] << 0x10U; /* set max timer value */

        itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][0] << 0x8U; /* set min timer value */
        itr_tx |= AQ_HW_NIC_timers_table_tx_[speed_index][1] << 0x10U; /* set max timer value */
    } else {
        if (custom_itr > 0x1FF)
            custom_itr = 0x1FF;

        itr_rx |= (custom_itr / 2) << 0x8U; /* set min timer value */
        itr_rx |= custom_itr << 0x10U; /* set max timer value */

        itr_tx |= (custom_itr / 2) << 0x8U; /* set min timer value */
        itr_tx |= custom_itr << 0x10U; /* set max timer value */
    }

    tdm_tx_desc_wr_wb_irq_en_set(hw, !active);
    tdm_tdm_intr_moder_en_set(hw, active);
    rdm_rx_desc_wr_wb_irq_en_set(hw, !active);
    rdm_rdm_intr_moder_en_set(hw, active);

    for (int i = HW_ATL_B0_RINGS_MAX; i--;) {
        reg_tx_intr_moder_ctrl_set(hw, itr_tx, i);
        reg_rx_intr_moder_ctrl_set(hw, itr_rx, i);
    }

    err = aq_hw_err_from_flags(hw);
    AQ_DBG_EXIT(err);
    return (err);
}

/**
 * @brief Set VLAN filter table
 * @details Configure the VLAN filter table to accept traffic (and assign
 *  it a queue) for the given VLAN IDs.
 * Note: call this while VLAN promiscuous mode is enabled so that traffic
 * is not lost during reconfiguration.
 *
 * @param self aq_hw_s
 * @param aq_vlans VLAN filter configuration
 * @return 0 - OK, <0 - error
 */
int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
			  struct aq_rx_filter_vlan *aq_vlans)
{
	int i;

	for (i = 0; i < AQ_HW_VLAN_MAX_FILTERS; i++) {
		hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
		hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
		if (aq_vlans[i].enable) {
			hw_atl_rpf_vlan_id_flr_set(self,
						   aq_vlans[i].vlan_id,
						   i);
			hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
			hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
			if (aq_vlans[i].queue != 0xFF) {
				hw_atl_rpf_vlan_rxq_flr_set(self,
							    aq_vlans[i].queue,
							    i);
				hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
			}
		}
	}

	return aq_hw_err_from_flags(self);
}

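/*
 * Usage sketch (hypothetical caller): accept VLAN 100 on queue 0 and
 * VLAN 200 on whatever queue RSS picks, leaving the other filter slots
 * disabled.
 *
 *	struct aq_rx_filter_vlan vlans[AQ_HW_VLAN_MAX_FILTERS] = { 0 };
 *
 *	vlans[0].enable = 1; vlans[0].vlan_id = 100; vlans[0].queue = 0;
 *	vlans[1].enable = 1; vlans[1].vlan_id = 200; vlans[1].queue = 0xFF;
 *
 *	hw_atl_b0_hw_vlan_promisc_set(self, true);
 *	hw_atl_b0_hw_vlan_set(self, vlans);
 *	hw_atl_b0_hw_vlan_promisc_set(self, false);
 */
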
int hw_atl_b0_hw_vlan_promisc_set(struct aq_hw_s *self, bool promisc)
{
	hw_atl_rpf_vlan_prom_mode_en_set(self, promisc);
	return aq_hw_err_from_flags(self);
}


void aq_hw_set_promisc(struct aq_hw_s *self, bool l2_promisc, bool vlan_promisc, bool mc_promisc)
{
	AQ_DBG_ENTERA("promisc %d, vlan_promisc %d, allmulti %d",
	    l2_promisc, vlan_promisc, mc_promisc);

	rpfl2promiscuous_mode_en_set(self, l2_promisc);

	/* VLAN promiscuity is forced on while L2 promiscuous mode is on */
	hw_atl_b0_hw_vlan_promisc_set(self, l2_promisc || vlan_promisc);

	rpfl2_accept_all_mc_packets_set(self, mc_promisc);
	rpfl2multicast_flr_en_set(self, mc_promisc, 0);

	AQ_DBG_EXIT(0);
}

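/*
 * Load the RSS hash key into hardware.  The key is written one dword at
 * a time, highest-index dword first and byte-swapped, through the
 * rpf_rss_key write port; each write is polled for completion.
 */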
int aq_hw_rss_hash_set(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
	u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
	u32 addr = 0U;
	u32 i = 0U;
	int err = 0;

	AQ_DBG_ENTER();

	memcpy(rss_key_dw, rss_key, HW_ATL_RSS_HASHKEY_SIZE);

	for (i = 10, addr = 0U; i--; ++addr) {
		u32 key_data = bswap32(rss_key_dw[i]);

		rpf_rss_key_wr_data_set(self, key_data);
		rpf_rss_key_addr_set(self, addr);
		rpf_rss_key_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	AQ_DBG_EXIT(err);
	return (err);
}

int aq_hw_rss_hash_get(struct aq_hw_s *self, u8 rss_key[HW_ATL_RSS_HASHKEY_SIZE])
{
	u32 rss_key_dw[HW_ATL_RSS_HASHKEY_SIZE / 4];
	u32 addr = 0U;
	u32 i = 0U;
	int err = 0;

	AQ_DBG_ENTER();

	for (i = 10, addr = 0U; i--; ++addr) {
		rpf_rss_key_addr_set(self, addr);
		rss_key_dw[i] = bswap32(rpf_rss_key_rd_data_get(self));
	}
	memcpy(rss_key, rss_key_dw, HW_ATL_RSS_HASHKEY_SIZE);

	err = aq_hw_err_from_flags(self);

	AQ_DBG_EXIT(err);
	return (err);
}

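/*
 * Load the RSS indirection table.  Each table entry is a 3-bit queue
 * number; entries are packed back to back into an array of 16-bit words,
 * which is then written out through the redirection-table write port.
 */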
int aq_hw_rss_set(struct aq_hw_s *self, u8 rss_table[HW_ATL_RSS_INDIRECTION_TABLE_MAX])
{
	u16 bitary[(HW_ATL_RSS_INDIRECTION_TABLE_MAX *
					3 / 16U)];
	int err = 0;
	u32 i = 0U;

	memset(bitary, 0, sizeof(bitary));

	for (i = HW_ATL_RSS_INDIRECTION_TABLE_MAX; i--;) {
		(*(u32 *)(bitary + ((i * 3U) / 16U))) |=
			((rss_table[i]) << ((i * 3U) & 0xFU));
	}

	for (i = ARRAY_SIZE(bitary); i--;) {
		rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
		rpf_rss_redir_tbl_addr_set(self, i);
		rpf_rss_redir_wr_en_set(self, 1U);
		AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0,
			       1000U, 10U);
		if (err < 0)
			goto err_exit;
	}

	err = aq_hw_err_from_flags(self);

err_exit:
	return (err);
}

int aq_hw_udp_rss_enable(struct aq_hw_s *self, bool enable)
{
	int err = 0;

	if (!enable) {
		/* HW bug workaround:
		 * Disable RSS for UDP using rx flow filter 0.
		 * HW does not track the RSS stream for fragmented UDP,
		 * and the 0x5040 control reg does not work.
		 */
		hw_atl_rpf_l3_l4_enf_set(self, true, 0);
		hw_atl_rpf_l4_protf_en_set(self, true, 0);
		hw_atl_rpf_l3_l4_rxqf_en_set(self, true, 0);
		hw_atl_rpf_l3_l4_actf_set(self, L2_FILTER_ACTION_HOST, 0);
		hw_atl_rpf_l3_l4_rxqf_set(self, 0, 0);
		hw_atl_rpf_l4_protf_set(self, HW_ATL_RX_UDP, 0);
	} else {
		hw_atl_rpf_l3_l4_enf_set(self, false, 0);
	}

	err = aq_hw_err_from_flags(self);
	return (err);
}
907