1 /*******************************************************************************
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  * Copyright 2014 QLogic Corporation
22  * The contents of this file are subject to the terms of the
23  * QLogic End User License (the "License").
24  * You may not use this file except in compliance with the License.
25  *
26  * You can obtain a copy of the License at
27  * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
28  * QLogic_End_User_Software_License.txt
29  * See the License for the specific language governing permissions
30  * and limitations under the License.
31  *
32  *
33  * Module Description:
34  *      This file contains functions that handle chip init and reset
35  *
36  ******************************************************************************/
37 #include "lm5710.h"
38 #include "command.h"
39 #include "bd_chain.h"
40 #include "ecore_init.h"
41 #include "ecore_init_ops.h"
42 
43 // the phys address is shifted right 12 bits, and a 1 (valid) bit is added at the 53rd bit
44 // then since this is a wide register(TM) we split it into two 32 bit writes
45 #define ONCHIP_ADDR1(x)   ((u32_t)( x>>12 & 0xFFFFFFFF ))
46 #define ONCHIP_ADDR2(x)   ((u32_t)( 1<<20 | x>>44 ))
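// Worked example (illustrative values): for a phys addr of 0x0001_2345_6789_A000,
// ONCHIP_ADDR1(x) yields 0x3456789A (x >> 12, low 32 bits) and ONCHIP_ADDR2(x)
// yields 0x00100012 (valid bit 20 set, ORed with x >> 44 == 0x12).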
47 
48 #define ONCHIP_ADDR0_VALID() ((u32_t)( 1<<20 )) /* Address value 0 with the valid bit on. */
49 
50 #define PXP2_SET_FIRST_LAST_ILT(pdev, blk, first, last) \
51                 do { \
52                     if (CHIP_IS_E1(pdev)) { \
53                         REG_WR(pdev,(PORT_ID(pdev) ? PXP2_REG_PSWRQ_##blk##1_L2P: PXP2_REG_PSWRQ_##blk##0_L2P),((last)<<10 | (first))); \
54                     } else { \
55                         REG_WR(pdev,PXP2_REG_RQ_##blk##_FIRST_ILT,(first)); \
56                         REG_WR(pdev,PXP2_REG_RQ_##blk##_LAST_ILT,(last)); \
57                     } \
58                 } while(0)
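// Note: on E1 both bounds are packed into a single per-port register as
// ((last << 10) | (first)); non-E1 chips use separate FIRST/LAST ILT registers.
// Illustrative call (block name is just an example): PXP2_SET_FIRST_LAST_ILT(pdev, CDU, first, last)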
59 
60                                      /*  offset                  valid
61                                                                  e1,e1h,e2,e3 save / restore */
62 #define NIG_REG_PORT_0_OFFSETS_VALUES { { NIG_REG_LLH0_FUNC_EN,        {0,1,1,1}, (LM_NIG_RESTORE) },      \
63                                         { NIG_REG_LLH0_FUNC_VLAN_ID,   {0,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
64                                         { NIG_REG_LLH0_ACPI_ENABLE,    {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
65                                         { NIG_REG_LLH0_ACPI_PAT_0_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
66                                         { NIG_REG_LLH0_ACPI_PAT_1_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
67                                         { NIG_REG_LLH0_ACPI_PAT_2_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
68                                         { NIG_REG_LLH0_ACPI_PAT_3_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
69                                         { NIG_REG_LLH0_ACPI_PAT_4_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
70                                         { NIG_REG_LLH0_ACPI_PAT_5_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
71                                         { NIG_REG_LLH0_ACPI_PAT_6_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
72                                         { NIG_REG_LLH0_ACPI_PAT_7_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
73                                         { NIG_REG_LLH0_ACPI_PAT_0_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
74                                         { NIG_REG_LLH0_ACPI_PAT_1_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
75                                         { NIG_REG_LLH0_ACPI_PAT_2_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
76                                         { NIG_REG_LLH0_ACPI_PAT_3_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
77                                         { NIG_REG_LLH0_ACPI_PAT_4_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
78                                         { NIG_REG_LLH0_ACPI_PAT_5_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
79                                         { NIG_REG_LLH0_ACPI_PAT_6_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
80                                         { NIG_REG_LLH0_ACPI_PAT_7_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }}
81 
82 #define NIG_REG_PORT_1_OFFSETS_VALUES { { NIG_REG_LLH1_FUNC_EN,        {0,1,1,1}, (LM_NIG_RESTORE) },        \
83                                         { NIG_REG_LLH1_FUNC_VLAN_ID,   {0,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
84                                         { NIG_REG_LLH1_ACPI_ENABLE,    {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
85                                         { NIG_REG_LLH1_ACPI_PAT_0_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
86                                         { NIG_REG_LLH1_ACPI_PAT_1_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
87                                         { NIG_REG_LLH1_ACPI_PAT_2_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
88                                         { NIG_REG_LLH1_ACPI_PAT_3_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
89                                         { NIG_REG_LLH1_ACPI_PAT_4_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
90                                         { NIG_REG_LLH1_ACPI_PAT_5_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
91                                         { NIG_REG_LLH1_ACPI_PAT_6_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
92                                         { NIG_REG_LLH1_ACPI_PAT_7_LEN, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
93                                         { NIG_REG_LLH1_ACPI_PAT_0_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
94                                         { NIG_REG_LLH1_ACPI_PAT_1_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
95                                         { NIG_REG_LLH1_ACPI_PAT_2_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
96                                         { NIG_REG_LLH1_ACPI_PAT_3_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
97                                         { NIG_REG_LLH1_ACPI_PAT_4_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
98                                         { NIG_REG_LLH1_ACPI_PAT_5_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
99                                         { NIG_REG_LLH1_ACPI_PAT_6_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }, \
100                                         { NIG_REG_LLH1_ACPI_PAT_7_CRC, {1,1,1,1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }}
101 
102 #define ECORE_INIT_COMN(_pdev, _block) \
103     ecore_init_block(_pdev, BLOCK_##_block, PHASE_COMMON)
104 
105 #define ECORE_INIT_PORT(_pdev, _block) \
106     ecore_init_block(_pdev, BLOCK_##_block, PHASE_PORT0 + PORT_ID(_pdev))
107 
108 #define ECORE_INIT_FUNC(_pdev, _block) \
109     ecore_init_block(_pdev, BLOCK_##_block, PHASE_PF0 + FUNC_ID(_pdev))
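// These wrappers map a block to its ecore init phase: PHASE_COMMON, PHASE_PORT0/1
// (selected by PORT_ID) or PHASE_PF0..n (selected by FUNC_ID).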
110 
111 typedef enum {
112     LM_RESET_NIG_OP_SAVE      = 0,
113     LM_RESET_NIG_OP_PROCESS   = 1,
114     LM_RESET_NIG_OP_RESTORE   = 2,
115     LM_RESET_NIG_OP_MAX       = 3
116 } lm_reset_nig_op_t;
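/* Per the op names, a NIG save/restore sequence is expected to run these ops in
 * order: SAVE the registers listed in the tables above, PROCESS (the reset
 * itself), then RESTORE. */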
117 
118 typedef struct _lm_nig_save_restore_data_t
119 {
120     u32_t offset;
121     struct {
122         u8_t e1;  /* 57710 */
123         u8_t e1h; /* 57711 */
124         u8_t e2;  /* 57712 */
125         u8_t e3;  /* 578xx */
126     } reg_valid;  /* 1 if valid for the chip, 0 otherwise */
127 
128     u8_t  flags;
129     #define LM_NIG_SAVE    ((u8_t)0x1) /* Should this register be saved    */
130     #define LM_NIG_RESTORE ((u8_t)0x2) /* Should this register be restored */
131 } lm_nig_save_restore_data_t ;
132 
133 lm_chip_global_t g_lm_chip_global[MAX_PCI_BUS_NUM] = {{0}};
134 
135 void lm_reset_set_inprogress(struct _lm_device_t *pdev)
136 {
137     const u8_t bus_num = INST_ID_TO_BUS_NUM(PFDEV(pdev)->vars.inst_id) ;
138     const u8_t flags   = LM_CHIP_GLOBAL_FLAG_RESET_IN_PROGRESS;
139 
140     SET_FLAGS( g_lm_chip_global[bus_num].flags, flags) ;
141 }
142 
143 void lm_reset_clear_inprogress(struct _lm_device_t *pdev)
144 {
145     const u8_t bus_num = INST_ID_TO_BUS_NUM(PFDEV(pdev)->vars.inst_id) ;
146     const u8_t flags   = LM_CHIP_GLOBAL_FLAG_RESET_IN_PROGRESS;
147 
148     RESET_FLAGS( g_lm_chip_global[bus_num].flags, flags) ;
149 }
150 
151 u8_t lm_pm_reset_is_inprogress(struct _lm_device_t *pdev)
152 {
153     const u8_t bus_num = INST_ID_TO_BUS_NUM(PFDEV(pdev)->vars.inst_id) ;
154     const u8_t flags   = LM_CHIP_GLOBAL_FLAG_RESET_IN_PROGRESS;
155 
156     return ( 0 != GET_FLAGS(g_lm_chip_global[bus_num].flags, flags ) );
157 }
158 
159 void lm_read_attn_regs(lm_device_t *pdev, u32_t * attn_sig_af_inv_arr, u32_t arr_size);
160 u8_t lm_recoverable_error(lm_device_t *pdev, u32_t * attn_sig, u32_t arr_size);
161 
162 /**
163  * @Description
164  *      This function checks whether there is an attention
165  *      pending that is recoverable. If there is, then we won't
166  *      assert in the locations that call reset_is_inprogress,
167  *      because there's a high probability we'll overcome the
168  *      error with recovery
169  * @param pdev
170  *
171  * @return u8_t
172  */
173 u8_t lm_er_handling_pending(struct _lm_device_t *pdev)
174 {
175     u32_t  attn_sig_af_inv_arr[MAX_ATTN_REGS] = {0};
176 
177     if (!pdev->params.enable_error_recovery || CHIP_IS_E1x(pdev))
178     {
179         return FALSE;
180     }
181 
182     lm_read_attn_regs(pdev, attn_sig_af_inv_arr, ARRSIZE(attn_sig_af_inv_arr));
183 
184     return lm_recoverable_error(pdev, attn_sig_af_inv_arr, ARRSIZE(attn_sig_af_inv_arr));
185 }
186 
187 u8_t lm_reset_is_inprogress(struct _lm_device_t *pdev)
188 {
189     u8_t reset_in_progress =
190         lm_pm_reset_is_inprogress(pdev)        ||
191         lm_er_handling_pending(pdev)           ||
192         lm_fl_reset_is_inprogress(PFDEV(pdev)) ||
193         pdev->panic                            ||
194         (IS_VFDEV(pdev) ? lm_fl_reset_is_inprogress(pdev) : FALSE);
195 
196     return reset_in_progress;
197 }
198 
199 /*
200  *------------------------------------------------------------------------
201  * FLR in progress handling -
202  *-------------------------------------------------------------------------
203  */
204 void lm_fl_reset_set_inprogress(struct _lm_device_t *pdev)
205 {
206     pdev->params.is_flr = TRUE;
207     if (IS_PFDEV(pdev))
208     {
209         DbgMessage(pdev, FATAL, "PF[%d] is under FLR\n",FUNC_ID(pdev));
210     }
211     else
212     {
213         DbgMessage(pdev, FATAL, "VF[%d] is under FLR\n",ABS_VFID(pdev));
214     }
215     return;
216 }
217 
218 void lm_fl_reset_clear_inprogress(struct _lm_device_t *pdev)
219 {
220     pdev->params.is_flr = FALSE;
221     return;
222 }
223 
224 u8_t lm_fl_reset_is_inprogress(struct _lm_device_t *pdev)
225 {
226     return  pdev->params.is_flr;
227 }
228 
229 u8_t lm_is_function_after_flr(struct _lm_device_t * pdev)
230 {
231     u8_t is_after_flr = FALSE;
232     is_after_flr = pdev->params.is_flr;
233     if (is_after_flr)
234     {
235         if (IS_PFDEV(pdev))
236         {
237             DbgMessage(pdev, FATAL, "PF[%d] was FLRed\n",FUNC_ID(pdev));
238         }
239         else
240         {
241             DbgMessage(pdev, FATAL, "VF[%d] was FLRed\n",ABS_VFID(pdev));
242         }
243     }
244     return is_after_flr;
245 }
246 
247 u32_t lm_dmae_idx_to_go_cmd( u8_t idx );
248 
249 lm_status_t lm_cleanup_after_flr(struct _lm_device_t * pdev)
250 {
251     lm_status_t lm_status  = LM_STATUS_SUCCESS;
252     u32_t wait_ms          = 60000000;
253     u16_t pretend_value    = 0;
254     u32_t factor           = 0;
255     u32_t cleanup_complete = 0;
256 #if defined(__LINUX) || defined(_VBD_)
257     u32_t pcie_caps_offset = 0;
258 #endif
259 
260     u8_t  function_for_clean_up = 0;
261     u8_t  idx                   = 0;
262 
263     struct sdm_op_gen final_cleanup;
264 
265     // TODO - use here pdev->vars.clk_factor
266     if (CHIP_REV_IS_EMUL(pdev))
267     {
268             factor = LM_EMUL_FACTOR;
269     }
270     else if (CHIP_REV_IS_FPGA(pdev))
271     {
272             factor = LM_FPGA_FACTOR;
273     }
274     else
275     {
276             factor = 1;
277     }
278 
279     wait_ms *= factor;
280     pdev->flr_stats.default_wait_interval_ms = DEFAULT_WAIT_INTERVAL_MICSEC;
281     if (IS_PFDEV(pdev))
282     {
283         DbgMessage(pdev, FATAL, "lm_cleanup_after_flr PF[%d] >>>\n",FUNC_ID(pdev));
284         pdev->flr_stats.is_pf = TRUE;
285         /* Re-enable target PF read access */
286         REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
287 
288         /* Poll on CFC per-pf usage-counter until it's 0 */
289 
290         pdev->flr_stats.cfc_usage_counter = REG_WAIT_VERIFY_VAL(pdev, CFC_REG_NUM_LCIDS_INSIDE_PF, 0, wait_ms);
291         DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed CFC per pf usage counter\n",pdev->flr_stats.cfc_usage_counter,DEFAULT_WAIT_INTERVAL_MICSEC);
292         //return LM_STATUS_FAILURE;
293 
294         /* Poll on DQ per-pf usage-counter (until full dq-cleanup is implemented) until it's 0 */
295         pdev->flr_stats.dq_usage_counter = REG_WAIT_VERIFY_VAL(pdev, DORQ_REG_PF_USAGE_CNT, 0, wait_ms);
296         DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed DQ per pf usage counter\n", pdev->flr_stats.dq_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC);
297 
298         /* Poll on QM per-pf usage-counter until it's 0 */
299         pdev->flr_stats.qm_usage_counter = REG_WAIT_VERIFY_VAL(pdev, QM_REG_PF_USG_CNT_0 + 4*FUNC_ID(pdev),0, wait_ms);
300         DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed QM per pf usage counter\n", pdev->flr_stats.qm_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC);
301 
302         /* Poll on TM per-pf usage-counter until it's 0 */
303 
304         pdev->flr_stats.tm_vnic_usage_counter = REG_WAIT_VERIFY_VAL(pdev, TM_REG_LIN0_VNIC_UC + 4*PORT_ID(pdev),0, wait_ms);
305         DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed TM%d(VNIC) per pf usage counter\n",
306                     pdev->flr_stats.tm_vnic_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC, PORT_ID(pdev));
307 
308         pdev->flr_stats.tm_num_scans_usage_counter = REG_WAIT_VERIFY_VAL(pdev, TM_REG_LIN0_NUM_SCANS + 4*PORT_ID(pdev),0, wait_ms);
309         DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed TM%d(NUM_SCANS) per pf usage counter\n",
310                     pdev->flr_stats.tm_num_scans_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC, PORT_ID(pdev));
311 
312         pdev->flr_stats.dmae_cx = REG_WAIT_VERIFY_VAL(pdev, lm_dmae_idx_to_go_cmd(DMAE_WB_ACCESS_FUNCTION_CMD(FUNC_ID(pdev))), 0, wait_ms);
313         DbgMessage(pdev, FATAL, "%d*%dms waiting for zeroed DMAE_REG_GO_C%d \n",
314                     pdev->flr_stats.dmae_cx, DEFAULT_WAIT_INTERVAL_MICSEC, DMAE_WB_ACCESS_FUNCTION_CMD(FUNC_ID(pdev)));
315     }
316     else
317     {
318         DbgMessage(pdev, FATAL, "lm_cleanup_after_flr VF[%d] >>>\n",ABS_VFID(pdev));
319 
320         /*
321             VF FLR only part
322         a.  Wait until there are no pending ramrods for this VFid in the PF DB - no pending ramrods are expected for this VF. It's based on "FLR not during driver load/unload".
323             What about set MAC?
324 
325         b.  Send the new "L2 connection terminate" ramrod for each L2 CID that was used by the VF,
326             including sending the doorbell with the "terminate" flag. - Will be implemented in FW later
327 
328         c.  Send CFC delete ramrod on all L2 connections of that VF (set the CDU-validation field to "invalid"). - part of FW cleanup. VF_TO_PF_CID must be initialized in
329             the PF CID array*/
330 
331         /*  3.  Poll on the DQ per-function usage-counter until it's 0. */
332         pretend_value = ABS_FUNC_ID(pdev) | (1<<3) | (ABS_VFID(pdev) << 4);
333         lm_status = lm_pretend_func(PFDEV(pdev), pretend_value);
334         if (lm_status == LM_STATUS_SUCCESS)
335         {
336             pdev->flr_stats.dq_usage_counter = REG_WAIT_VERIFY_VAL(PFDEV(pdev), DORQ_REG_VF_USAGE_CNT, 0, wait_ms);
337             lm_pretend_func(PFDEV(pdev), ABS_FUNC_ID(pdev));
338             DbgMessage(pdev, FATAL, "%d*%dms waiting for DQ per vf usage counter\n", pdev->flr_stats.dq_usage_counter, DEFAULT_WAIT_INTERVAL_MICSEC);
339         }
340         else
341         {
342             DbgMessage(pdev, FATAL, "lm_pretend_func(%x) returns %d\n",pretend_value,lm_status);
343             DbgMessage(pdev, FATAL, "VF[%d]: could not read DORQ_REG_VF_USAGE_CNT\n", ABS_VFID(pdev));
344             return lm_status;
345         }
346     }
347 
348 /*  4.  Activate the FW cleanup process by activating AggInt in the FW with GRC. Set the bit of the relevant function in the AggInt bitmask,
349         to indicate to the FW which function is being cleaned. Wait for the per-function completion indication in the Cstorm RAM
350 */
351     function_for_clean_up = IS_VFDEV(pdev) ? FW_VFID(pdev) : FUNC_ID(pdev);
352     cleanup_complete = 0xFFFFFFFF;
353     LM_INTMEM_READ32(PFDEV(pdev),CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up),&cleanup_complete, BAR_CSTRORM_INTMEM);
354     DbgMessage(pdev, FATAL, "CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET is %x",cleanup_complete);
355     if (cleanup_complete)
356     {
357         DbgBreak();
358     }
359 
360     final_cleanup.command = (XSTORM_AGG_INT_FINAL_CLEANUP_INDEX << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM;
361     final_cleanup.command |= (XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE;
362     final_cleanup.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
363     final_cleanup.command |= (function_for_clean_up << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX;
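    /* The assembled SDM op-gen command packs: the final-cleanup completion
       param and type, a "vector index valid" flag, and the id of the function
       being cleaned as the aggregated-interrupt vector index. */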
364 
365     DbgMessage(pdev, FATAL, "Final cleanup\n");
366 
367     REG_WR(PFDEV(pdev),XSDM_REG_OPERATION_GEN, final_cleanup.command);
368     pdev->flr_stats.final_cleanup_complete = REG_WAIT_VERIFY_VAL(PFDEV(pdev), BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up), 1, wait_ms);
369     DbgMessage(pdev, FATAL, "%d*%dms waiting for final cleanup compete\n", pdev->flr_stats.final_cleanup_complete, DEFAULT_WAIT_INTERVAL_MICSEC);
370     /* Lets cleanup for next FLR final-cleanup... */
371     LM_INTMEM_WRITE32(PFDEV(pdev),CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(function_for_clean_up),0, BAR_CSTRORM_INTMEM);
372 
373 
374 /*  5.  ATC cleanup. This process will include the following steps (note that ATC will not be available for phase2 of the
375         integration and the following should be added only in phase3):
376     a.  Optionally, wait 2 ms. This is not a must. The driver can start polling (next steps) immediately,
377         but take into account that it may take time till the done indications will be set.
378     b.  Wait until INVALIDATION_DONE[function] = 1
379     c.  Write-clear INVALIDATION_DONE[function] */
380 
381 
382 /*  6.  Verify PBF cleanup. Do the following for all PBF queues (queues 0,1,4, that will be indicated below with N):
383     a.  Make sure PBF command-queue is flushed: Read pN_tq_occupancy. Let's say that the value is X.
384         This number indicates the number of occupied transmission-queue lines.
385         Poll on pN_tq_occupancy and pN_tq_lines_freed_cnt until one of the following:
386             i.  pN_tq_occupancy is 0 (queue is empty). OR
387             ii. pN_tq_lines_freed_cnt has advanced (cyclically) by X (all lines that were in the queue were processed). */
388 
389     for (idx = 0; idx < 3; idx++)
390     {
391         u32_t tq_to_free;
392         u32_t tq_freed_cnt_start;
393         u32_t tq_occ;
394         u32_t tq_freed_cnt_last;
395         u32_t pbf_reg_pN_tq_occupancy = 0;
396         u32_t pbf_reg_pN_tq_lines_freed_cnt = 0;
397 
398         switch (idx)
399         {
400         case 0:
401             pbf_reg_pN_tq_occupancy = (CHIP_IS_E3B0(pdev))? PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY;
402             pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT;
403             break;
404         case 1:
405             pbf_reg_pN_tq_occupancy = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY;
406             pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT;
407             break;
408         case 2:
409             pbf_reg_pN_tq_occupancy = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY;
410             pbf_reg_pN_tq_lines_freed_cnt = (CHIP_IS_E3B0(pdev)) ? PBF_REG_TQ_LINES_FREED_CNT_LB_Q : PBF_REG_P4_TQ_LINES_FREED_CNT;
411             break;
412         }
413         pdev->flr_stats.pbf_queue[idx] = 0;
414         tq_freed_cnt_last = tq_freed_cnt_start = REG_RD(PFDEV(pdev), pbf_reg_pN_tq_lines_freed_cnt);
415         tq_occ = tq_to_free = REG_RD(PFDEV(pdev), pbf_reg_pN_tq_occupancy);
416         DbgMessage(pdev, FATAL, "TQ_OCCUPANCY[%d]      : s:%x\n", (idx == 2) ? 4 : idx, tq_to_free);
417         DbgMessage(pdev, FATAL, "TQ_LINES_FREED_CNT[%d]: s:%x\n", (idx == 2) ? 4 : idx, tq_freed_cnt_start);
418         while(tq_occ && ((u32_t)S32_SUB(tq_freed_cnt_last, tq_freed_cnt_start) < tq_to_free))
419         {
420             if (pdev->flr_stats.pbf_queue[idx]++ < wait_ms/DEFAULT_WAIT_INTERVAL_MICSEC)
421             {
422                 mm_wait(PFDEV(pdev), DEFAULT_WAIT_INTERVAL_MICSEC);
423                 tq_occ = REG_RD(PFDEV(pdev), pbf_reg_pN_tq_occupancy);
424                 tq_freed_cnt_last = REG_RD(PFDEV(pdev), pbf_reg_pN_tq_lines_freed_cnt);
425             }
426             else
427             {
428                 DbgMessage(pdev, FATAL, "TQ_OCCUPANCY[%d]      : c:%x\n", (idx == 2) ? 4 : idx, tq_occ);
429                 DbgMessage(pdev, FATAL, "TQ_LINES_FREED_CNT[%d]: c:%x\n", (idx == 2) ? 4 : idx, tq_freed_cnt_last);
430                 DbgBreak();
431                 break;
432             }
433         }
434         DbgMessage(pdev, FATAL, "%d*%dms waiting for PBF command queue[%d] is flushed\n",
435                     pdev->flr_stats.pbf_queue[idx], DEFAULT_WAIT_INTERVAL_MICSEC, (idx == 2) ? 4 : idx);
436     }
437 
438 /*  b.  Make sure PBF transmission buffer is flushed: read pN_init_crd once and keep it in variable Y.
439         Read pN_credit and keep it in X. Poll on pN_credit and pN_internal_crd_freed until one of the following:
440             i.  (Y - pN_credit) is 0 (transmission buffer is empty). OR
441             ii. pN_internal_crd_freed_cnt has advanced (cyclically) by Y-X (all transmission buffer lines that were occupied were freed).*/
442 
443     for (idx = 0; idx < 3; idx++)
444     {
445         u32_t init_crd;
446         u32_t credit_last,credit_start;
447         u32_t inernal_freed_crd_start;
448         u32_t inernal_freed_crd_last = 0;
449         u32_t pbf_reg_pN_init_crd = 0;
450         u32_t pbf_reg_pN_credit = 0;
451         u32_t pbf_reg_pN_internal_crd_freed = 0;
452         switch (idx)
453         {
454         case 0:
455             pbf_reg_pN_init_crd = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD;
456             pbf_reg_pN_credit = (CHIP_IS_E3B0(pdev)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT;
457             pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : PBF_REG_P0_INTERNAL_CRD_FREED_CNT;
458             break;
459         case 1:
460             pbf_reg_pN_init_crd = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD;
461             pbf_reg_pN_credit = (CHIP_IS_E3B0(pdev)) ? PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT;
462             pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : PBF_REG_P1_INTERNAL_CRD_FREED_CNT;
463             break;
464         case 2:
465             pbf_reg_pN_init_crd = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD;
466             pbf_reg_pN_credit = (CHIP_IS_E3B0(pdev)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT;
467             pbf_reg_pN_internal_crd_freed = (CHIP_IS_E3B0(pdev)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : PBF_REG_P4_INTERNAL_CRD_FREED_CNT;
468             break;
469         }
470         pdev->flr_stats.pbf_transmit_buffer[idx] = 0;
471         inernal_freed_crd_last = inernal_freed_crd_start = REG_RD(PFDEV(pdev), pbf_reg_pN_internal_crd_freed);
472         credit_last = credit_start = REG_RD(PFDEV(pdev), pbf_reg_pN_credit);
473         init_crd = REG_RD(PFDEV(pdev), pbf_reg_pN_init_crd);
474         DbgMessage(pdev, FATAL, "INIT CREDIT[%d]       : %x\n", (idx == 2) ? 4 : idx, init_crd);
475         DbgMessage(pdev, FATAL, "CREDIT[%d]            : s:%x\n", (idx == 2) ? 4 : idx, credit_start);
476         DbgMessage(pdev, FATAL, "INTERNAL_CRD_FREED[%d]: s:%x\n", (idx == 2) ? 4 : idx, inernal_freed_crd_start);
477         while ((credit_last != init_crd)
478                && (u32_t)S32_SUB(inernal_freed_crd_last, inernal_freed_crd_start) < (init_crd - credit_start))
479         {
480             if (pdev->flr_stats.pbf_transmit_buffer[idx]++ < wait_ms/DEFAULT_WAIT_INTERVAL_MICSEC)
481             {
482                 mm_wait(PFDEV(pdev), DEFAULT_WAIT_INTERVAL_MICSEC);
483                 credit_last = REG_RD(PFDEV(pdev), pbf_reg_pN_credit);
484                 inernal_freed_crd_last = REG_RD(PFDEV(pdev), pbf_reg_pN_internal_crd_freed);
485             }
486             else
487             {
488                 DbgMessage(pdev, FATAL, "CREDIT[%d]            : c:%x\n", (idx == 2) ? 4 : idx, credit_last);
489                 DbgMessage(pdev, FATAL, "INTERNAL_CRD_FREED[%d]: c:%x\n", (idx == 2) ? 4 : idx, inernal_freed_crd_last);
490                 DbgBreak();
491                 break;
492             }
493         }
494         DbgMessage(pdev, FATAL, "%d*%dms waiting for PBF transmission buffer[%d] is flushed\n",
495                     pdev->flr_stats.pbf_transmit_buffer[idx], DEFAULT_WAIT_INTERVAL_MICSEC, (idx == 2) ? 4 : idx);
496     }
497 
498 /*  7.  Wait for 100ms in order to make sure that the chip is clean, including all PCI related paths
499         (in Emulation the driver can wait for 10ms*EmulationFactor, i.e.: 20s). This is especially required if FW doesn't implement
500         the flows in Optional Operations (future enhancements). */
501     mm_wait(pdev, 10000*factor);
502 
503 /*  8.  Verify that the transaction-pending bit of each of the function in the Device Status Register in the PCIe is cleared. */
504 
505 #if defined(__LINUX) || defined(_VBD_)
506     pcie_caps_offset = mm_get_cap_offset(pdev, PCI_CAP_PCIE);
507     if (pcie_caps_offset != 0 && pcie_caps_offset != 0xFFFFFFFF)
508     {
509         u32_t dev_control_and_status = 0xFFFFFFFF;
510         mm_read_pci(pdev, pcie_caps_offset + PCIE_DEV_CTRL, &dev_control_and_status);
511         DbgMessage(pdev, FATAL, "Device Control&Status of PCIe caps is %x\n",dev_control_and_status);
512         if (dev_control_and_status & (PCIE_DEV_STATUS_PENDING_TRANSACTION << 16))
513         {
514             DbgBreak();
515         }
516     }
517 #else
518     DbgMessage(pdev, FATAL, "Function mm_get_cap_offset is not implemented yet\n");
519     DbgBreak();
520 #endif
521 /*  9.  Initialize the function as usual. This should also include re-enabling the function in all the HW blocks and Storms that
522     were disabled by the MCP, and cleaning relevant per-function information in the chip (internal RAM related information, IGU memory etc.).
523         a.  In case of VF, PF resources that were allocated for previous VF can be re-used by the new VF. If there are resources
524             that are not needed by the new VF then they should be cleared.
525         b.  Note that as long as slow-path prod/cons update to Xstorm is not atomic, they must be cleared by the driver before setting
526             the function to "enable" in the Xstorm.
527         c.  Don't forget to enable the VF in the PXP or the DMA operation for PF in the PXP. */
528 
529     if (IS_PFDEV(pdev))
530     {
531         u32_t m_en;
532         u32_t tmp = 0;
533 
534         tmp = REG_RD(pdev,CFC_REG_WEAK_ENABLE_PF);
535         DbgMessage(pdev, FATAL, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n",tmp);
536 
537         tmp = REG_RD(pdev,PBF_REG_DISABLE_PF);
538         DbgMessage(pdev, FATAL, "PBF_REG_DISABLE_PF is 0x%x\n",tmp);
539 
540         tmp = REG_RD(pdev,IGU_REG_PCI_PF_MSI_EN);
541         DbgMessage(pdev, FATAL, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n",tmp);
542 
543         tmp = REG_RD(pdev,IGU_REG_PCI_PF_MSIX_EN);
544         DbgMessage(pdev, FATAL, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n",tmp);
545 
546         tmp = REG_RD(pdev,IGU_REG_PCI_PF_MSIX_FUNC_MASK);
547         DbgMessage(pdev, FATAL, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n",tmp);
548 
549         tmp = REG_RD(pdev,PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
550         DbgMessage(pdev, FATAL, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n",tmp);
551 
552         tmp = REG_RD(pdev,PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
553         DbgMessage(pdev, FATAL, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n",tmp);
554 
555         REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
556         mm_wait(pdev,999999);
557 
558         m_en = REG_RD(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
559         DbgMessage(pdev, FATAL, "M:0x%x\n",m_en);
560     }
561 
562     if (IS_VFDEV(pdev))
563     {
564 #ifdef VF_INVOLVED
565         //lm_vf_enable_vf(pdev);
566         lm_status = lm_vf_recycle_resc_in_pf(pdev);
567         lm_set_con_state(pdev, LM_SW_LEADING_RSS_CID(pdev), LM_CON_STATE_CLOSE);
568 #endif
569     }
570 
571     lm_fl_reset_clear_inprogress(pdev);
572 
573     return lm_status;
574 }
575 
576 #define LM_GRC_TIMEOUT_MAX_IGNORE ARRSIZE(g_lm_chip_global[0].grc_timeout_val)
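// grc_timeout_val[] serves as a ring buffer of the most recently ignored GRC
// timeout values - see the modulo indexing in lm_inc_cnt_grc_timeout_ignore().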
577 
578 
579 
580 u32_t lm_inc_cnt_grc_timeout_ignore(struct _lm_device_t *pdev, u32_t val)
581 {
582     const        u8_t bus_num  = INST_ID_TO_BUS_NUM(PFDEV(pdev)->vars.inst_id) ;
583     static const u8_t arr_size = ARRSIZE(g_lm_chip_global[0].grc_timeout_val);
584     const        u8_t idx      = g_lm_chip_global[bus_num].cnt_grc_timeout_ignored % arr_size ;
585 
586     g_lm_chip_global[bus_num].grc_timeout_val[idx] = val;
587 
588     return ++g_lm_chip_global[bus_num].cnt_grc_timeout_ignored;
589 }
590 
591 static int ecore_gunzip(struct _lm_device_t *pdev, const u8 *zbuf, int len)
592 {
593     /* TODO : Implement... */
594     UNREFERENCED_PARAMETER_(pdev);
595     UNREFERENCED_PARAMETER_(zbuf);
596     UNREFERENCED_PARAMETER_(len);
597     DbgBreakMsg("ECORE_GUNZIP NOT IMPLEMENTED\n");
598     return FALSE;
599 }
600 
601 static void ecore_reg_wr_ind(struct _lm_device_t *pdev, u32 addr, u32 val)
602 {
603     lm_reg_wr_ind(pdev, addr, val);
604 }
605 
606 static void ecore_write_dmae_phys_len(struct _lm_device_t *pdev,
607                       lm_address_t phys_addr, u32 addr,
608                       u32 len)
609 {
610     lm_dmae_reg_wr_phys(pdev, lm_dmae_get(pdev, LM_DMAE_DEFAULT)->context,
611                 phys_addr, addr, (u16_t)len);
612 }
613 
614 //The bug is that the RBC doesn't get out of reset after we reset the RBC.
615 static void rbc_reset_workaround(lm_device_t *pdev)
616 {
617     u32_t val = 0;
618 #if defined(_VBD_CMD_) //This function is not needed in vbd_cmd env.
619     return;
620 #endif
621 
622     if (CHIP_IS_E1x(pdev))
623     {
624         // a. Wait 60 microseconds, just to verify that the ~64 cycles have passed.
625         mm_wait(pdev, (DEFAULT_WAIT_INTERVAL_MICSEC *2));
626 
627         val = REG_RD(pdev,MISC_REG_RESET_REG_1) ;
628         if(0 == (val & MISC_REGISTERS_RESET_REG_1_RST_RBCP))
629         {
630             // If bit 28 is '0', the RBCP block is in reset ('1' means out of reset).
631             // Take RBC out of reset.
632             REG_WR(pdev,(GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET),MISC_REGISTERS_RESET_REG_1_RST_RBCP);
633 
634             mm_wait(pdev, (DEFAULT_WAIT_INTERVAL_MICSEC *2));
635 
636             val = REG_RD(pdev,MISC_REG_RESET_REG_1) ;
637 
638             DbgMessage(pdev, WARN, "rbc_reset_workaround: MISC_REG_RESET_REG_1 after set= 0x%x\n",val);
639             DbgBreakIf(0 == (val & MISC_REGISTERS_RESET_REG_1_RST_RBCP));
640         }
641     }
642 }
643 
644 
645 void lm_set_nig_reset_called(struct _lm_device_t *pdev)
646 {
647     const u8_t bus_num = INST_ID_TO_BUS_NUM(PFDEV(pdev)->vars.inst_id) ;
648     const u8_t flags   = LM_CHIP_GLOBAL_FLAG_NIG_RESET_CALLED;
649 
650     SET_FLAGS( g_lm_chip_global[bus_num].flags, flags) ;
651 }
652 
653 void lm_clear_nig_reset_called(struct _lm_device_t *pdev)
654 {
655     const u8_t bus_num = INST_ID_TO_BUS_NUM(PFDEV(pdev)->vars.inst_id) ;
656     const u8_t flags   = LM_CHIP_GLOBAL_FLAG_NIG_RESET_CALLED;
657 
658     RESET_FLAGS( g_lm_chip_global[bus_num].flags, flags) ;
659 }
660 
661 u8_t lm_is_nig_reset_called(struct _lm_device_t *pdev)
662 {
663     const u8_t bus_num = INST_ID_TO_BUS_NUM(PFDEV(pdev)->vars.inst_id) ;
664     const u8_t flags   = LM_CHIP_GLOBAL_FLAG_NIG_RESET_CALLED;
665 
666     return ( 0 != GET_FLAGS( g_lm_chip_global[bus_num].flags, flags ) );
667 }
668 
669 /* This function resets a path (e2) or a chip (e1/e1.5),
670  * including or excluding the nig (b_with_nig)
671  */
672 void lm_reset_path( IN struct _lm_device_t *pdev,
673                     IN const  u8_t          b_with_nig )
674 {
675     const u32_t reg_1_clear     = b_with_nig ? 0xd3ffffff : 0xd3ffff7f ;
676     u32_t       reg_2_clear     = 0x1400;
677     u32_t       idx             = 0;
678     u32_t       val             = 0;
679     u32_t       offset          = 0;
680     u32_t       wait_cnt        = 5;
681 
682     // set of registers to be saved/restored before/after nig reset
683     static const u32_t reg_arr_e3[]    = { NIG_REG_P0_MAC_IN_EN,
684                                            NIG_REG_P1_MAC_IN_EN };
685 
686     static const u32_t reg_arr_e1_e2[] = { NIG_REG_EMAC0_IN_EN,
687                                            NIG_REG_EMAC1_IN_EN,
688                                            NIG_REG_BMAC0_IN_EN,
689                                            NIG_REG_BMAC1_IN_EN };
690 
691     static const u32_t reg_arr_ftq[]   = { NIG_REG_EGRESS_MNG0_FIFO_EMPTY,
692                                            NIG_REG_EGRESS_MNG1_FIFO_EMPTY,
693                                            NIG_REG_INGRESS_RMP0_DSCR_EMPTY,
694                                            NIG_REG_INGRESS_RMP1_DSCR_EMPTY};
695 
696     static const u32_t ftq_mask        = ( 1 << ARRSIZE(reg_arr_ftq) ) - 1 ; // we need all regs to be 1...
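    // With the 4 FTQ registers above, ftq_mask == 0xF: every FIFO/descriptor
    // register must read back 1 (empty) before the NIG is reset.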
697 
698     // save values of registers
699     u32_t        restore_arr[max(ARRSIZE(reg_arr_e1_e2),ARRSIZE(reg_arr_e3))]  = {0};
700 
701     const u8_t   idx_max     = CHIP_IS_E3(pdev) ? ARRSIZE(reg_arr_e3) : ARRSIZE(reg_arr_e1_e2) ;
702     const u32_t* reg_arr_ptr = CHIP_IS_E3(pdev) ? reg_arr_e3 : reg_arr_e1_e2 ;
703 
704     DbgMessage(pdev, WARN, "lm_reset_path:%sreset [begin]\n", b_with_nig ? " (with NIG) " : " ");
705 
706     if( b_with_nig )
707     {
708         // Ugly patch - we need to prevent nig reset - to be fixed SOON (TODO T7.2?)
709         // We don't care port0/port1 the registers will always exist
710 
711         // save values + write zeros
712         for( idx = 0; idx < idx_max; idx++ )
713         {
714             restore_arr[idx] = REG_RD( pdev, reg_arr_ptr[idx] );
715             REG_WR( pdev, reg_arr_ptr[idx], 0 );
716         }
717 
718         // wait 200 msec before we reset the nig so all packets will pass through
719         // 200000 (us) and not 50*4000 since we want this wait to be "only" 200 msec
720         // when we used the 50*4000 method, the actual sleep time was much higher (more than 16 seconds...!)
721         // this caused a hw lock timeout (16sec) in the lm_reset_device_if_undi_active() function.
722         do
723         {
724             val = 0;
725 
726             // first 200000us we always wait...
727             mm_wait( pdev, 200000 );
728 
729             // check values of FTQ and verify they are all one
730             // if not, wait another 200000us, up to 5 times... (1 second total)
731             for( idx = 0; idx < ARRSIZE(reg_arr_ftq); idx++ )
732             {
733                 offset = reg_arr_ftq[idx];
734                 val |= ( REG_RD( pdev, offset ) ) << idx ;
735             }
736         } while( wait_cnt-- && ( ftq_mask != val ) );
737 
738         // Debug break only if MCP is detected (NVM is not empty)
739         if (lm_is_mcp_detected(pdev))
740         {
741             DbgBreakIf( ftq_mask != val );
742         }
743     }
744 
745     /* reset device */
746     REG_WR(pdev, GRCBASE_MISC+ MISC_REGISTERS_RESET_REG_1_CLEAR, reg_1_clear );
747 
748     if (CHIP_IS_E3(pdev))
749     {
750         // New blocks that need to be taken out of reset
751         // Mstat0 - bit 24 of RESET_REG_2
752         // Mstat1 - bit 25 of RESET_REG_2
753         reg_2_clear |= (MISC_REGISTERS_RESET_REG_2_MSTAT1 | MISC_REGISTERS_RESET_REG_2_MSTAT0);
754     }
755 
756     REG_WR(pdev, GRCBASE_MISC+ MISC_REGISTERS_RESET_REG_2_CLEAR, reg_2_clear);
757 
758     if( b_with_nig  )
759     {
760         lm_set_nig_reset_called(pdev);
761         /* take the NIG out of reset */
762         REG_WR(pdev, GRCBASE_MISC+ MISC_REGISTERS_RESET_REG_1_SET, MISC_REGISTERS_RESET_REG_1_RST_NIG);
763 
764         // restore....
765         for( idx = 0; idx < idx_max; idx++ )
766         {
767             REG_WR( pdev, reg_arr_ptr[idx], restore_arr[idx] );
768         }
769     }
770 
771     pdev->vars.b_is_dmae_ready = FALSE;
772 
773     DbgMessage(pdev, WARN, "lm_reset_path:%sreset [end]\n", b_with_nig ? " (with NIG) ": " ");
774 
775     // rbc_reset_workaround() should be called AFTER nig is out of reset
776     // otherwise the probability that nig will be accessed by bootcode while
777     // it is in reset is very high (this will cause GRC_TIMEOUT)
778 
779     // TODO - we still need to deal with CQ45947 (calling rbc_reset_workaround before nig is out of reset will
780     //        cause the grc_timeout to happen)
781     DbgMessage(pdev, WARN, "lm_reset_path:%sreset rbcp wait [begin]\n", b_with_nig ? " (with NIG) ": " ");
782     rbc_reset_workaround(pdev);
783     DbgMessage(pdev, WARN, "lm_reset_path:%sreset rbcp wait [end]\n", b_with_nig ? " (with NIG) ": " ");
784 }
785 
786 /*
787  * quote from bnx2x:
788  *
789  * "previous driver DMAE transaction may have occurred when pre-boot stage ended
790  * and boot began, or when kdump kernel was loaded. Either case would invalidate
791  * the addresses of the transaction, resulting in was-error bit set in the pci
792  * causing all hw-to-host pcie transactions to timeout. If this happened we want
793  * to clear the interrupt which detected this from the pglueb and the was done
794  * bit"
795  */
796 
797 static void lm_reset_prev_interrupted_dmae(struct _lm_device_t *pdev)
798 {
799     u32_t val = 0;
800 
801     if ( CHIP_IS_E1x(pdev) )
802     {
803         // the register below doesn't exists in E1/E1.5 and will cause RBCN attention in
804         // case accessed, so we do nothing in case chip is earlier than E2 (CQ63388, CQ63302).
805         return;
806     }
807 
808     val = REG_RD(pdev, PGLUE_B_REG_PGLUE_B_INT_STS);
809 
810     if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
811     {
812         DbgMessage(pdev, WARNi, "lm_reset_prev_interrupted_dmae: was error bit was found to be set in pglueb upon startup. Clearing");
813         REG_WR(pdev, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << FUNC_ID(pdev));
814     }
815 }
816 
817 // return TRUE if function is hidden
818 static u8_t lm_reset_device_if_undi_func_hide_helper( struct _lm_device_t       *pdev,
819                                                       const  u32_t               chip_id,
820                                                       const  u8_t                path_id,
821                                                       const  u8_t                port,
822                                                       const  u8_t                vnic,
823                                                       const  u8_t                port_factor,
824                                                       const  lm_chip_port_mode_t port_mode )
825 {
826     u8_t  b_hidden       = FALSE;
827     u8_t  func_config_id = 0;
828     u32_t offset         = 0;
829     u32_t mf_config      = 0;
830 
831     // Macros taken from MFW .h files to have a better and correct use of the function/port matrix.
832     #define E2_2P_PF_NUM(path, port, pf)            (((pf) << 1) | (path))                  /* pf: 0..3     ==> pf_num: 0..7 */
833     #define E2_4P_PF_NUM(path, port, pf)            (((pf) << 2) | ((port) << 1) | (path))  /* pf: 0..1     ==> pf_num: 0..7 */
834     #define E2_PF_NUM(path, port, pf)               ((port_mode == LM_CHIP_PORT_MODE_4) ? E2_4P_PF_NUM(path, port, pf) : E2_2P_PF_NUM(path, port, pf))
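    // Worked example: in 4-port mode, path 1 / port 1 / pf 1 gives
    // E2_4P_PF_NUM = (1 << 2) | (1 << 1) | 1 = 7; in 2-port mode, path 1 / pf 3
    // gives E2_2P_PF_NUM = (3 << 1) | 1 = 7.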
835 
836      if( CHIP_IS_E1_PARAM(chip_id) )
837      {
838          DbgBreakMsg("We should not reach this line\n");
839          return b_hidden;
840      }
841 
842      if( CHIP_IS_E1x_PARAM(chip_id) )
843      {
844          func_config_id = ( port_factor * vnic ) + port;
845      }
846      else
847      {
848          func_config_id = E2_PF_NUM( path_id , port, vnic );
849      }
850 
851      offset = OFFSETOF(mf_cfg_t, func_mf_config[func_config_id].config);
852      LM_MFCFG_READ(pdev, offset, &mf_config);
853 
854      if( mf_config & FUNC_MF_CFG_FUNC_HIDE )
855      {
856          b_hidden = TRUE;
857      }
858 
859      return b_hidden;
860 }
861 
862 void lm_reset_device_if_undi_active(struct _lm_device_t *pdev)
863 {
864     u32_t                         val                                 = 0;
865     u8_t                          vnic                                = 0;
866     u8_t                          port                                = 0;
867     u8_t                          opcode_idx                          = 0; // 0 = load, 1 = unload
868     lm_loader_response            resp                                = 0;
869     u32_t                         swap_val                            = 0;
870     u32_t                         swap_en                             = 0;
871     u32_t                         rst_dorq_val                        = 0;
872     u8_t                          port_max                            = 0;
873     u8_t                          b_hidden                            = FALSE;
874     u8_t                          b_first_non_hidden_iter             = TRUE;
875     u8_t                          last_valid_vnic                     = 0;
876     static const u32_t            param_loader                        = DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET;
877     static const u32_t            UNDI_ACTIVE_INDICATION_VAL          = 7;
878     static const lm_loader_opcode opcode_arr[]                        = {LM_LOADER_OPCODE_LOAD, LM_LOADER_OPCODE_UNLOAD_WOL_DIS} ;
879     const lm_chip_port_mode_t     port_mode                           = CHIP_PORT_MODE(pdev);
880     u8_t                          port_factor                         = 0;
881     u8_t                          vnics_per_port                      = 0;
882     const u8_t                    func_mb_id                          = FUNC_MAILBOX_ID(pdev); // Store original pdev func mb id
883     const u8_t                    path_id                             = PATH_ID(pdev);
884     static const u32_t            misc_registers_reset_reg_1_rst_dorq = MISC_REGISTERS_RESET_REG_1_RST_DORQ;
885 
886     /*
887      * Clear possible previously interrupted DMAE which may have left PCI inaccessible.
888      */
889 
890     lm_reset_prev_interrupted_dmae(pdev);
891 
892     /*
893     * Check if device is active and was previously initialized by
894     * UNDI driver.  UNDI driver initializes CID offset for normal bell
895     * to 0x7.
896     */
897 
898     if( LM_STATUS_SUCCESS == lm_hw_lock(pdev, HW_LOCK_RESOURCE_RESET, TRUE) )
899     {
900         rst_dorq_val = REG_RD(pdev,MISC_REG_RESET_REG_1);
901 
902         // dorq is out of reset
903         if( rst_dorq_val & misc_registers_reset_reg_1_rst_dorq )
904         {
905             val = REG_RD(pdev,DORQ_REG_NORM_CID_OFST);
906         }
907 
908         DbgMessage(pdev, WARN, "lm_reset_device_if_undi_active: DORQ_REG_NORM_CID_OFST val = 0x%x\n",val);
909 
910         if( UNDI_ACTIVE_INDICATION_VAL == val )
911         {
912             REG_WR( pdev, DORQ_REG_NORM_CID_OFST ,0 );
913         }
914         else
915         {
916             // We call here with FALSE since there might be a race (only here)
917             // that lm_hw_clear_all_locks() will clear the lock although it is acquired
918             // and then we get an ASSERT in checked builds.
919             // So this FALSE here is only to prevent an ASSERT on checked builds when ER is enabled (CQ60944).
920             lm_hw_unlock_ex(pdev, HW_LOCK_RESOURCE_RESET, FALSE );
921 
922             // undi is not active, nothing to do.
923             return;
924         }
925     }
926     else
927     {
928         // lock is already taken by another func - we have nothing to do, though it is NOT acceptable that we get here...
929         return;
930     }
931 
932     DbgMessage(pdev, WARN, "lm_reset_device_if_undi_active: UNDI is active! need to reset device\n");
933 
934     if (GET_FLAGS( pdev->params.test_mode, TEST_MODE_NO_MCP))
935     {
936         /* TBD: E1H - when MCP is not present, determine if possible to get here */
937         DbgBreakMsg("lm_reset_device_if_undi_active: reading from shmem when MCP is not present\n");
938     }
939 
940     switch( port_mode )
941     {
942     case LM_CHIP_PORT_MODE_NONE: // E1.0/E1.5: we enter this if() one time  - for one of the functions, and mailbox func numbers are 0 and 1
943     case LM_CHIP_PORT_MODE_4:    // E2
944         port_max       = PORT_MAX;
945         port_factor    = (LM_CHIP_PORT_MODE_4 == port_mode) ? 4 : 2;
946         vnics_per_port = (LM_CHIP_PORT_MODE_4 == port_mode )? 2 : pdev->params.vnics_per_port; // for 4-port it is always 2. for others its upon param
947         break;
948 
949     case LM_CHIP_PORT_MODE_2:
950         port_max       = 1; // E2: we enter this if() maximum twice - once for each path, and mailbox func number is 0 for both times
951         port_factor    = 2;
952         vnics_per_port = pdev->params.vnics_per_port; // Always symmetric when not in 4-port mode.
953         break;
954 
955     default:
956         DbgBreakMsg("we should not reach this line!");
957         break;
958     }
959 
960     ASSERT_STATIC( 2 == ARRSIZE(opcode_arr) );
961     DbgBreakIf( LM_LOADER_OPCODE_LOAD != opcode_arr[0] );
962     DbgBreakIf( LM_LOADER_OPCODE_LOAD == opcode_arr[1] );
963 
964     // We do here two opcode iterations, each one of them for all ports...
965     // 1. first iteration(s) will "tell" the mcp that all ports are loaded (MCP accepts LOAD requests for ports that are already loaded.)
966     //    This way we can assure that the driver is the "owner" of the hardware (includes NIG)
967     //    So we can reset the nig.
968     //
969     // 2. second iteration(s) will "tell" the mcp that all ports are unloaded so we can "come clean" for regular driver load flow
970     for( opcode_idx = 0; opcode_idx < ARRSIZE(opcode_arr); opcode_idx++ )
971     {
972         for( port = 0; port < port_max; port++ )
973         {
974             b_first_non_hidden_iter = TRUE;
975 
976             // Check what is the last valid vnic (non hidden one)
977             for( vnic = 0; vnic < vnics_per_port; vnic++ )
978             {
979                 if( CHIP_IS_E1(pdev) )
980                 {
981                     // we don't have func_mf_config in E1. To prevent invalid access to shmem - break.
982                     last_valid_vnic = 0;
983                     break;
984                 }
985 
986                 b_hidden = lm_reset_device_if_undi_func_hide_helper( pdev,
987                                                                      CHIP_NUM(pdev),
988                                                                      path_id,
989                                                                      port,
990                                                                      vnic,
991                                                                      port_factor,
992                                                                      port_mode );
993 
994                 if( !b_hidden )
995                 {
996                     last_valid_vnic = vnic; // we save "last_valid_vnic" for later use in reset loop
997                                             // this is the reason we make this loop twice (here and below)
998                 }
999             }
1000 
1001             for( vnic = 0; vnic <= last_valid_vnic; vnic++ )
1002             {
1003                 // NOTE: it seems that these two lines are redundant after we have the new FUNC_MAILBOX_ID macro
1004                 //       keep it for now
1005                 pdev->params.pfunc_mb_id = FUNC_MAILBOX_ID_PARAM( port, vnic, CHIP_NUM(pdev), port_mode );
1006 
1007                 if( !CHIP_IS_E1(pdev) )
1008                 {
1009                     b_hidden = lm_reset_device_if_undi_func_hide_helper( pdev,
1010                                                                          CHIP_NUM(pdev),
1011                                                                          path_id,
1012                                                                          port,
1013                                                                          vnic,
1014                                                                          port_factor,
1015                                                                          port_mode );
1016 
1017                     if( b_hidden )
1018                     {
1019                         continue;
1020                     }
1021                 }
1022 
1023                 // get fw_wr_seq for the func
1024                 lm_mcp_cmd_init(pdev);
1025 
1026                 resp = lm_loader_lock(pdev, opcode_arr[opcode_idx] );
1027 
1028                 if( LM_LOADER_RESPONSE_UNLOAD_COMMON == resp )
1029                 {
1030                     DbgBreakIf( LM_LOADER_OPCODE_LOAD == opcode_arr[opcode_idx] );
1031                 }
1032 
1033                 if ( LM_LOADER_OPCODE_LOAD == opcode_arr[opcode_idx] )
1034                 {
1035                     // clean HC config (only if exists  E1.0/E1.5)
1036                     // INTR_BLK_TYPE is not valid since we don't have this information at this phase yet.
1037                     if ( CHIP_IS_E1x(pdev) )
1038                     {
1039                         if( b_first_non_hidden_iter ) // This might be redundant, but since this code ran once per port before the BCV change, we keep it as is
1040                         {
1041                             REG_WR(pdev,HC_REG_CONFIG_0+(4*port),0x1000);
1042                         }
1043                     }
1044 
1045                     if( b_first_non_hidden_iter ) // per port no need to run more than once
1046                     {
1047                         // mask AEU signal
1048                         REG_WR(pdev,MISC_REG_AEU_MASK_ATTN_FUNC_0+(4*port),0);
1049                         b_first_non_hidden_iter = FALSE;
1050                     }
1051 
1052                     if( last_valid_vnic == vnic )
1053                     {
1054                          // TODO: Reset should take into account mstat - dealt with better in the main branch where the chip reset issue is tidier,
1055                          // leaving this for integrate...
1056 
1057                         // save nig swap register before NIG reset
1058                         swap_val = REG_RD(pdev,NIG_REG_PORT_SWAP);
1059                         swap_en  = REG_RD(pdev,NIG_REG_STRAP_OVERRIDE);
1060 
1061                         // reset the chip with nig
1062                         lm_reset_path( pdev, TRUE );
1063 
1064                         // restore nig swap register
1065                         REG_WR(pdev,NIG_REG_PORT_SWAP,swap_val);
1066                         REG_WR(pdev,NIG_REG_STRAP_OVERRIDE,swap_en);
1067                     }// nig reset
1068                 }
1069                 lm_loader_unlock(pdev, opcode_arr[opcode_idx], &param_loader ) ;
1070             } // vnic loop
1071         } // port loop
1072     } // opcode loop
1073 
1074     // We expect that the last response will be LM_LOADER_RESPONSE_UNLOAD_COMMON
1075     if( LM_LOADER_RESPONSE_UNLOAD_COMMON != resp )
1076     {
1077         DbgBreakIf( LM_LOADER_RESPONSE_UNLOAD_COMMON != resp );
1078     }
1079 
1080     // restore original function number
1081     pdev->params.pfunc_mb_id = func_mb_id;
1082 
1083     lm_hw_unlock(pdev, HW_LOCK_RESOURCE_RESET);
1084 
1085     // after the unlock the chip/path is in reset for sure, so the second port won't see 7 in DORQ_REG_NORM_CID_OFST
1086 
1087 } // lm_reset_device_if_undi_active
1088 
1089 /**lm_disable_function_in_nig
1090  * Configure the NIG LLH so that packets targeting the given PF
1091  * are marked as "classification failed".
1092  * This function must be called before sending the FUNCTION_STOP
1093  * ramrod.
1094  *
1095  * @param pdev the PF to disable.
1096  *
1097  * @return lm_status_t LM_STATUS_SUCCESS on success, some other
1098  *         failure value on failure.
1099  */
1100 lm_status_t lm_disable_function_in_nig(struct _lm_device_t *pdev)
1101 {
1102     lm_status_t lm_status   = LM_STATUS_SUCCESS;
1103     u32_t nig_entry_idx     = 0;
1104     const u32_t MAX_OFFSET_IN_NIG_MEM1      = 8;
1105     const u32_t MAX_OFFSET_IN_NIG_MEM2      = MAX_MAC_OFFSET_IN_NIG - MAX_OFFSET_IN_NIG_MEM1;
1106     const u32_t nig_mem_enable_base_offset  = (PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : NIG_REG_LLH0_FUNC_MEM_ENABLE);
1107     const u32_t nig_mem2_enable_base_offset = (PORT_ID(pdev) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE : NIG_REG_P0_LLH_FUNC_MEM2_ENABLE);
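    /* Note: the LLH CAM is split across two memories - the first 8 entries
       ("mem1") and the remaining MAX_MAC_OFFSET_IN_NIG - 8 entries ("mem2") -
       each entry having its own enable register, cleared in the loops below. */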
1108 
1109     if (!IS_MULTI_VNIC(pdev))
1110     {
1111         DbgBreakIf(!IS_MULTI_VNIC(pdev));
1112         return LM_STATUS_SUCCESS;
1113     }
1114 
1115     if (IS_MF_SD_MODE(pdev))
1116     {
1117         /* for SD mode, clear NIG_REG_LLH1_FUNC_EN */
1118         REG_WR(pdev, (PORT_ID(pdev) ? NIG_REG_LLH1_FUNC_EN : NIG_REG_LLH0_FUNC_EN), 0);
1119         lm_set_func_en(pdev, FALSE); /* if function should be enabled it will be set when wol is configured */
1120     }
1121     else if (IS_MF_SI_MODE(pdev) || IS_MF_AFEX_MODE(pdev))
1122     {
1123         /* for NPAR/NPAR-SD mode, clear every NIG LLH entry by clearing NIG_REG_LLH1_FUNC_MEM_ENABLE for every entry in both
1124            NIG mem1 and mem2. */
1125         for (nig_entry_idx = 0; nig_entry_idx < MAX_OFFSET_IN_NIG_MEM1; ++nig_entry_idx)
1126         {
1127             REG_WR(pdev, nig_mem_enable_base_offset + nig_entry_idx*sizeof(u32_t), 0);
1128         }
1129         for (nig_entry_idx = 0; nig_entry_idx < MAX_OFFSET_IN_NIG_MEM2; ++nig_entry_idx)
1130         {
1131             REG_WR(pdev, nig_mem2_enable_base_offset + nig_entry_idx*sizeof(u32_t), 0);
1132         }
1133     }
1134     else
1135     {
1136         DbgBreakMsg("Invalid MF mode.");
1137     }
1138 
1139     return lm_status;
1140 }
1141 
1142 /**
1143  * This function sends the function-stop ramrod and waits
1144  * synchronously for its completion
1145  *
1146  * @param pdev
1147  *
1148  * @return lm_status_t SUCCESS / TIMEOUT on waiting for
1149  *         completion
1150  */
1151 lm_status_t lm_function_stop(struct _lm_device_t *pdev)
1152 {
1153 
1154     lm_status_t lm_status = LM_STATUS_SUCCESS;
1155 
1156 
1157     DbgMessage(pdev, INFORMeq|INFORMl2sp, "#lm_function_stop\n");
1158 
1159 
1160     pdev->eq_info.function_state = FUNCTION_STOP_POSTED;
1161 
1162     lm_status = lm_sq_post(pdev,
1163                            0,
1164                            RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
1165                            CMD_PRIORITY_NORMAL,
1166                            NONE_CONNECTION_TYPE,
1167                            0 );
1168 
1169     if (lm_status != LM_STATUS_SUCCESS)
1170     {
1171         return lm_status;
1172     }
1173 
1174     lm_status = lm_wait_state_change(pdev, &pdev->eq_info.function_state, FUNCTION_STOP_COMPLETED);
1175 
1176     return lm_status;
1177 } /* lm_function_stop */
1178 
1179 lm_status_t lm_chip_stop(struct _lm_device_t *pdev)
1180 {
1181     lm_status_t lm_status = LM_STATUS_SUCCESS;
1182     const u32_t fwd_cid   = FWD_CID(pdev);
1183 
1184 #ifdef VF_INVOLVED
1185     if (IS_VFDEV(pdev))
1186     {
1187         return lm_status;
1188     }
1189 #endif
1190     if (lm_fl_reset_is_inprogress(pdev))
1191     {
1192         lm_set_con_state(pdev, fwd_cid, LM_CON_STATE_CLOSE);
1193         DbgMessage(pdev, WARN, "lm_chip_stop: Under FLR: \"close\" leading and FWD conns.\n");
1194         return LM_STATUS_SUCCESS;
1195     }
1196     if ((lm_status = lm_close_forward_con(pdev)) != LM_STATUS_SUCCESS)
1197     {
1198         DbgMessage(pdev, FATAL, "lm_chip_stop: ERROR closing FWD connection!!!\n");
1199     }
1200 
1201     if (pdev->params.multi_vnics_mode)
1202     {
1203         lm_disable_function_in_nig(pdev);
1204     }
1205 
1206     lm_status = lm_function_stop(pdev);
1207 
1208     if ((lm_status != LM_STATUS_SUCCESS) && (lm_status != LM_STATUS_ABORTED))
1209     {
1210         DbgMessage(pdev, FATAL, "lm_chip_stop: ERROR closing function!!!\n");
1211         DbgBreak();
1212     }
1213 
1214     /* Function stop has been sent, we should now block slowpath commands  */
1215     lm_sq_change_state(pdev, SQ_STATE_BLOCKED);
1216 
1217     return lm_status;
1218 }
1219 
1220 /* This function clears the pf enable bit in the pglue-b and cfc, to make sure that if any requests
1221  * are made on this function they will be dropped before they can cause any fatal errors. */
1222 static void clear_pf_enable(lm_device_t *pdev)
1223 {
1224     REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
1225     REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
1226     //REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
1227 }
1228 
1229 static void uninit_pxp2_blk(lm_device_t *pdev)
1230 {
1231     u32_t rq_onchip_at_reg, on_chip_addr2_val;
1232     u32_t k, temp;
1233 
1234     if(ERR_IF(!pdev))
1235     {
1236         return;
1237     }
1238 
1239 
1240     /* clean ILT table
1241      * before doing that we must ensure that all the ILT clients (CDU/TM/QM/SRC) of the
1242      * disabled function are not going to access the table anymore:
1243      * - TM: already disabled in "reset function part"
1244      * - CDU/QM: all L2/L4/L5 connections are already closed
1245      * - SRC: In order to make sure SRC request is not initiated:
1246      *    - in MF mode, we clean the ILT table in the per func phase, after LLH was already disabled
1247      *    - in SF mode, we clean the ILT table in the per port phase, after port link was already reset */
1248 
1249     temp              = FUNC_ID(pdev) * ILT_NUM_PAGE_ENTRIES_PER_FUNC;
1250     rq_onchip_at_reg  = CHIP_IS_E1(pdev) ? PXP2_REG_RQ_ONCHIP_AT : PXP2_REG_RQ_ONCHIP_AT_B0;
1251     on_chip_addr2_val = CHIP_IS_E1x(pdev)? 0 : ONCHIP_ADDR0_VALID();
1252 
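    /* Each ILT entry is a wide (64-bit) register written as two 32-bit halves:
     * the low half (the page address) is cleared, and the high half is written
     * with just the valid bit on E2 and above, or 0 on E1x. */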
1253     for (k=0;k<ILT_NUM_PAGE_ENTRIES_PER_FUNC;temp++,k++)
1254     {
1255         REG_WR_IND(pdev,rq_onchip_at_reg+temp*8,0);
1256         REG_WR_IND(pdev,rq_onchip_at_reg+temp*8+4,on_chip_addr2_val);
1257     }
1258 
1259     PXP2_SET_FIRST_LAST_ILT(pdev, CDU, 0, 0);
1260     PXP2_SET_FIRST_LAST_ILT(pdev, QM,  0, 0);
1261     PXP2_SET_FIRST_LAST_ILT(pdev, SRC, 0, 0);
1262 
1263     /* Timers bug workaround for E2 phase 3: if this is vnic-3, we need to set the entire ilt range for the timers. */
1264     if (!CHIP_IS_E1x(pdev) && VNIC_ID(pdev) == 3)
1265     {
1266         PXP2_SET_FIRST_LAST_ILT(pdev, TM,  0, ILT_NUM_PAGE_ENTRIES - 1);
1267     }
1268     else
1269     {
1270         PXP2_SET_FIRST_LAST_ILT(pdev, TM,  0, 0);
1271     }
1272 }
1273 
1274 /**
1275  * Function takes care of resetting everything related to the
1276  * function stage
1277  *
1278  * @param pdev
1279  * @param cleanup - this indicates whether we are in the last
1280  *                "Reset" function to be called, if so we need
1281  *                to do some cleanups here, otherwise they'll be
1282  *                done in later stages
1283  *
1284  * @return lm_status_t
1285  */
1286 lm_status_t lm_reset_function_part(struct _lm_device_t *pdev, u8_t cleanup)
1287 {
1288     /* It is assumed that all protocols are down and all unload ramrods have already completed */
1289     u32_t cnt         = 0;
1290     u32_t val         = 0;
1291     const u8_t  port  = PORT_ID(pdev);
1292     const u8_t  func  = FUNC_ID(pdev);
1293     u8_t  sb_id       = 0;
1294 
1295 
1296     if (IS_MULTI_VNIC(pdev) && IS_PMF(pdev))
1297     {
1298         DbgMessage(pdev, WARN,
1299                         "lm_reset_function_part: Func %d is no longer PMF \n", FUNC_ID(pdev));
1300         // disconnect from NIG attention
1301         if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC)
1302         {
1303             REG_WR(pdev,  (PORT_ID(pdev) ? HC_REG_LEADING_EDGE_1 : HC_REG_LEADING_EDGE_0), 0);
1304             REG_WR(pdev,  (PORT_ID(pdev) ? HC_REG_TRAILING_EDGE_1 : HC_REG_TRAILING_EDGE_0), 0);
1305         }
1306         else
1307         {
1308             REG_WR(pdev,  IGU_REG_TRAILING_EDGE_LATCH, 0);
1309             REG_WR(pdev,  IGU_REG_LEADING_EDGE_LATCH, 0);
1310         }
1311         MM_ACQUIRE_PHY_LOCK(pdev);
1312         lm_stats_on_pmf_update(pdev,FALSE);
1313         MM_RELEASE_PHY_LOCK(pdev);
1314     }
1315 
1316     /*  Configure IGU */
1317     if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC)
1318     {
1319         REG_WR(pdev,HC_REG_CONFIG_0+(4*port),0x1000);
1320     }
1321 
1322     /*  Timer stop scan.*/
1323     REG_WR(pdev,TM_REG_EN_LINEAR0_TIMER + (4*port),0);
1324     for(cnt = 0; cnt < LM_TIMERS_SCAN_POLL; cnt++)
1325     {
1326         mm_wait(pdev, LM_TIMERS_SCAN_TIME); /* 1m */
1327 
1328         val=REG_RD(pdev,TM_REG_LIN0_SCAN_ON+(4*port));
1329         if (!val)
1330         {
1331             break;
1332         }
1333 
1334         // in case reset in progress
1335         // we won't get completion so no need to wait
1336         if(CHIP_IS_E1x(pdev) && lm_reset_is_inprogress(pdev) )
1337         {
1338             break;
1339         }
1340     }
1341     /*timeout*/
1342     DbgMessage(pdev, INFORMi, "timer status on %d \n",val);
1343 
1344     /* shutdown bug - in case of shutdown it's quite possible that the timer block hangs and the scan never ends */
1345     if (!lm_reset_is_inprogress(pdev))
1346     {
1347         DbgBreakIf(cnt == LM_TIMERS_SCAN_POLL);
1348     }
1349 
1350     // reset the fw statistics (so the next time a client is up the data will be correct)
1351     // if we don't call it here - the statistics will show 4GB + the real value
1352     lm_stats_fw_reset(pdev) ;
1353 
1354     /* Timers bug workaround: before cleaning the ilt we need to disable the pf-enable bit in the PGLUE-B + CFC */
1355     if (cleanup)
1356     { /* in multi_vnics_mode, the function that gets the "port/common" response does this in lm_reset_port_part */
1357         if (!CHIP_IS_E1x(pdev))
1358         {
1359             clear_pf_enable(pdev);
1360             pdev->vars.b_is_dmae_ready = FALSE; /* Can't access dmae since bus-master is disabled */
1361         }
1362         uninit_pxp2_blk(pdev);
1363     }
1364 
1365     /* Disable the function and status blocks in the STORMs unless under FLR (we don't want to interfere
1366      * with the FW flow) */
1367     if (!lm_reset_is_inprogress(pdev))
1368     {
1369         LM_INTMEM_WRITE8(pdev, XSTORM_FUNC_EN_OFFSET(FUNC_ID(pdev)), 0, BAR_XSTRORM_INTMEM);
1370         LM_INTMEM_WRITE8(pdev, CSTORM_FUNC_EN_OFFSET(FUNC_ID(pdev)), 0, BAR_CSTRORM_INTMEM);
1371         LM_INTMEM_WRITE8(pdev, TSTORM_FUNC_EN_OFFSET(FUNC_ID(pdev)), 0, BAR_TSTRORM_INTMEM);
1372         LM_INTMEM_WRITE8(pdev, USTORM_FUNC_EN_OFFSET(FUNC_ID(pdev)), 0, BAR_USTRORM_INTMEM);
1373 
1374         LM_FOREACH_SB_ID(pdev, sb_id)
1375         {
1376             LM_INTMEM_WRITE8(pdev, CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(LM_FW_SB_ID(pdev, sb_id)),
1377                       SB_DISABLED, BAR_CSTRORM_INTMEM);
1378         }
1379 
1380         LM_INTMEM_WRITE8(pdev, CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
1381                          SB_DISABLED, BAR_CSTRORM_INTMEM);
1382     }
1383 
1384     return LM_STATUS_SUCCESS;
1385 }
1386 
1387 
1388 
1389 lm_status_t lm_reset_port_part(struct _lm_device_t *pdev)
1390 {
1391     /* It is assumed that all protocols are down and all unload ramrods have already completed */
1392     u32_t      val  = 0;
1393     const u8_t port = PORT_ID(pdev);
1394 
1395     /*  TODO Configure ACPI pattern if required. */
1396     /*  TODO Close the NIG port (also include congestion management toward XCM).*/
1397     // disable attention from nig
1398     REG_WR(pdev, NIG_REG_MASK_INTERRUPT_PORT0 + 4*port,0x0);
1399 
1400     // Do not rcv packets to BRB
1401     REG_WR(pdev, NIG_REG_LLH0_BRB1_DRV_MASK + 4*port,0x0);
1402 
1403     // Do not direct rcv packets that are not for MCP to the brb
1404     REG_WR(pdev, NIG_REG_LLH0_BRB1_NOT_MCP  + 4*32*port,0x0);
1405 
1406     // If DCBX is enabled we always want to go back to ETS disabled.
1407     // NIG is not reset
1408     if(IS_DCB_ENABLED(pdev))
1409     {
1410         elink_ets_disabled(&pdev->params.link,
1411                            &pdev->vars.link);
1412     }
1413 
1414     // reset external phy to cause link partner to see link down
1415     MM_ACQUIRE_PHY_LOCK(pdev);
1416     lm_reset_link(pdev);
1417     MM_RELEASE_PHY_LOCK(pdev);
1418     /*  Configure AEU.*/
1419     REG_WR(pdev,MISC_REG_AEU_MASK_ATTN_FUNC_0+(4*port),0);
1420 
1421     /* shutdown bug - in case of shutdown don't bother with clearing the BRB or the ILT */
1422     if (!lm_reset_is_inprogress(pdev))
1423     {
1424         /*  Wait a timeout (100msec).*/
1425         mm_wait(pdev,LM_UNLOAD_TIME);
1426         /*  Check for BRB port occupancy. If BRB is not empty driver starts the ChipErrorRecovery routine.*/
1427         val=REG_RD(pdev,BRB1_REG_PORT_NUM_OCC_BLOCKS_0+(4*port));
1428         /* brb1 not empty */
1429         if (val)
1430         {
1431             DbgMessage(pdev, INFORMi, "lm_reset_function_part BRB1 is not empty %d blooks are occupied\n",val);
1432             return LM_STATUS_TIMEOUT;
1433         }
1434 
1435 
1436         if (!CHIP_IS_E1x(pdev))
1437         {
1438             clear_pf_enable(pdev);
1439             pdev->vars.b_is_dmae_ready = FALSE; /* Can't access dmae since bus-master is disabled */
1440         }
1441         /* link is closed and BRB is empty, can safely delete SRC ILT table: */
1442         uninit_pxp2_blk(pdev);
1443 
1444     }
1445 
1446     return LM_STATUS_SUCCESS;
1447 }
1448 
1449 /**
1450  * @Description
1451  *     This function checks whether a certain data entry
1452  *     (register in NIG) is valid for current phase and chip.
1453  * @param pdev
1454  * @param data: A register in the nig with data on when it is
1455  *            valid
1456  * @param op: which phase we're in (save/restore/process)
1457  *
1458  * @return INLINE u8_t TRUE if the entry is valid, FALSE otherwise
1459  */
1460 static INLINE u8_t lm_reset_nig_valid_offset(lm_device_t                      * pdev,
1461                                              const lm_nig_save_restore_data_t * data,
1462                                              lm_reset_nig_op_t                  op)
1463 {
1464     if ((op == LM_RESET_NIG_OP_SAVE) && !GET_FLAGS(data->flags, LM_NIG_SAVE))
1465     {
1466         return FALSE;
1467     }
1468 
1469     if ((op == LM_RESET_NIG_OP_RESTORE) && !GET_FLAGS(data->flags, LM_NIG_RESTORE))
1470     {
1471         return FALSE;
1472     }
1473 
1474     if (CHIP_IS_E1(pdev))
1475     {
1476         return data->reg_valid.e1;
1477     }
1478     else if (CHIP_IS_E1H(pdev))
1479     {
1480         return data->reg_valid.e1h;
1481     }
1482     else if (CHIP_IS_E2(pdev))
1483     {
1484         return data->reg_valid.e2;
1485     }
1486     else
1487     {
1488         return data->reg_valid.e3;
1489     }
1490 }
1491 
1492 // This function should be called only while holding the MCP lock
1493 // This function should be called only on E1.5 or on E2 (the width of the PXP2_REG_PGL_PRETEND_FUNC_xx regs is 16 bits)
1494 lm_status_t lm_pretend_func( struct _lm_device_t *pdev, u16_t pretend_func_num )
1495 {
1496     u32_t offset = 0;
1497 
1498     if (CHIP_IS_E1(pdev))
1499     {
1500         return LM_STATUS_FAILURE;
1501     }
1502 
1503     if(CHIP_IS_E1H(pdev) && (pretend_func_num >= E1H_FUNC_MAX))
1504     {
1505         return LM_STATUS_INVALID_PARAMETER;
1506     }
1507 
1508     switch (ABS_FUNC_ID(pdev))
1509     {
1510     case 0:
1511         offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
1512         break;
1513 
1514     case 1:
1515         offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
1516         break;
1517 
1518     case 2:
1519         offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
1520         break;
1521 
1522     case 3:
1523         offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
1524         break;
1525 
1526     case 4:
1527         offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
1528         break;
1529 
1530     case 5:
1531         offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
1532         break;
1533 
1534     case 6:
1535         offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
1536         break;
1537 
1538     case 7:
1539         offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
1540         break;
1541 
1542     default:
1543         break;
1544     }
1545 
1546     if( 0 == offset )
1547     {
1548         return LM_STATUS_INVALID_PARAMETER;
1549     }
1550 
1551     REG_WR(pdev, offset, pretend_func_num );
1552     REG_WAIT_VERIFY_VAL(pdev, offset, pretend_func_num, 200);
1556 
1557     return LM_STATUS_SUCCESS;
1558 }
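
/* A minimal usage sketch of the pretend mechanism (compiled out, illustrative
 * only): pretend to another absolute function, perform the access on its
 * behalf, then always pretend back to our own function ID - the same pattern
 * lm_reset_nig_values_for_func_save_restore uses below.
 * lm_read_reg_as_func() is a hypothetical helper that exists only for this
 * example. */
#if 0
static lm_status_t lm_read_reg_as_func( struct _lm_device_t *pdev, u16_t other_func, u32_t reg, u32_t *val )
{
    lm_status_t lm_status = lm_pretend_func( pdev, other_func );

    if( LM_STATUS_SUCCESS != lm_status )
    {
        return lm_status;
    }

    *val = REG_RD( pdev, reg ); /* read performed as if we were other_func */

    /* always restore our own function ID */
    return lm_pretend_func( pdev, (u16_t)ABS_FUNC_ID(pdev) );
}
#endif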
1559 
1560 /**
1561  * @Description
1562  *      This function is called between saving the nig registers
1563  *      and restoring them. It's purpose is to do any special
1564  *      handling that requires knowing what the registers that
1565  *      were read are and before restoring them. It can change
1566  *      the values of other registers based on knowledge
1567  *      obtained by values of different registers.
1568  *
1569  *      Current processing rules:
1570  *              NIG_REG_LLHX_FUNC_EN should be set to '1' if
1571  *              lm_get_func_en returns TRUE; otherwise it
1572  *              will remain '0'. Only under SD mode.
1573  *
1574  * @param pdev
1575  * @param reg_offsets_port
1576  * @param reg_port_arr
1577  * @param reg_port_arr_size
1578  */
1579 static void lm_reset_nig_process(IN struct _lm_device_t              *pdev,
1580                                  IN  lm_nig_save_restore_data_t const reg_offsets_port[],
1581                                  OUT u32_t                            reg_port_arr[],
1582                                  IN  u32_t                      const reg_port_arr_size,
1583                                  IN  u8_t                       const func_id)
1584 
1585 {
1586     const lm_nig_save_restore_data_t  * data = NULL;
1587     u32_t                               idx  = 0;
1588 
1589     /* Current processing only applies to SD multi-function mode. This check should be removed
1590      * if that changes... */
1591     if (!IS_MF_SD_MODE(pdev))
1592     {
1593         return;
1594     }
1595 
1596     /* We loop over all the registers to make sure we access the correct offset: in case someone moves it. */
1597     for( idx = 0; idx < reg_port_arr_size ; idx++ )
1598     {
1599         data = &reg_offsets_port[idx];
1600         if (lm_reset_nig_valid_offset(pdev, data, LM_RESET_NIG_OP_RESTORE))
1601         {
1602             if ((data->offset == NIG_REG_LLH0_FUNC_EN) || (data->offset == NIG_REG_LLH1_FUNC_EN))
1603             {
1604                 reg_port_arr[idx] = lm_get_func_en(pdev, func_id);
1605             }
1606 
1607         }
1608     }
1609 
1610 }
1611 
1612 static void lm_reset_nig_values_for_func_save_restore( IN struct _lm_device_t              *pdev,
1613                                                        IN  lm_reset_nig_op_t          const save_or_restore,
1614                                                        IN  u8_t                       const pretend_func_id,
1615                                                        IN  lm_nig_save_restore_data_t const reg_offsets_port[],
1616                                                        OUT u32_t                            reg_port_arr[],
1617                                                        IN  u32_t                      const reg_port_arr_size,
1618                                                        IN  u32_t                      const reg_port_wb_offset_base,
1619                                                        OUT u64_t                            reg_port_wb_arr[],
1620                                                        IN  u32_t                      const reg_port_wb_arr_size )
1621 {
1622     const lm_nig_save_restore_data_t * data        = NULL;
1623     u32_t                              offset      = 0;
1624     u32_t                              val_32[2]   = {0} ;
1625     u32_t                              idx         = 0;
1626     u8_t                               abs_func_id = ABS_FUNC_ID(pdev);
1627     u8_t                               b_save      = FALSE;
1628 
1629     switch(save_or_restore)
1630     {
1631     case LM_RESET_NIG_OP_SAVE:
1632         b_save = TRUE;
1633         break;
1634 
1635     case LM_RESET_NIG_OP_RESTORE:
1636         b_save = FALSE;
1637         break;
1638 
1639     case LM_RESET_NIG_OP_PROCESS:
1640         lm_reset_nig_process(pdev,reg_offsets_port,reg_port_arr,reg_port_arr_size, pretend_func_id);
1641         return; /* Return on purpose: processing is done in a separate function */
1642 
1643     default:
1644         DbgBreakIf(TRUE);
1645         break;
1646     }
1647 
1648     if( pretend_func_id != abs_func_id  )
1649     {
1650         lm_pretend_func( pdev, pretend_func_id );
1651     }
1652 
1653     for( idx = 0; idx < reg_port_arr_size ; idx++ )
1654     {
1655         data = &reg_offsets_port[idx];
1656         if (lm_reset_nig_valid_offset(pdev, data, save_or_restore))
1657         {
1658             if( b_save )
1659             {
1660                 reg_port_arr[idx] = REG_RD(pdev, data->offset );
1661             }
1662             else
1663             {
1664                 REG_WR(pdev, data->offset, reg_port_arr[idx] );
1665             }
1666         }
1667     }
1668 
1669     for( idx = 0; idx < reg_port_wb_arr_size; idx++)
1670     {
1671         offset = reg_port_wb_offset_base + 8*idx;
1672 
1673         if( b_save)
1674         {
1675             REG_RD_IND( pdev,  offset,   &val_32[0] );
1676             REG_RD_IND( pdev,  offset+4, &val_32[1] );
1677             reg_port_wb_arr[idx] = HILO_U64( val_32[1], val_32[0] );
1678         }
1679         else
1680         {
1681             val_32[0] = U64_LO(reg_port_wb_arr[idx]);
1682             val_32[1] = U64_HI(reg_port_wb_arr[idx]);
1683 
1684             REG_WR_IND( pdev,  offset,   val_32[0] );
1685             REG_WR_IND( pdev,  offset+4, val_32[1] );
1686         }
1687     }
1688 
1689     if( pretend_func_id != abs_func_id  )
1690     {
1691         lm_pretend_func( pdev, abs_func_id );
1692     }
1693 }
1694 
1695 /*
1696    1. save known essential NIG values (port swap, WOL nwuf for all funcs)
1697    2. Pretend to relevant func - for split register as well
1698    3. Reset the device and the NIG.
1699    4. Restore known essential NIG values (port swap and WOL nwuf).
1700 */
1701 
1702 void
1703 lm_reset_device_with_nig(struct _lm_device_t *pdev)
1704 {
1705     u8_t                          idx                                        = 0;
1706     u8_t                          idx_port                                   = 0;
1707     u8_t                          abs_func_vector                            = 0;
1708     u8_t                          abs_func_id                                = ABS_FUNC_ID(pdev); // for debugging only
1709     const u8_t                    idx_max                                    = MAX_FUNC_NUM;
1710     const u8_t                    path_id                                    = PATH_ID(pdev);
1711     const u32_t                   chip_num                                   = CHIP_NUM(pdev);
1712     const lm_chip_port_mode_t     chip_port_mode                             = CHIP_PORT_MODE(pdev);
1713     static const u32_t            offset_base_wb[PORT_MAX]                   = { NIG_REG_LLH0_ACPI_BE_MEM_DATA, NIG_REG_LLH1_ACPI_BE_MEM_DATA };
1714     lm_reset_nig_op_t             lm_reset_nig_op                            = LM_RESET_NIG_OP_SAVE;
1715 
1716     // List of registers that are split-4 (different addresses per port, but same per function)
1717     static const lm_nig_save_restore_data_t reg_offsets_port0[]              = NIG_REG_PORT_0_OFFSETS_VALUES;
1718     static const lm_nig_save_restore_data_t reg_offsets_port1[]              = NIG_REG_PORT_1_OFFSETS_VALUES;
1719 
1720     /* List of registers that are "global" for all funcitons in path               offset                         valid
1721                                                                                                                   e1,e1h,e2,e3 save / restore */
1722     const lm_nig_save_restore_data_t non_split_offsets[]                     = { { NIG_REG_PORT_SWAP,             {1, 1, 0, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
1723                                                                                  { NIG_REG_STRAP_OVERRIDE,        {1, 1 ,0, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
1724                                                                                  { NIG_REG_P0_ACPI_MF_GLOBAL_EN,  {0 ,0, 1, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
1725                                                                                  { NIG_REG_P1_ACPI_MF_GLOBAL_EN,  {0 ,0, 1, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
1726                                                                                  { NIG_REG_LLH_E1HOV_MODE,        {0, 1, 0, 0}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
1727                                                                                  { NIG_REG_LLH_MF_MODE,           {0, 1, 1, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
1728                                                                                  { NIG_REG_LLH1_MF_MODE,          {0, 0, 0, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
1729                                                                                  { NIG_REG_MASK_INTERRUPT_PORT0,  {1, 1, 1, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) },
1730                                                                                  { NIG_REG_MASK_INTERRUPT_PORT1,  {1, 1, 1, 1}, (LM_NIG_SAVE | LM_NIG_RESTORE) }};
1731 
1732     u32_t                         non_split_vals[ARRSIZE(non_split_offsets)] = {0};
1733     static u64_t                  reg_nig_port_restore_wb[MAX_FUNC_NUM][NIG_REG_LLH0_ACPI_BE_MEM_DATA_SIZE/2] = {{0}} ; // the nwuf data
1734     static u32_t                  reg_nig_port_restore[MAX_FUNC_NUM][ARRSIZE(reg_offsets_port0)]              = {{0}};
1735 
1736     UNREFERENCED_PARAMETER_( abs_func_id );
1737 
1738     // Note:
1739     // Due to kernel stack limitations we use reg_nig_port_restore(_wb) as static variables.
1740     // At first glance this doesn't look good, BUT exclusive access to the values is assured by the
1741     //    mcp locking mechanism (LOAD_COMMON etc.)
1742 
1743     // Currently we work with at most 8 PFs; in case of a change, we need to verify the code is still valid
1744     ASSERT_STATIC( 8 == MAX_FUNC_NUM );
1745     ASSERT_STATIC( 2 == PORT_MAX );
1746 
1747     // verify enum values
1748     ASSERT_STATIC( LM_RESET_NIG_OP_SAVE    < LM_RESET_NIG_OP_PROCESS );
1749     ASSERT_STATIC( LM_RESET_NIG_OP_PROCESS < LM_RESET_NIG_OP_RESTORE );
1750     ASSERT_STATIC( 3 == LM_RESET_NIG_OP_MAX );
1751 
1752     // verify that save/restores are same size as offsets range
1753     ASSERT_STATIC( ARRSIZE(reg_nig_port_restore[0]) == ARRSIZE(reg_offsets_port0) );
1754     ASSERT_STATIC( ARRSIZE(reg_nig_port_restore[1]) == ARRSIZE(reg_offsets_port1) );
1755     ASSERT_STATIC( NIG_REG_LLH0_ACPI_BE_MEM_DATA_SIZE == NIG_REG_LLH1_ACPI_BE_MEM_DATA_SIZE );
1756 
1757     abs_func_vector = lm_get_abs_func_vector( chip_num, chip_port_mode, IS_MULTI_VNIC(pdev), path_id );
1758 
1759     // start the "save/restore" operation
1760     for( lm_reset_nig_op = LM_RESET_NIG_OP_SAVE; lm_reset_nig_op < LM_RESET_NIG_OP_MAX; lm_reset_nig_op++ )
1761     {
1762         for( idx = 0; idx < idx_max; idx++ )
1763         {
1764             // we skip non-marked functions
1765             if( 0 == GET_BIT( abs_func_vector, idx ) )
1766             {
1767                 continue;
1768             }
1769 
1770             // choose the correct idx_port
1771             idx_port = PORT_ID_PARAM_FUNC_ABS( chip_num, chip_port_mode, idx );
1772 
1773             DbgBreakIf( idx_port >= PORT_MAX );
1774 
1775             // save on the 1st iteration
1776             // restore on the 2nd iteration
1777             lm_reset_nig_values_for_func_save_restore( pdev,
1778                                                        lm_reset_nig_op,
1779                                                        idx,
1780                                                        idx_port ? reg_offsets_port1 : reg_offsets_port0,
1781                                                        reg_nig_port_restore[idx],
1782                                                        ARRSIZE(reg_nig_port_restore[idx]),
1783                                                        offset_base_wb[idx_port],
1784                                                        reg_nig_port_restore_wb[idx],
1785                                                        ARRSIZE(reg_nig_port_restore_wb[idx]) );
1786         } // for func iterations
1787 
1788         // This code section should be executed once only - and it must run in any case!
1789         if ( LM_RESET_NIG_OP_SAVE == lm_reset_nig_op)
1790         {
1791             for( idx = 0; idx < ARRSIZE(non_split_vals); idx++ )
1792             {
1793                 if (lm_reset_nig_valid_offset(pdev, &non_split_offsets[idx], LM_RESET_NIG_OP_SAVE))
1794                 {
1795                     non_split_vals[idx] = REG_RD( pdev, non_split_offsets[idx].offset );
1796                 }
1797 
1798             }
1799 
1800             //reset chip with NIG!!
1801             lm_reset_path( pdev, TRUE );
1802 
1803             // restore nig swap register and global acpi enable after the NIG reset
1804             for( idx = 0; idx < ARRSIZE(non_split_vals); idx++ )
1805             {
1806                 if (lm_reset_nig_valid_offset(pdev, &non_split_offsets[idx], LM_RESET_NIG_OP_RESTORE))
1807                 {
1808                     REG_WR(pdev, non_split_offsets[idx].offset, non_split_vals[idx]);
1809                 }
1810             }
1811 
1812         } // save-iteration-only code
1813 
1814     } // for save/restore loop
1815 
1816 } // lm_reset_device_with_nig
1817 
1818 void
1819 lm_reset_common_part(struct _lm_device_t *pdev)
1820 {
1821     /* Reset the HW blocks that are listed in section 4.13.18.*/
1822     if (lm_pm_reset_is_inprogress(pdev))
1823     {
1824         /* In case of shutdown we reset the NIG as well */
1825         lm_reset_device_with_nig(pdev);
1826     }
1827     else
1828     {
1829         lm_reset_path( pdev, FALSE );
1830     }
1831 
1832     /* According to E1/E1H/E2 Recovery flow spec, as long as MCP does not support process kill, "close the gates"
1833      * should be disabled while no drivers are loaded. The last driver that unloads should disable "close the gates"
1834      */
1835     lm_er_disable_close_the_gate(pdev);
1836 }
1837 
1838 void lm_chip_reset(struct _lm_device_t *pdev, lm_reason_t reason)
1839 {
1840     lm_loader_opcode       opcode = 0 ;
1841     lm_loader_response     resp   = 0 ;
1842     u32_t                  val    = 0;
1843     u32_t                  enabled_wols = mm_get_wol_flags(pdev);
1844 
1845     DbgMessage(pdev, INFORMi , "### lm_chip_reset\n");
1846 
1847 #ifdef VF_INVOLVED
1848     if (IS_VFDEV(pdev))
1849     {
1850         lm_status_t lm_status = lm_vf_chip_reset(pdev,reason);
1851         if (lm_status != LM_STATUS_SUCCESS)
1852         {
1853             DbgMessage(pdev, FATAL, "lm_chip_reset: ERROR (%d) resetting VF!!!\n",lm_status);
1854             DbgBreakIf(!DBG_BREAK_ON(UNDER_TEST));
1855         }
1856         return;
1857     }
1858 #endif
1859 
1860     // depends on reason, send relevant message to MCP
1861     switch( reason )
1862     {
1863     case LM_REASON_WOL_SUSPEND:
1864         opcode = LM_LOADER_OPCODE_UNLOAD_WOL_EN | LM_LOADER_OPCODE_UNLOAD_SUSPEND;
1865         break ;
1866 
1867     case LM_REASON_NO_WOL_SUSPEND:
1868         opcode = LM_LOADER_OPCODE_UNLOAD_WOL_DIS | LM_LOADER_OPCODE_UNLOAD_SUSPEND;
1869         break ;
1870 
1871     case LM_REASON_DRIVER_UNLOAD:
1872     case LM_REASON_DRIVER_UNLOAD_POWER_DOWN:
1873     case LM_REASON_DRIVER_SHUTDOWN:
1874         enabled_wols = LM_WAKE_UP_MODE_NONE; // in S5 the default comes from nvm cfg 19
1875         // in case we do support wol_cap, we ignore the OS configuration and
1876         // decide based on the nvm settings (CQ49516 - S5 WOL functionality should always look at the NVRAM WOL setting)
1877         if( GET_FLAGS( pdev->hw_info.port_feature_config, PORT_FEATURE_WOL_ENABLED ) )
1878         {
1879             opcode = LM_LOADER_OPCODE_UNLOAD_WOL_EN ;
1880             // set enabled_wols so that the mac address will be written by lm_set_d3_mpkt()
1881             SET_FLAGS( enabled_wols, LM_WAKE_UP_MODE_MAGIC_PACKET );
1882         }
1883         else
1884         {
1885             opcode = LM_LOADER_OPCODE_UNLOAD_WOL_DIS ;
1886         }
1887         break;
1888 
1889     default:
1890         break;
1891     }
1892 
1893     if ( !CHIP_IS_E1(pdev) )
1894     {
1895         if (CHIP_IS_E2(pdev) || CHIP_IS_E1H(pdev))
1896         {
1897             val = REG_RD( pdev, MISC_REG_E1HMF_MODE);
1898         }
1899         else
1900         {
1901             ASSERT_STATIC(MISC_REG_E1HMF_MODE_P1 == (MISC_REG_E1HMF_MODE_P0 + 4));
1902             val = REG_RD( pdev, MISC_REG_E1HMF_MODE_P0 + PORT_ID(pdev)*4);
1903         }
1904 
1905         // We do expect that register value will be consistent with multi_vnics_mode.
1906         if (!lm_fl_reset_is_inprogress(pdev))
1907         {
1908             DbgBreakIf( pdev->params.multi_vnics_mode ^ val );
1909         }
1910     }
1911 
1912     if (lm_fl_reset_is_inprogress(pdev))
1913     {
1914         if (TEST_MODE_NO_MCP == GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP))
1915         {
1916             DbgMessage(pdev, FATAL, "lm_chip_reset under FLR: NO MCP\n");
1917             lm_loader_lock(pdev, opcode);
1918             lm_loader_unlock(pdev, opcode, NULL);
1919         }
1920 
1921         DbgMessage(pdev, FATAL, "lm_chip_reset under FLR: return\n");
1922         return;
1923     }
1924 
1925     // the magic packet should be programmed before the unload request is sent to the MCP
1926     lm_set_d3_mpkt(pdev, enabled_wols) ;
1927 
1928     resp = lm_loader_lock(pdev, opcode ) ;
1929 
1930     if (!IS_ASSIGNED_TO_VM_PFDEV(pdev))
1931     {
1932         lm_pcie_state_save_for_d3(pdev);
1933     }
1934 
1935     // nwuf is programmed before the chip reset since if we reset the NIG we restore all functions anyway
1936     lm_set_d3_nwuf(pdev, enabled_wols) ;
1937 
1938     switch (resp)
1939     {
1940     case LM_LOADER_RESPONSE_UNLOAD_FUNCTION:
1941         lm_reset_function_part(pdev, TRUE /* cleanup*/);
1942         break;
1943     case LM_LOADER_RESPONSE_UNLOAD_PORT:
1944         lm_reset_function_part(pdev, FALSE /* cleanup */ );
1945         lm_reset_port_part(pdev);
1946         break;
1947     case LM_LOADER_RESPONSE_UNLOAD_COMMON:
1948         lm_reset_function_part(pdev, FALSE /* cleanup */);
1949         lm_reset_port_part(pdev);
1950         //Check if there is dbus work
1951         mm_dbus_stop_if_started(pdev);
1952         lm_reset_common_part(pdev);
1953         break;
1954     default:
1955         DbgMessage(pdev, WARN, "wrong loader response=0x%x\n", resp);
1956         DbgBreakIfAll(1);
1957     }
1958 
1959     pdev->vars.b_is_dmae_ready = FALSE ;
1960 
1961     // unset pmf flag needed for D3 state
1962     pdev->vars.is_pmf = NOT_PMF;
1963 
1964     resp = lm_loader_unlock(pdev, opcode, NULL ) ;
1965 
1966     if (resp != LM_LOADER_RESPONSE_UNLOAD_DONE )
1967     {
1968         DbgMessage(pdev, WARN, "wrong loader response=0x%x\n", resp);
1969         DbgBreakIfAll(1);
1970     }
1971 }
1972 
1973 /**
1974  * This function sends the "function-start" ramrod and waits
1975  * synchronously for its completion. Called from the
1976  * chip-start flow.
1977  *
1978  * @param pdev
1979  *
1980  * @return lm_status_t SUCCESS / TIMEOUT on waiting for
1981  *         completion
1982  */
1983 lm_status_t lm_function_start(struct _lm_device_t *pdev)
1984 {
1985     struct function_start_data * func_start_data = NULL;
1986     lm_status_t                  lm_status       = LM_STATUS_SUCCESS;
1987 
1988     DbgMessage(pdev, INFORMeq|INFORMl2sp, "#lm_function_start\n");
1989 
1990     pdev->eq_info.function_state = FUNCTION_START_POSTED;
1991 
1992     if (CHK_NULL(pdev) || CHK_NULL(pdev->slowpath_info.slowpath_data.func_start_data))
1993     {
1994         return LM_STATUS_INVALID_PARAMETER;
1995     }
1996 
1997     func_start_data = pdev->slowpath_info.slowpath_data.func_start_data;
1998 
1999     if (pdev->params.multi_vnics_mode)
2000     {
2001         DbgBreakIf(pdev->params.mf_mode >= MAX_MF_MODE);
2002         func_start_data->function_mode = pdev->params.mf_mode;
2003     }
2004     else
2005     {
2006         func_start_data->function_mode = SINGLE_FUNCTION;
2007     }
2008 
2009     func_start_data->sd_vlan_tag = mm_cpu_to_le16(pdev->params.ovlan);
2010     /* NIV_TODO: func_start_data->vif_id = mm_cpu_to_le16(??) */
2011 
2012     /* TODO: For modifying the Ether type of the Outer VLAN to SVLAN:
2013         To use, first set these registers to the SVLAN Ethertype (0x88a8)
2014         PRS_REG_VLAN_TYPE_0
2015         PBF_REG_VLAN_TYPE_0
2016         NIG_REG_LLH_OUTER_VLAN_TYPE_1
2017         Then modify/create the function with  sd_vlan_eth_type set to SVLAN Ethertype (0x88a8)
2018     */
2019     if (IS_MF_SD_MODE(pdev) && IS_SD_BD_MODE(pdev))
2020     {
2021         const u8_t  port   = PORT_ID(pdev);
2022         u32_t offset = ( port ? NIG_REG_LLH1_OUTER_VLAN_ID : NIG_REG_LLH0_OUTER_VLAN_ID );
2023 
2024         func_start_data->sd_vlan_eth_type = mm_cpu_to_le16(0x88a8);
2025         REG_WR(pdev, PRS_REG_VLAN_TYPE_0, 0x88a8);
2026         REG_WR(pdev, PBF_REG_VLAN_TYPE_0, 0x88a8);
2027         REG_WR(pdev, offset , 0x88a8);
2028     }
2029     else
2030         func_start_data->sd_vlan_eth_type = mm_cpu_to_le16(pdev->params.sd_vlan_eth_type);
2031 
2032     func_start_data->path_id = PATH_ID(pdev);
2033 
2034     // Function start is sent when the first miniport client binds (it can also be FCoE or iSCSI).
2035     // The requirement for NW multiple priority is only known to eVBD when the NDIS miniport binds.
2036     if(MM_DCB_MP_L2_IS_ENABLE(pdev))
2037     {
2038         // Multiple priority enabled (only from D3 flow)
2039         func_start_data->network_cos_mode = STATIC_COS;
2040     }
2041     else
2042     {
2043         func_start_data->network_cos_mode = OVERRIDE_COS;
2044     }
2045 
2046     // encapsulated packets offload is disabled by default
2047     // in case of an error, restore last fw state.
2048     if (ENCAP_OFFLOAD_DISABLED == pdev->encap_info.current_encap_offload_state)
2049     {
2050         func_start_data->tunn_clss_en  = 0;
2051         func_start_data->tunnel_mode = TUNN_MODE_NONE;
2052     }
2053     else
2054     {
2055         func_start_data->tunn_clss_en  = 1;
2056         func_start_data->tunnel_mode = TUNN_MODE_GRE;
2057         func_start_data->gre_tunnel_type = NVGRE_TUNNEL;
2058     }
2059 
2060     if ((IS_SD_UFP_MODE(pdev) || IS_SD_BD_MODE(pdev)) &&
2061         GET_FLAGS(pdev->params.mf_proto_support_flags, LM_PROTO_SUPPORT_FCOE))
2062     {
2063         func_start_data->sd_accept_mf_clss_fail_match_ethtype = 1;
2064         func_start_data->sd_accept_mf_clss_fail               = 1;
2065         func_start_data->sd_accept_mf_clss_fail_ethtype       = mm_cpu_to_le16(0x8914);
2066         func_start_data->no_added_tags                        = 1;
2067     }
2068 
2069     if (IS_SD_UFP_MODE(pdev) || IS_SD_BD_MODE(pdev))
2070     {
2071         /* modify sd_vlan_force_pri_val through registry */
2072         func_start_data->sd_vlan_force_pri_flg = 1;
2073         func_start_data->sd_vlan_force_pri_val = func_start_data->sd_vlan_force_pri_val;
2074     }
2075 
2076     lm_status = lm_sq_post(pdev,
2077                            0,
2078                            RAMROD_CMD_ID_COMMON_FUNCTION_START,
2079                            CMD_PRIORITY_NORMAL,
2080                            NONE_CONNECTION_TYPE,
2081                            LM_SLOWPATH_PHYS(pdev, func_start_data).as_u64);
2082 
2083     if (lm_status != LM_STATUS_SUCCESS)
2084     {
2085         return lm_status;
2086     }
2087 
2088     lm_status = lm_wait_state_change(pdev, &pdev->eq_info.function_state, FUNCTION_START_COMPLETED);
2089 
2090     return lm_status;
2091 } /* lm_function_start */
2092 
2093 lm_status_t lm_chip_start(struct _lm_device_t *pdev)
2094 {
2095     lm_status_t lm_status = LM_STATUS_SUCCESS ;
2096     u8_t        min_bw    = (u8_t)pdev->params.bandwidth_min;
2097     u8_t        max_bw    = (u8_t)pdev->params.bandwidth_max;
2098 
2099     DbgMessage(pdev, INFORMi, "lm_chip_start\n");
2100 
2101     if (IS_VFDEV(pdev))
2102     {
2103         return LM_STATUS_SUCCESS; //lm_vf_chip_start(pdev);
2104     }
2105 
2106     if ( max_bw != 0 )
2107     {
2108         //we assume that if one of the BW registry parameters is not 0, then so is the other one.
2109         DbgBreakIf(min_bw == 0);
2110         lm_status = lm_mcp_set_mf_bw(pdev, min_bw, max_bw);
2111         if (LM_STATUS_SUCCESS != lm_status)
2112         {
2113             return lm_status;
2114         }
2115     }
2116 
2117     /* Chip is initialized. We are now about to send the first ramrod, so we can open the slow-path queue */
2118     lm_sq_change_state(pdev, SQ_STATE_NORMAL);
2119 
2120     lm_status = lm_function_start(pdev);
2121     if ( LM_STATUS_SUCCESS != lm_status )
2122     {
2123         return lm_status;
2124     }
2125 
2126     // start timer scan after leading connection ramrod.
2127     REG_WR(pdev, TM_REG_EN_LINEAR0_TIMER + 4*PORT_ID(pdev),1);
2128 
2129     lm_status = lm_establish_forward_con(pdev);
2130     if ( LM_STATUS_SUCCESS != lm_status )
2131     {
2132         goto on_err ;
2133     }
2134 
2135 on_err:
2136     if( LM_STATUS_SUCCESS != lm_status )
2137     {
2138         DbgMessage(pdev, FATAL, "lm_chip_start on_err:\n");
2139         lm_function_stop(pdev);
2140         REG_WR(pdev, TM_REG_EN_LINEAR0_TIMER + 4*PORT_ID(pdev),0);
2141     }
2142 
2143     return lm_status;
2144 }
2145 
2146 /*
2147  *Function Name:lm_setup_read_mgmt_stats_ptr
2148  *
2149  *Parameters:
2150  *
2151  *Description: read stats_ptr ( port and func) from shmem
2152  *
2153  *Assumption: the stats scratch pad address from the MCP cannot change at run time (a bc upgrade at runtime is not valid)
2154  *            in case the bc is upgraded - we need to stop statistics and read the addresses again
2155  *Returns:
2156  *
2157  */
2158 void lm_setup_read_mgmt_stats_ptr( struct _lm_device_t *pdev, IN const u32_t mailbox_num, OUT u32_t* OPTIONAL fw_port_stats_ptr, OUT u32_t* OPTIONAL fw_func_stats_ptr )
2159 {
2160     if (GET_FLAGS( pdev->params.test_mode, TEST_MODE_NO_MCP))
2161     {
2162         // E2 TODO: move this to lm_main and get info at get_shmem_info...
2163         #define NO_MCP_WA_FW_FUNC_STATS_PTR       (0xAF900)
2164         #define NO_MCP_WA_FW_PORT_STATS_PTR       (0xAFA00)
2165         if ( 0 != fw_func_stats_ptr)
2166         {
2167             *fw_func_stats_ptr = NO_MCP_WA_FW_FUNC_STATS_PTR;
2168         }
2169 
2170         if ( 0 != fw_port_stats_ptr)
2171         {
2172             *fw_port_stats_ptr = NO_MCP_WA_FW_PORT_STATS_PTR;
2173         }
2174         return;
2175     }
2176 
2177     if ( NULL != fw_func_stats_ptr )
2178     {
2179         // read func_stats address
2180         LM_SHMEM_READ(pdev,
2181                       OFFSETOF(shmem_region_t,
2182                       func_mb[mailbox_num].fw_mb_param),
2183                       fw_func_stats_ptr);
2184 
2185         // Backward compatibility adjustments for Bootcode v4.0.8 and below
2186         if( 0xf80a0000 == *fw_func_stats_ptr )
2187         {
2188             DbgMessage(pdev, FATAL , "lm_read_fw_stats_ptr: boot code earlier than v4.0.8 fw_mb=%p-->NULL\n", *fw_func_stats_ptr );
2189             *fw_func_stats_ptr = 0;//NULL
2190         }
2191         DbgMessage(pdev, WARN , "lm_read_fw_stats_ptr: pdev->vars.fw_func_stats_ptr=%p\n", *fw_func_stats_ptr );
2192     }
2193 
2194     if ( NULL != fw_port_stats_ptr )
2195     {
2196         // read port_stats address
2197         LM_SHMEM_READ(pdev,
2198                       OFFSETOF(shmem_region_t,
2199                       port_mb[PORT_ID(pdev)].port_stx),
2200                       fw_port_stats_ptr);
2201 
2202         DbgMessage(pdev, WARN, "lm_read_fw_stats_ptr: pdev->vars.fw_port_stats_ptr=%p\n", *fw_port_stats_ptr );
2203     }
2204 }
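
/* Usage sketch (compiled out, illustrative only): read both management stats
 * pointers for this function; either OUT parameter may be NULL when not
 * needed. FUNC_MAILBOX_ID(pdev) is assumed here to be the function's mailbox
 * index as used elsewhere in the driver. */
#if 0
    u32_t fw_port_stats_ptr = 0;
    u32_t fw_func_stats_ptr = 0;

    lm_setup_read_mgmt_stats_ptr( pdev, FUNC_MAILBOX_ID(pdev), &fw_port_stats_ptr, &fw_func_stats_ptr );
#endif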
2205 
2206 /**lm_init_get_modes_bitmap
2207  * Get the representation of the device's configuration as
2208  * inittool init-modes flags.
2209  *
2210  * @param pdev the device to use
2211  *
2212  * @return u32_t a bitmap with the appropriate INIT_MODE_XXX
2213  *         flags set.
2214  */
2215 static u32_t
2216 lm_init_get_modes_bitmap(struct _lm_device_t *pdev)
2217 {
2218     u32_t   flags    = 0;
2219     u32_t   chip_rev = 0;
2220 
2221     if (CHIP_REV_IS_ASIC(pdev))
2222     {
2223         SET_FLAGS(flags, MODE_ASIC);
2224     }
2225     else if (CHIP_REV_IS_FPGA(pdev))
2226     {
2227         SET_FLAGS(flags, MODE_FPGA);
2228     }
2229     else if (CHIP_REV_IS_EMUL(pdev))
2230     {
2231         SET_FLAGS(flags, MODE_EMUL);
2232     }
2233     else
2234     {
2235         DbgBreakIf(TRUE);
2236     }
2237 
2238     if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)
2239     {
2240         SET_FLAGS(flags, MODE_PORT4);
2241     }
2242     else if ((CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_2)||(CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_NONE))
2243     {
2244         SET_FLAGS(flags, MODE_PORT2);
2245     }
2246     else
2247     {
2248         DbgBreakIf(TRUE);
2249     }
2250 
2251     DbgMessage(pdev, INFORMi, "chipid is 0x%x, rev is 0x%x\n", CHIP_NUM(pdev), CHIP_REV(pdev));
2252     if (CHIP_IS_E2(pdev))
2253     {
2254         DbgMessage(pdev, INFORMi, "chip is E2\n");
2255         SET_FLAGS(flags, MODE_E2);
2256     }
2257     else if (CHIP_IS_E3(pdev))
2258     {
2259         DbgMessage(pdev, INFORMi, "chip is E3\n");
2260         SET_FLAGS(flags, MODE_E3);
2261         if (CHIP_REV_IS_ASIC(pdev))
2262         {
2263             DbgMessage(pdev, INFORMi, "chip is ASIC\n");
2264             chip_rev = CHIP_REV(pdev);
2265         }
2266         else
2267         {
2268             chip_rev = CHIP_REV_SIM(pdev);
2269             DbgMessage(pdev, INFORMi, "chip is EMUL/FPGA. modified chip_rev is 0x%x\n", chip_rev);
2270         }
2271 
2272         if (chip_rev == CHIP_REV_Ax)
2273         {
2274             DbgMessage(pdev, INFORMi, "chip is E3 Ax\n");
2275             SET_FLAGS(flags, MODE_E3_A0);
2276         }
2277         else if (chip_rev == CHIP_REV_Bx)
2278         {
2279             DbgMessage(pdev, INFORMi, "chip is E3 Bx\n");
2280             SET_FLAGS(flags, MODE_E3_B0);
2281 
2282             /* Multiple cos mode is relevant to E3 B0 only... */
2283             switch (pdev->params.e3_cos_modes)
2284             {
2285             case LM_COS_MODE_COS3:
2286                 SET_FLAGS(flags, MODE_COS3);
2287                 break;
2288             case LM_COS_MODE_COS6:
2289                 SET_FLAGS(flags, MODE_COS6);
2290                 break;
2291             default:
2292                 DbgBreakMsg("Unknown Cos Mode");
2293             }
2294         }
2295         else
2296         {
2297             DbgBreakIf(TRUE);
2298         }
2299     }
2300     else
2301     {
2302         DbgMessage(pdev, INFORMi, "chip is not E2/E3\n");
2303     }
2304 
2305 
2306     if (pdev->params.multi_vnics_mode)
2307     {
2308         SET_FLAGS(flags, MODE_MF);
2309         switch(pdev->params.mf_mode)
2310         {
2311         case MULTI_FUNCTION_SD:
2312             SET_FLAGS(flags, MODE_MF_SD);
2313             break;
2314         case MULTI_FUNCTION_SI:
2315             SET_FLAGS(flags, MODE_MF_SI);
2316             break;
2317         case MULTI_FUNCTION_AFEX:
2318             SET_FLAGS(flags, MODE_MF_AFEX);
2319             break;
2320         default:
2321             DbgBreakIf(TRUE);
2322         }
2323     }
2324     else
2325     {
2326         SET_FLAGS(flags, MODE_SF);
2327     }
2328 
2329 
2330 #if defined(LITTLE_ENDIAN)
2331     SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
2332 #else
2333     SET_FLAGS(flags, MODE_BIG_ENDIAN);
2334 #endif
2335 
2336 //validation
2337 #define SINGLE_BIT_SET(_bitmap) POWER_OF_2(_bitmap)
2338 #define AT_MOST_SINGLE_SET(_bitmap) (((_bitmap)==0)||(SINGLE_BIT_SET(_bitmap)))
2339 
2340     DbgBreakIf(!SINGLE_BIT_SET(GET_FLAGS(flags, MODE_EMUL|MODE_FPGA|MODE_ASIC)) );
2341     DbgBreakIf(!SINGLE_BIT_SET(GET_FLAGS(flags, MODE_PORT2|MODE_PORT4)) );
2342     DbgBreakIf(!SINGLE_BIT_SET(GET_FLAGS(flags, MODE_SF|MODE_MF)) );
2343     DbgBreakIf(!SINGLE_BIT_SET(GET_FLAGS(flags, MODE_LITTLE_ENDIAN|MODE_BIG_ENDIAN)) );
2344     DbgBreakIf(!AT_MOST_SINGLE_SET(GET_FLAGS(flags,MODE_E3_A0|MODE_E3_B0)));
2345     DbgBreakIf(!AT_MOST_SINGLE_SET(GET_FLAGS(flags,MODE_MF_SD|MODE_MF_SI|MODE_MF_AFEX)));
2346     DbgBreakIf(GET_FLAGS(flags, MODE_E3)&& !(GET_FLAGS(flags,MODE_E3_A0|MODE_E3_B0) ));
2347     DbgBreakIf(GET_FLAGS(flags, MODE_MF)&& !(GET_FLAGS(flags,MODE_MF_SD|MODE_MF_SI|MODE_MF_AFEX) ));
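    /* Example: a flags value with both MODE_PORT2 and MODE_PORT4 set has two bits
     * in the group, so SINGLE_BIT_SET() is FALSE and the matching DbgBreakIf
     * fires; exactly one mode per mutually-exclusive group must be chosen. */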
2348 
2349     return flags;
2350 }
2351 
2352 /**lm_ncsi_get_shmem_address
2353  * @brief get ncsi shmem address
2354  * @param lm_device
2355  *
2356  * @return the ncsi_oem_shmem address, or 0 if it doesn't exist
2357  */
2358 static u32_t
2359 lm_ncsi_get_shmem_address( struct _lm_device_t *pdev)
2360 {
2361     u32_t shmem2_size        = 0;
2362     u32_t ncsi_oem_data_addr = 0;
2363     u32_t offset             = 0;
2364 
2365     offset = OFFSETOF(shmem2_region_t, size);
2366     LM_SHMEM2_READ( pdev, offset, &shmem2_size );
2367 
2368     offset = OFFSETOF(shmem2_region_t, ncsi_oem_data_addr);
2369 
2370     if ( shmem2_size > offset )
2371     {
2372         LM_SHMEM2_READ(pdev, offset, &ncsi_oem_data_addr);
2373     }
2374 
2375     return ncsi_oem_data_addr;
2376 }
2377 
2378 /**
2379  *  @brief: Writes product version to shmem (for NCSI)
2380  *
2381  *  No endian conversion is needed if the data type is u32.  Although the MCP is big endian, its basic storage unit is u32.
2382  *  Unless you access an individual byte, writing a 32-bit word to shmem from the host DOES NOT need any endian conversion.
2383  *  In other words, if the host driver writes 0x12345678 to a 4-byte location in shmem, the MCP will read it correctly.  eVBD doesn't need to do mm_cpu_to_be32.
2384  *
2385  * @param[in] lm_device
2386  *
2387  * @return LM_STATUS_SUCCESS if written, other if not.
2388  */
2389 static lm_status_t
2390 lm_ncsi_drv_ver_to_scratchpad( struct _lm_device_t *pdev, u32_t ver_32 )
2391 {
2392     const u32_t           ncsi_oem_data_addr = lm_ncsi_get_shmem_address(pdev);
2393     static const u32_t    offset             = OFFSETOF(struct glob_ncsi_oem_data ,driver_version);
2394 
2395     if ( 0 == ncsi_oem_data_addr )
2396     {
2397         return LM_STATUS_FAILURE;
2398     }
2399 
2400     REG_WR(pdev, ncsi_oem_data_addr + offset, ver_32);
2401 
2402     return LM_STATUS_SUCCESS;
2403 }
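
/* Illustrative sketch of the endianness note above (compiled out): a whole
 * dword written with REG_WR needs no byte swap; only a hypothetical
 * byte-granular access would need an explicit conversion. */
#if 0
    u32_t ver_32 = 0x12345678;

    /* full-dword write: MCP reads back exactly 0x12345678 */
    REG_WR(pdev, ncsi_oem_data_addr + offset, ver_32);

    /* composing the word byte-by-byte would first require mm_cpu_to_be32(ver_32) */
#endif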
2404 
2405 u8_t
2406 lm_ncsi_prev_drv_ver_is_win8_inbox( struct _lm_device_t *pdev)
2407 {
2408     const u32_t           ncsi_oem_data_addr = lm_ncsi_get_shmem_address(pdev);
2409     static const u32_t    offset             = OFFSETOF(struct glob_ncsi_oem_data ,driver_version);
2410     static const u32_t    offset_unused      = OFFSETOF(struct glob_ncsi_oem_data ,unused);
2411     u8_t                  ver_str[16]        = {0};
2412     u32_t                 ver_num[4]         = {0};
2413     u32_t                 ver_num_prev       = 0;
2414     u32_t                 i                  = 0;
2415     u32_t                 str_idx            = 0;
2416     u8_t                  num_dwords         = 0;
2417     u32_t                 val                = 0;
2419     u8_t                * p                  = NULL;
2420     u8_t                * ver_str_end        = NULL;
2421 
2422 
2423     /* inbox will only load with bootcode 7.4 and above, in which this field exists
2424      * for sure. So if it's zero, we're not an inbox driver.
2425      */
2426     if ( 0 == ncsi_oem_data_addr )
2427     {
2428         return FALSE;
2429     }
2430 
2431     /* First figure out if we're reading a string or a number, T7.0 and inbox used
2432      * strings, whereas T7.2 and above use just the product ver as a u32_t. We do
2433      * this by reading the unused fields
2434      */
2435     val = REG_RD(pdev, ncsi_oem_data_addr + offset_unused);
2436     if (0 == val)
2437     {
2438         /* Previous version is not inbox... we're ok... */
2439         return FALSE;
2440     }
2441 
2442     /* Now read the version string -> as if we are inbox. This will read the values
2443      * from the unused fields as well. */
2444     num_dwords = ARRSIZE(ver_str)/sizeof(u32_t);
2445     for (i = 0; i < num_dwords; i++)
2446     {
2447         str_idx = i*sizeof(u32_t);
2448         val = REG_RD(pdev, ncsi_oem_data_addr + offset + str_idx);
2449         val = mm_be32_to_cpu(val);
2450         *((u32 *)&ver_str[str_idx]) = val;
2451     }
2452 
2453     /* Now we just need to figure out whether the engineering number is != 0
2454      * and the version is 7.0.35.xx (the inbox version); if so, we're inbox...
2455      * the string looks like this:  vXX.XX.XX.XX, where the X's are digits.
2456      */
2457     p = ver_str;
2458     if (*p != 'v')
2459     {
2460         /* Not inbox... */
2461         return FALSE;
2462     }
2463     p++; // we took away the v, now it looks like this: XX.XX.XX.XX
2464 
2465     ver_str_end = ver_str + ARRSIZE(ver_str) - 1;
2466 
2467     for (i = 0; i < 4; i++)
2468     {
2469         while ((*p != '.') &&                           /* Between separators    */
2470                (IS_DIGIT(*p)) &&                        /* Is a digit            */
2471                (p < ver_str_end))                       /* Doesn't overrun array */
2472         {
2473             ver_num[i] = ver_num[i]*10 + (*p - '0');    /* standard decimal accumulation */
2474             p++;
2475         }
2476         p++;                                            /* skip the '.' separator */
2477     }
2480 
2481     /* Save for debugging */
2482     ver_num_prev =
2483         (ver_num[0] << 24) |
2484         (ver_num[1] << 16) |
2485         (ver_num[2] << 8)  |
2486          ver_num[3] ;
2487 
2488     /* Check inbox: 7.0.35.xx make sure xx != 0*/
2489     if (((ver_num_prev & 0xffffff00) == 0x07002300) && (ver_num[3] != 0) )
2490     {
2491         return TRUE;
2492     }
2493 
2494     return FALSE;
2495 }
2496 
2497 /**
2498  * @brief Writes FCoE capabilities to shmem (for NCSI)
2499  *  No endian conversion is needed if the data type is u32.  Although the MCP is big endian, its basic storage unit is u32.
2500  *  Unless you access an individual byte, writing a 32-bit word to shmem from the host DOES NOT need any endian conversion.
2501  *  In other words, if the host driver writes 0x12345678 to a 4-byte location in shmem, the MCP will read it correctly.  eVBD doesn't need to do mm_cpu_to_be32.
2502  *
2503  * @param lm_device
2504  *
2505  * @return LM_STATUS_SUCCESS if written, FAILED if not
2506  */
2507 lm_status_t
2508 lm_ncsi_fcoe_cap_to_scratchpad( struct _lm_device_t *pdev)
2509 {
2510     const u32_t                   ncsi_oem_data_addr = lm_ncsi_get_shmem_address(pdev);
2511     const u8_t                    path_id            = PATH_ID(pdev);
2512     const u8_t                    port_id            = PORT_ID(pdev);
2513     u8_t                          i                  = 0;
2514     u32_t                         offset             = 0;
2515     const u32_t                   bc_rev             = LM_GET_BC_REV_MAJOR(pdev);
2516     const u32_t                   bc_rev_min         = REQ_BC_VER_4_FCOE_FEATURES;
2517     u32_t*                        buf32              = (u32_t*)(&pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_shmem.fcoe_capabilities);
2518     static const u8_t             idx_max            = sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_shmem.fcoe_capabilities)/sizeof(u32_t);
2519 
2520     ASSERT_STATIC( FIELD_SIZE( struct glob_ncsi_oem_data, fcoe_features[0][0] ) ==
2521                    sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_shmem.fcoe_capabilities) );
2522 
2523     if ( 0 == ncsi_oem_data_addr )
2524     {
2525         return LM_STATUS_FAILURE;
2526     }
2527 
2528     if ( bc_rev < bc_rev_min )
2529     {
2530         // not supported before this bootcode.
2531         return LM_STATUS_INVALID_PARAMETER;
2532     }
2533 
2534     // populate fcoe_features
2535     offset = OFFSETOF(struct glob_ncsi_oem_data ,fcoe_features[path_id][port_id]);
2536 
2537     // no endian conversion is needed if the data type is u32.  Although MCP is big endian, its basic storage unit is u32.
2538     // Unless you access an individual byte, writing a 32-bit word to shmem from the host DOES NOT need any endian conversion.
2539     // In other words, if the host driver writes 0x12345678 to a 4-byte location in shmem, MCP will read it correctly.  eVBD doesn't need to do mm_cpu_to_be32.
2540     for (i = 0; i < idx_max; i++)
2541     {
2542         REG_WR(pdev,
2543                ncsi_oem_data_addr + offset + i*sizeof(u32_t),
2544                buf32[i]);
2545     }
2546 
2547     return LM_STATUS_SUCCESS;
2548 }
2549 
2550 static void init_misc_common(lm_device_t *pdev)
2551 {
2552     u32_t reset_reg_1_val = 0xffffffff;
2553     u32_t reset_reg_2_val = 0xfffc;
2554 
2555     /* Take Chip Blocks out of Reset */
2556     if (CHIP_IS_E3(pdev))
2557     {
2558         // New blocks that need to be taken out of reset
2559         // Mstat0 - bit 24 of RESET_REG_2
2560         // Mstat1 - bit 25 of RESET_REG_2
2561         reset_reg_2_val |= (MISC_REGISTERS_RESET_REG_2_MSTAT1 | MISC_REGISTERS_RESET_REG_2_MSTAT0) ;
2562     }
2563 
2564     REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_RESET_REG_1_SET,reset_reg_1_val);
2565     // BMAC is not taken out of reset (its bits in reset_reg_2_val stay clear)
2566     REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_RESET_REG_2_SET,reset_reg_2_val);
2567 
2568     ECORE_INIT_COMN(pdev, MISC);
2569 
2570     if (!CHIP_IS_E1(pdev)) /* multi-function not supported in E1 */
2571     {
2572         // init multifunction_mode reg. For E3 - this is done in the port-phase, and can differ between ports...
2573         if (CHIP_IS_E2(pdev) || CHIP_IS_E1H(pdev))
2574         {
2575             REG_WR(pdev,MISC_REG_E1HMF_MODE , (pdev->params.multi_vnics_mode ? 1 : 0));
2576         }
2577         // TBD: E1H, consider disabling grc timeout enable
2578     }
2579 
2580     /* Chip is out of reset */
2581 
2582     /* Timers bug workaround. The chip has just been taken out of reset. We need to make sure that all the functions (except this one)
2583      * are marked as disabled in the PGLC + CFC to keep the timer bug from occurring */
2584     if (!CHIP_IS_E1x(pdev))
2585     {
2586         u8_t abs_func_id;
2587 
2588         /* In 4-port mode or 2-port mode we need to turn off master-enable for everyone, and after that turn it back on for self.
2589          * So we disregard whether this is multi-function or not, and always disable it for all functions on the given path; this means 0,2,4,6 for
2590          * path 0 and 1,3,5,7 for path 1 */
2591         for (abs_func_id = PATH_ID(pdev); abs_func_id  < E2_FUNC_MAX*2; abs_func_id+=2)
2592         {
2593             if (abs_func_id == ABS_FUNC_ID(pdev))
2594             {
2595                 REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
2596                 continue;
2597             }
2598             lm_pretend_func(pdev, abs_func_id);
2599 
2600             clear_pf_enable(pdev);
2601 
2602             lm_pretend_func(pdev, ABS_FUNC_ID(pdev));
2603         }
2604 
2605         /* Error recovery: we may have caused a BSOD during the last error recovery attempt, leaving some locks taken and attentions on;
2606          * the code below sort of "recovers" from a failed recovery.
2607          */
2608         if (pdev->params.enable_error_recovery && !CHIP_IS_E1x(pdev))
2609         {
2610             lm_hw_clear_all_locks(pdev);
2611             /* Clear the general attention used to notify the second engine: just in case it was left turned on...  */
2612             REG_WR(pdev, MISC_REG_AEU_GENERAL_ATTN_20 , 0);
2613         }
2614     }
2615 
2616 }
2617 
2618 static void init_aeu_port(lm_device_t *pdev)
2619 {
2620     u32_t offset = 0;
2621     u32_t val    = 0;
2622 
2623     if(ERR_IF(!pdev))
2624     {
2625         return;
2626     }
2627 
2628     ECORE_INIT_PORT(pdev, MISC_AEU);
2629 
2630     // init aeu_mask_attn_func_0/1:
2631     // - SF mode: bits 3-7 are masked. only bits 0-2 are in use
2632     // - MF mode: bit 3 is masked. bits 0-2 are in use as in SF.
2633     //            bits 4-7 are used for "per vnic group attention"
2634     val = (pdev->params.multi_vnics_mode ? 0xF7 : 0x7);
2635     if(!CHIP_IS_E1(pdev))
2636     {
2637         // For DCBX we need to enable group 4 even in SF.
2638         val |= 0x10;
2639     }
2640     REG_WR(pdev, (PORT_ID(pdev) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0), val);
2641 
2642     // If SPIO5 is set to generate interrupts, enable it for this port
2643     val = REG_RD(pdev, MISC_REG_SPIO_EVENT_EN);
2644     if (val & MISC_SPIO_SPIO5)
2645     {
2646         // fan failure handling
2647         offset = (PORT_ID(pdev) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0) ;
2648         val=REG_RD(pdev, offset );
2649         // add SPIO5 to group
2650         SET_FLAGS(val, AEU_INPUTS_ATTN_BITS_SPIO5 ) ;
2651         REG_WR(pdev, offset, val ) ;
2652     }
2653 
2654     if (pdev->params.enable_error_recovery && !CHIP_IS_E1x(pdev))
2655     {
2656         /* Under error recovery we use general attention 20 (bit 18) therefore
2657          * we need to enable it*/
2658         offset = (PORT_ID(pdev) ? MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0) ;
2659         val = REG_RD(pdev, offset);
2660         val |= AEU_INPUTS_ATTN_BITS_GRC_MAPPED_GENERAL_ATTN20;
2661         REG_WR(pdev, offset, val);
2662     }
2663 }
2664 
2665 static void init_pxp_common(lm_device_t *pdev)
2666 {
2667     if(ERR_IF(!pdev))
2668     {
2669         return;
2670     }
2671 
2672     ECORE_INIT_COMN(pdev, PXP);
2673     if( CHIP_NUM(pdev) <= CHIP_NUM_5710 )
2674     {
2675         // enable hw interrupt from PXP on usdm overflow bit 16 on INT_MASK_0
2676         REG_WR(pdev,PXP_REG_PXP_INT_MASK_0,0);
2677     }
2678 }
2679 
2680 
2681 static void init_pxp2_common(lm_device_t *pdev)
2682 {
2683     u32_t wait_ms = (CHIP_REV_IS_ASIC(pdev)) ? 200 : 200000;
2684     u32_t       i = 0;
2685 
2686     if(ERR_IF(!pdev))
2687     {
2688         return;
2689     }
2690 
2691     // static init
2692     ECORE_INIT_COMN(pdev, PXP2);
2693 
2694     // runtime init
2695 #ifdef __BIG_ENDIAN
2696     REG_WR(pdev,  PXP2_REG_RQ_QM_ENDIAN_M, 1);
2697     REG_WR(pdev,  PXP2_REG_RQ_TM_ENDIAN_M, 1);
2698     REG_WR(pdev,  PXP2_REG_RQ_SRC_ENDIAN_M, 1);
2699     REG_WR(pdev,  PXP2_REG_RQ_CDU_ENDIAN_M, 1);
2700     REG_WR(pdev,  PXP2_REG_RQ_DBG_ENDIAN_M, 1);
2701 
2702     REG_WR(pdev,  PXP2_REG_RD_QM_SWAP_MODE, 1);
2703     REG_WR(pdev,  PXP2_REG_RD_TM_SWAP_MODE, 1);
2704     REG_WR(pdev,  PXP2_REG_RD_SRC_SWAP_MODE, 1);
2705     REG_WR(pdev,  PXP2_REG_RD_CDURD_SWAP_MODE, 1);
2706 #endif
2707     ecore_init_pxp_arb(pdev, pdev->hw_info.max_read_req_size, pdev->hw_info.max_payload_size);
2708 
2709     REG_WR(pdev,PXP2_REG_RQ_CDU_P_SIZE,LOG2(pdev->params.ilt_client_page_size/LM_PAGE_SIZE));
2710     REG_WR(pdev,PXP2_REG_RQ_TM_P_SIZE,LOG2(pdev->params.ilt_client_page_size/LM_PAGE_SIZE));
2711     REG_WR(pdev,PXP2_REG_RQ_QM_P_SIZE,LOG2(pdev->params.ilt_client_page_size/LM_PAGE_SIZE));
2712     REG_WR(pdev,PXP2_REG_RQ_SRC_P_SIZE,LOG2(pdev->params.ilt_client_page_size/LM_PAGE_SIZE));
2713 
2714     // on the E1.5 FPGA, set the max PCIe tag number to 5
2715     if (CHIP_REV_IS_FPGA(pdev) && CHIP_IS_E1H(pdev))
2716     {
2717         REG_WR(pdev,PXP2_REG_PGL_TAGS_LIMIT,0x1);
2718     }
2719 
2720     // verify PXP init finished (we want to use the DMAE)
2721     REG_WAIT_VERIFY_VAL(pdev,PXP2_REG_RQ_CFG_DONE, 1, wait_ms);
2722     REG_WAIT_VERIFY_VAL(pdev,PXP2_REG_RD_INIT_DONE,1, wait_ms);
2723 
2724     REG_WR(pdev,PXP2_REG_RQ_DISABLE_INPUTS,0);
2725     REG_WR(pdev,PXP2_REG_RD_DISABLE_INPUTS,0);
2726 
2727     /* Timers bug workaround E2 only. We need to set the entire ILT to have entries with value "0" and valid bit on.
2728      * This needs to be done by the first PF that is loaded in a path (i.e. common phase)
2729      */
2730     if (!CHIP_IS_E1x(pdev))
2731     {
2732         /* Step 1: set zeroes to all ilt page entries with valid bit on */
2733         for (i=0; i < ILT_NUM_PAGE_ENTRIES; i++)
2734         {
2735             REG_WR(pdev,PXP2_REG_RQ_ONCHIP_AT_B0+i*8,  0);
2736             REG_WR(pdev,PXP2_REG_RQ_ONCHIP_AT_B0+i*8+4,ONCHIP_ADDR0_VALID());
2737         }
2738         /* Step 2: set the timers first/last ilt entry to point to the entire range to prevent ILT range error */
2739         if (pdev->params.multi_vnics_mode)
2740         {
2741             lm_pretend_func(pdev, (PATH_ID(pdev) + 6));
2742             PXP2_SET_FIRST_LAST_ILT(pdev, TM,  0, ILT_NUM_PAGE_ENTRIES - 1);
2743             lm_pretend_func(pdev, ABS_FUNC_ID(pdev));
2744         }
2745 
2746         /* set E2 HW for 64B cache line alignment */
2747         /* TODO: align according to runtime cache line size */
2748         REG_WR(pdev,PXP2_REG_RQ_DRAM_ALIGN,1); /* for 128B cache line value should be 2 */
2749         REG_WR(pdev,PXP2_REG_RQ_DRAM_ALIGN_RD,1); /* for 128B cache line value should be 2 */
2750         REG_WR(pdev,PXP2_REG_RQ_DRAM_ALIGN_SEL,1);
2751     }
2752 }
2753 
2754 static void init_pglue_b_common(lm_device_t *pdev)
2755 {
2756     ECORE_INIT_COMN(pdev, PGLUE_B);
2757 }
2758 
2759 static void init_atc_common(lm_device_t *pdev)
2760 {
2761     u32_t wait_ms = (CHIP_REV_IS_ASIC(pdev)) ? 200 : 200000;
2762     if (!CHIP_IS_E1x(pdev))
2763     {
2764         ECORE_INIT_COMN(pdev, ATC);
2765 
2766         REG_WAIT_VERIFY_VAL(pdev, ATC_REG_ATC_INIT_DONE ,1,wait_ms );
2767     }
2768 }
2769 
2770 static void init_pxp2_func(lm_device_t *pdev)
2771 {
2772     #define PXP2_NUM_TABLES 4
2773     lm_address_t * addr_table[PXP2_NUM_TABLES];
2774     u32_t           num_pages[PXP2_NUM_TABLES];
2775     u32_t           first_ilt[PXP2_NUM_TABLES];
2776     u32_t           last_ilt[PXP2_NUM_TABLES];
2777     u32_t rq_onchip_at_reg;
2778     u32_t i,j,k,temp;
2779 
2780     ECORE_INIT_FUNC(pdev, PXP2);
2781 
2782     addr_table[0] = pdev->vars.context_cdu_phys_addr_table;
2783     addr_table[1] = pdev->vars.timers_linear_phys_addr_table;
2784     addr_table[2] = pdev->vars.qm_queues_phys_addr_table;
2785     addr_table[3] = pdev->vars.searcher_t1_phys_addr_table;
2786     num_pages[0] = pdev->vars.context_cdu_num_pages;
2787     num_pages[1] = pdev->vars.timers_linear_num_pages;
2788     num_pages[2] = pdev->vars.qm_queues_num_pages;
2789     num_pages[3] = pdev->vars.searcher_t1_num_pages;
2790 
2791     temp = FUNC_ID(pdev) * ILT_NUM_PAGE_ENTRIES_PER_FUNC;
2792     rq_onchip_at_reg = CHIP_IS_E1(pdev) ? PXP2_REG_RQ_ONCHIP_AT : PXP2_REG_RQ_ONCHIP_AT_B0;
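    /* Each function owns a contiguous window of ILT entries starting at
     * FUNC_ID * ILT_NUM_PAGE_ENTRIES_PER_FUNC; the loop below fills one 8-octet entry
     * per page for each of the four clients (CDU, TM, QM, SRC), and the DbgBreakIf
     * afterwards verifies we never cross into the next function's window. */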
2793 
2794     for (k=0;k<PXP2_NUM_TABLES;k++)
2795     {
2796         // j is the first table entry line for this block; temp advances over the entries as they are written (each entry is 8 octets long)
2797         j=temp;
2798         for (i=0; i<num_pages[k]; temp++, i++)
2799         {
2800             REG_WR_IND(pdev,rq_onchip_at_reg+temp*8,ONCHIP_ADDR1(addr_table[k][i].as_u64));
2801             REG_WR_IND(pdev,rq_onchip_at_reg+temp*8+4,ONCHIP_ADDR2(addr_table[k][i].as_u64));
2802         }
2803         first_ilt[k] = j;
2804         last_ilt[k] = (temp - 1);
2805     }
2806     DbgBreakIf(!(temp<((u32_t)ILT_NUM_PAGE_ENTRIES_PER_FUNC*(FUNC_ID(pdev)+1))));
2807 
2808     PXP2_SET_FIRST_LAST_ILT(pdev, CDU, first_ilt[0], last_ilt[0]);
2809     PXP2_SET_FIRST_LAST_ILT(pdev, TM,  first_ilt[1], last_ilt[1]);
2810     PXP2_SET_FIRST_LAST_ILT(pdev, QM,  first_ilt[2], last_ilt[2]);
2811     PXP2_SET_FIRST_LAST_ILT(pdev, SRC, first_ilt[3], last_ilt[3]);
2812 
2813     if (!CHIP_IS_E1x(pdev))
2814     {
2815         /* Timers bug workaround: function init part. We need to wait 20msec after initializing ILT,
2816          * to make sure there are no requests in one of the PXP internal queues with "old" ILT addresses */
2817         mm_wait(pdev, 20000);
2818     }
2819 
2820 }
2821 
2822 
2823 static void init_dmae_common(lm_device_t *pdev)
2824 {
2825     if(ERR_IF(!pdev))
2826     {
2827         return;
2828     }
2829 
2830     ECORE_INIT_COMN( pdev, DMAE);
2831 
2832     // write arbitrary buffer to DMAE, hw memory setup phase
2833 
2834 
2835     REG_WR_DMAE_LEN_ZERO(pdev,  TSEM_REG_PRAM, 8);
2836     pdev->vars.b_is_dmae_ready = TRUE ;
2837 }
2838 
2839 static void init_qm_common(lm_device_t *pdev)
2840 {
2841     u8_t i    = 0;
2842     u8_t func = 0;
2843 
2844     if(ERR_IF(!pdev))
2845     {
2846         return;
2847     }
2848 
2849     ECORE_INIT_COMN( pdev, QM);
2850 
2851     /* nullify PTRTBL */
2852     for (i=0; i<64; i++)
2853     {
2854         REG_WR_IND(pdev,QM_REG_PTRTBL +8*i ,0);
2855         REG_WR_IND(pdev,QM_REG_PTRTBL +8*i +4 ,0);
2856     }
2857 
2858     /* nullify extended PTRTBL (E1H only) */
2859     if (CHIP_IS_E1H(pdev))
2860     {
2861         for (i=0; i<64; i++)
2862         {
2863             REG_WR_IND(pdev,QM_REG_PTRTBL_EXT_A +8*i ,0);
2864             REG_WR_IND(pdev,QM_REG_PTRTBL_EXT_A +8*i +4 ,0);
2865         }
2866     }
2867 
2868     /* soft reset pulse */
2869     REG_WR(pdev,QM_REG_SOFT_RESET,1);
2870     REG_WR(pdev,QM_REG_SOFT_RESET,0);
2871 
2872     /* We initialize the QM with max_common_conns; this way the value is identical for all queues and it saves
2873      * the driver the need to know the mapping of the physical queues to functions.
2874      * Since we write the same value to all queue entries, we can do this in the common phase and just initialize
2875      * all queues the same */
2876     /* physical queues mapping :
2877      *  E1 queues:
2878      *  - q[0-63].
2879      *  - initialized via QM_REG_BASEADDR and QM_REG_PTRTBL REG
2880      *  - port0 uses q[0-15], port1 uses q[32-47], q[16-31,48-63] are not used
2881      *
2882      *  E1.5 queues:
2883      *  - _ON TOP OF_ E1 queues !
2884      *  - q[64-127]
2885      **/
2886 
2887     /* Initialize QM Queues */
2888     #define QM_QUEUES_PER_FUNC 16
2889 
2890     /* To eliminate the need for the driver to know the exact function --> queue mapping, we simply initialize all queues; even for E1
2891      * we initialize all 64 queues (as if we had 4 functions). For E1H we initialize the extension as well. */
2892     for (func = 0; func < 4; func++)
2893     {
2894         for (i = 0; i < QM_QUEUES_PER_FUNC; i++)
2895         {
2896             REG_WR(pdev,QM_REG_BASEADDR +4*(func*QM_QUEUES_PER_FUNC+i) , pdev->hw_info.max_common_conns * 4*i);
2897         }
2898     }
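    /* Illustrative example (value assumed, not from the original source): with
     * max_common_conns == 1024, queue i of every function gets base 1024 * 4*i,
     * i.e. queue 0 -> 0, queue 1 -> 4096, ... - identical across functions, which is
     * what allows this to be done once in the common phase. */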
2899 
2900     if (CHIP_IS_E1H(pdev))
2901     {
2902         for (func = 0; func < 4; func++)
2903         {
2904             for (i=0; i<QM_QUEUES_PER_FUNC; i++)
2905             {
2906                 REG_WR(pdev,QM_REG_BASEADDR_EXT_A +4*(func*QM_QUEUES_PER_FUNC+i) , pdev->hw_info.max_common_conns * 4*i);
2907             }
2908         }
2909     }
2910 }
2911 
2912 static void init_qm_func(lm_device_t *pdev)
2913 {
2914     ECORE_INIT_FUNC( pdev, QM);
2915 
2916     if (!CHIP_IS_E1x(pdev))
2917     {
2918         /* Array of PF Enable bits; each PF needs to set its own.
2919          * It is set to 'zero' by the MCP on PF FLR */
2920         REG_WR(pdev, QM_REG_PF_EN, 1);
2921     }
2922 }
2923 
2924 static void init_qm_port(lm_device_t *pdev)
2925 {
2926     if(ERR_IF(!pdev))
2927     {
2928         return;
2929     }
2930 
2931     ECORE_INIT_PORT(pdev, QM);
2932 
2933     /* The same for all functions on the port; we use max_common_conns so the value is identical for all queues */
2934     REG_WR(pdev, (PORT_ID(pdev) ? QM_REG_CONNNUM_1 : QM_REG_CONNNUM_0), pdev->hw_info.max_common_conns/16 -1);
2935 }
2936 
2937 static void init_tm_port(lm_device_t *pdev)
2938 {
2939     if(ERR_IF(!pdev))
2940     {
2941         return;
2942     }
2943 
2944     ECORE_INIT_PORT(pdev, TM);
2945 
2946     /* when more than 64K connections per _port_ are supported, we need to change the init value for LIN0/1_SCAN_TIME */
2947     REG_WR(pdev,(PORT_ID(pdev) ? TM_REG_LIN1_SCAN_TIME : TM_REG_LIN0_SCAN_TIME), 20);
2948     /* The same for all functions on port, therefore we need to use the max_port_connections */
2949     REG_WR(pdev,(PORT_ID(pdev) ? TM_REG_LIN1_MAX_ACTIVE_CID : TM_REG_LIN0_MAX_ACTIVE_CID), (pdev->hw_info.max_port_conns/32)-1);
2950 
2951 }
2952 
2953 static void init_dq_common(lm_device_t *pdev)
2954 {
2955     if(ERR_IF(!pdev))
2956     {
2957         return;
2958     }
2959 
2960     ECORE_INIT_COMN(pdev, DORQ);
2961 
2962 
2963     // TBD: consider setting to the OS page size
2964     REG_WR(pdev,DORQ_REG_DPM_CID_OFST,LM_DQ_CID_BITS);
2965     if (CHIP_REV_IS_ASIC(pdev))
2966     {
2967         // enable hw interrupt from doorbell Q
2968         REG_WR(pdev,DORQ_REG_DORQ_INT_MASK,0);
2969     }
2970 }
2971 
2972 void init_dq_func(lm_device_t *pdev)
2973 {
2974     ECORE_INIT_FUNC(pdev, DORQ);
2975 #ifdef VF_INVOLVED
2976     if (!CHIP_IS_E1x(pdev) && (IS_BASIC_VIRT_MODE_MASTER_PFDEV(pdev) || IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)))
2977     {
2978         REG_WR(pdev, DORQ_REG_MAX_RVFID_SIZE, 6);       // As long as we want to use absolute VF-id number
2979         REG_WR(pdev, DORQ_REG_VF_NORM_VF_BASE, 0);      //(a VF-id that is unique within the port), like the id
2980                                                         //that is used by all HW blocks and FW
2981 
2982         REG_WR(pdev, DORQ_REG_VF_NORM_CID_BASE, LM_VF_CID_BASE(pdev));  /*64 for single connection.
2983                                                                     PF connections in the beginning (L2 connections),
2984                                                                     then VF connections, and then the rest of PF connections */
2985 
2986         REG_WR(pdev, DORQ_REG_VF_NORM_CID_WND_SIZE, LM_VF_CID_WND_SIZE(pdev)); /* should reflect the maximal number of connections in a VF.
2987                                                                            0 for single connection  */
2988 #if 0
2989         ASSERT_STATIC(LM_DQ_CID_BITS >=  3);
2990         REG_WR(pdev, DORQ_REG_VF_NORM_CID_OFST, LM_DQ_CID_BITS - 3);    /*means the number of bits in a VF doorbell.
2991                                                                          For 8B doorbells it should be 0, 128B should be 4 */
2992 #endif
2993         REG_WR(pdev, DORQ_REG_VF_NORM_CID_OFST, LM_VF_DQ_CID_BITS);
2994         /*In addition, in order to configure the way that the DQ builds the CID,
2995           the driver should also configure the DQ security checks for the VFs,
2996           thresholds for VF-doorbells, VF CID range. In the first step it's possible
2997           to configure all these checks in a way that disables validation checks:
2998             DQ security checks for VFs - configure single rule (out of 16) with mask = 0x1 and value = 0x0.
2999             CID range - 0 to 0x1ffff
3000             VF doorbell thresholds - according to the DQ size. */
3001 
3002         REG_WR(pdev, DORQ_REG_VF_TYPE_MASK_0, 0x71);
3003         REG_WR(pdev, DORQ_REG_VF_TYPE_VALUE_0, 0);
3004         REG_WR(pdev, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
3005         REG_WR(pdev, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
3006 
3007 
3008         REG_WR(pdev, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
3009         REG_WR(pdev, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
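        /* Note (added observation): the excerpt above describes a minimal rule with
         * mask = 0x1 and value = 0x0; the writes above use mask 0x71 with value 0
         * instead, while the CID range (0 to 0x1ffff) follows the excerpt. */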
3010     }
3011 #endif
3012 }
3013 
3014 static void init_brb1_common(lm_device_t *pdev)
3015 {
3016     ECORE_INIT_COMN(pdev, BRB1);
3017 }
3018 
3019 static void init_pbf_common(lm_device_t *pdev)
3020 {
3021     ECORE_INIT_COMN(pdev, PBF);
3022 
3023     if (!CHIP_IS_E1x(pdev))
3024     {
3025         if (IS_MF_AFEX_MODE(pdev))
3026         {
3027             REG_WR(pdev, PBF_REG_HDRS_AFTER_BASIC, 0xE);
3028             REG_WR(pdev, PBF_REG_MUST_HAVE_HDRS, 0xA);
3029             REG_WR(pdev, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
3030             REG_WR(pdev, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
3031             REG_WR(pdev, PBF_REG_TAG_LEN_0, 0x4);
3032         }
3033         else
3034         {
3035             /* Ovlan exists only if we are in path multi-function + switch-dependent mode; in switch-independent mode there are no ovlan headers */
3036             REG_WR(pdev, PBF_REG_HDRS_AFTER_BASIC, (pdev->params.path_has_ovlan ? 7 : 6)); //Bit-map indicating which L2 hdrs may appear after the basic Ethernet header.
3037         }
3038     }
3039 }
3040 
3041 static void init_pbf_func(lm_device_t *pdev)
3042 {
3043     ECORE_INIT_FUNC(pdev, PBF);
3044     if (!CHIP_IS_E1x(pdev))
3045     {
3046         REG_WR(pdev,PBF_REG_DISABLE_PF,0);
3047     }
3048 }
3049 
3050 static void init_brb_port(lm_device_t *pdev)
3051 {
3052     u32_t low  = 0;
3053     u32_t high = 0;
3054     u8_t  port = 0;
3055 
3056     port=PORT_ID(pdev);
3057 
3058     ECORE_INIT_PORT( pdev, BRB1);
3059 
3060     if (CHIP_IS_E1x(pdev))
3061     {
3062         // on E1H we do support enabling pause
3063         if (CHIP_REV_IS_EMUL(pdev) || (CHIP_REV_IS_FPGA(pdev) && CHIP_IS_E1(pdev)))
3064         {
3065             // special emulation and FPGA values for pause / no pause
3066             high = 513;
3067             low = 0;
3068         }
3069         else
3070         {
3071             if (IS_MULTI_VNIC(pdev))
3072             {
3073                 // A - 24KB + MTU(in K) *4
3074                 // A - 24*4 + 150; (9600*4)/256 - (mtu = jumbo = 9600)
3075                 low = 246;
3076             }
3077             else
3078             {
3079                 if (pdev->params.mtu_max <= 4096)
3080                 {
3081                     // A - 40KB low = 40*4
3082                     low = 160;
3083                 }
3084                 else
3085                 {
3086                     // A - 24KB + MTU(in K) *4
3087                     low = 96 + (pdev->params.mtu_max*4)/256;
3088                 }
3089             }
3090             // B - 14KB High = low+14*4
3091             high = low + 56;
3092         }
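        /* Worked example (derived from the constants above; thresholds apparently in
         * 256-byte BRB blocks): mtu_max = 9600 gives low = 96 + (9600*4)/256 =
         * 96 + 150 = 246, high = 246 + 56 = 302; the 40KB case is 40*1024/256 = 160. */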
3093 
3094         REG_WR(pdev,BRB1_REG_PAUSE_LOW_THRESHOLD_0+port*4,low);
3095         REG_WR(pdev,BRB1_REG_PAUSE_HIGH_THRESHOLD_0+port*4,high);
3096     }
3097 
3098     if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)
3099     {
3100         REG_WR(pdev, (PORT_ID(pdev)?  BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0), 40);
3101     }
3102 
3103 }
3104 
3105 
3106 static void init_prs_common(lm_device_t *pdev)
3107 {
3108     if(ERR_IF(!pdev))
3109     {
3110         return;
3111     }
3112 
3113     ECORE_INIT_COMN( pdev, PRS);
3114 
3115     if (!CHIP_IS_E1(pdev))
3116     {
3117         REG_WR(pdev,PRS_REG_E1HOV_MODE, (pdev->params.path_has_ovlan ? 1 : 0));
3118     }
3119 
3120     if (!CHIP_IS_E1x(pdev))
3121     {
3122         if (IS_MF_AFEX_MODE(pdev))
3123         {
3124             if (!CHIP_IS_E3B0(pdev)) //on E3 B0 this initialization happens in port phase.
3125             {
3126                 REG_WR(pdev, PRS_REG_HDRS_AFTER_BASIC, 0xE);
3127                 REG_WR(pdev, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
3128                 REG_WR(pdev, PRS_REG_MUST_HAVE_HDRS, 0xA);
3129             }
3130 
3131             REG_WR(pdev, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
3132             REG_WR(pdev, PRS_REG_TAG_LEN_0, 0x4);
3133         }
3134         else
3135         {
3136             if (!CHIP_IS_E3B0(pdev)) //on E3 B0 this initialization happens in port phase.
3137             {
3138                 /* Ovlan exists only if we are in multi-function + switch-dependent mode; in switch-independent mode there are no ovlan headers */
3139                 REG_WR(pdev, PRS_REG_HDRS_AFTER_BASIC, (pdev->params.path_has_ovlan ? 7 : 6)); //Bit-map indicating which L2 hdrs may appear after the basic Ethernet header.
3140             }
3141         }
3142     }
3143 
3144 }
3145 
3146 static void init_prs_port(lm_device_t *pdev)
3147 {
3148     ECORE_INIT_PORT(pdev, PRS);
3149 
3150     if (IS_MF_AFEX_MODE(pdev))
3151     {
3152         if (CHIP_IS_E3B0(pdev)) //on E3 B0 this initialization happens in port phase.
3153         {
3154             REG_WR(pdev, (0 == PORT_ID(pdev))? PRS_REG_HDRS_AFTER_BASIC_PORT_0 :PRS_REG_HDRS_AFTER_BASIC_PORT_1 , 0xE);
3155             REG_WR(pdev, (0 == PORT_ID(pdev))? PRS_REG_HDRS_AFTER_TAG_0_PORT_0 :PRS_REG_HDRS_AFTER_TAG_0_PORT_1 , 0x6);
3156             REG_WR(pdev, (0 == PORT_ID(pdev))? PRS_REG_MUST_HAVE_HDRS_PORT_0   :PRS_REG_MUST_HAVE_HDRS_PORT_1   , 0xA);
3157         }
3158     }
3159     else
3160     {
3161         if (CHIP_IS_E3B0(pdev)) //on E3 B0 this initialization happens in port phase.
3162         {
3163             /* Ovlan exists only if we are in multi-function + switch-dependent mode; in switch-independent mode there are no ovlan headers */
3164             REG_WR(pdev, (0 == PORT_ID(pdev))? PRS_REG_HDRS_AFTER_BASIC_PORT_0:PRS_REG_HDRS_AFTER_BASIC_PORT_1, (IS_MF_SD_MODE(pdev) ? 7 : 6)); //Bit-map indicating which L2 hdrs may appear after the basic Ethernet header.
3165         }
3166     }
3167 }
3168 
3169 static void init_prs_func(lm_device_t *pdev)
3170 {
3171     if(ERR_IF(!pdev))
3172     {
3173         return;
3174     }
3175 
3176     ECORE_INIT_FUNC( pdev, PRS);
3177 }
3178 
3179 
3180 static void init_semi_common(lm_device_t *pdev)
3181 {
3182 
3183     if (!CHIP_IS_E1x(pdev))
3184     {
3185         /* reset VFC memories - relevant only for E2; has to be done before initializing the semi blocks, which also
3186          * initialize VFC blocks.  */
3187         REG_WR(pdev, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
3188                VFC_MEMORIES_RST_REG_CAM_RST |
3189                VFC_MEMORIES_RST_REG_RAM_RST);
3190         REG_WR(pdev, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
3191                VFC_MEMORIES_RST_REG_CAM_RST |
3192                VFC_MEMORIES_RST_REG_RAM_RST);
3193     }
3194 
3195 
3196     ECORE_INIT_COMN(pdev, TSEM);
3197     ECORE_INIT_COMN(pdev, CSEM);
3198     ECORE_INIT_COMN(pdev, USEM);
3199     ECORE_INIT_COMN(pdev, XSEM);
3200 }
3201 
3202 static void init_semi_port(lm_device_t *pdev)
3203 {
3204     ECORE_INIT_PORT(pdev, TSEM);
3205     ECORE_INIT_PORT(pdev, USEM);
3206     ECORE_INIT_PORT(pdev, CSEM);
3207     ECORE_INIT_PORT(pdev, XSEM);
3208 
3209     /*
3210       Passive buffer REG setup - Dual port memory in the semi passive buffer in E1 must be read once before use.
3211       NOTE: This code is needed only for E1, though we leave it as is since it does no harm and doesn't affect performance.
3212     */
3213     {
3214         u32_t kuku = 0;
3215         kuku = REG_RD(pdev,  XSEM_REG_PASSIVE_BUFFER);
3216         kuku = REG_RD(pdev,  XSEM_REG_PASSIVE_BUFFER + 4);
3217         kuku = REG_RD(pdev,  XSEM_REG_PASSIVE_BUFFER + 8);
3218 
3219         kuku = REG_RD(pdev,  CSEM_REG_PASSIVE_BUFFER );
3220         kuku = REG_RD(pdev,  CSEM_REG_PASSIVE_BUFFER + 4);
3221         kuku = REG_RD(pdev,  CSEM_REG_PASSIVE_BUFFER + 8);
3222 
3223         kuku = REG_RD(pdev,  TSEM_REG_PASSIVE_BUFFER );
3224         kuku = REG_RD(pdev,  TSEM_REG_PASSIVE_BUFFER + 4);
3225         kuku = REG_RD(pdev,  TSEM_REG_PASSIVE_BUFFER + 8);
3226 
3227         kuku = REG_RD(pdev,  USEM_REG_PASSIVE_BUFFER );
3228         kuku = REG_RD(pdev,  USEM_REG_PASSIVE_BUFFER + 4);
3229         kuku = REG_RD(pdev,  USEM_REG_PASSIVE_BUFFER + 8);
3230     }
3231 }
3232 
3233 static void init_semi_func(lm_device_t *pdev)
3234 {
3235     ECORE_INIT_FUNC(pdev, TSEM);
3236     ECORE_INIT_FUNC(pdev, USEM);
3237     ECORE_INIT_FUNC(pdev, CSEM);
3238     ECORE_INIT_FUNC(pdev, XSEM);
3239 
3240     if (!CHIP_IS_E1x(pdev))
3241     {
3242         REG_WR(pdev,TSEM_REG_VFPF_ERR_NUM, (FUNC_ID(pdev) + E2_MAX_NUM_OF_VFS));
3243         REG_WR(pdev,USEM_REG_VFPF_ERR_NUM, (FUNC_ID(pdev) + E2_MAX_NUM_OF_VFS));
3244         REG_WR(pdev,CSEM_REG_VFPF_ERR_NUM, (FUNC_ID(pdev) + E2_MAX_NUM_OF_VFS));
3245         REG_WR(pdev,XSEM_REG_VFPF_ERR_NUM, (FUNC_ID(pdev) + E2_MAX_NUM_OF_VFS));
3246     }
3247 
3248 }
3249 
3250 
3251 static void init_pbf_port(lm_device_t *pdev)
3252 {
3253     if(ERR_IF(!pdev))
3254     {
3255         return;
3256     }
3257 
3258     ECORE_INIT_PORT(pdev, PBF);
3259 
3260     // configure PBF to work without PAUSE, mtu 9600 - bug in E1/E1H
3261     if (CHIP_IS_E1x(pdev))
3262     {
3263         REG_WR(pdev,(PORT_ID(pdev) ? PBF_REG_P1_PAUSE_ENABLE : PBF_REG_P0_PAUSE_ENABLE),0);
3264         //  update threshold
3265         REG_WR(pdev,(PORT_ID(pdev) ? PBF_REG_P1_ARB_THRSH : PBF_REG_P0_ARB_THRSH),(MAXIMUM_PACKET_SIZE/16));
3266         //  update init credit
3267         REG_WR(pdev,(PORT_ID(pdev) ? PBF_REG_P1_INIT_CRD : PBF_REG_P0_INIT_CRD),(MAXIMUM_PACKET_SIZE/16) + 553 -22);
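        //  Note (added observation): the ARB threshold and init credit above appear to
        //  be in 16-byte units (hence MAXIMUM_PACKET_SIZE/16); the 553 - 22 term is a
        //  fixed tuning constant kept as-is.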
3268         // probe changes
3269         REG_WR(pdev,(PORT_ID(pdev) ? PBF_REG_INIT_P1 : PBF_REG_INIT_P0),1);
3270         mm_wait(pdev,5);
3271         REG_WR(pdev,(PORT_ID(pdev) ? PBF_REG_INIT_P1 : PBF_REG_INIT_P0),0);
3272     }
3273 
3274 }
3275 
3276 static void init_src_common(lm_device_t *pdev)
3277 {
3278     if(ERR_IF(!pdev))
3279     {
3280         return;
3281     }
3282 
3283     REG_WR(pdev,SRC_REG_SOFT_RST,1);
3284 
3285     ECORE_INIT_COMN(pdev, SRC);
3286 
3287     REG_WR(pdev,SRC_REG_KEYSEARCH_0,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[0]));
3288     REG_WR(pdev,SRC_REG_KEYSEARCH_1,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[4]));
3289     REG_WR(pdev,SRC_REG_KEYSEARCH_2,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[8]));
3290     REG_WR(pdev,SRC_REG_KEYSEARCH_3,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[12]));
3291     REG_WR(pdev,SRC_REG_KEYSEARCH_4,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[16]));
3292     REG_WR(pdev,SRC_REG_KEYSEARCH_5,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[20]));
3293     REG_WR(pdev,SRC_REG_KEYSEARCH_6,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[24]));
3294     REG_WR(pdev,SRC_REG_KEYSEARCH_7,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[28]));
3295     REG_WR(pdev,SRC_REG_KEYSEARCH_8,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[32]));
3296     REG_WR(pdev,SRC_REG_KEYSEARCH_9,*(u32_t *)(&pdev->context_info->searcher_hash.searcher_key[36]));
3297 
3298     REG_WR(pdev,SRC_REG_SOFT_RST,0);
3299 }
3300 
3301 static void init_src_func(lm_device_t *pdev)
3302 {
3303     lm_address_t src_addr;
3304 
3305     ECORE_INIT_FUNC(pdev, SRC);
3306     // tell the searcher where the T2 table is
3307     REG_WR(pdev,  (PORT_ID(pdev) ? SRC_REG_COUNTFREE1 : SRC_REG_COUNTFREE0) ,pdev->vars.searcher_t2_num_pages * pdev->params.ilt_client_page_size/64);
3308     REG_WR_IND(pdev,  (PORT_ID(pdev) ? SRC_REG_FIRSTFREE1 : SRC_REG_FIRSTFREE0),pdev->vars.searcher_t2_phys_addr_table[0].as_u32.low);
3309     REG_WR_IND(pdev,  (PORT_ID(pdev) ? SRC_REG_FIRSTFREE1 : SRC_REG_FIRSTFREE0)+4,pdev->vars.searcher_t2_phys_addr_table[0].as_u32.high);
3310     src_addr.as_u64 = pdev->vars.searcher_t2_phys_addr_table[pdev->vars.searcher_t2_num_pages-1].as_u64
3311         + pdev->params.ilt_client_page_size - 64 ;
3312     REG_WR_IND(pdev,  (PORT_ID(pdev) ? SRC_REG_LASTFREE1 : SRC_REG_LASTFREE0),src_addr.as_u32.low);
3313     REG_WR_IND(pdev,  (PORT_ID(pdev) ? SRC_REG_LASTFREE1 : SRC_REG_LASTFREE0)+4,src_addr.as_u32.high);
3314     REG_WR(pdev,  (PORT_ID(pdev) ? SRC_REG_NUMBER_HASH_BITS1 : SRC_REG_NUMBER_HASH_BITS0),pdev->context_info->searcher_hash.num_hash_bits);
3315 }
3316 
3317 static void init_cdu_common(lm_device_t *pdev)
3318 {
3319     u32_t val = 0;
3320 
3321     if(ERR_IF(!pdev))
3322     {
3323         return;
3324     }
3325     // static initialization only for Common part
3326     ECORE_INIT_COMN(pdev, CDU);
3327 
3328     val = (pdev->params.num_context_in_page<<24) +
3329         (pdev->params.context_waste_size<<12)  +
3330         pdev->params.context_line_size;
3331     REG_WR(pdev,CDU_REG_CDU_GLOBAL_PARAMS,val);
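    /* Field layout implied by the shifts above: bits 31:24 = num_context_in_page,
     * bits 23:12 = context_waste_size, bits 11:0 = context_line_size. */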
3332     /* configure cdu to work with cdu-validation. TODO: Move init to hw init tool */
3333     REG_WR(pdev,CDU_REG_CDU_CONTROL0,0X1UL);
3334     REG_WR(pdev,CDU_REG_CDU_CHK_MASK0,0X0003d000UL); /* enable region 2 */
3335     REG_WR(pdev,CDU_REG_CDU_CHK_MASK1,0X0000003dUL); /* enable region 4 */
3336 
3337 }
3338 
3339 
3340 static void init_cfc_common(lm_device_t *pdev)
3341 {
3342     u32_t cfc_init_reg = 0;
3343     if(ERR_IF(!pdev))
3344     {
3345         return;
3346     }
3347 
3348     ECORE_INIT_COMN(pdev, CFC);
3349     /* init cfc with user configurable number of connections in cfc */
3350 
3351     cfc_init_reg |= (1 << CFC_INIT_REG_REG_AC_INIT_SIZE);
3352     cfc_init_reg |= (pdev->params.cfc_last_lcid << CFC_INIT_REG_REG_LL_INIT_LAST_LCID_SIZE);
3353     cfc_init_reg |= (1 << CFC_INIT_REG_REG_LL_INIT_SIZE);
3354     cfc_init_reg |= (1 << CFC_INIT_REG_REG_CAM_INIT_SIZE);
3355     REG_WR(pdev,  CFC_REG_INIT_REG, cfc_init_reg);
3356 
3357     // enable context validation interrupt from CFC
3358     #ifdef VF_INVOLVED
3359     if (!CHIP_IS_E1x(pdev) && (IS_BASIC_VIRT_MODE_MASTER_PFDEV(pdev) || IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)))
3360     {
3361         /* with vfs - due to flr we don't want cfc to give attention on an error from pxp;
3362          * in a regular environment we do want this error. bit5:
3363          * "The CDU responded with an error"; for bit #0 (PCIe error) the DORQ client has separate control
3364          * for this exec error
3365          */
3366         REG_WR(pdev, CFC_REG_DISABLE_ON_ERROR, 0xffdf);
3367         REG_WR(pdev, CFC_REG_CFC_INT_MASK, 0x2);
3368         REG_WR(pdev, CFC_REG_DORQ_MASK_PCIERR, 0x1);
3369         REG_WR(pdev, CFC_REG_DORQ_MASK_VALERR, 0x1);
3370     }
3371     else
3372     {
3373         REG_WR(pdev,CFC_REG_CFC_INT_MASK ,0);
3374         REG_WR(pdev, CFC_REG_DORQ_MASK_PCIERR, 0);
3375         REG_WR(pdev, CFC_REG_DORQ_MASK_VALERR, 0);
3376     }
3377     #else
3378     REG_WR(pdev,CFC_REG_CFC_INT_MASK ,0);
3379     #endif
3380 
3381 
3382 
3383     // configure CFC/CDU. TODO: Move CFC init to hw init tool
3384     REG_WR(pdev,CFC_REG_DEBUG0 ,0x20020000);
3385     REG_WR(pdev,CFC_REG_INTERFACES ,0x280000);
3386     REG_WR(pdev,CFC_REG_INTERFACES ,0);
3387 
3388 }
3389 
3390 
3391 
3392 static void init_hc_port(lm_device_t *pdev)
3393 {
3394     if(ERR_IF(!pdev))
3395     {
3396         return;
3397     }
3398 
3399     if(CHIP_IS_E1(pdev))
3400     {
3401         REG_WR(pdev,  (PORT_ID(pdev) ? HC_REG_LEADING_EDGE_1 : HC_REG_LEADING_EDGE_0), 0);
3402         REG_WR(pdev,  (PORT_ID(pdev) ? HC_REG_TRAILING_EDGE_1 : HC_REG_TRAILING_EDGE_0), 0);
3403     }
3404 
3405     ECORE_INIT_PORT(pdev, HC);
3406 }
3407 
3408 static void init_hc_func(lm_device_t *pdev)
3409 {
3410     const u8_t func = FUNC_ID(pdev);
3411 
3412     if(ERR_IF(!pdev))
3413     {
3414         return;
3415     }
3416 
3417     if(CHIP_IS_E1H(pdev))
3418     {
3419         REG_WR(pdev, MISC_REG_AEU_GENERAL_ATTN_12 + 4*func,0x0);
3420         REG_WR(pdev,  (PORT_ID(pdev) ? HC_REG_LEADING_EDGE_1 : HC_REG_LEADING_EDGE_0), 0);
3421         REG_WR(pdev,  (PORT_ID(pdev) ? HC_REG_TRAILING_EDGE_1 : HC_REG_TRAILING_EDGE_0), 0);
3422     }
3423 
3424     ECORE_INIT_FUNC(pdev, HC);
3425 }
3426 
3427 static void init_igu_common( lm_device_t *pdev )
3428 {
3429 
3430     ECORE_INIT_COMN(pdev, IGU);
3431 
3432     /* Enable IGU debugging feature */
3433 #if 0 /* uncomment if you want to enable igu debug command for function 0, more changes required for different functions - will also need to define u32_t val=0*/
3434     REG_WR(pdev, IGU_REG_COMMAND_DEBUG, 1); // 1 - FIFO collects eight last incoming command
3435     /* Configure fid = PF (bit 6) and function 0 (PF#0)*/
3436     val = ((0x40 & IGU_ERROR_HANDLING_FILTER_REG_ERROR_HANDLING_FILTER_FID) |
3437         IGU_ERROR_HANDLING_FILTER_REG_ERROR_HANDLING_FILTER_EN);
3438 
3439     REG_WR(pdev, IGU_REG_ERROR_HANDLING_FILTER, val);
3440 
3441 #endif
3442 }
3443 
3444 static void init_igu_func(lm_device_t *pdev)
3445 {
3446     u32_t prod_idx,i,val;
3447     u8_t num_segs;
3448     u8_t base_prod;
3449     u8_t sb_id;
3450     u8_t dsb_idx;
3451     u8_t igu_func_id;
3452 
3453     if(ERR_IF(!pdev))
3454     {
3455         return;
3456     }
3457 
3458     if (INTR_BLK_TYPE(pdev) == INTR_BLK_IGU)
3459     {
3460         /* E2 TODO: make sure that misc is updated accordingly and that three lines below are not required */
3461         REG_WR(pdev, MISC_REG_AEU_GENERAL_ATTN_12 + 4*FUNC_ID(pdev),0x0);
3462         REG_WR(pdev,  IGU_REG_LEADING_EDGE_LATCH, 0);
3463         REG_WR(pdev,  IGU_REG_TRAILING_EDGE_LATCH, 0);
3464 
3465         ECORE_INIT_FUNC(pdev, IGU);
3466 
3467         /* Let's enable the function in the IGU - this is to enable consumer updates */
3468         val=REG_RD(pdev, IGU_REG_PF_CONFIGURATION);
3469         SET_FLAGS(val, IGU_PF_CONF_FUNC_EN);
3470         REG_WR(pdev,  IGU_REG_PF_CONFIGURATION, val);
3471 
3472         /* Producer memory:
3473          * E2 mode: address 0-135 match to the mapping memory;
3474          * 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default prod;  139 PF3 default prod;
3475          * 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod; 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod;
3476          * 144-147 reserved.
3477          * E1.5 mode - In backward compatible mode; for non default SB; each even line in the memory
3478          * holds the U producer and each odd line holds the C producer. The first 128 producers are for
3479          * NDSB (PF0 - 0-31; PF1 - 32-63 and so on).
3480          * The last 20 producers are for the DSB for each PF. each PF has five segments
3481          * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods; 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods;
3482          */
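        /* Illustrative reading of the layout above (assuming IGU_BC_BASE_DSB_PROD == 128
         * and MAX_VNIC_NUM == 4, per the segment description): in E1.5/BC mode the
         * default-SB producers of vnic n land on lines 128+n (U), 132+n (C), 136+n (X),
         * 140+n (T) and 144+n (ATTN) - exactly the base_prod + i*MAX_VNIC_NUM walk below. */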
3483         /* non-default-status-blocks*/
3484         num_segs = IGU_NORM_NDSB_NUM_SEGS;
3485         for (sb_id = 0; sb_id < LM_IGU_SB_CNT(pdev); sb_id++)
3486         {
3487             prod_idx = (IGU_BASE_NDSB(pdev) + sb_id)*num_segs; /* bc-assumption consecutive pfs, norm-no assumption */
3488             for (i = 0; i < num_segs;i++)
3489             {
3490                 REG_WR(pdev, IGU_REG_PROD_CONS_MEMORY + (prod_idx + i)*4, 0);
3491             }
3492             /* Give Consumer updates with value '0' */
3493             lm_int_ack_sb_enable(pdev, sb_id);
3494 
3495             /* Send cleanup command */
3496             lm_int_igu_sb_cleanup(pdev, IGU_BASE_NDSB(pdev) + sb_id);
3497         }
3498 
3499         /* default-status-blocks */
3500         if (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)
3501         {
3502             dsb_idx = FUNC_ID(pdev);
3503         }
3504         else
3505         {
3506             dsb_idx = VNIC_ID(pdev);
3507         }
3508         num_segs = (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC)? IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
3509         base_prod = (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_BC) ? (IGU_BC_BASE_DSB_PROD + dsb_idx) : (IGU_NORM_BASE_DSB_PROD + dsb_idx);
3510         for (i = 0; i < num_segs; i++)
3511         {
3512             REG_WR(pdev, IGU_REG_PROD_CONS_MEMORY + (base_prod + i*MAX_VNIC_NUM)*4, 0);
3513         }
3514 
3515         lm_int_ack_def_sb_enable(pdev);
3516 
3517         /* Send cleanup command */
3518         lm_int_igu_sb_cleanup(pdev, IGU_DSB_ID(pdev));
3519 
3520         /* Reset statistics msix / attn */
3521         igu_func_id = (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)? FUNC_ID(pdev) : VNIC_ID(pdev);
3522         igu_func_id |= (1 << IGU_FID_ENCODE_IS_PF_SHIFT);
3523 
3524         REG_WR(pdev, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + igu_func_id*4, 0);
3525         REG_WR(pdev, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (igu_func_id + MAX_VNIC_NUM)*4, 0);
3526 
3527         /* E2 TODO: these should become driver const once rf-tool supports split-68 const. */
3528         REG_WR(pdev, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
3529         REG_WR(pdev, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
3530         REG_WR(pdev, IGU_REG_SB_MASK_LSB, 0);
3531         REG_WR(pdev, IGU_REG_SB_MASK_MSB, 0);
3532         REG_WR(pdev, IGU_REG_PBA_STATUS_LSB, 0);
3533         REG_WR(pdev, IGU_REG_PBA_STATUS_MSB, 0);
3534 
3535     }
3536 }
3537 
3538 
3539 static void init_nig_common(lm_device_t *pdev)
3540 {
3541     ECORE_INIT_COMN( pdev, NIG);
3542 
3543     if (CHIP_IS_E2(pdev) || CHIP_IS_E1H(pdev)) /* E3 supports this per port - and is therefore done in the port phase */
3544     {
3545         REG_WR(pdev,NIG_REG_LLH_MF_MODE,    IS_MULTI_VNIC(pdev) ? 1 : 0);
3546     }
3547 
3548     /* E1HOV mode was removed in E2 and is replaced with hdrs-after-basic... */
3549     if (CHIP_IS_E1H(pdev))
3550     {
3551         REG_WR(pdev,NIG_REG_LLH_E1HOV_MODE, IS_MF_SD_MODE(pdev) ? 1 : 0);
3552     }
3553 
3554 }
3555 
3556 static void init_nig_port(lm_device_t *pdev)
3557 {
3558     ECORE_INIT_PORT( pdev, NIG);
3559 
3560     if (!CHIP_IS_E3(pdev))
3561     {
3562         REG_WR(pdev,(PORT_ID(pdev) ? NIG_REG_XGXS_SERDES1_MODE_SEL : NIG_REG_XGXS_SERDES0_MODE_SEL),1);
3563     }
3564 
3565     if (!CHIP_IS_E1x(pdev))
3566     {
3567         /* MF-mode can be set separately per port in E3, and therefore is done here... for E2 and before it is done in the common phase */
3568         if (CHIP_IS_E3(pdev))
3569         {
3570             REG_WR(pdev,(PORT_ID(pdev)?  NIG_REG_LLH1_MF_MODE: NIG_REG_LLH_MF_MODE), IS_MULTI_VNIC(pdev) ? 1 : 0);
3571         }
3572     }
3573 
3574     if (!CHIP_IS_E1(pdev))
3575     {
3576         /*   LLH0/1_BRB1_DRV_MASK_MF        MF      SF
3577               mask_no_outer_vlan            0       1
3578               mask_outer_vlan               1       0*/
3579         u32_t mask_mf_reg = PORT_ID(pdev) ? NIG_REG_LLH1_BRB1_DRV_MASK_MF : NIG_REG_LLH0_BRB1_DRV_MASK_MF;
3580         u32_t val = IS_MF_SD_MODE(pdev) ? NIG_LLH0_BRB1_DRV_MASK_MF_REG_LLH0_BRB1_DRV_MASK_OUTER_VLAN : NIG_LLH0_BRB1_DRV_MASK_MF_REG_LLH0_BRB1_DRV_MASK_NO_OUTER_VLAN;
3581 
3582         ASSERT_STATIC(NIG_LLH0_BRB1_DRV_MASK_MF_REG_LLH0_BRB1_DRV_MASK_OUTER_VLAN    == NIG_LLH1_BRB1_DRV_MASK_MF_REG_LLH1_BRB1_DRV_MASK_OUTER_VLAN);
3583         ASSERT_STATIC(NIG_LLH0_BRB1_DRV_MASK_MF_REG_LLH0_BRB1_DRV_MASK_NO_OUTER_VLAN == NIG_LLH1_BRB1_DRV_MASK_MF_REG_LLH1_BRB1_DRV_MASK_NO_OUTER_VLAN);
3584         REG_WR(pdev, mask_mf_reg, val);
3585 
3586         if (!CHIP_IS_E1x(pdev))
3587         {
3588             if (IS_MF_SD_MODE(pdev))
3589             {
3590                 REG_WR(pdev, (PORT_ID(pdev) ? NIG_REG_LLH1_CLS_TYPE : NIG_REG_LLH0_CLS_TYPE), 1);
3591             }
3592             else
3593             {
3594                 REG_WR(pdev, (PORT_ID(pdev) ? NIG_REG_LLH1_CLS_TYPE : NIG_REG_LLH0_CLS_TYPE), 2);
3595             }
3596         }
3597     }
3598 }
3599 
3600 void init_nig_func(lm_device_t *pdev)
3601 {
3602     const u8_t  mf     = pdev->params.multi_vnics_mode;
3603     const u8_t  port   = PORT_ID(pdev);
3604     u32_t       offset = 0;
3605 
3606     ECORE_INIT_FUNC(pdev, NIG);
3607 
3608     if (mf)
3609     {
3610         offset = ( port ? NIG_REG_LLH1_FUNC_EN : NIG_REG_LLH0_FUNC_EN );
3611 
3612         if (IS_SD_UFP_MODE(pdev) && GET_FLAGS(pdev->params.mf_proto_support_flags, LM_PROTO_SUPPORT_FCOE))
3613         {
3614             REG_WR(pdev, offset , 0);
3615         }
3616         else
3617         {
3618             REG_WR(pdev, offset , 1);
3619         }
3620 
3621         offset = ( port ? NIG_REG_LLH1_FUNC_VLAN_ID : NIG_REG_LLH0_FUNC_VLAN_ID );
3622         REG_WR(pdev, offset , pdev->params.ovlan);
3623     }
3624 }
3625 
3626 static void init_pxpcs_common(lm_device_t *pdev)
3627 {
3628     if(ERR_IF(!pdev))
3629     {
3630         return;
3631     }
3632 
3633     /* Reset pciex errors */
3634     REG_WR(pdev,0x2814,0xffffffff);
3635     REG_WR(pdev,0x3820,0xffffffff);
3636 
3637     if (!CHIP_IS_E1x(pdev))
3638     {
3639         REG_WR(pdev,PCICFG_OFFSET + PXPCS_TL_CONTROL_5,  (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
3640         REG_WR(pdev,PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
3641                (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
3642         REG_WR(pdev,PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
3643                (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
3644     }
3645 }
3646 
3647 static void init_pxpcs_func(lm_device_t *pdev)
3648 {
3649     if(ERR_IF(!pdev))
3650     {
3651         return;
3652     }
3653     /* Reset pciex errors */
3654     REG_WR(pdev,0x2114,0xffffffff);
3655     REG_WR(pdev,0x2120,0xffffffff);
3656 }
3657 
3658 static void init_pglue_b_port(lm_device_t *pdev)
3659 {
3660     ECORE_INIT_PORT(pdev, PGLUE_B);
3661     /* Timers bug workaround: the pf_master bit in pglue is disabled at the common phase, so we need to enable it here before
3662      * any dmae accesses are attempted. Therefore we manually added the enable-master to the port phase (it also happens
3663      * in the function phase) */
3664     if (!CHIP_IS_E1x(pdev))
3665     {
3666         REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
3667     }
3668 }
3669 
3670 static void init_pglue_b_func(lm_device_t *pdev)
3671 {
3672     ECORE_INIT_FUNC(pdev, PGLUE_B);
3673 
3674     if (!CHIP_IS_E1x(pdev))
3675     {
3676         /* 1. Timers bug workaround. There may be an error here; do this only if func_id=6, otherwise
3677          * an error isn't expected.
3678          * 2. There may be an error due to FLR.
3679          */
3680         REG_WR(pdev,PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, FUNC_ID(pdev));
3681     }
3682 
3683 }
3684 
3685 static void init_cfc_func(lm_device_t *pdev)
3686 {
3687     ECORE_INIT_FUNC(pdev, CFC);
3688     if (!CHIP_IS_E1x(pdev))
3689     {
3690         REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,1);
3691         //REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,1);
3692     }
3693 }
3694 
3695 static void init_aeu_common(lm_device_t * pdev)
3696 {
3697     ECORE_INIT_COMN(pdev, MISC_AEU);
3698 
3699     /* Error Recovery : attach some attentions to close-the-g8 NIG + PXP2 */
3700     lm_er_config_close_the_g8(pdev);
3701 }
3702 
3703 #define init_tcm_common( pdev)     ECORE_INIT_COMN(pdev, TCM);
3704 #define init_ccm_common( pdev)     ECORE_INIT_COMN(pdev, CCM);
3705 #define init_ucm_common( pdev)     ECORE_INIT_COMN(pdev, UCM);
3706 #define init_xcm_common( pdev)     ECORE_INIT_COMN(pdev, XCM)
3707 #define init_tsdm_common(pdev)     ECORE_INIT_COMN(pdev, TSDM)
3708 #define init_csdm_common(pdev)     ECORE_INIT_COMN(pdev, CSDM)
3709 #define init_usdm_common(pdev)     ECORE_INIT_COMN(pdev, USDM)
3710 #define init_xsdm_common(pdev)     ECORE_INIT_COMN(pdev, XSDM)
3711 #define init_tm_common(  pdev)     ECORE_INIT_COMN(pdev, TM)
3712 #define init_upb_common( pdev)     ECORE_INIT_COMN(pdev, UPB)
3713 #define init_xpb_common( pdev)     ECORE_INIT_COMN(pdev, XPB)
3714 #define init_hc_common(  pdev)     ECORE_INIT_COMN(pdev, HC)
3715 #define init_dbg_common(pdev)      ECORE_INIT_COMN(pdev, DBG)
3716 
3717 #define init_pxp_port(pdev)        ECORE_INIT_PORT(pdev, PXP)
3718 #define init_pxp2_port(pdev)       ECORE_INIT_PORT(pdev, PXP2)
3719 #define init_atc_port(pdev)        ECORE_INIT_PORT(pdev, ATC)
3720 #define init_tcm_port( pdev)       ECORE_INIT_PORT(pdev, TCM)
3721 #define init_ucm_port( pdev)       ECORE_INIT_PORT(pdev, UCM)
3722 #define init_ccm_port( pdev)       ECORE_INIT_PORT(pdev, CCM)
3723 #define init_misc_port( pdev)      ECORE_INIT_PORT(pdev, MISC)
3724 #define init_xcm_port( pdev)       ECORE_INIT_PORT(pdev, XCM)
3725 #define init_dq_port(pdev)         ECORE_INIT_PORT(pdev, DORQ)
3726 #define init_tsdm_port( pdev)      ECORE_INIT_PORT(pdev, TSDM)
3727 #define init_csdm_port( pdev)      ECORE_INIT_PORT(pdev, CSDM)
3728 #define init_usdm_port( pdev)      ECORE_INIT_PORT(pdev, USDM)
3729 #define init_xsdm_port( pdev)      ECORE_INIT_PORT(pdev, XSDM)
3730 #define init_upb_port(pdev)        ECORE_INIT_PORT(pdev, UPB)
3731 #define init_xpb_port(pdev)        ECORE_INIT_PORT(pdev, XPB)
3732 #define init_src_port(pdev)        ECORE_INIT_PORT(pdev, SRC)
3733 #define init_cdu_port(pdev)        ECORE_INIT_PORT(pdev, CDU)
3734 #define init_cfc_port(pdev)        ECORE_INIT_PORT(pdev, CFC)
3735 
3736 #define init_igu_port( pdev)       ECORE_INIT_PORT(pdev, IGU)
3737 #define init_dbg_port(pdev)        ECORE_INIT_PORT(pdev, DBG)
3738 #define init_dmae_port(pdev)       ECORE_INIT_PORT(pdev, DMAE)
3739 
3740 #define init_misc_func(pdev)       ECORE_INIT_FUNC(pdev, MISC)
3741 #define init_pxp_func(pdev)        ECORE_INIT_FUNC(pdev, PXP)
3742 #define init_atc_func(pdev)        ECORE_INIT_FUNC(pdev, ATC)
3743 #define init_tcm_func(pdev)        ECORE_INIT_FUNC(pdev, TCM)
3744 #define init_ucm_func(pdev)        ECORE_INIT_FUNC(pdev, UCM)
3745 #define init_ccm_func(pdev)        ECORE_INIT_FUNC(pdev, CCM)
3746 #define init_xcm_func(pdev)        ECORE_INIT_FUNC(pdev, XCM)
3747 #define init_tm_func(pdev)         ECORE_INIT_FUNC(pdev, TM)
3748 #define init_brb_func(pdev)        ECORE_INIT_FUNC(pdev, BRB1)
3749 #define init_tsdm_func(pdev)       ECORE_INIT_FUNC(pdev, TSDM)
3750 #define init_csdm_func(pdev)       ECORE_INIT_FUNC(pdev, CSDM)
3751 #define init_usdm_func(pdev)       ECORE_INIT_FUNC(pdev, USDM)
3752 #define init_xsdm_func(pdev)       ECORE_INIT_FUNC(pdev, XSDM)
3753 #define init_upb_func(pdev)        ECORE_INIT_FUNC(pdev, UPB)
3754 #define init_xpb_func(pdev)        ECORE_INIT_FUNC(pdev, XPB)
3755 #define init_cdu_func(pdev)        ECORE_INIT_FUNC(pdev, CDU)
3756 #define init_aeu_func(pdev)        ECORE_INIT_FUNC(pdev, MISC_AEU)
3757 #define init_dbg_func(pdev)        ECORE_INIT_FUNC(pdev, DBG)
3758 #define init_dmae_func(pdev)       ECORE_INIT_FUNC(pdev, DMAE)
3759 
3760 // for PRS BRB mem setup
3761 static void init_nig_pkt(struct _lm_device_t *pdev)
3762 {
3763     u32 wb_write[3] = {0} ;
3764 
3765     wb_write[0] = 0x55555555 ;
3766     wb_write[1] = 0x55555555 ;
3767     wb_write[2] = 0x20 ;
3768 
3769     // TBD: consider using DMAE for these writes
3770 
3771     // Ethernet source and destination addresses
3772     REG_WR_IND(pdev,NIG_REG_DEBUG_PACKET_LB,  wb_write[0]);
3773     REG_WR_IND(pdev,NIG_REG_DEBUG_PACKET_LB+4,wb_write[1]);
3774     // #SOP
3775     REG_WR_IND(pdev,NIG_REG_DEBUG_PACKET_LB+8,wb_write[2]);
3776 
3777     wb_write[0] = 0x09000000 ;
3778     wb_write[1] = 0x55555555 ;
3779     wb_write[2] = 0x10 ;
3780 
3781     // NON-IP protocol
3782     REG_WR_IND(pdev,NIG_REG_DEBUG_PACKET_LB,  wb_write[0]);
3783     REG_WR_IND(pdev,NIG_REG_DEBUG_PACKET_LB+4,wb_write[1]);
3784     // EOP, eop_bvalid = 0
3785     REG_WR_IND(pdev,NIG_REG_DEBUG_PACKET_LB+8,wb_write[2]);
3786 }
3787 
3788 static void prs_brb_mem_setup (struct _lm_device_t *pdev)
3789 {
3790     u32_t val    = 0;
3791     u32_t trash  = 0;
3792     u32_t cnt    = 0;
3793     u8_t  i      = 0;
3794 
3795 #ifdef _VBD_CMD_
3796     return;
3797 #endif
3798     DbgBreakIf(!pdev->vars.clk_factor);
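    /* Overview of the workaround below (summarized from the code itself): part 1 pushes
     * a single debug packet through the NIG while the parser's CFC search credits are
     * held at 0, then resets BRB and PRS; part 2 repeats this with 10 packets, releases
     * one credit so the parser advances, drains the NIG EOP FIFO, resets BRB/PRS once
     * more and finally re-enables the parser's neighbor blocks. */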
3799 
3800     DbgMessage(pdev, WARN, "mem_wrk start part1\n");
3801     //First part
3802     // Disable inputs of parser neighbor blocks
3803     REG_WR(pdev,TSDM_REG_ENABLE_IN1,0x0);
3804     REG_WR(pdev,TCM_REG_PRS_IFEN,0x0);
3805     REG_WR(pdev,CFC_REG_DEBUG0,0x1);
3806     REG_WR(pdev,NIG_REG_PRS_REQ_IN_EN,0x0);
3807 
3808     // Write 0 to parser credits for CFC search request
3809     REG_WR(pdev,PRS_REG_CFC_SEARCH_INITIAL_CREDIT,0x0);
3810 
3811     // send Ethernet packet
3812     init_nig_pkt(pdev);
3813 
3814     // TODO: Reset NIG statistic
3815     // Wait until NIG register shows 1 packet of size 0x10
3816     cnt = 1000;
3817     while (cnt)
3818     {
3819         val=REG_RD(pdev,NIG_REG_STAT2_BRB_OCTET);
3820         trash=REG_RD(pdev,NIG_REG_STAT2_BRB_OCTET+4);
3821 
3822         if (val == 0x10)
3823         {
3824             break;
3825         }
3826         mm_wait(pdev,10 * pdev->vars.clk_factor);
3827         cnt--;
3828     }
3829     if (val != 0x10)
3830     {
3831         DbgMessage(pdev, FATAL, "mem_wrk: part1 NIG timeout val = 0x%x\n",val);
3832         DbgBreakIfAll(1);
3833     }
3834 
3835     // Wait until PRS register shows 1 packet
3836     cnt = 1000;
3837     while (cnt)
3838     {
3839         val=REG_RD(pdev,PRS_REG_NUM_OF_PACKETS);
3840 
3841         if (val == 0x1)
3842         {
3843             break;
3844         }
3845         mm_wait(pdev,10 * pdev->vars.clk_factor);
3846         cnt--;
3847     }
3848     if (val != 0x1)
3849     {
3850         DbgMessage(pdev, FATAL, "mem_wrk: part1 PRS timeout val = 0x%x\n",val);
3851         DbgBreakIfAll(1);
3852     }
3853     // End of part 1
3854 
3855     // #Reset and init BRB,PRS
3856     REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_RESET_REG_1_CLEAR,0x3);
3857     mm_wait(pdev,50);
3858     REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_RESET_REG_1_SET,0x3);
3859     mm_wait(pdev,50);
3860     init_brb1_common( pdev );
3861     init_prs_common(pdev);
3862 
3863     DbgMessage(pdev, WARN, "mem_wrk start part2\n");
3864     // "Start of part 2"
3865 
3866     // Disable inputs of parser neighbor blocks
3867     REG_WR(pdev,TSDM_REG_ENABLE_IN1,0x0);
3868     REG_WR(pdev,TCM_REG_PRS_IFEN,0x0);
3869     REG_WR(pdev,CFC_REG_DEBUG0,0x1);
3870     REG_WR(pdev,NIG_REG_PRS_REQ_IN_EN,0x0);
3871 
3872     // Write 0 to parser credits for CFC search request
3873     REG_WR(pdev,PRS_REG_CFC_SEARCH_INITIAL_CREDIT,0x0);
3874 
3875     // send 10 Ethernet packets
3876     for (i=0;i<10;i++)
3877     {
3878         init_nig_pkt(pdev);
3879     }
3880 
3881     // Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0
3882     cnt = 1000;
3883     while (cnt)
3884     {
3885         val=REG_RD(pdev,NIG_REG_STAT2_BRB_OCTET);
3886         trash=REG_RD(pdev,NIG_REG_STAT2_BRB_OCTET+4);
3887 
3888         if (val == 0xb0)
3889         {
3890             break;
3891         }
3892         mm_wait(pdev,10 * pdev->vars.clk_factor );
3893         cnt--;
3894     }
3895     if (val != 0xb0)
3896     {
3897         DbgMessage(pdev, FATAL, "mem_wrk: part2 NIG timeout val = 0x%x\n",val);
3898         DbgBreakIfAll(1);
3899     }
3900 
3901     // Wait until PRS register shows 2 packets
3902     val=REG_RD(pdev,PRS_REG_NUM_OF_PACKETS);
3903 
3904     if (val != 0x2)
3905     {
3906         DbgMessage(pdev, FATAL, "mem_wrk: part2 PRS wait for 2 timeout val = 0x%x\n",val);
3907         DbgBreakIfAll(1);
3908     }
3909 
3910     // Write 1 to parser credits for CFC search request
3911     REG_WR(pdev,PRS_REG_CFC_SEARCH_INITIAL_CREDIT,0x1);
3912 
3913     // Wait until PRS register shows 3 packets
3914     mm_wait(pdev,100 * pdev->vars.clk_factor);
3915     // (re-read the PRS packet counter after restoring the search credit)
3916     val=REG_RD(pdev,PRS_REG_NUM_OF_PACKETS);
3917 
3918     if (val != 0x3)
3919     {
3920         DbgMessage(pdev, FATAL, "mem_wrk: part2 PRS wait for 3 timeout val = 0x%x\n",val);
3921         DbgBreakIfAll(1);
3922     }
3923 
3924      // clear NIG EOP FIFO
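     // (11 entries: the ten packets from part 2 plus the one from part 1)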
3925     for (i=0;i<11;i++)
3926     {
3927         trash=REG_RD(pdev,NIG_REG_INGRESS_EOP_LB_FIFO);
3928     }
3929     val=REG_RD(pdev,NIG_REG_INGRESS_EOP_LB_EMPTY);
3930     DbgBreakIfAll(val != 1);
3931 
3932     // #Reset and init BRB,PRS
3933     REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_RESET_REG_1_CLEAR,0x03);
3934     mm_wait(pdev,50);
3935     REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_RESET_REG_1_SET,0x03);
3936     mm_wait(pdev,50);
3937     init_brb1_common( pdev );
3938     init_prs_common(pdev);
3939     // everest_init_part( pdev, BLCNUM_NIG  ,COMMON, hw);
3940 
3941     // Enable inputs of parser neighbor blocks
3942     REG_WR(pdev,TSDM_REG_ENABLE_IN1,0x7fffffff);
3943     REG_WR(pdev,TCM_REG_PRS_IFEN,0x1);
3944     REG_WR(pdev,CFC_REG_DEBUG0,0x0);
3945     REG_WR(pdev,NIG_REG_PRS_REQ_IN_EN,0x1);
3946 
3947     DbgMessage(pdev, WARN, "mem_wrk: Finish start part2\n");
3948 
3949 }
3950 
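/* Write chip-wide defaults to the STORMs' internal RAM: the IPv4 ip-id
 * rollover mask, the dynamic-HC parameter, and (E2 and above) the IGU mode. */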
3951 static void lm_init_intmem_common(struct _lm_device_t *pdev)
3952 {
3953     /* ip_id_mask determines how the IP id (ipv4) rolls over; the init value is currently a constant: 'half'. */
3954     /* TODO: add a constant for this in the common constants. */
3955     LM_INTMEM_WRITE16(pdev, XSTORM_COMMON_IP_ID_MASK_OFFSET, 0x8000, BAR_XSTRORM_INTMEM);
3956 
3957     LM_INTMEM_WRITE16(pdev, USTORM_ETH_DYNAMIC_HC_PARAM_OFFSET, (u16_t)pdev->params.l2_dynamic_hc_min_bytes_per_packet, BAR_USTRORM_INTMEM);
3958     DbgBreakIf(USTORM_ETH_DYNAMIC_HC_PARAM_SIZE != sizeof(u16_t));
3959 
3960     if (!CHIP_IS_E1x(pdev))
3961     {
3962         DbgBreakIf(CSTORM_IGU_MODE_SIZE != 1);
3963         if (INTR_BLK_MODE(pdev) == INTR_BLK_MODE_NORM)
3964         {
3965             LM_INTMEM_WRITE8(pdev, CSTORM_IGU_MODE_OFFSET, HC_IGU_NBC_MODE, BAR_CSTRORM_INTMEM);
3966         }
3967         else
3968         {
3969             LM_INTMEM_WRITE8(pdev, CSTORM_IGU_MODE_OFFSET, HC_IGU_BC_MODE, BAR_CSTRORM_INTMEM);
3970         }
3971     }
3972 }
3973 
3974 
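/* Per-port internal RAM init: no-MCP licensing workaround, per-vnic
 * mac-filter defaults (E1H), cmng setup and Tx-switching configuration. */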
3975 static void lm_init_intmem_port(struct _lm_device_t *pdev)
3976 {
3977     u8_t func = 0;
3978 
3979     /* Licensing with no MCP workaround. */
3980     if (GET_FLAGS( pdev->params.test_mode, TEST_MODE_NO_MCP))
3981     {
3982         /* If there is no MCP then there is no shmem_base, therefore we write to an absolute address. port 1 is 28 bytes away.  */
3983         #define SHMEM_ABSOLUTE_LICENSE_ADDRESS 0xaff3c
3984         DbgMessage(pdev, WARN, "writing reg: %p\n", SHMEM_ABSOLUTE_LICENSE_ADDRESS + (PORT_ID(pdev) * 0x1c));
3985         LM_SHMEM_WRITE(pdev, SHMEM_ABSOLUTE_LICENSE_ADDRESS + (PORT_ID(pdev) * 0x1c), 0xffff);
3986     }
3987 
3988     DbgBreakIf(!pdev->vars.clk_factor);
3989     if(CHIP_IS_E1H(pdev))
3990     {
3991         /* E1H is mf-aware, so we must take care of all the functions in the port (a non-mf-aware chip would not need this) */
3992         LM_FOREACH_FUNC_IN_PORT(pdev, func)
3993         {
3994             /* Set all mac filter drop flags to '0' to make sure we don't accept packets for vnics that aren't up yet... do this for each vnic! */
3995             LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, ucast_drop_all), 0, BAR_TSTRORM_INTMEM);
3996             LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, ucast_accept_all), 0, BAR_TSTRORM_INTMEM);
3997             LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, mcast_drop_all), 0, BAR_TSTRORM_INTMEM);
3998             LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, mcast_accept_all), 0, BAR_TSTRORM_INTMEM);
3999             LM_INTMEM_WRITE32(pdev,TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + OFFSETOF(struct tstorm_eth_mac_filter_config, bcast_accept_all), 0, BAR_TSTRORM_INTMEM);
4000         }
4001     }
4002     // for now, min/max cmng is configured only in multi-vnic mode
4003     if (IS_MULTI_VNIC(pdev))
4004     {
4005         // on the first initialization, always use 10000 (10G link speed)
4006         lm_cmng_init(pdev,10000);
4007     }
4008 
4009     /* Tx switching is only enabled if in MF SI mode and npar_vm_switching is enabled...*/
4010     if (IS_MF_SI_MODE(pdev) && pdev->params.npar_vm_switching_enable)
4011     {
4012         //In switch independent mode, driver must enable TCP TX switching using XSTORM_TCP_TX_SWITCHING_EN_OFFSET.
4013         LM_INTMEM_WRITE32(pdev,XSTORM_TCP_TX_SWITCHING_EN_OFFSET(PORT_ID(pdev)), 1, BAR_XSTRORM_INTMEM);
4014     }
4015     else
4016     {
4017         if (!CHIP_IS_E1x(pdev)) //no Tx switching in E1, and the internal RAM offset for it is invalid.
4018         {
4019             LM_INTMEM_WRITE32(pdev,XSTORM_TCP_TX_SWITCHING_EN_OFFSET(PORT_ID(pdev)), 0, BAR_XSTRORM_INTMEM);
4020         }
4021     }
4022 }
4023 
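/* Publish the event-queue parameters (ring base address, initial producer,
 * status-block index of the EQ consumer) to CSTORM internal memory,
 * copying the struct one 32-bit word at a time. */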
4024 static void lm_init_intmem_eq(struct _lm_device_t * pdev)
4025 {
4026     struct event_ring_data eq_data = {{0}};
4027     u32_t  addr                    = CSTORM_EVENT_RING_DATA_OFFSET(FUNC_ID(pdev));
4028     u32_t  index                   = 0;
4029 
4030     eq_data.base_addr.hi = lm_bd_chain_phys_addr(&pdev->eq_info.eq_chain.bd_chain, 0).as_u32.high;
4031     eq_data.base_addr.lo = lm_bd_chain_phys_addr(&pdev->eq_info.eq_chain.bd_chain, 0).as_u32.low;
4032     eq_data.producer     = lm_bd_chain_prod_idx(&pdev->eq_info.eq_chain.bd_chain);
4033     eq_data.index_id     = HC_SP_INDEX_EQ_CONS;
4034     eq_data.sb_id        = DEF_STATUS_BLOCK_INDEX;
4035 
4036     for (index = 0; index < sizeof(struct event_ring_data) / sizeof(u32_t); index++)
4037     {
4038         LM_INTMEM_WRITE32(pdev, addr + (sizeof(u32_t) * index), *((u32 *)&eq_data + index), BAR_CSTRORM_INTMEM);
4039     }
4040 }
4041 
4042 static void lm_init_intmem_function(struct _lm_device_t *pdev)
4043 {
4044     u8_t const      func                                    = FUNC_ID(pdev);
4045 
4046     /* status blocks are done in init_status_blocks() */    /* spq prod init: needs to be written using GRC (WB) so it doesn't generate an interrupt */
4047     REG_WR(pdev,XSEM_REG_FAST_MEMORY + (XSTORM_SPQ_PAGE_BASE_OFFSET(func)),pdev->sq_info.sq_chain.bd_chain_phy.as_u32.low);
4048     REG_WR(pdev,XSEM_REG_FAST_MEMORY + (XSTORM_SPQ_PAGE_BASE_OFFSET(func)) + 4,pdev->sq_info.sq_chain.bd_chain_phy.as_u32.high);
4049     REG_WR(pdev,XSEM_REG_FAST_MEMORY + (XSTORM_SPQ_PROD_OFFSET(func)),pdev->sq_info.sq_chain.prod_idx);
4050 
4051     /* Initialize the event-queue */
4052     lm_init_intmem_eq(pdev);
4053 
4054     /* Todo: Init indirection table */
4055 
4056     if(CHIP_IS_E1(pdev))
4057     {
4058         // Should run only for E1 (beginning with fw 6.4.10). In earlier versions (e.g. 6.2) the workaround is relevant for E1.5 as well.
4059         /* add for PXP dual port memory setup */
4060         DbgBreakIf(lm_bd_chain_phys_addr(&pdev->eq_info.eq_chain.bd_chain, 0).as_u64 == 0);
4061         LM_INTMEM_WRITE32(pdev,USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),lm_bd_chain_phys_addr(&pdev->eq_info.eq_chain.bd_chain, 0).as_u32.low, BAR_USTRORM_INTMEM); /* need to check */
4062         LM_INTMEM_WRITE32(pdev,USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func)+4,lm_bd_chain_phys_addr(&pdev->eq_info.eq_chain.bd_chain, 0).as_u32.high, BAR_USTRORM_INTMEM); /* need to check */
4063     }
4064 
4065 
4066     ASSERT_STATIC( 3 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[0].threshold) ) ;
4067 
4068     //init dynamic hc
4069     LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func), pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[0], BAR_CSTRORM_INTMEM);
4070     LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+4, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[1], BAR_CSTRORM_INTMEM);
4071     LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+8, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].threshold[2], BAR_CSTRORM_INTMEM);
4072 
4073     /*Set DHC scaling factor for L4*/
4074     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+12, (16 - (u8_t)pdev->params.l4_hc_scaling_factor), BAR_CSTRORM_INTMEM);
4075 
4076     /*Reset DHC scaling factors for rest of protocols*/
4077     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+13, 0, BAR_CSTRORM_INTMEM);
4078     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+14, 0, BAR_CSTRORM_INTMEM);
4079     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+15, 0, BAR_CSTRORM_INTMEM);
4080 
4081     ASSERT_STATIC( 4 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0) ) ;
4082     ASSERT_STATIC( 4 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1) ) ;
4083     ASSERT_STATIC( 4 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2) ) ;
4084     ASSERT_STATIC( 4 == ARRSIZE(pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3) ) ;
4085 
4086     /*Set DHC timeout 0 for all protocols*/
4087     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+16, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[0], BAR_CSTRORM_INTMEM);
4088     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+17, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[1], BAR_CSTRORM_INTMEM);
4089     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+18, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[2], BAR_CSTRORM_INTMEM);
4090     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+19, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout0[3], BAR_CSTRORM_INTMEM);
4091 
4092     /*Set DHC timeout 1 for all protocols*/
4093     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+20, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[0], BAR_CSTRORM_INTMEM);
4094     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+21, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[1], BAR_CSTRORM_INTMEM);
4095     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+22, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[2], BAR_CSTRORM_INTMEM);
4096     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+23, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout1[3], BAR_CSTRORM_INTMEM);
4097 
4098     /*Set DHC timeout 2 for all protocols*/
4099     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+24, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[0], BAR_CSTRORM_INTMEM);
4100     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+25, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[1], BAR_CSTRORM_INTMEM);
4101     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+26, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[2], BAR_CSTRORM_INTMEM);
4102     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+27, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout2[3], BAR_CSTRORM_INTMEM);
4103 
4104     /*Set DHC timeout 3 for all protocols*/
4105     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+28, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[0], BAR_CSTRORM_INTMEM);
4106     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+29, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[1], BAR_CSTRORM_INTMEM);
4107     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+30, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[2], BAR_CSTRORM_INTMEM);
4108     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+31, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_RX_ID].hc_timeout3[3], BAR_CSTRORM_INTMEM);
4109 
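    /* The Tx state-machine dynamic-HC configuration repeats the Rx layout
       above, starting 32 bytes into the per-function config area. */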
4110 #define TX_DHC_OFFSET   32
4111     LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[0], BAR_CSTRORM_INTMEM);
4112     LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+4, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[1], BAR_CSTRORM_INTMEM);
4113     LM_INTMEM_WRITE32(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+8, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].threshold[2], BAR_CSTRORM_INTMEM);
4114 
4115 
4116     /*Reset DHC scaling factors for all protocols*/
4117     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+12, 0, BAR_CSTRORM_INTMEM);
4118     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+13, 0, BAR_CSTRORM_INTMEM);
4119     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+14, 0, BAR_CSTRORM_INTMEM);
4120     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+15, 0, BAR_CSTRORM_INTMEM);
4121 
4122     /*Set DHC timeout 0 for all protocols*/
4123     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+16, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[0], BAR_CSTRORM_INTMEM);
4124     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+17, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[1], BAR_CSTRORM_INTMEM);
4125     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+18, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[2], BAR_CSTRORM_INTMEM);
4126     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+19, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout0[3], BAR_CSTRORM_INTMEM);
4127 
4128     /*Set DHC timeout 1 for all protocols*/
4129     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+20, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[0], BAR_CSTRORM_INTMEM);
4130     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+21, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[1], BAR_CSTRORM_INTMEM);
4131     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+22, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[2], BAR_CSTRORM_INTMEM);
4132     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+23, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout1[3], BAR_CSTRORM_INTMEM);
4133 
4134     /*Set DHC timeout 2 for all protocols*/
4135     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+24, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[0], BAR_CSTRORM_INTMEM);
4136     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+25, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[1], BAR_CSTRORM_INTMEM);
4137     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+26, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[2], BAR_CSTRORM_INTMEM);
4138     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+27, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout2[3], BAR_CSTRORM_INTMEM);
4139 
4140     /*Set DHC timeout 3 for all protocols*/
4141     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+28, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[0], BAR_CSTRORM_INTMEM);
4142     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+29, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[1], BAR_CSTRORM_INTMEM);
4143     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+30, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[2], BAR_CSTRORM_INTMEM);
4144     LM_INTMEM_WRITE8(pdev,CSTORM_DYNAMIC_HC_CONFIG_OFFSET(func)+TX_DHC_OFFSET+31, pdev->vars.int_coal.eth_dynamic_hc_cfg.sm_config[SM_TX_ID].hc_timeout3[3], BAR_CSTRORM_INTMEM);
4145 
4146     /* E1H specific init */
4147     if (pdev->params.disable_patent_using)
4148     {
4149         DbgMessage(pdev, WARN, "Patent is disabled\n");
4150         LM_INTMEM_WRITE8(pdev, TSTORM_TCP_GLOBAL_PARAMS_OFFSET, 0, BAR_TSTRORM_INTMEM);
4151     }
4152 
4153     /* The statements below force the FW to trace SP (slow-path) operations. This debugger feature may be invoked by initializing the corresponding params value
4154        in bootleg and/or via an undocumented registry value (per function). Disabling statistics is highly recommended when using this debug option. */
4155     if (pdev->params.record_sp & XSTORM_RECORD_SLOW_PATH)
4156     {
4157         LM_INTMEM_WRITE8(pdev, XSTORM_RECORD_SLOW_PATH_OFFSET(FUNC_ID(pdev)), 1, BAR_XSTRORM_INTMEM);
4158     }
4159 
4160     if (pdev->params.record_sp & CSTORM_RECORD_SLOW_PATH)
4161     {
4162         LM_INTMEM_WRITE8(pdev, CSTORM_RECORD_SLOW_PATH_OFFSET(FUNC_ID(pdev)), 1, BAR_CSTRORM_INTMEM);
4163     }
4164 
4165     if (pdev->params.record_sp & TSTORM_RECORD_SLOW_PATH)
4166     {
4167         LM_INTMEM_WRITE8(pdev, TSTORM_RECORD_SLOW_PATH_OFFSET(FUNC_ID(pdev)), 1, BAR_TSTRORM_INTMEM);
4168     }
4169 
4170     if (pdev->params.record_sp & USTORM_RECORD_SLOW_PATH)
4171     {
4172         LM_INTMEM_WRITE8(pdev, USTORM_RECORD_SLOW_PATH_OFFSET(FUNC_ID(pdev)), 1, BAR_USTRORM_INTMEM);
4173     }
4174 
4175     /* Enable the function in STORMs */
4176     LM_INTMEM_WRITE8(pdev, XSTORM_VF_TO_PF_OFFSET(FUNC_ID(pdev)), FUNC_ID(pdev), BAR_XSTRORM_INTMEM);
4177     LM_INTMEM_WRITE8(pdev, CSTORM_VF_TO_PF_OFFSET(FUNC_ID(pdev)), FUNC_ID(pdev), BAR_CSTRORM_INTMEM);
4178     LM_INTMEM_WRITE8(pdev, TSTORM_VF_TO_PF_OFFSET(FUNC_ID(pdev)), FUNC_ID(pdev), BAR_TSTRORM_INTMEM);
4179     LM_INTMEM_WRITE8(pdev, USTORM_VF_TO_PF_OFFSET(FUNC_ID(pdev)), FUNC_ID(pdev), BAR_USTRORM_INTMEM);
4180 
4181     LM_INTMEM_WRITE8(pdev, XSTORM_FUNC_EN_OFFSET(FUNC_ID(pdev)), 1, BAR_XSTRORM_INTMEM);
4182     LM_INTMEM_WRITE8(pdev, CSTORM_FUNC_EN_OFFSET(FUNC_ID(pdev)), 1, BAR_CSTRORM_INTMEM);
4183     LM_INTMEM_WRITE8(pdev, TSTORM_FUNC_EN_OFFSET(FUNC_ID(pdev)), 1, BAR_TSTRORM_INTMEM);
4184     LM_INTMEM_WRITE8(pdev, USTORM_FUNC_EN_OFFSET(FUNC_ID(pdev)), 1, BAR_USTRORM_INTMEM);
4185 }
4186 
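/* Initialize all blocks common to the chip (run once, by the first driver
 * to load, per the MCP load response); block order follows the HW init
 * sequence. */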
4187 static  void init_common_part(struct _lm_device_t *pdev)
4188 {
4189     u32_t       temp                      = 0;
4190     u32_t       val                       = 0;
4191     u32_t       trash                     = 0;
4192     u8_t        rc                        = 0;
4193     const u32_t wait_ms                   = 200*pdev->vars.clk_factor ;
4194     u32_t       shmem_base[MAX_PATH_NUM]  = {0};
4195     u32_t       shmem_base2[MAX_PATH_NUM] = {0};
4196     const u8_t  port                      = PORT_ID(pdev);
4197 
4198     DbgMessage(pdev, INFORMi, "init_common_part\n");
4199 
4200     /* shutdown bug - clear the shutdown inprogress flag*/
4201     /* Must be done before DMAE */
4202     lm_reset_clear_inprogress(pdev);
4203 
4204     DbgBreakIf( !pdev->vars.clk_factor );
4205 
4206     init_misc_common( pdev );
4207     init_pxp_common ( pdev );
4208     init_pxp2_common( pdev );
4209     init_pglue_b_common(pdev);
4210     init_atc_common ( pdev );
4211     init_dmae_common( pdev );
4212     init_tcm_common ( pdev );
4213     init_ucm_common ( pdev );
4214     init_ccm_common ( pdev );
4215     init_xcm_common ( pdev );
4216     init_qm_common  ( pdev );
4217     init_tm_common  ( pdev );
4218     init_dq_common  ( pdev );
4219     init_brb1_common( pdev );
4220     init_prs_common( pdev);
4221     init_tsdm_common( pdev );
4222     init_csdm_common( pdev );
4223     init_usdm_common( pdev );
4224     init_xsdm_common( pdev );
4225 
4226     init_semi_common(pdev);
4227 
4228     // synchronize the RTC of the semis
4229     REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_RESET_REG_1_CLEAR,0x80000000);
4230     REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_RESET_REG_1_SET,0x80000000);
4231 
4232     init_upb_common( pdev );
4233     init_xpb_common( pdev );
4234     init_pbf_common( pdev );
4235 
4236     init_src_common(pdev);
4237     init_cdu_common(pdev);
4238     init_cfc_common(pdev);
4239     init_hc_common(pdev);
4240 
4241     if (!CHIP_IS_E1x(pdev) && GET_FLAGS( pdev->params.test_mode, TEST_MODE_NO_MCP))
4242     {
4243         /* don't zeroize msix memory - this overrides windows OS initialization */
4244         REG_WR(pdev,IGU_REG_RESET_MEMORIES,0x36);
4245     }
4246     init_igu_common(pdev);
4247     init_aeu_common(pdev);
4248     init_pxpcs_common(pdev);
4249     init_dbg_common(pdev);
4250     init_nig_common(pdev);
4251 
4252     // TBD: E1H - determine whether to move from here, or have "wait for blks done" function
4253     //finish CFC init
4254     REG_WAIT_VERIFY_VAL(pdev, CFC_REG_LL_INIT_DONE,1,wait_ms );
4255 
4256     REG_WAIT_VERIFY_VAL(pdev, CFC_REG_AC_INIT_DONE,1,wait_ms);
4257     // moved here because of timing problem
4258     REG_WAIT_VERIFY_VAL(pdev, CFC_REG_CAM_INIT_DONE,1,wait_ms);
4259     // we need to enable inputs here.
4260     REG_WR(pdev,CFC_REG_DEBUG0,0);
4261 
4262     if (CHIP_IS_E1(pdev))
4263     {
4264         // read NIG statistic
4265         val   = REG_RD(pdev,NIG_REG_STAT2_BRB_OCTET);
4266         trash = REG_RD(pdev,NIG_REG_STAT2_BRB_OCTET+4);
4267 
4268         // PRS BRB memory setup only after full power cycle
4269         if(val == 0)
4270         {
4271             prs_brb_mem_setup(pdev);
4272         }
4273     }
4274 
4275     lm_setup_fan_failure_detection(pdev);
4276 
4277     /* One time initialization of the phy:
4278     in 2-port-mode - only for the first device on a chip!
4279     in 4-port-mode - always */
4280 
4281     if ((pdev->vars.load_code == LM_LOADER_RESPONSE_LOAD_COMMON_CHIP) ||
4282         CHIP_IS_E1x(pdev))
4283     {
4284         shmem_base[0]  = pdev->hw_info.shmem_base;
4285         shmem_base2[0] = pdev->hw_info.shmem_base2;
4286 
4287         if (!CHIP_IS_E1x(pdev))
4288         {
4289             LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t,other_shmem_base_addr), &shmem_base[1]);
4290             LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t,other_shmem2_base_addr), &shmem_base2[1]);
4291         }
4292 
4293         // Apply common init only in case LFA is not supported by MFW.
4294         if ( !LM_SHMEM2_HAS(pdev, lfa_host_addr[port]) )
4295         {
4296             rc = elink_common_init_phy(pdev, shmem_base, shmem_base2, CHIP_ID(pdev), 0);
4297             DbgBreakIf( ELINK_STATUS_OK != rc );
4298 
4299             rc = elink_pre_init_phy(pdev, shmem_base[0], shmem_base2[0], CHIP_ID(pdev), port);
4300             DbgBreakIf( ELINK_STATUS_OK != rc );
4301         }
4302     }
4303 
4304     //clear PXP2 attentions
4305     temp = REG_RD(pdev,PXP2_REG_PXP2_INT_STS_CLR_0);
4306 
4307     // set dcc_support in case active
4308     if(pdev->hw_info.shmem_base2)
4309     {
4310         val = (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV) ;
4311         temp = OFFSETOF( shmem2_region_t, dcc_support);
4312         LM_SHMEM2_WRITE(pdev, temp, val );
4313     }
4314 
4315     ///Write driver NIV support
4316     if (IS_MF_AFEX_MODE(pdev))
4317     {
4318         DbgBreakIf(!pdev->hw_info.shmem_base2);
4319         LM_SHMEM2_WRITE(pdev,   OFFSETOF( shmem2_region_t, afex_driver_support),
4320                                 SHMEM_AFEX_SUPPORTED_VERSION_ONE );
4321     }
4322 
4323     if (LM_SHMEM2_HAS(pdev, drv_capabilities_flag))
4324     {
4325         DbgBreakIf(!pdev->hw_info.shmem_base2);
4326         //we clear all the other capabilities flags and set just DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED
4327         LM_SHMEM2_WRITE(pdev, OFFSETOF(shmem2_region_t, drv_capabilities_flag[FUNC_MAILBOX_ID(pdev)]), DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED);
4328     }
4329 
4330 
4331     enable_blocks_attention(pdev);
4332 
4333     /* Enable parity error only for E2 and above */
4334     if (!CHIP_IS_E1x(pdev))
4335     {
4336         DbgMessage(pdev, WARN, "Enabling parity errors\n");
4337         ecore_enable_blocks_parity(pdev);
4338     }
4339 }
4340 
4341 
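/* Per-port block init - runs once per port, by the first function loaded
 * on that port. */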
4342 void init_port_part(struct _lm_device_t *pdev)
4343 {
4344     u32_t val = 0;
4345     const u8_t  port = PORT_ID(pdev);
4346 
4347     /* Probe phys on board - must happen before lm_reset_link*/
4348     elink_phy_probe(&pdev->params.link);
4349 
4350     REG_WR(pdev,(port ? NIG_REG_MASK_INTERRUPT_PORT1 : NIG_REG_MASK_INTERRUPT_PORT0), 0);
4351 
4352     init_misc_port(pdev);
4353     init_pxp_port(pdev);
4354     init_pxp2_port(pdev);
4355     init_pglue_b_port(pdev);
4356     init_atc_port(pdev);
4357     init_tcm_port( pdev);
4358     init_ucm_port( pdev);
4359     init_ccm_port( pdev);
4360     init_xcm_port( pdev);
4361     init_qm_port ( pdev);
4362     init_tm_port ( pdev);
4363     init_dq_port ( pdev);
4364     init_brb_port( pdev);
4365     init_prs_port( pdev);
4366     init_tsdm_port( pdev);
4367     init_csdm_port( pdev);
4368     init_usdm_port( pdev);
4369     init_xsdm_port( pdev);
4370 
4371     init_semi_port(pdev);
4372     init_upb_port(pdev);
4373     init_xpb_port(pdev);
4374     init_pbf_port( pdev );
4375     init_src_port(pdev);
4376     init_cdu_port(pdev);
4377     init_cfc_port(pdev);
4378     init_hc_port( pdev);
4379     init_igu_port( pdev);
4380     init_aeu_port( pdev);
4381     init_dbg_port(pdev);
4382 
4383     init_nig_port( pdev);
4384     init_dmae_port(pdev);
4385 
4386 
4387     MM_ACQUIRE_PHY_LOCK(pdev);
4388     lm_stats_init_port_part(pdev);
4389     elink_init_mod_abs_int(pdev, &pdev->vars.link, CHIP_ID(pdev), pdev->hw_info.shmem_base, pdev->hw_info.shmem_base2, port);
4390     MM_RELEASE_PHY_LOCK(pdev);
4391 
4392     // iSCSI FW expects bit 28 to be set
4393     if (!GET_FLAGS( pdev->params.test_mode, TEST_MODE_NO_MCP))
4394     {
4395         LM_SHMEM_READ(pdev,  OFFSETOF(shmem_region_t,dev_info.port_feature_config[port].config), &val );
4396         SET_FLAGS(val, (1 << 28)) ;
4397         LM_SHMEM_WRITE(pdev, OFFSETOF(shmem_region_t,dev_info.port_feature_config[port].config), val );
4398     }
4399     // Clear the shared port bit of the DCBX completion
4400     lm_dcbx_config_drv_flags(pdev, lm_dcbx_drv_flags_reset_flags,0);
4401 }
4402 
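/* Per-function block init - runs for every function as it loads. */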
4403 void init_function_part(struct _lm_device_t *pdev)
4404 {
4405     const u8_t func       = FUNC_ID(pdev);
4406     const u8_t func_mb_id = FUNC_MAILBOX_ID(pdev);
4407 
4408     DbgMessage(pdev, INFORMi, "init_function_part, func=%d\n", func);
4409 
4410     if (!CHIP_IS_E1x(pdev) && LM_SHMEM2_HAS(pdev, drv_capabilities_flag))
4411     {
4412         //we clear all the other capabilities flags and set just DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED
4413         LM_SHMEM2_WRITE(pdev, OFFSETOF(shmem2_region_t, drv_capabilities_flag[func_mb_id]), DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | (pdev->params.mtu_max << DRV_FLAGS_MTU_SHIFT));
4414     }
4415 
4416     init_pxp_func(pdev);
4417     init_pxp2_func( pdev );
4418     init_pglue_b_func(pdev);
4419     init_atc_func(pdev);
4420     init_misc_func(pdev);
4421     init_tcm_func(pdev);
4422     init_ucm_func(pdev);
4423     init_ccm_func(pdev);
4424     init_xcm_func(pdev);
4425     init_semi_func(pdev);
4426     init_qm_func(pdev);
4427     init_tm_func(pdev);
4428     init_dq_func(pdev);
4429     init_brb_func(pdev);
4430     init_prs_func(pdev);
4431     init_tsdm_func(pdev);
4432     init_csdm_func(pdev);
4433     init_usdm_func(pdev);
4434     init_xsdm_func(pdev);
4435     init_upb_func(pdev);
4436     init_xpb_func(pdev);
4437 
4438     init_pbf_func(pdev);
4439     init_src_func(pdev);
4440     init_cdu_func(pdev);
4441     init_cfc_func(pdev);
4442     init_hc_func(pdev);
4443     init_igu_func(pdev);
4444     init_aeu_func(pdev);
4445     init_pxpcs_func(pdev);
4446     init_dbg_func(pdev);
4447     init_nig_func( pdev);
4448     init_dmae_func(pdev);
4449 
4450 
4451     /* Probe phys on board */
4452     elink_phy_probe(&pdev->params.link);
4453     if (IS_PMF(pdev) && IS_MULTI_VNIC(pdev))
4454     {
4455         DbgMessage(pdev, WARN, "init_function_part: Func %d is the PMF\n", func );
4456     }
4457 
4458     MM_ACQUIRE_PHY_LOCK(pdev);
4459     lm_stats_init_func_part(pdev);
4460     MM_RELEASE_PHY_LOCK(pdev);
4461 }
4462 
4463 /**
4464  * @Description
4465  *      The purpose of this function is to check that the chip
4466  *      is ready for initialization. Most checks are done in
4467  *      get_dev_info; however, due to Diag requirements it's
4468  *      possible that certain things are not configured properly
4469  *      even though get_dev_info passed. At the time of writing this
4470  *      function it was IGU configuration in E3, but in the
4471  *      future there may be more things like this...
4472  *
4473  * @param pdev
4474  *
4475  * @return TRUE / FALSE
4476  */
4477 u8_t
4478 lm_chip_ready_for_init( struct _lm_device_t *pdev)
4479 {
4480     lm_igu_info_t * igu_info = &pdev->hw_info.intr_blk_info.igu_info;
4481     const u8_t      blk_type = INTR_BLK_TYPE(pdev);
4482     const u8_t      blk_mode = INTR_BLK_MODE(pdev);
4483 
4484     if (( blk_type == INTR_BLK_IGU) &&
4485         ( blk_mode == INTR_BLK_MODE_NORM))
4486     {
4487         if ((igu_info->igu_sb_cnt < 1) ||(igu_info->igu_base_sb == 0xff))
4488         {
4489             return FALSE;
4490         }
4491     }
4492 
4493     return TRUE;
4494 }
4495 
4496 lm_status_t lm_init_common_chip_part(struct _lm_device_t *pdev)
4497 {
4498     lm_status_t lm_status = LM_STATUS_SUCCESS;
4499     u32_t       val       = 0;
4500 
4501     #ifdef _VBD_
4502     lm_fl_reset_clear_inprogress(pdev);
4503     #endif
4504 
4505     val = convert_to_bcd( pdev->product_version );
4506     lm_ncsi_drv_ver_to_scratchpad(pdev, val );
4507 
4508     return lm_status;
4509 }
4510 
4511 /* Description:
4512  *    The main function of this routine is to initialize the
4513  *    hardware. It configures all hw blocks in several phases according to the mcp response:
4514  *    1. common blocks
4515  *    2. per function blocks
4516  */
4517 lm_status_t
4518 lm_chip_init( struct _lm_device_t *pdev)
4519 {
4520 
4521     const lm_loader_opcode opcode    = LM_LOADER_OPCODE_LOAD;
4522     lm_loader_response     resp      = 0;
4523     lm_status_t            lm_status = LM_STATUS_SUCCESS;
4524 
4525 
4526     DbgMessage(pdev, INFORMi , "### lm_chip_init %x\n",CHIP_NUM(pdev));
4527 
4528 #ifdef VF_INVOLVED
4529     if (IS_VFDEV(pdev))
4530     {
4531         return lm_vf_chip_init(pdev);
4532     }
4533 #endif
4534 
4535     if (!lm_chip_ready_for_init(pdev))
4536     {
4537         return LM_STATUS_FAILURE;
4538     }
4539 
4540     /* Check if we need to reset the device:
4541      * This can happen for two reasons:
4542      * 1. Undi was active
4543      * 2. BFS/CrashDump Hibernation (fcoe crashdump driver) */
4544     if (IS_PFDEV(pdev))
4545     {
4546         lm_reset_device_if_undi_active(pdev);
4547     }
4548 
4549     // init mcp sequences
4550     lm_status = lm_mcp_cmd_init(pdev);
4551 
4552     if( LM_STATUS_SUCCESS != lm_status )
4553     {
4554         DbgMessage(pdev, FATAL, "lm_chip_init: mcp_cmd_init failed. lm_status=0x%x\n", lm_status);
4555         DbgBreakMsg("lm_mcp_cmd_init failed!\n");
4556         return lm_status ;
4557     }
4558 
4559     INIT_MODE_FLAGS(pdev) = lm_init_get_modes_bitmap(pdev);
4560 
4561     resp = lm_loader_lock(pdev, opcode );
4562 
4563 
4564     /* Save the load response */
4565     pdev->vars.load_code = resp;
4566     // This should be the first call after the load request, since we must complete
4567     // these settings in 5 seconds (MCP keepalive timeout or start pulse)
4568     lm_driver_pulse_always_alive(pdev);
4569 
4570     if( LM_LOADER_RESPONSE_INVALID != resp )
4571     {
4572         if (IS_ASSIGNED_TO_VM_PFDEV(pdev))
4573         {
4574             //Validate FW if Port or Function
4575             switch (resp)
4576             {
4577             case LM_LOADER_RESPONSE_LOAD_PORT:
4578             case LM_LOADER_RESPONSE_LOAD_FUNCTION:
4579                 if (!lm_is_fw_version_valid(pdev))
4580                 {
4581                     lm_loader_lock(pdev, LM_LOADER_OPCODE_UNLOAD_WOL_MCP);
4582                     lm_loader_unlock(pdev, LM_LOADER_OPCODE_UNLOAD_WOL_MCP, NULL );
4583                     return LM_STATUS_BAD_SIGNATURE;
4584                 }
4585                 break;
4586             default:
4587                 break;
4588             }
4589         }
4590         // We need to call it here since init_function_part uses these pointers
4591         lm_setup_read_mgmt_stats_ptr(pdev, FUNC_MAILBOX_ID(pdev), &pdev->vars.fw_port_stats_ptr, &pdev->vars.fw_func_stats_ptr );
4592     }
4593 
4594     if (!IS_DRIVER_PULSE_ALWAYS_ALIVE(pdev))
4595     {
4596         if(LM_STATUS_SUCCESS != lm_send_driver_pulse(pdev))
4597         {
4598             lm_driver_pulse_always_alive(pdev);
4599             DbgBreak();
4600         }
4601     }
4602 
4603     // update mps and mrrs from pcicfg
4604     lm_status = lm_get_pcicfg_mps_mrrs(pdev);
4605 
4606     if (!IS_ASSIGNED_TO_VM_PFDEV(pdev))
4607     {
4608         lm_pcie_state_restore_for_d0( pdev);
4609     }
4610 
4611     switch (resp)
4612     {
4613     case LM_LOADER_RESPONSE_LOAD_COMMON_CHIP:
4614         lm_status = lm_init_common_chip_part(pdev);
4615         if (LM_STATUS_SUCCESS != lm_status)
4616         {
4617             return lm_status;
4618         }
4619     case LM_LOADER_RESPONSE_LOAD_COMMON:
4620 #ifdef _VBD_
4621         lm_fl_reset_clear_inprogress(pdev);
4622 #endif
4623         lm_reset_path( pdev, FALSE ); /* Give a chip-reset (path) before initializing driver*/
4624         init_common_part(pdev);
4625         if (IS_MULTI_VNIC(pdev) && CHIP_IS_E2E3(pdev) && CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_2)
4626         {
4627             int i = 0;
4628             u32_t start_reg = IGU_REG_FUNC_WITH_MORE_16_SB_0;
4629             u32_t function_number = (1 << 8) | (1 << 6) | 0;
4630 
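            /* Note: judging by the register name, these writes appear to
             * declare each vnic on the path as a function with more than
             * 16 status blocks; the '+ 2 * i' stride matches the even
             * function ids a path owns in 2-port mode. */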
4631             for (i = 0; i < VNICS_PER_PATH(pdev); i++)
4632             {
4633                  REG_WR(pdev, start_reg + 4 * i, function_number + 2 * i);
4634             }
4635         }
4636 
4637         lm_init_intmem_common(pdev);
4638         // going to the port part - intentional fall-through (no break)
4639 
4640         // Clear previous dbus info which may have been left
4641         // during error recovery (if any)
4642         mm_dbus_stop_if_started(pdev);
4643 
4644         //Check if there is dbus work
4645         mm_dbus_start_if_enable(pdev);
4646 
4647     case LM_LOADER_RESPONSE_LOAD_PORT:
4648 #ifdef _VBD_
4649         if (lm_is_function_after_flr(pdev))
4650         {
4651             if (IS_PFDEV(pdev))
4652             {
4653                 lm_status = lm_cleanup_after_flr(pdev);
4654 
4655                 if(lm_status != LM_STATUS_SUCCESS)
4656                 {
4657                     return lm_status;
4658                 }
4659             }
4660             else
4661             {
4662                 lm_fl_reset_clear_inprogress(pdev);
4663             }
4664         }
4665 #endif
4666         // If we are here, DMAE is ready (from common part init) - set it for TRUE for non-first devices
4667         pdev->vars.b_is_dmae_ready = TRUE;
4668 
4669         // set device as pmf
4670         pdev->vars.is_pmf = PMF_ORIGINAL;
4671 
4672         init_port_part(pdev);
4673         lm_init_intmem_port(pdev);
4674 
4675         // going to the function part - fall through
4676     case LM_LOADER_RESPONSE_LOAD_FUNCTION:
4677 #ifdef _VBD_
4678     if (lm_is_function_after_flr(pdev))
4679     {
4680         if (IS_PFDEV(pdev))
4681         {
4682             lm_status = lm_cleanup_after_flr(pdev);
4683 
4684             if(lm_status != LM_STATUS_SUCCESS)
4685             {
4686                 return lm_status;
4687             }
4688         }
4689         else
4690         {
4691             lm_fl_reset_clear_inprogress(pdev);
4692         }
4693     }
4694 #endif
4695         // If we are here, DMAE is ready (from port part init) - set it for TRUE for non-first devices
4696         pdev->vars.b_is_dmae_ready = TRUE;
4697         init_function_part(pdev);
4698         init_status_blocks(pdev);
4699         lm_init_intmem_function(pdev);
4700 #ifndef __BIG_ENDIAN
4701         lm_tcp_init_chip_common(pdev);
4702 #endif
4703         break;
4704 
4705     default:
4706         DbgMessage(pdev, WARN, "wrong loader response\n");
4707         DbgBreakIfAll(1);
4708     }
4709 
4710     resp = lm_loader_unlock( pdev, opcode, NULL ) ;
4711 
4712     if (resp != LM_LOADER_RESPONSE_LOAD_DONE)
4713     {
4714         DbgMessage(pdev, WARN, "wrong loader response\n");
4715         DbgBreakIfAll(1);
4716     }
4717 
4718     /* Read MF config parameters: there is a time window between MF
4719      * configuration initialization and DCC attention, allowing DCC
4720      * link state change to go unnoticed. This may cause wrong link
4721      * state to be seen by clients, hence re-sync here.
4722      */
4723 
4724     if (IS_MF_MODE_CAPABLE(pdev))
4725     {
4726            lm_get_shmem_info(pdev);
4727     }
4728 
4729     // TBD link training
4730 
4731     return LM_STATUS_SUCCESS;
4732 }
4733