1 /*******************************************************************************
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  * Copyright 2014 QLogic Corporation
22  * The contents of this file are subject to the terms of the
23  * QLogic End User License (the "License").
24  * You may not use this file except in compliance with the License.
25  *
26  * You can obtain a copy of the License at
27  * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
28  * QLogic_End_User_Software_License.txt
29  * See the License for the specific language governing permissions
30  * and limitations under the License.
31  *
32  *
33  * Module Description:
34  *      This file contains functions that handle HW and FW attention
35  *
36  ******************************************************************************/
37 
38 #include "lm5710.h"
39 #include "general_atten_bits.h"
40 #include "aeu_inputs.h"
41 #include "command.h"
42 
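/* Advance the error-recovery debug log index, wrapping around at MAX_ER_DEBUG_ENTRIES. */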
43 static INLINE void lm_inc_er_debug_idx(lm_device_t * pdev)
44 {
45     pdev->debug_info.curr_er_debug_idx++;
46     if (pdev->debug_info.curr_er_debug_idx == MAX_ER_DEBUG_ENTRIES)
47     {
48         pdev->debug_info.curr_er_debug_idx=0;
49     }
50 }
51 
52 /**
53  * @description
54  *      called from attention handling routines, checks if the
55  *      attention received is an error which is recoverable via
56  *      process kill. If error recovery is disabled this
57  *      function always returns FALSE.
58  *
59  * @param pdev
60  * @param attn_sig : values of the after_invert registers read
61  *                 in the misc that indicate which attention
62  *                 occurred
63  *
64  *
65  * @return u8_t TRUE: attention requires process_kill. FALSE o/w
66  */
67 u8_t lm_recoverable_error(lm_device_t *pdev, u32_t * attn_sig, u32_t arr_size)
68 {
69     lm_er_debug_info_t * debug_info = NULL;
70     u32_t                i;
71 
72     if (!pdev->params.enable_error_recovery || CHIP_IS_E1x(pdev))
73     {
74         return FALSE;
75     }
76 
77     ASSERT_STATIC(ARRSIZE(debug_info->attn_sig) >= MAX_ATTN_REGS);
78     DbgBreakIf(arr_size < MAX_ATTN_REGS);
79 
80     if ((attn_sig[0] & HW_PRTY_ASSERT_SET_0) || (attn_sig[1] & HW_PRTY_ASSERT_SET_1) ||
81         (attn_sig[2] & HW_PRTY_ASSERT_SET_2) || (attn_sig[3] & HW_PRTY_ASSERT_SET_3))
82     {
83         /* Parity Error... Assuming we only enable parities we can deal with,
84          * this is a recoverable error...
85          */
86         debug_info = &((pdev)->debug_info.er_debug_info[pdev->debug_info.curr_er_debug_idx]);
87         for (i = 0; i < arr_size; i++)
88         {
89             debug_info->attn_sig[i] = attn_sig[i];
90         }
91         lm_inc_er_debug_idx(pdev);
92 
93         /* TODO: maybe get GRCDump here in the future... */
94         DbgMessage(pdev, FATAL, "lm_recoverable_error: funcid:%d, 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x\n",
95                    ABS_FUNC_ID(pdev), attn_sig[0], attn_sig[1], attn_sig[2], attn_sig[3]);
96 
97         return TRUE;
98     }
99 
100     /* HW Attentions (other than parity ) */
101     if (attn_sig[1] & HW_INTERRUT_ASSERT_SET_1)
102     {
103         /* QM Interrupt is recoverable */
104         if (attn_sig[1] & AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT)
105         {
106             debug_info = &((pdev)->debug_info.er_debug_info[pdev->debug_info.curr_er_debug_idx]);
107             for (i = 0; i < arr_size; i++)
108             {
109                 debug_info->attn_sig[i] = attn_sig[i];
110             }
111             lm_inc_er_debug_idx(pdev);
112 
113             DbgMessage(pdev, FATAL, "lm_recoverable_error: funcid:%d, 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x\n",
114                    ABS_FUNC_ID(pdev), attn_sig[0], attn_sig[1], attn_sig[2], attn_sig[3]);
115             return TRUE;
116         }
117 
118     }
119 
120     if (attn_sig[3] & EVEREST_GEN_ATTN_IN_USE_MASK)
121     {
122         if ( GENERAL_ATTEN_OFFSET(ERROR_RECOVERY_ATTENTION_BIT) & attn_sig[3])
123         {
124             debug_info = &((pdev)->debug_info.er_debug_info[pdev->debug_info.curr_er_debug_idx]);
125             for (i = 0; i < arr_size; i++)
126             {
127                 debug_info->attn_sig[i] = attn_sig[i];
128             }
129             lm_inc_er_debug_idx(pdev);
130 
131             DbgMessage(pdev, FATAL, "lm_recoverable_error: funcid:%d, 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x\n",
132                    ABS_FUNC_ID(pdev), attn_sig[0], attn_sig[1], attn_sig[2], attn_sig[3]);
133             return TRUE;
134         }
135     }
136 
137     return FALSE;
138 }
139 
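/*
 * Unmask (enable) the per-block interrupt sources that the driver handles as
 * HW attentions. BRB read-length errors and PBF bits 3-4 are intentionally
 * left masked (see the inline comments below).
 */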
140 void enable_blocks_attention(struct _lm_device_t *pdev)
141 {
142     u32_t val = 0;
143 
144     REG_WR(pdev,PXP_REG_PXP_INT_MASK_0,0);
145     if (!CHIP_IS_E1x(pdev))
146     {
147         REG_WR(pdev,PXP_REG_PXP_INT_MASK_1, (PXP_PXP_INT_MASK_1_REG_HST_INCORRECT_ACCESS
148                                              | PXP_PXP_INT_MASK_1_REG_HST_VF_DISABLED_ACCESS /*Temporary solution*/
149                                              | PXP_PXP_INT_MASK_1_REG_HST_PERMISSION_VIOLATION) /*Win8 MMIO (security test)???*/);
150     }
151     REG_WR(pdev,DORQ_REG_DORQ_INT_MASK,0);
152     /* CFC_REG_CFC_INT_MASK see in init_cfc_common */
153 
154 
155     //mask read length error interrupts in brb for parser (parsing unit and 'checksum and crc' unit)
156     //these errors are legal (PU reads fixed length and CAC can cause read length error on truncated packets)
157     REG_WR(pdev,BRB1_REG_BRB1_INT_MASK ,0xFC00);
158 
159     REG_WR(pdev,QM_REG_QM_INT_MASK ,0);
160     REG_WR(pdev,TM_REG_TM_INT_MASK ,0);
161     REG_WR(pdev,XSDM_REG_XSDM_INT_MASK_0 ,0);
162     REG_WR(pdev,XSDM_REG_XSDM_INT_MASK_1 ,0);
163     REG_WR(pdev,XCM_REG_XCM_INT_MASK ,0);
164     //REG_WR(pdev,XSEM_REG_XSEM_INT_MASK_0 ,0);
165     //REG_WR(pdev,XSEM_REG_XSEM_INT_MASK_1 ,0);
166     REG_WR(pdev,USDM_REG_USDM_INT_MASK_0 ,0);
167     REG_WR(pdev,USDM_REG_USDM_INT_MASK_1 ,0);
168     REG_WR(pdev,UCM_REG_UCM_INT_MASK ,0);
169     //REG_WR(pdev,USEM_REG_USEM_INT_MASK_0 ,0);
170     //REG_WR(pdev,USEM_REG_USEM_INT_MASK_1 ,0);
171     REG_WR(pdev,GRCBASE_UPB+PB_REG_PB_INT_MASK ,0);
172     REG_WR(pdev,CSDM_REG_CSDM_INT_MASK_0 ,0);
173     REG_WR(pdev,CSDM_REG_CSDM_INT_MASK_1 ,0);
174     REG_WR(pdev,CCM_REG_CCM_INT_MASK ,0);
175     //REG_WR(pdev,CSEM_REG_CSEM_INT_MASK_0 ,0);
176     //REG_WR(pdev,CSEM_REG_CSEM_INT_MASK_1 ,0);
177     val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
178           PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
179           PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
180     if (!CHIP_IS_E1x(pdev))
181     {
182         val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
183                PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
184     }
185     REG_WR(pdev, PXP2_REG_PXP2_INT_MASK_0, val);
186 
187     REG_WR(pdev,TSDM_REG_TSDM_INT_MASK_0 ,0);
188     REG_WR(pdev,TSDM_REG_TSDM_INT_MASK_1 ,0);
189     REG_WR(pdev,TCM_REG_TCM_INT_MASK ,0);
190     //REG_WR(pdev,TSEM_REG_TSEM_INT_MASK_0 ,0);
191     //REG_WR(pdev,TSEM_REG_TSEM_INT_MASK_1 ,0);
192     REG_WR(pdev,CDU_REG_CDU_INT_MASK ,0);
193     REG_WR(pdev,DMAE_REG_DMAE_INT_MASK ,0);
194     //REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_MISC_INT_MASK ,0);
195     //MASK BIT 3,4
196     REG_WR(pdev,PBF_REG_PBF_INT_MASK ,0X18);
197 
198 }
199 
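/*
 * Re-mask the per-block interrupt sources using a per-chip table of mask
 * widths (E1 / E1.5 / E2-E3), and disable the MCP AEU attention outputs.
 */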
200 void disable_blocks_attention(struct _lm_device_t *pdev)
201 {
202 #define MASK_VALUE_GENERATE(_val) ((u32_t)((((u64_t)0x1)<<_val)-1))
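/* For example, MASK_VALUE_GENERATE(5) == 0x1f, i.e. the 5 least significant bits set. */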
203     typedef struct _block_mask_info_t
204     {
205         u32_t reg_offset;    /* the register offset */
206         u32_t mask_value[3]; /* the mask value per hw (e1 =0 /e1.5 = 1/e2 = 2)*/
207     } block_mask_info_t;
208 
209     u8_t  chip_idx   = 0;
210     u32_t mask_idx   = 0;
211     u32_t val        = 0;
212     u32_t offset     = 0;
213     u32_t mask_value = 0;
214 
215     static const block_mask_info_t init_mask_values_arr[] =
216     {
217         { ATC_REG_ATC_INT_MASK,           { 0,
218                                             0,
219                                             6 } },
220 
221         { BRB1_REG_BRB1_INT_MASK,         { 19,
222                                             19,
223                                             19} },
224 
225         { CCM_REG_CCM_INT_MASK,           { 11,
226                                             11,
227                                             11 } },
228 
229         { CDU_REG_CDU_INT_MASK,           { 7,
230                                             7,
231                                             7 } },
232 
233         { CFC_REG_CFC_INT_MASK,           { 2,
234                                             2,
235                                             2  } },
236 
237         { CSDM_REG_CSDM_INT_MASK_0,       { 32,
238                                             32,
239                                             32 } },
240 
241         { CSDM_REG_CSDM_INT_MASK_1,       { 10,
242                                             10,
243                                             11 } },
244 
245 #if 0
246         { CSEM_REG_CSEM_INT_MASK_0,       { 32,
247                                             32,
248                                             32 } },
249 
250         { CSEM_REG_CSEM_INT_MASK_1,       { 10,
251                                             11,
252                                             11} },
253 
254         { DBG_REG_DBG_INT_MASK,           { 2,
255                                             2,
256                                             2 } },
257 #endif //0
258 
259         { DMAE_REG_DMAE_INT_MASK,         { 2,
260                                             2,
261                                             2 } },
262 
263         { DORQ_REG_DORQ_INT_MASK,         { 5,
264                                             5,
265                                             6 } },
266 #if 0
267         { HC_REG_HC_INT_MASK,             { 7,
268                                             7,
269                                             7 } },
270 #endif //0
271 
272         { IGU_REG_IGU_INT_MASK,           { 0,
273                                             0,
274                                             11 } },
275 #if 0
276         { MISC_REGISTERS_MISC_INT_MASK,   { 4,
277                                             4,
278                                             8 } },
279 
280         { NIG_REGISTERS_NIG_INT_MASK_0,   { 32,
281                                             32,
282                                             32 } },
283 
284         { NIG_REGISTERS_NIG_INT_MASK_1,   { 2,
285                                             4,
286                                             14 } },
287 
288         { PB_REGISTERS_PB_INT_MASK,       { 2,
289                                             2,
290                                             2} },
291 #endif // 0
292 
293         { PBF_REG_PBF_INT_MASK,           { 5,
294                                             5,
295                                             7 } },
296 
297         { PGLUE_B_REG_PGLUE_B_INT_MASK,   { 0,
298                                             0,
299                                             9 } },
300 #if 0
301         { PRS_REG_PRS_INT_MASK,           { 1,
302                                             1,
303                                             1 } },
304 #endif // 0
305 
306         { PXP2_REG_PXP2_INT_MASK_0,       { 25,
307                                             32,
308                                             32 } },
309 
310 #if 0
311         { PXP2_REG_PXP2_INT_MASK_1,       { 0,
312                                             6,
313                                             16} },
314 #endif //0
315 
316         { PXP_REG_PXP_INT_MASK_0,         { 32,
317                                             32,
318                                             32 } },
319 
320         { PXP_REG_PXP_INT_MASK_1,         { 5,
321                                             5,
322                                             8 } },
323 
324         { QM_REG_QM_INT_MASK,             { 2,
325                                             2,
326                                             14 } },
327 #if 0
328         { SEM_FAST_REG_SEM_FAST_INT_MASK, { 1, // This offset is actually 4 different registers (per SEM)
329                                             1,
330                                             1} },
331 
332         { SRC_REG_SRC_INT_MASK,           { 1,
333                                             3,
334                                             3 } },
335 #endif //0
336 
337         { TCM_REG_TCM_INT_MASK,           { 11,
338                                             11,
339                                             11 } },
340 
341         { TM_REG_TM_INT_MASK,             { 1,
342                                             1,
343                                             1} },
344 
345         { TSDM_REG_TSDM_INT_MASK_0,       { 32,
346                                             32,
347                                             32 } },
348 
349         { TSDM_REG_TSDM_INT_MASK_1,       { 10,
350                                             10,
351                                             11 } },
352 #if 0
353         { TSEM_REG_TSEM_INT_MASK_0,       { 32,
354                                             32,
355                                             32 } },
356 
357         { TSEM_REG_TSEM_INT_MASK_1,       { 10,
358                                             11,
359                                             13 } },
360 #endif // 0
361 
362         { UCM_REG_UCM_INT_MASK,           { 11,
363                                             11,
364                                             11} },
365 
366         { USDM_REG_USDM_INT_MASK_0,       { 32,
367                                             32,
368                                             32 } },
369 
370         { USDM_REG_USDM_INT_MASK_1,       { 10,
371                                             10,
372                                             11 } },
373 #if 0
374         { USEM_REG_USEM_INT_MASK_0,       { 32,
375                                             32,
376                                             32 } },
377 
378         { USEM_REG_USEM_INT_MASK_1,       { 10,
379                                             11,
380                                             11 } },
381 #endif //0
382 
383         { VFC_REG_VFC_INT_MASK,           { 0,
384                                             0,
385                                             1 } },
386 
387         { XCM_REG_XCM_INT_MASK,           { 14,
388                                             14,
389                                             14 } },
390 
391         { XSDM_REG_XSDM_INT_MASK_0,       { 32,
392                                             32,
393                                             32 } },
394 
395         { XSDM_REG_XSDM_INT_MASK_1,       { 10,
396                                             10,
397                                             11} },
398 #if 0
399         { XSEM_REG_XSEM_INT_MASK_0,      { 32,
400                                            32,
401                                            32 } },
402 
403         { XSEM_REG_XSEM_INT_MASK_1,      { 10,
404                                            11,
405                                            13 } } ,
406 #endif // 0
407     }; // init_mask_values_arr
408 
409     if (IS_VFDEV(pdev))
410     {
411         return;
412     }
413     if CHIP_IS_E1( pdev )
414     {
415         chip_idx = 0; // E1.0
416     }
417     else if CHIP_IS_E1H(pdev)
418     {
419         chip_idx = 1; // E1.5
420     }
421     else if CHIP_IS_E2E3(pdev)
422     {
423         chip_idx = 2; // E2/E3
424     }
425     else
426     {
427         // New chip!!!
428         DbgBreakIf(1); // E??
429     }
430 
431     DbgBreakIf( chip_idx >= ARRSIZE( init_mask_values_arr[0].mask_value ) );
432 
433     for( mask_idx = 0; mask_idx < ARRSIZE(init_mask_values_arr);  mask_idx++ )
434     {
435         mask_value = init_mask_values_arr[mask_idx].mask_value[chip_idx] ;
436 
437         if( mask_value )
438         {
439             val        = MASK_VALUE_GENERATE(mask_value);
440             offset     = init_mask_values_arr[mask_idx].reg_offset;
441             REG_WR(pdev, offset, val );
442         }
443     }
444     /*
445 
446     REG_WR(pdev,PXP_REG_PXP_INT_MASK_0,0xffffffff);
447     if (IS_E2(pdev)) {
448         REG_WR(pdev,PXP_REG_PXP_INT_MASK_1,0xff);
449     } else {
450     REG_WR(pdev,PXP_REG_PXP_INT_MASK_1,0x1f);
451     }
452     REG_WR(pdev,DORQ_REG_DORQ_INT_MASK,0x1f);
453     REG_WR(pdev,CFC_REG_CFC_INT_MASK ,0x3);
454     REG_WR(pdev,QM_REG_QM_INT_MASK ,0x3);
455     REG_WR(pdev,TM_REG_TM_INT_MASK ,0x1);
456     REG_WR(pdev,XSDM_REG_XSDM_INT_MASK_0 ,0xffffffff);
457     REG_WR(pdev,XSDM_REG_XSDM_INT_MASK_1 ,0x3ff);
458     REG_WR(pdev,XCM_REG_XCM_INT_MASK,0x3fff);
459     //REG_WR(pdev,XSEM_REG_XSEM_INT_MASK_0 ,0);
460     //REG_WR(pdev,XSEM_REG_XSEM_INT_MASK_1 ,0);
461     REG_WR(pdev,USDM_REG_USDM_INT_MASK_0 ,0xffffffff);
462     REG_WR(pdev,USDM_REG_USDM_INT_MASK_1 ,0x3ff);
463     REG_WR(pdev,UCM_REG_UCM_INT_MASK ,0x7ff);
464     //REG_WR(pdev,USEM_REG_USEM_INT_MASK_0 ,0);
465     //REG_WR(pdev,USEM_REG_USEM_INT_MASK_1 ,0);
466     REG_WR(pdev,GRCBASE_UPB+PB_REG_PB_INT_MASK ,0x3);
467     REG_WR(pdev,CSDM_REG_CSDM_INT_MASK_0 ,0xffffffff);
468     REG_WR(pdev,CSDM_REG_CSDM_INT_MASK_1 ,0x3ff);
469     REG_WR(pdev,CCM_REG_CCM_INT_MASK ,0x7ff);
470     //REG_WR(pdev,CSEM_REG_CSEM_INT_MASK_0 ,0);
471     //REG_WR(pdev,CSEM_REG_CSEM_INT_MASK_1 ,0);
472 
473     REG_WR(pdev,PXP2_REG_PXP2_INT_MASK_0,0xffffffff);
474 
475     REG_WR(pdev,TSDM_REG_TSDM_INT_MASK_0 ,0xffffffff);
476     REG_WR(pdev,TSDM_REG_TSDM_INT_MASK_1 ,0x3ff);
477     REG_WR(pdev,TCM_REG_TCM_INT_MASK ,0x7ff);
478     //REG_WR(pdev,TSEM_REG_TSEM_INT_MASK_0 ,0);
479     //REG_WR(pdev,TSEM_REG_TSEM_INT_MASK_1 ,0);
480     REG_WR(pdev,CDU_REG_CDU_INT_MASK ,0x7f);
481     REG_WR(pdev,DMAE_REG_DMAE_INT_MASK ,0x3);
482     //REG_WR(pdev,GRCBASE_MISC+MISC_REGISTERS_MISC_INT_MASK ,0);
483     //MASK BIT 3,4
484     REG_WR(pdev,PBF_REG_PBF_INT_MASK ,0x1f);
485     */
486 
487     // disable MCP's attentions
488     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,0);
489     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_0_OUT_1,0);
490     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2,0);
491     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_0_OUT_3,0);
492     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,0);
493     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_1_OUT_1,0);
494     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2,0);
495     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_1_OUT_3,0);
496     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4,0);
497     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5,0);
498     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6,0);
499     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7,0);
500     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4,0);
501     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5,0);
502     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6,0);
503     REG_WR(pdev,MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7,0);
504 }
505 
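/* Mask the PXP and PXP2 attention sources as part of the reset flow. */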
506 void lm_reset_mask_attn(struct _lm_device_t *pdev)
507 {
508     // mask the pxp attentions
509     REG_WR(pdev,PXP_REG_PXP_INT_MASK_0,0xffffffff); // 32 bits
510     if (CHIP_IS_E1x(pdev))
511     {
512         REG_WR(pdev,PXP_REG_PXP_INT_MASK_1,0x1f); // 5 bits
513     }
514     else
515     {
516         REG_WR(pdev,PXP_REG_PXP_INT_MASK_1,0xff); // 8 bits
517     }
518     REG_WR(pdev,PXP2_REG_PXP2_INT_MASK_0,0xffffffff); // 32 bits
519 
520     /* We never unmask this register so no need to re-mask it*/
521     //REG_WR(pdev,PXP2_REG_PXP2_INT_MASK_1,0x3f); // 32 bits
522 }
523 
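/*
 * Handle the latched attention signals (RBCR/RBCT/RBCN/RBCU/RBCP, GRC timeout,
 * reserved GRC and the MCP parity errors): clear the relevant latch bit via
 * MISC_REG_AEU_CLR_LATCH_SIGNAL and assert, except for a GRC timeout following
 * a NIG reset, which may be ignored up to grc_timeout_max_ignore times.
 */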
524 static void lm_latch_attn_everest_processing(lm_device_t *pdev, u32_t sig_word_aft_inv)
525 {
526     u32_t latch_bit_to_clr = 0;
527     u32_t val              = 0;
528     u32_t offset           = 0;
529 
530     //pass over all latched attentions
531     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR);
532     if ( offset & sig_word_aft_inv)
533     {
534         latch_bit_to_clr = 0x1;
535         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
536         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_RBCR received!!!\n");
537         DbgBreakIfAll(1);
538     }
539     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT);
540     if ( offset & sig_word_aft_inv)
541     {
542         latch_bit_to_clr = 0x2;
543         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
544         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_RBCT received!!!\n");
545         DbgBreakIfAll(1);
546     }
547     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN);
548     if ( offset & sig_word_aft_inv)
549     {
550         latch_bit_to_clr = 0x4;
551         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
552         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_RBCN received!!!\n");
553         DbgBreakIfAll(1);
554     }
555     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU);
556     if ( offset & sig_word_aft_inv)
557     {
558         latch_bit_to_clr = 0x8;
559         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
560         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_RBCU received!!!\n");
561         DbgBreakIfAll(1);
562     }
563     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP);
564     if ( offset & sig_word_aft_inv)
565     {
566         latch_bit_to_clr = 0x10;
567         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
568         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_RBCP received!!! \n");
569         DbgBreakIfAll(1);
570     }
571     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC);
572     if ( offset & sig_word_aft_inv)
573     {
574 #define GRC_TIMEOUT_MASK_ADDRESS(_val)  ( (_val)     & ((1<<19)-1)) // 0x0007ffff
575 #define GRC_TIMEOUT_MASK_FUNCTION(_val) ( (_val>>20) & ((1<<3)-1))  // 0x00700000
576 #define GRC_TIMEOUT_MASK_MASTER(_val)   ( (_val>>24) & ((1<<4)-1))  // 0x0f000000
577 
578         u32_t       addr                            = 0;
579         u32_t       func                            = 0;
580         u32_t       master                          = 0;
581         u32_t       grc_timeout_cnt                 = 0;
582         u8_t        b_assert                        = TRUE;
583         u8_t        b_nig_reset_called              = lm_is_nig_reset_called(pdev);
584         const u32_t grc_timeout_max_ignore          = pdev->params.grc_timeout_max_ignore;
585 
586         latch_bit_to_clr = 0x20;
587 
588         // we check if nig reset was done
589         if( b_nig_reset_called )
590         {
591             b_assert = FALSE;
592         }
593 
594         if (!CHIP_IS_E1(pdev))
595         {
596             val    = REG_RD(pdev, MISC_REG_GRC_TIMEOUT_ATTN);
597             addr   = GRC_TIMEOUT_MASK_ADDRESS(val);
598             func   = GRC_TIMEOUT_MASK_FUNCTION(val);
599             master = GRC_TIMEOUT_MASK_MASTER(val);
600 
601             // in non-E1 we can verify whether the mcp is the cause (probably due to the nig)
602             if( 2 != master ) // 2 is mcp cause
603             {
604                 b_assert = TRUE;
605             }
606         }
607 
608         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
609         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_TIMEOUT_GRC received!!! val=0x%08x master=0x%x func=0x%x addr=0x%x (x4=0x%X)\n"
610                                ,val, master, func, addr, addr*4 );
611 
612         // NOTE: we ignore b_nig_reset_called and ASSERT only according to grc_timeout_max_ignore value (default is 0x10)
613 
614         grc_timeout_cnt = lm_inc_cnt_grc_timeout_ignore(pdev, val);
615         // increment the ignored-timeouts counter; we only assert once it reaches grc_timeout_max_ignore
616         if( grc_timeout_cnt >= grc_timeout_max_ignore )
617         {
618             b_assert = TRUE;
619         }
620         else
621         {
622             b_assert = FALSE;
623         }
624 
625         if( b_assert )
626         {
627             DbgBreakIf(1);
628         }
629 
630         if( b_nig_reset_called )
631         {
632             // we reset the flag (we "allow" one timeout after nig reset)
633             lm_clear_nig_reset_called(pdev);
634         }
635     }
636     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC);
637     if ( offset & sig_word_aft_inv)
638     {
639         latch_bit_to_clr = 0x40;
640         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
641         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_RSVD_GRC received!!!\n");
642         DbgBreakIfAll(1);
643     }
644     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_ROM_PARITY_MCP);
645     if ( offset & sig_word_aft_inv)
646     {
647         latch_bit_to_clr = 0x80;
648         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
649         val = lm_mcp_check(pdev);
650         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_ROM_PARITY_MCP received!!!\n");
651         /* For E2, at the time this code was written (e2-bringup ) the parity is (somehow) expected */
652         if (CHIP_IS_E1x(pdev))
653         {
654             DbgBreakIfAll(1);
655         }
656         else
657         {
658             DbgBreakIf(1);
659         }
660     }
661     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_UM_RX_PARITY_MCP);
662     if ( offset & sig_word_aft_inv)
663     {
664         latch_bit_to_clr = 0x100;
665         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
666         val = lm_mcp_check(pdev);
667         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_UM_RX_PARITY_MCP received!!!\n");
668         DbgBreakIfAll(1);
669     }
670     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_UM_TX_PARITY_MCP);
671     if ( offset & sig_word_aft_inv)
672     {
673         latch_bit_to_clr = 0x200;
674         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
675         val = lm_mcp_check(pdev);
676         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_UM_TX_PARITY_MCP received!!!\n");
677         DbgBreakIfAll(1);
678     }
679     offset = GENERAL_ATTEN_OFFSET(LATCHED_ATTN_SCPAD_PARITY_MCP);
680     if ( offset & sig_word_aft_inv)
681     {
682         latch_bit_to_clr = 0x400;
683         REG_WR(pdev, MISC_REG_AEU_CLR_LATCH_SIGNAL, latch_bit_to_clr);
684         val = lm_mcp_check(pdev);
685         DbgMessage(pdev, FATAL, "lm_latch_attn_everest_processing: LATCHED_ATTN_SCPAD_PARITY_MCP received!!!\n");
686         DbgBreakIfAll(1);
687     }
688 }
689 
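/*
 * Dispatch the per-port "hard-wired" attention lines (GPIOs, SW timer and
 * general attentions); general attentions are also cleared in the AEU.
 */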
690 static void lm_hard_wired_processing(lm_device_t *pdev, u16_t assertion_proc_flgs)
691 {
692     /* processing of the upper 8 bits (8-15) of the "hard-wired" attention signals toward the IGU,
693        excluding NIG & PXP "close the gates"
694 
695        ! No need to lock here since this is an uncommon group whether there is a recovery procedure or not.
696 
697        Signal name         Bit position    SOURCE       Type        Required Destination
698        -----------------------------------------------------------------------------
699        NIG attention for port0  D8         NIG          Event       MCP/Driver0(PHY)
700        SW timer#4 port0         D9         MISC         Event       MCP -> Ignore!
701        GPIO#2 port0             D10        MISC         Event       MCP
702        GPIO#3 port0             D11        MISC         Event       MCP
703        GPIO#4 port0             D12        MISC         Event       MCP
704        General attn1            D13        GRC mapped   Attention   MCP/Driver0/Driver1 -> ASSERT!
705        General attn2            D14        GRC mapped   Attention   MCP/Driver0/Driver1 -> ASSERT!
706        General attn3            D15        GRC mapped   Attention   MCP/Driver0/Driver1 -> ASSERT!
707     */
708     //TODO: for the required attn signals, need to "clean the hw block" (INT_STS_CLR..)
709     if (PORT_ID(pdev) == 0)
710     {
711 #if 0   // Timer 4 is being used by OCBB now
712         if (assertion_proc_flgs & ATTN_SW_TIMER_4_FUNC)
713         {
714             //DbgMessage(pdev, FATAL, "lm_hard_wired_processing: ATTN_SW_TIMER_4_FUNC!\n");
715             //to deal with this signal, add dispatch func call here
716         }
717 #endif
718         if (assertion_proc_flgs & GPIO_2_FUNC)
719         {
720             DbgMessage(pdev, WARN, "lm_hard_wired_processing: GPIO_2_FUNC!\n");
721             //to deal with this signal, add dispatch func call here
722         }
723         if (assertion_proc_flgs & GPIO_3_FUNC)
724         {
725             DbgMessage(pdev, WARN, "lm_hard_wired_processing: GPIO_3_FUNC!\n");
726             //to deal with this signal, add dispatch func call here
727         }
728         if (assertion_proc_flgs & GPIO_4_FUNC)
729         {
730             DbgMessage(pdev, WARN, "lm_hard_wired_processing: GPIO_4_FUNC!\n");
731             // Will be handled in deassertion
732         }
733         if (assertion_proc_flgs & ATTN_GENERAL_ATTN_1)
734         {
735             DbgMessage(pdev, FATAL, "lm_hard_wired_processing: ATTN_GENERAL_ATTN_1! and clean it!!!\n");
736             REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_1,0x0);
737         }
738         if (assertion_proc_flgs & ATTN_GENERAL_ATTN_2)
739         {
740             DbgMessage(pdev, FATAL, "lm_hard_wired_processing: ATTN_GENERAL_ATTN_2! and clean it!!!\n");
741             REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_2,0x0);
742         }
743         if (assertion_proc_flgs & ATTN_GENERAL_ATTN_3)
744         {
745             DbgMessage(pdev, FATAL, "lm_hard_wired_processing: ATTN_GENERAL_ATTN_3! and clean it!!!\n");
746             REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_3,0x0);
747         }
748     }
749     else
750     {
751         DbgBreakIf(PORT_ID(pdev) != 1);
752 
753         if (assertion_proc_flgs & ATTN_SW_TIMER_4_FUNC1)
754         {
755             //DbgMessage(pdev, FATAL, "lm_hard_wired_processing: ATTN_SW_TIMER_4_FUNC1!\n");
756             //to deal with this signal, add dispatch func call here
757         }
758         if (assertion_proc_flgs & GPIO_2_FUNC1)
759         {
760             DbgMessage(pdev, WARN, "lm_hard_wired_processing: GPIO_2_FUNC1!\n");
761             //to deal with this signal, add dispatch func call here
762         }
763         if (assertion_proc_flgs & GPIO_3_FUNC1)
764         {
765             DbgMessage(pdev, WARN, "lm_hard_wired_processing: GPIO_3_FUNC1!\n");
766             //to deal with this signal, add dispatch func call here
767         }
768         if (assertion_proc_flgs & GPIO_4_FUNC1)
769         {
770             DbgMessage(pdev, WARN, "lm_hard_wired_processing: GPIO_4_FUNC1!\n");
771             // Will be handled in deassertion
772         }
773         if (assertion_proc_flgs & ATTN_GENERAL_ATTN_4)
774         {
775             DbgMessage(pdev, FATAL, "lm_hard_wired_processing: ATTN_GENERAL_ATTN_4! and clean it!!!\n");
776             REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_4,0x0);
777         }
778         if (assertion_proc_flgs & ATTN_GENERAL_ATTN_5)
779         {
780             DbgMessage(pdev, FATAL, "lm_hard_wired_processing: ATTN_GENERAL_ATTN_5! and clean it!!!\n");
781             REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_5,0x0);
782         }
783         if (assertion_proc_flgs & ATTN_GENERAL_ATTN_6)
784         {
785             DbgMessage(pdev, FATAL, "lm_hard_wired_processing: ATTN_GENERAL_ATTN_6! and clean it!!!\n");
786             REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_6,0x0);
787         }
788     }
789 }
790 
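/*
 * Handle a NIG attention for this port: update the link, then read the
 * per-port NIG status-interrupt register, fold in the current value of each
 * unicore status line and write the updated status back.
 */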
791 static void lm_nig_processing(lm_device_t *pdev)
792 {
793     u32_t nig_status_port          = 0;
794     u32_t unicore_val              = 0;
795     u32_t is_unicore_intr_asserted = 0;
796     // save nig interrupt mask and set it back later
797     lm_link_update(pdev);
798     if (PORT_ID(pdev) == 0)
799     {
800         //read the status interrupt of the NIG for the appropriate port (will do read-modify-write)
801         nig_status_port = REG_RD(pdev,  NIG_REG_STATUS_INTERRUPT_PORT0);
802 
803         //pass over each of the 24 NIG registers to find out why the NIG attention was asserted.
804         //for every unicore interrupt read, if it differs from the corresponding bit in
805         //NIG_REG_STATUS_INTERRUPT_PORT0, we need to assign the value read into the appropriate bit
806         // of the NIG_REG_STATUS_INTERRUPT_PORT0 register.
807 
808         //HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC0_STATUS_MISC_MI_INT, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT_SIZE);
809         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC0_STATUS_MISC_MI_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_COMPLETE_SIZE);
810         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC0_STATUS_MISC_CFG_CHANGE, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_CFG_CHANGE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_CFG_CHANGE_SIZE);
811         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC0_STATUS_MISC_LINK_STATUS, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_LINK_STATUS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_LINK_STATUS_SIZE);
812         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC0_STATUS_MISC_LINK_CHANGE, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_LINK_CHANGE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_LINK_CHANGE_SIZE);
813         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC0_STATUS_MISC_ATTN, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_ATTN, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_ATTN_SIZE);
814         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES0_STATUS_MAC_CRS, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_MAC_CRS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_MAC_CRS_SIZE);
815         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES0_STATUS_AUTONEG_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_AUTONEG_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_AUTONEG_COMPLETE_SIZE);
816         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES0_STATUS_FIBER_RXACT, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_FIBER_RXACT, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_FIBER_RXACT_SIZE);
817         //HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES0_STATUS_LINK_STATUS, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS_SIZE);
818         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES0_STATUS_MR_PAGE_RX, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_MR_PAGE_RX, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_MR_PAGE_RX_SIZE);
819         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES0_STATUS_CL73_AN_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_CL73_AN_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_CL73_AN_COMPLETE_SIZE);
820         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES0_STATUS_CL73_MR_PAGE_RX, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_CL73_MR_PAGE_RX, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_CL73_MR_PAGE_RX_SIZE);
821         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES0_STATUS_RX_SIGDET, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_RX_SIGDET, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_RX_SIGDET_SIZE);
822         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_REMOTEMDIOREQ, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_REMOTEMDIOREQ, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_REMOTEMDIOREQ_SIZE);
823         //HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_LINK10G, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G_SIZE);
824         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_AUTONEG_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_AUTONEG_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_AUTONEG_COMPLETE_SIZE);
825         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_FIBER_RXACT, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_FIBER_RXACT, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_FIBER_RXACT_SIZE);
826         //HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_LINK_STATUS, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE);
827         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_MR_PAGE_RX, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_MR_PAGE_RX, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_MR_PAGE_RX_SIZE);
828         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_CL73_AN_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_CL73_AN_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_CL73_AN_COMPLETE_SIZE);
829         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_CL73_MR_PAGE_RX, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_CL73_MR_PAGE_RX, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_CL73_MR_PAGE_RX_SIZE);
830         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_RX_SIGDET, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_RX_SIGDET, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_RX_SIGDET_SIZE);
831         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS0_STATUS_MAC_CRS, &unicore_val, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_MAC_CRS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_MAC_CRS_SIZE);
832 
833         //write back the updated status interrupt of the NIG for the appropriate port.
834         REG_WR(pdev,  NIG_REG_STATUS_INTERRUPT_PORT0, nig_status_port);
835     }
836     else
837     {
838         DbgBreakIf(PORT_ID(pdev) != 1);
839         nig_status_port = REG_RD(pdev,  NIG_REG_STATUS_INTERRUPT_PORT1);
840 
841         //HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC1_STATUS_MISC_MI_INT, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_MI_INT, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_MI_INT_SIZE);
842         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC1_STATUS_MISC_MI_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_MI_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_MI_COMPLETE_SIZE);
843         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC1_STATUS_MISC_CFG_CHANGE, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_CFG_CHANGE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_CFG_CHANGE_SIZE);
844         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC1_STATUS_MISC_LINK_STATUS, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_LINK_STATUS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_LINK_STATUS_SIZE);
845         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC1_STATUS_MISC_LINK_CHANGE, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_LINK_CHANGE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_LINK_CHANGE_SIZE);
846         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_EMAC1_STATUS_MISC_ATTN, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_ATTN, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_EMAC1_MISC_ATTN_SIZE);
847         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES1_STATUS_MAC_CRS, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_MAC_CRS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_MAC_CRS_SIZE);
848         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES1_STATUS_AUTONEG_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_AUTONEG_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_AUTONEG_COMPLETE_SIZE);
849         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES1_STATUS_FIBER_RXACT, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_FIBER_RXACT, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_FIBER_RXACT_SIZE);
850         //HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES1_STATUS_LINK_STATUS, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_LINK_STATUS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_LINK_STATUS_SIZE);
851         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES1_STATUS_MR_PAGE_RX, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_MR_PAGE_RX, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_MR_PAGE_RX_SIZE);
852         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES1_STATUS_CL73_AN_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_CL73_AN_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_CL73_AN_COMPLETE_SIZE);
853         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES1_STATUS_CL73_MR_PAGE_RX, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_CL73_MR_PAGE_RX, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_CL73_MR_PAGE_RX_SIZE);
854         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_SERDES1_STATUS_RX_SIGDET, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_RX_SIGDET, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_SERDES1_RX_SIGDET_SIZE);
855         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_REMOTEMDIOREQ, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_REMOTEMDIOREQ, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_REMOTEMDIOREQ_SIZE);
856         //HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_LINK10G, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_LINK10G, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_LINK10G_SIZE);
857         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_AUTONEG_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_AUTONEG_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_AUTONEG_COMPLETE_SIZE);
858         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_FIBER_RXACT, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_FIBER_RXACT, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_FIBER_RXACT_SIZE);
859         //HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_LINK_STATUS, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_LINK_STATUS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_LINK_STATUS_SIZE);
860         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_MR_PAGE_RX, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_MR_PAGE_RX, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_MR_PAGE_RX_SIZE);
861         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_CL73_AN_COMPLETE, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_CL73_AN_COMPLETE, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_CL73_AN_COMPLETE_SIZE);
862         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_CL73_MR_PAGE_RX, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_CL73_MR_PAGE_RX, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_CL73_MR_PAGE_RX_SIZE);
863         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_RX_SIGDET, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_RX_SIGDET, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_RX_SIGDET_SIZE);
864         HANDLE_UNICORE_INT_ASSERTED(pdev, NIG_REG_XGXS1_STATUS_MAC_CRS, &unicore_val, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_MAC_CRS, &nig_status_port, &is_unicore_intr_asserted, NIG_STATUS_INTERRUPT_PORT1_REG_STATUS_XGXS1_MAC_CRS_SIZE);
865 
866         REG_WR(pdev,  NIG_REG_STATUS_INTERRUPT_PORT1, nig_status_port);
867 
868     }
869 }
870 
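/*
 * Assertion flow: mask the asserted AEU lines for this port (under the HW
 * lock), update the SW attention state, run the hard-wired and NIG handlers,
 * acknowledge the asserted lines toward the HC/IGU and finally restore the
 * NIG interrupt mask.
 */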
871 void lm_handle_assertion_processing(lm_device_t *pdev, u16_t assertion_proc_flgs)
872 {
873     u32_t       val           = 0;
874     u32_t       port_reg_name = 0;
875     u32_t       mask_val      = 0;
876     u32_t       nig_mask      = 0;
877 
878     DbgMessage(pdev, INFORM, "lm_handle_assertion_processing: assertion_proc_flgs:%d\n", assertion_proc_flgs);
879 
880     //mask only appropriate attention output signals from configured routing and unifier logic toward IGU.
881     //This is for driver/chip sync to eventually return to '00' monitored state
882     //in both leading & trailing latch.
883     //mask non-hard-wired dynamic groups only
884 
885     DbgBreakIf(pdev->vars.attn_state & assertion_proc_flgs);
886 
887     //mask relevant AEU attn lines
888     //             mask  assert_flgs  new mask
889     //legal:        0       0       ->    0
890     //              1       0       ->    1
891     //              1       1       ->    0
892     //ASSERT:       0       1 -> this won't change us thanks to & ~
893 
894     ASSERT_STATIC( HW_LOCK_RESOURCE_PORT0_ATT_MASK +1 == HW_LOCK_RESOURCE_PORT1_ATT_MASK );
895     ASSERT_STATIC( NIG_REG_MASK_INTERRUPT_PORT0 + 4   == NIG_REG_MASK_INTERRUPT_PORT1 );
896 
897     lm_hw_lock(pdev, HW_LOCK_RESOURCE_PORT0_ATT_MASK + PORT_ID(pdev), TRUE);
898     port_reg_name = PORT_ID(pdev) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0;
899     // read the hw current mask value
900     mask_val=REG_RD(pdev, port_reg_name);
901     //changed from XOR to & ~
902     pdev->vars.aeu_mask_attn_func = mask_val & 0xff;
903     DbgMessage(pdev, INFORM, "lm_handle_assertion_processing: BEFORE: aeu_mask_attn_func:0x%x\n", pdev->vars.aeu_mask_attn_func);
904     //changed from XOR to & ~
905     pdev->vars.aeu_mask_attn_func &= ~(assertion_proc_flgs & 0xff);
906     REG_WR(pdev, port_reg_name, pdev->vars.aeu_mask_attn_func);
907     DbgMessage(pdev, INFORM, "lm_handle_assertion_processing: AFTER : aeu_mask_attn_func:0x%x\n", pdev->vars.aeu_mask_attn_func);
908     lm_hw_unlock(pdev, HW_LOCK_RESOURCE_PORT0_ATT_MASK + PORT_ID(pdev));
909     //update the bits states
910 
911     //        state  assert_flgs  new state
912     //legal:    0       0         -> 0
913     //          0       1         -> 1
914     //          1       0         -> 1
915     //error:    1       1 -> this won't change us thanks to |
916     DbgMessage(pdev, INFORM, "lm_handle_assertion_processing: BEFORE: attn_state:0x%x\n", pdev->vars.attn_state);
917     //changed from XOR to OR for safety
918     pdev->vars.attn_state |= assertion_proc_flgs;
919 
920     DbgMessage(pdev, INFORM, "lm_handle_assertion_processing: AFTER : attn_state:0x%x\n", pdev->vars.attn_state);
921     //process only hard-wired lines in case any got up
922     if (assertion_proc_flgs & ATTN_HARD_WIRED_MASK)
923     {
924         lm_hard_wired_processing(pdev, assertion_proc_flgs);
925     }
926 
927     // now handle nig
928     if (assertion_proc_flgs & ATTN_NIG_FOR_FUNC)
929     {
930         MM_ACQUIRE_PHY_LOCK(pdev);
931          // save nig interrupt mask and set it back later
932         nig_mask = REG_RD(pdev,  NIG_REG_MASK_INTERRUPT_PORT0 + 4*PORT_ID(pdev));
933         REG_WR(pdev,  NIG_REG_MASK_INTERRUPT_PORT0 + 4*PORT_ID(pdev), 0);
934 
935         // we'll handle the attention only if mask is not 0
936         // if mask is 0, it means an "old" and irrelevant attention was sent
937         // and we should not handle it (e.g. CQ48990 - got link down event after loopback mode was set).
938         if( nig_mask )
939         {
940             lm_nig_processing(pdev);
941         }
942         else
943         {
944             DbgMessage(pdev, WARN, "lm_handle_assertion_processing: got attention when nig_mask is 0\n" );
945         }
946     }
947 
948     //parallel write to IGU to set the attn_ack for _all asserted_ lines.
949     val = assertion_proc_flgs;
950 
951     // attention bits set
952     if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC)
953     {
954         REG_WR(pdev,  HC_REG_COMMAND_REG + PORT_ID(pdev)*32 + COMMAND_REG_ATTN_BITS_SET,val);
955     }
956     else
957     {
958         u32_t cmd_addr = IGU_CMD_ATTN_BIT_SET_UPPER;
959         if (INTR_BLK_ACCESS(pdev) == INTR_BLK_ACCESS_IGUMEM)
960         {
961             REG_WR(pdev, BAR_IGU_INTMEM + cmd_addr*8, val);
962         }
963         else
964         {
965             struct igu_ctrl_reg cmd_ctrl;
966             u8_t                igu_func_id = 0;
967             /* GRC ACCESS: */
968             /* Write the Data, then the control */
969              /* [18:12] - FID (if VF - [18] = 0; [17:12] = VF number; if PF - [18] = 1; [17:14] = 0; [13:12] = PF number) */
970             igu_func_id = IGU_FUNC_ID(pdev);
971             cmd_ctrl.ctrl_data =
972                 ((cmd_addr << IGU_CTRL_REG_ADDRESS_SHIFT) |
973                  (igu_func_id << IGU_CTRL_REG_FID_SHIFT) |
974                  (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
975 
976             REG_WR(pdev, IGU_REG_COMMAND_REG_32LSB_DATA, val);
977             REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl.ctrl_data);
978         }
979     }
980 
981     // now set back the mask
982     if (assertion_proc_flgs & ATTN_NIG_FOR_FUNC)
983     {
984         u8_t blk_type   = INTR_BLK_TYPE(pdev);
985         u8_t blk_access = INTR_BLK_ACCESS(pdev);
986 
987         if ( ( blk_type != INTR_BLK_HC ) && ( blk_access == INTR_BLK_ACCESS_IGUMEM ))
988         {
989             u32 cnt = 0;
990             // Verify that IGU ack through BAR was written before restoring NIG mask.
991             // This loop should exit after 2-3 iterations max.
992             do
993             {
994                 val = REG_RD(pdev, IGU_REG_ATTENTION_ACK_BITS);
995             }
996             while (((val & ATTN_NIG_FOR_FUNC) == 0) && (++cnt < MAX_IGU_ATTN_ACK_TO));
997 
998             if (!val)
999             {
1000                 DbgMessage(pdev, FATAL, "Failed to verify IGU ack on time\n");
1001             }
1002         }
1003         REG_WR(pdev,  NIG_REG_MASK_INTERRUPT_PORT0 + 4*PORT_ID(pdev), nig_mask);
1004         MM_RELEASE_PHY_LOCK(pdev);
1005     }
1006 }
1007 
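/* Read the CFC interrupt status, then read CFC_REG_CFC_INT_STS_CLR (presumably clearing it); returns the status value. */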
1008 static u32_t lm_cfc_attn_everest_processing(lm_device_t *pdev)
1009 {
1010     u32_t val, valc;
1011     val = REG_RD(pdev,CFC_REG_CFC_INT_STS);
1012 
1013     // TODO add defines here
1014     DbgMessage(pdev, FATAL, "CFC hw attention 0x%x\n",val);
1015     if (val) {
1016         pdev->vars.cfc_int_status_cnt++;
1017         // CFC error attention
1018         if (val & 0x2)
1019         {
1020             //DbgBreakIfAll(1);
1021         }
1022     }
1023     valc = REG_RD(pdev,CFC_REG_CFC_INT_STS_CLR);
1024     return val;
1025 }
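/* Log the PXP interrupt status and assert on the RQ_USDMDP_FIFO_OVERFLOW attention. */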
1026 static void lm_pxp_attn_everest_processing(lm_device_t *pdev)
1027 {
1028     u32_t val = REG_RD(pdev,PXP_REG_PXP_INT_STS_0);
1029 
1030     // TODO add defines here
1031     DbgMessage(pdev, FATAL, "PXP hw attention 0x%x\n",val);
1032     // RQ_USDMDP_FIFO_OVERFLOW attention
1033     if (val & 0x18000)
1034     {
1035         DbgBreakIfAll(1);
1036     }
1037 
1038 }
1039 /*
1040  *Function Name:lm_spio5_attn_everest_processing
1041  *
1042  *Parameters:
1043  *
1044  *Description:
1045  *  Indicates fan failure on specific external_phy_config (PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
1046  *Returns:
1047  *
1048  */
1049 static void lm_spio5_attn_everest_processing(lm_device_t *pdev)
1050 {
1051     u32_t      val            = 0;
1052     u32_t      offset         = 0;
1053     u32_t      ext_phy_config = 0;
1054     const u8_t port_id        = PORT_ID(pdev);
1055 
1056     // Special fan failure handling for boards with external PHY SFX7101 (which includes a fan)
1057     PHY_HW_LOCK(pdev);
1058     elink_hw_reset_phy(&pdev->params.link);
1059     PHY_HW_UNLOCK(pdev);
1060 
1061     offset = ( 0 == port_id ) ? MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 ;
1062 
1063     val = REG_RD(pdev, offset );
1064 
1065     DbgMessage(pdev, FATAL, "lm_spio5_attn_everest_processing: SPIO5 hw attention 0x%x\n",val);
1066 
1067     // mask flags so we won't get this attention anymore
1068     RESET_FLAGS(val, AEU_INPUTS_ATTN_BITS_SPIO5 ) ;
1069     REG_WR(pdev, offset, val ) ;
1070 
1071     // change phy_type to type failure (under phy lock)
1072     MM_ACQUIRE_PHY_LOCK(pdev);
1073 
1074     offset = OFFSETOF(shmem_region_t,dev_info.port_hw_config[port_id].external_phy_config);
1075 
1076     LM_SHMEM_READ(pdev, offset, &ext_phy_config);
1077 
1078     RESET_FLAGS(ext_phy_config, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK ) ;
1079     SET_FLAGS(ext_phy_config, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE ) ;
1080 
1081     // Set external phy type to failure for MCP to know about the failure
1082     LM_SHMEM_WRITE(pdev, offset, ext_phy_config);
1083 
1084     DbgMessage(pdev, WARN, "lm_spio5_attn_everest_processing: external_phy_type 0x%x\n",ext_phy_config);
1085 
1086     // Indicate "link-down". elink_hw_reset_phy takes care of the physical part, but part of the function
1087     // masks attentions, which means we won't get a link event from anywhere else. Therefore we need to
1088     // indicate link down at this point to OS... to suppress traffic and upload TOE connections...
1089     // we do this under lock since we change the link status...
1090     pdev->vars.link_status = LM_STATUS_LINK_DOWN;
1091 
1092     mm_indicate_link(pdev, pdev->vars.link_status, pdev->vars.medium);
1093 
1094     MM_RELEASE_PHY_LOCK(pdev);
1095 
1096     // write to the event log!
1097     mm_event_log_generic( pdev, LM_LOG_ID_FAN_FAILURE );
1098 
1099     mm_indicate_hw_failure(pdev);
1100 }
1101 
1102 // Check current fan failure state - report in case signaled.
1103 void lm_check_fan_failure(struct _lm_device_t *pdev)
1104 {
1105     u32_t val = 0;
1106 
1107     if (IS_VFDEV(pdev))
1108     {
1109         return;
1110     }
1111 
1112     val = REG_RD(pdev, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + PORT_ID(pdev)*4);
1113 
1114     if( GET_FLAGS(val, AEU_INPUTS_ATTN_BITS_SPIO5))
1115     {
1116         lm_spio5_attn_everest_processing(pdev);
1117     }
1118 }
1119 
1120 // PMF change or link change
1121 // The PMF sends link updates to all functions (except itself), OR this function becomes the PMF following an MCP notification.
1122 // In some cases the PMF sends a link event to itself as well, if errors occurred in the MAC.
1123 static void lm_pmf_or_link_event(lm_device_t *pdev, u32_t drv_status)
1124 {
1125     u32_t val = 0;
1126 
1127 
1128     DbgMessage(pdev, WARN, "lm_pmf_or_link_event: sync general attention received!!! for func%d\n",FUNC_ID(pdev));
1129 
1130     // sync with link
1131     MM_ACQUIRE_PHY_LOCK(pdev);
1132     elink_link_status_update(&pdev->params.link,&pdev->vars.link);
1133     lm_link_report(pdev);
1134     MM_RELEASE_PHY_LOCK(pdev);
1135 
1136     if (!IS_PMF(pdev) && GET_FLAGS(drv_status,DRV_STATUS_PMF))
1137     {
1138         //pmf migration
1139         // load stats from MCP
1140         // load stat from MCP
1141         MM_ACQUIRE_PHY_LOCK(pdev);
1142         lm_stats_on_pmf_update(pdev,TRUE);
1143         MM_RELEASE_PHY_LOCK(pdev);
1144 
1145         // Connect to NIG attentions
1146         val = (0xff0f | (1 << (VNIC_ID(pdev) + 4)));
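             // 0xff0f appears to enable the default attention lines; bit (VNIC_ID + 4)
             // adds this vnic's NIG status sync attention (the E1H NIG sync attentions
             // are mapped to the dynamic attention groups 4-7).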
1147         if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC)
1148         {
1149             REG_WR(pdev,  (PORT_ID(pdev) ? HC_REG_TRAILING_EDGE_1 : HC_REG_TRAILING_EDGE_0), val);
1150             REG_WR(pdev,  (PORT_ID(pdev) ? HC_REG_LEADING_EDGE_1  : HC_REG_LEADING_EDGE_0) , val);
1151         }
1152         else
1153         {
1154             if (CHIP_IS_E3(pdev))
1155             {
1156                 val &= ~ATTN_SW_TIMER_4_FUNC; // To prevent Timer4 expiration attention
1157             }
1158             REG_WR(pdev,  IGU_REG_TRAILING_EDGE_LATCH, val);
1159             REG_WR(pdev,  IGU_REG_LEADING_EDGE_LATCH, val);
1160         }
1161 
1162         if(TRUE == IS_DCB_ENABLED(pdev))
1163         {
1164             lm_dcbx_pmf_migration(pdev);
1165         }
1166     }
1167 }
1168 
1169 static void lm_dcc_event(lm_device_t *pdev, u32_t dcc_event)
1170 {
1171     u32_t       val               = 0;
1172     u32_t       event_val_current = 0;
1173     u32_t       fw_resp           = 0 ;
1174     lm_status_t lm_status         = LM_STATUS_FAILURE ;
1175 
1176     DbgMessage(pdev, WARN, "lm_dcc_event: dcc_event=0x%x\n",dcc_event);
1177 
1178     if( !IS_MULTI_VNIC(pdev) )
1179     {
1180         DbgBreakIf(1);
1181         return;
1182     }
1183 
1184     // read shmem
1185 
1186     // Read the new mf config from shmem
1187     LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].config), &val);
1188 
1189     pdev->hw_info.mf_info.func_mf_cfg = val ;
1190 
1191     // is it enable/disable
1192     event_val_current = DRV_STATUS_DCC_DISABLE_ENABLE_PF ;
1193 
1194     if GET_FLAGS( dcc_event, event_val_current )
1195     {
1196         if( GET_FLAGS( pdev->hw_info.mf_info.func_mf_cfg, FUNC_MF_CFG_FUNC_DISABLED ) )
1197         {
1198             DbgMessage(pdev, WARN, "lm_dcc_event: mf_cfg function disabled val=0x%x\n",val);
1199 
1200             // TODO - receive packets from another machine when the link is down - expected - the miniport drops packets
1201             // TBD - disable RX & TX
1202         }
1203         else
1204         {
1205             DbgMessage(pdev, WARN, "lm_dcc_event: mf_cfg function enabled val=0x%x\n",val);
1206             // TBD - enable RX & TX
1207         }
1208         lm_status = LM_STATUS_SUCCESS ;
1209         RESET_FLAGS( dcc_event, event_val_current );
1210     }
1211 
1212     event_val_current = DRV_STATUS_DCC_BANDWIDTH_ALLOCATION ;
1213 
1214     if GET_FLAGS(dcc_event, event_val_current)
1215     {
1216         if( !IS_PMF(pdev) )
1217         {
1218             DbgBreakIf(1);
1219             return;
1220         }
1221         lm_status = LM_STATUS_SUCCESS ;
1222         RESET_FLAGS( dcc_event, event_val_current );
1223     }
1224 
1225     /* Report results to MCP */
1226     if (dcc_event)
1227     {
1228         // unknown event
1229         lm_status = lm_mcp_cmd_send_recieve( pdev, lm_mcp_mb_header, DRV_MSG_CODE_DCC_FAILURE, 0, MCP_CMD_DEFAULT_TIMEOUT, &fw_resp ) ;
1230     }
1231     else
1232     {
1233         // we are done
1234         if( LM_STATUS_SUCCESS == lm_status )
1235         {
1236             // sync with link --> update min/max & link for all functions
1237             MM_ACQUIRE_PHY_LOCK(pdev);
1238             elink_link_status_update(&pdev->params.link,&pdev->vars.link);
1239             lm_link_report(pdev);
1240             MM_RELEASE_PHY_LOCK(pdev);
1241         }
1242         lm_status = lm_mcp_cmd_send_recieve( pdev, lm_mcp_mb_header, DRV_MSG_CODE_DCC_OK, 0, MCP_CMD_DEFAULT_TIMEOUT, &fw_resp ) ;
1243         //bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
1244     }
1245     DbgBreakIf( lm_status != LM_STATUS_SUCCESS );
1246 }
1247 
1248 static lm_status_t lm_set_bandwidth_event(lm_device_t *pdev)
1249 {
1250     u32_t       mcp_resp    = 0;
1251     lm_status_t lm_status   = LM_STATUS_SUCCESS;
1252 
1253     DbgBreakIf(!IS_SD_UFP_MODE(pdev) && (!IS_MULTI_VNIC(pdev) || !pdev->vars.is_pmf));
1254 
1255     MM_ACQUIRE_PHY_LOCK(pdev);
1256 
1257     //update CMNG data from SHMEM
1258     lm_reload_link_and_cmng(pdev);
1259 
1260     //acknowledge the MCP event
1261     lm_mcp_cmd_send_recieve(pdev,lm_mcp_mb_header, DRV_MSG_CODE_SET_MF_BW_ACK, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
1262 
1263     if ( mcp_resp != FW_MSG_CODE_SET_MF_BW_DONE)
1264     {
1265         DbgBreakIf(mcp_resp != FW_MSG_CODE_SET_MF_BW_DONE);
1266         lm_status = LM_STATUS_FAILURE;
1267         goto _exit;
1268     }
1269 
1270     //indicate link change to OS, since sync_link_status does not generate a link event for the PMF.
1271     mm_indicate_link(pdev, pdev->vars.link_status, pdev->vars.medium);
1272 
1273     //notify all functions
1274     sync_link_status(pdev);
1275 
1276 _exit:
1277     MM_RELEASE_PHY_LOCK(pdev);
1278     return lm_status;
1279 }
1280 
1281 typedef enum drv_info_opcode drv_info_opcode_t;
1282 
1283 lm_status_t lm_stats_drv_info_to_mfw_event( struct _lm_device_t* pdev )
1284 {
1285     u32_t              val             = 0;
1286     u32_t              drv_msg         = 0;
1287     u32_t              ver             = 0;
1288     u32_t              fw_resp         = 0 ;
1289     lm_status_t        lm_status       = LM_STATUS_SUCCESS ;
1290     drv_info_opcode_t  drv_info_op     = -1;
1291 
1292     if( !LM_SHMEM2_HAS(pdev, drv_info_control) )
1293     {
1294         // We should never get here...
1295         DbgBreakIfAll(!LM_SHMEM2_HAS(pdev, drv_info_control));
1296         return LM_STATUS_FAILURE;
1297     }
1298 
1299     LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t, drv_info_control), &val );
1300 
1301     ver = ( GET_FLAGS( val, DRV_INFO_CONTROL_VER_MASK ) ) >> DRV_INFO_CONTROL_VER_SHIFT ;
1302 
1303     do
1304     {
1305         if( DRV_INFO_CUR_VER != ver )
1306         {
1307             // We don't support this interface version
1308             drv_msg = DRV_MSG_CODE_DRV_INFO_NACK;
1309             break;
1310         }
1311 
1312         drv_info_op = ( GET_FLAGS( val, DRV_INFO_CONTROL_OP_CODE_MASK ) ) >> DRV_INFO_CONTROL_OP_CODE_SHIFT;
1313 
1314         lm_status = lm_stats_drv_info_to_mfw_assign(pdev, drv_info_op );
1315 
1316         if( LM_STATUS_SUCCESS != lm_status )
1317         {
1318             // We don't support this interface version/opcode
1319             drv_msg = DRV_MSG_CODE_DRV_INFO_NACK;
1320             break;
1321         }
1322 
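             // Publish the physical address of the host buffer so that the MFW can
             // presumably read the requested driver info from host memory.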
1323         LM_SHMEM2_WRITE(pdev, OFFSETOF(shmem2_region_t, drv_info_host_addr_lo), pdev->vars.stats.stats_collect.drv_info_to_mfw.drv_info_to_mfw_phys_addr.as_u32.low );
1324         LM_SHMEM2_WRITE(pdev, OFFSETOF(shmem2_region_t, drv_info_host_addr_hi), pdev->vars.stats.stats_collect.drv_info_to_mfw.drv_info_to_mfw_phys_addr.as_u32.high );
1325 
1326         drv_msg = DRV_MSG_CODE_DRV_INFO_ACK;
1327 
1328     } while(0);
1329 
1330     lm_status = lm_mcp_cmd_send_recieve( pdev, lm_mcp_mb_header, drv_msg, 0, MCP_CMD_DEFAULT_TIMEOUT, &fw_resp );
1331 
1332     return lm_status;
1333 }
1334 
1335 static lm_status_t lm_ufp_pf_disable(lm_device_t *pdev)
1336 {
1337     lm_status_t status        = LM_STATUS_SUCCESS;
1338     u32_t       mcp_resp      = 0;
1339 
1340     /* TODO: Have to do some processing based on whether the PF is enabled or disabled */
1341     ///indicate "link-down"
1342     MM_ACQUIRE_PHY_LOCK(pdev);
1343 
1344     pdev->vars.link_status = LM_STATUS_LINK_DOWN;
1345     mm_indicate_link(pdev, pdev->vars.link_status, pdev->vars.medium);
1346 
1347     MM_RELEASE_PHY_LOCK(pdev);
1348 
1349     /* Report results to MCP */
1350     ///ACK the MCP message
1351     if(status == LM_STATUS_SUCCESS)
1352         lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_OEM_OK, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
1353     else
1354         lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_OEM_FAILURE, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
1355 
1356     DbgBreakIf(mcp_resp != FW_MSG_CODE_OEM_ACK);
1357     return status;
1358 }
1359 
1360 static void lm_ufp_pf_enable(lm_device_t *pdev)
1361 {
1362     lm_status_t                  status    = LM_STATUS_SUCCESS;
1363     u32_t                        mcp_resp  = 0;
1364     struct function_update_data  *data     = LM_SLOWPATH(pdev, ufp_function_update_data);
1365     const lm_address_t           data_phys = LM_SLOWPATH_PHYS(pdev, ufp_function_update_data);
1366     lm_hardware_mf_info_t        *mf_info  = &pdev->hw_info.mf_info;
1367     u32_t                        tag       = 0;
1368 
1369     //Reconfigure rate-limit
1370     MM_ACQUIRE_PHY_LOCK(pdev);
1371     lm_reload_link_and_cmng(pdev);
1372     MM_RELEASE_PHY_LOCK(pdev);
1373 
1374     /* Other than the vlan tag, what other UFP-specific data is there?
1375      * Should we read the priority, etc.?
1376      */
1377 
1378     /* get ovlan if we're in switch-dependent mode... */
1379     LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].e1hov_tag),&tag);
1380     mf_info->ext_id    = (u16_t)tag;
1381     pdev->params.ovlan = (u16_t)tag;
1382 
1383     /* modify the NIG LLH registers */
1384     init_nig_func(pdev);
1385 
1386     DbgBreakIf(pdev->slowpath_info.ufp_func_ramrod_state != UFP_RAMROD_NOT_POSTED);
1387 
1388     /* send function update ramrod to change the tag in the FW */
1389     data->sd_vlan_tag_change_flg = TRUE;
1390     data->sd_vlan_tag            = mm_cpu_to_le16((u16_t)tag);
1391     data->echo                   = FUNC_UPDATE_RAMROD_SOURCE_UFP;
1392 
1393     status = lm_eq_ramrod_post_sync(pdev,
1394                                     RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
1395                                     data_phys.as_u64,CMD_PRIORITY_NORMAL,
1396                                     &pdev->slowpath_info.ufp_func_ramrod_state,
1397                                     UFP_RAMROD_PF_LINK_UPDATE_POSTED,
1398                                     UFP_RAMROD_COMPLETED);
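         /* Note: lm_eq_ramrod_post_sync() is expected to post the ramrod and wait until
          * the completion moves ufp_func_ramrod_state to UFP_RAMROD_COMPLETED. */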
1399 
1400     /* Report results to MCP */
1401     ///ACK the MCP message
1402     if(status == LM_STATUS_SUCCESS)
1403         lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_OEM_OK, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
1404     else
1405         lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_OEM_FAILURE, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
1406 
1407     DbgBreakIf(mcp_resp != FW_MSG_CODE_OEM_ACK);
1408 
1409     pdev->slowpath_info.ufp_func_ramrod_state = UFP_RAMROD_NOT_POSTED;
1410 }
1411 
1412 static lm_status_t lm_oem_event(lm_device_t *pdev, u32_t event)
1413 {
1414     lm_status_t lm_status = LM_STATUS_SUCCESS;
1415     const u32_t offset    = OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].config);
1416     u32_t       config    = 0;
1417 
1418     DbgMessage(pdev, INFORM, "oem_event 0x%x\n", event);
1419 
1420     ///read FUNC-DISABLED and FUNC-DELETED from func_mf_cfg
1421     LM_MFCFG_READ(pdev, offset, &config);
1422     pdev->hw_info.mf_info.func_mf_cfg = config ;
1423 
1424     if (event & DRV_STATUS_OEM_DISABLE_ENABLE_PF)
1425     {
1426         if((config & FUNC_MF_CFG_FUNC_DISABLED) || (config & FUNC_MF_CFG_FUNC_DELETED))
1427         {
1428             lm_status = lm_ufp_pf_disable(pdev);
1429             if (lm_status != LM_STATUS_SUCCESS)
1430             {
1431                 DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1432                 return lm_status;
1433             }
1434         }
1435         else
1436         {
1437 #ifdef EDIAG
1438             lm_ufp_pf_enable(pdev);
1439 #else
1440             lm_status = MM_REGISTER_LPME(pdev, lm_ufp_pf_enable, TRUE, TRUE);
1441 #endif
1442             if (lm_status != LM_STATUS_SUCCESS)
1443             {
1444                 DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1445                 return lm_status;
1446             }
1447         }
1448     }
1449     else if (event & DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)
1450     {
1451         //lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info;
1452 
1453         ///* get min/max bw */
1454         //mf_info->min_bw[vnic] = (GET_FLAGS(config, FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT);
1455         //mf_info->max_bw[vnic] = (GET_FLAGS(config, FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT);
1456 
1457         /* lm_set_bandwidth_event reads the bw configuration and does the necessary processing.
1458          * The only drawback is that it reads the configuration for all the functions.
1459          * TODO: check if we should be using this or not...
1460          */
1461         lm_status = lm_set_bandwidth_event(pdev);
1462         if (lm_status != LM_STATUS_SUCCESS)
1463         {
1464             DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1465             return lm_status;
1466         }
1467     }
1468 
1469     return lm_status;
1470 }
1471 
1472 static void lm_update_svid(lm_device_t *pdev)
1473 {
1474     lm_hardware_mf_info_t          *mf_info        = &pdev->hw_info.mf_info;
1475     u32_t                          tag             = 0;
1476     u32_t                          mcp_resp        = 0;
1477     lm_status_t                    lm_status       = LM_STATUS_SUCCESS;
1478     struct function_update_data    *data           = LM_SLOWPATH(pdev, ufp_function_update_data);
1479     const lm_address_t             data_phys       = LM_SLOWPATH_PHYS(pdev, ufp_function_update_data);
1480 
1481     /* get ovlan if we're in switch-dependent mode... */
1482     LM_MFCFG_READ(pdev, OFFSETOF(mf_cfg_t, func_mf_config[ABS_FUNC_ID(pdev)].e1hov_tag),&tag);
1483     mf_info->ext_id      = (u16_t)tag;
1484     pdev->params.ovlan   = (u16_t)tag;
1485 
1486     /* modify the NIG LLH registers */
1487     init_nig_func(pdev);
1488 
1489     DbgBreakIf(pdev->slowpath_info.ufp_func_ramrod_state != UFP_RAMROD_NOT_POSTED);
1490 
1491     /* send function update ramrod to change the tag in the FW */
1492     data->sd_vlan_tag_change_flg = TRUE;
1493     data->sd_vlan_tag            = mm_cpu_to_le16((u16_t)tag);
1494     data->echo	                 = FUNC_UPDATE_RAMROD_SOURCE_UFP;
1495 
1496     lm_status = lm_eq_ramrod_post_sync(pdev,
1497                                        RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
1498                                        data_phys.as_u64,CMD_PRIORITY_NORMAL,
1499                                        &pdev->slowpath_info.ufp_func_ramrod_state,
1500                                        UFP_RAMROD_PF_UPDATE_POSTED,
1501                                        UFP_RAMROD_COMPLETED);
1502 
1503     /* Report results to MCP */
1504     if(lm_status == LM_STATUS_SUCCESS)
1505         lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
1506     else
1507         lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
1508 
1509     DbgBreakIf(mcp_resp != DRV_MSG_CODE_OEM_UPDATE_SVID_ACK);
1510     pdev->slowpath_info.ufp_func_ramrod_state = UFP_RAMROD_NOT_POSTED;
1511 }
1512 
1513 #ifndef EDIAG
1514 static void lm_ufp_update_priority(lm_device_t *pdev)
1515 {
1516     lm_hardware_mf_info_t          *mf_info        = &pdev->hw_info.mf_info;
1517     u32_t                          new_priority    = 0;
1518     u32_t                          mcp_resp        = 0;
1519     lm_status_t                    lm_status       = LM_STATUS_SUCCESS;
1520     struct function_update_data    *data           = LM_SLOWPATH(pdev, ufp_function_update_data);
1521     const lm_address_t             data_phys       = LM_SLOWPATH_PHYS(pdev, ufp_function_update_data);
1522 
1523     DbgBreakIf(pdev->slowpath_info.ufp_func_ramrod_state != UFP_RAMROD_NOT_POSTED);
1524 
1525     /* Todo get the priority from somewhere */
1526 
1527     /* send function update ramrod to change the forced vlan priority in the FW */
1528     data->sd_vlan_force_pri_change_flg = TRUE;
1529     data->sd_vlan_force_pri_flg        = TRUE;
1530     //data->sd_vlan_force_pri_val        = mm_cpu_to_le16((u16_t)new_priority);
1531 
1532     data->echo	                       = FUNC_UPDATE_RAMROD_SOURCE_UFP;
1533 
1534     lm_status = lm_eq_ramrod_post_sync(pdev,
1535                                        RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
1536                                        data_phys.as_u64,CMD_PRIORITY_NORMAL,
1537                                        &pdev->slowpath_info.ufp_func_ramrod_state,
1538                                        UFP_RAMROD_PF_UPDATE_POSTED,
1539                                        UFP_RAMROD_COMPLETED);
1540     /*Todo Report results to mcp?*/
1541     pdev->slowpath_info.ufp_func_ramrod_state = UFP_RAMROD_NOT_POSTED;
1542 }
1543 #endif
1544 
1545 static lm_status_t lm_svid_event(lm_device_t *pdev)
1546 {
1547     lm_status_t lm_status = LM_STATUS_SUCCESS;
1548 #ifdef EDIAG
1549     lm_update_svid(pdev);
1550 #else
1551     lm_status = MM_REGISTER_LPME(pdev, lm_update_svid, TRUE, TRUE);
1552 #endif
1553     if (lm_status != LM_STATUS_SUCCESS)
1554     {
1555             DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
1556             return lm_status;
1557     }
1558 
1559     return lm_status;
1560 }
1561 
1562 static void lm_generic_event(lm_device_t *pdev)
1563 {
1564     u32_t      val              = 0;
1565     u32_t      offset           = 0; // for debugging convenience
1566     u8_t       call_pmf_or_link = FALSE;
1567     const u8_t func_id          = FUNC_ID(pdev);
1568 
1569 
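         // Each function has its own sync general attention register (GENERAL_ATTN_12 + func_id);
         // writing 0 to it below de-asserts the attention that triggered this event.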
1570     offset = MISC_REG_AEU_GENERAL_ATTN_12 + 4*func_id;
1571 
1572     // reset attention
1573     REG_WR(pdev, offset ,0x0);
1574 
1575     offset = OFFSETOF(shmem_region_t, func_mb[FUNC_MAILBOX_ID(pdev)].drv_status) ;
1576 
1577     // drv_status
1578     LM_SHMEM_READ(pdev,
1579                   offset,
1580                   &val);
1581 
1582     // E1H NIG status sync attention mapped to group 4-7
1583 
1584     if (GET_FLAGS( val, DRV_STATUS_VF_DISABLED))
1585     {
1586         u32_t mcp_vf_disabled[E2_VF_MAX / 32] = {0};
1587         u32_t i, fw_resp = 0;
1588 
1589         // Read VFs
1590         for (i = 0; i < ARRSIZE(mcp_vf_disabled); i++)
1591         {
1592             LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t,mcp_vf_disabled[i]), &mcp_vf_disabled[i]);
1593         }
1594         DbgMessage(pdev, FATAL, "lm_generic_event: DRV_STATUS_VF_DISABLED received for vfs bitmap %x %x!!!\n", mcp_vf_disabled[0], mcp_vf_disabled[1]);
1595 
1596         // SHMULIK, PLACE YOUR CODE HERE ( Handle only VFs of this PF )
1597 
1598         // Acknowledge the VFs you handled ( This array is per PF driver on path )
1599         for (i = 0; i < ARRSIZE(mcp_vf_disabled) ; i++)
1600         {
1601             LM_SHMEM2_WRITE(pdev, OFFSETOF(shmem2_region_t,drv_ack_vf_disabled[FUNC_MAILBOX_ID(pdev)][i]), mcp_vf_disabled[i]);
1602         }
1603         lm_mcp_cmd_send_recieve( pdev,
1604                                  lm_mcp_mb_header,
1605                                  DRV_MSG_CODE_VF_DISABLED_DONE,
1606                                  0,
1607                                  MCP_CMD_DEFAULT_TIMEOUT,
1608                                  &fw_resp);
1609         return; // YANIV - DEBUG @@@!!!
1610     }
1611     if(IS_MULTI_VNIC(pdev))
1612     {
1613         if( GET_FLAGS( val, DRV_STATUS_DCC_EVENT_MASK ) )
1614         {
1615             lm_dcc_event(pdev, (DRV_STATUS_DCC_EVENT_MASK & val) );
1616         }
1617 
1618         if (GET_FLAGS(val, DRV_STATUS_SET_MF_BW ))
1619         {
1620             lm_set_bandwidth_event(pdev);
1621         }
1622 
1623         //if val has any NIV event flags, call lm_niv_event
1624         if ( GET_FLAGS(val, DRV_STATUS_AFEX_EVENT_MASK) )
1625         {
1626             lm_niv_event(pdev, GET_FLAGS(val, DRV_STATUS_AFEX_EVENT_MASK) );
1627         }
1628     }
1629 
1630     if GET_FLAGS(val, DRV_STATUS_DRV_INFO_REQ)
1631     {
1632         lm_stats_drv_info_to_mfw_event(pdev);
1633     }
1634 
1635     // NOTE:
1636     // Once we have events such as DCC and NIV, this condition no longer holds:
1637     // we might get here TRUE although we are in MULTI_VNIC mode AND we are not the PMF,
1638     // and this is not a link change or PMF migration.
1639     // The potential problem (a redundant link report to the OS, CQ60223)
1640     // is resolved in the "lm_link_report" function, which checks the current link
1641     // against the previously reported link.
1642 
1643     /* Check if pmf or link event function should be called: */
1644     call_pmf_or_link = IS_MULTI_VNIC(pdev) && !pdev->vars.is_pmf;
1645 
1646 
1647     /* PMF or link event */
1648     if (GET_FLAGS(pdev->vars.link.periodic_flags, ELINK_PERIODIC_FLAGS_LINK_EVENT))
1649     {
1650         DbgMessage(pdev, WARN, "lm_generic_event: ELINK_PERIODIC_FLAGS_LINK_EVENT func_id=%d!!!\n", func_id );
1651 
1652         /*  sync with link */
1653         MM_ACQUIRE_PHY_LOCK_DPC(pdev);
1654         RESET_FLAGS(pdev->vars.link.periodic_flags, ELINK_PERIODIC_FLAGS_LINK_EVENT);
1655         MM_RELEASE_PHY_LOCK_DPC(pdev);
1656 
1657         call_pmf_or_link = TRUE;
1658     }
1659 
1660     if(call_pmf_or_link)
1661     {
1662         lm_pmf_or_link_event(pdev, val);
1663     }
1664 
1665     if GET_FLAGS(val, DRV_STATUS_OEM_EVENT_MASK)
1666     {
1667         lm_oem_event(pdev, val);
1668     }
1669 
1670     if GET_FLAGS(val, DRV_STATUS_OEM_UPDATE_SVID)
1671     {
1672         lm_svid_event(pdev);
1673     }
1674 
1675     lm_dcbx_event(pdev,val);
1676 }
1677 
1678 static void lm_gen_attn_everest_processing(lm_device_t *pdev, u32_t sig_word_aft_inv)
1679 {
1680     u32_t offset = 0; // for debugging convenience
1681     u32_t val    = 0;
1682 
1683     // pass over all general attentions that are wired to a dynamic group of the lower 8 bits
1684     offset = GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) ;
1685     if ( offset & sig_word_aft_inv)
1686     {
1687         REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_7,0x0);
1688         DbgMessage(pdev, FATAL, "lm_gen_attn_everest_processing: TSTORM_FATAL_ASSERT_ATTENTION_BIT received!!!\n");
1689         DbgBreakIfAll(1);
1690     }
1691     offset = GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT);
1692     if ( offset & sig_word_aft_inv)
1693     {
1694         REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_8,0x0);
1695         DbgMessage(pdev, FATAL, "lm_gen_attn_everest_processing: USTORM_FATAL_ASSERT_ATTENTION_BIT received!!!\n");
1696         DbgBreakIfAll(1);
1697     }
1698     offset = GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT);
1699     if ( offset & sig_word_aft_inv)
1700     {
1701         REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_9,0x0);
1702         DbgMessage(pdev, FATAL, "lm_gen_attn_everest_processing: CSTORM_FATAL_ASSERT_ATTENTION_BIT received!!!\n");
1703         DbgBreakIfAll(1);
1704     }
1705     offset = GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT);
1706     if ( offset & sig_word_aft_inv)
1707     {
1708         REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_10,0x0);
1709         DbgMessage(pdev, FATAL, "lm_gen_attn_everest_processing: XSTORM_FATAL_ASSERT_ATTENTION_BIT received!!!\n");
1710         DbgBreakIfAll(1);
1711     }
1712     offset = GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT);
1713     if ( offset & sig_word_aft_inv)
1714     {
1715         REG_WR(pdev,MISC_REG_AEU_GENERAL_ATTN_11,0x0);
1716         val = lm_mcp_check(pdev);
1717         DbgMessage(pdev, FATAL, "lm_gen_attn_everest_processing: MCP_FATAL_ASSERT_ATTENTION_BIT received mcp_check=0x%x!!!\n" , val);
1718         DbgBreakIfAll(1);
1719     }
1720      // E1H NIG status sync attention mapped to group 4-7
1721     if (!CHIP_IS_E1(pdev))
1722     {
1723         // PMF change or link update
1724         offset = GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + FUNC_ID(pdev));
1725 
1726         if ( offset & sig_word_aft_inv)
1727         {
1728            lm_generic_event(pdev);
1729         }
1730     }
1731 }
1732 
1733 void lm_read_attn_regs(lm_device_t *pdev, u32_t * attn_sig_af_inv_arr, u32_t arr_size)
1734 {
1735     u8_t i;
1736     DbgBreakIf( pdev->vars.num_attn_sig_regs > arr_size );
1737     DbgBreakIf( pdev->vars.num_attn_sig_regs > ARRSIZE(pdev->vars.attn_sig_af_inv_reg_addr) );
1738 
1739     // Read the 128 attention signal bits after the inverter
1740     for (i = 0; i < pdev->vars.num_attn_sig_regs; i++)
1741     {
1742         attn_sig_af_inv_arr[i] = REG_RD(pdev, pdev->vars.attn_sig_af_inv_reg_addr[i]);
1743     }
1744 
1745     DbgMessage(pdev, INFORM, "lm_read_attn_regs: attn_sig_aft_invert_1:0x%x; attn_sig_aft_invert_2:0x%x; attn_sig_aft_invert_3:0x%x; attn_sig_aft_invert_4:0x%x,attn_sig_aft_invert_5:0x%x\n",
1746                 attn_sig_af_inv_arr[0],
1747                 attn_sig_af_inv_arr[1],
1748                 attn_sig_af_inv_arr[2],
1749                 attn_sig_af_inv_arr[3],
1750                 attn_sig_af_inv_arr[4]);
1751 }
1752 
1753 
1754 
1755 void lm_get_attn_info(lm_device_t *pdev, u16_t *attn_bits, u16_t *attn_ack)
1756 {
1757     volatile struct atten_sp_status_block *       attention_sb = NULL;
1758     u16_t                                   lcl_attn_sb_index = 0;
1759 
1760     DbgBreakIf(!(pdev && attn_bits && attn_ack));
1761 
1762     attention_sb = lm_get_attention_status_block(pdev);
1763 
1764     // guard against a dynamic change of the attn lines - 15 iterations max
1765     // The main idea here is to ensure that we work on synchronized snapshots of attn_bits and
1766     // attn_ack, and avoid a faulty scenario where the attn_ack we read in snapshot #2 corresponds to
1767     // the attn_bits of snapshot #1, which occurred in a different time frame.
1768     do
1769     {
1770         lcl_attn_sb_index = mm_le16_to_cpu(attention_sb->attn_bits_index);
1771         *attn_bits = (u16_t)mm_le32_to_cpu(attention_sb->attn_bits);
1772         *attn_ack  = (u16_t)mm_le32_to_cpu(attention_sb->attn_bits_ack);
1773 
1774     } while (lcl_attn_sb_index != mm_le16_to_cpu(attention_sb->attn_bits_index));
1775     // lcl_attn_sb_index may differ from the real local attn_index in the pdev, since it could have changed
1776     // during this while loop. We don't save it locally, and thus we will definitely receive an interrupt in case
1777     // the while condition is met.
1778 
1779     DbgMessage(pdev,
1780                INFORMi,
1781                "lm_get_attn_info: def_sb->attn_bits:0x%x, def_sb->attn_ack:0x%x, attn_bits:0x%x, attn_ack:0x%x\n",
1782                mm_le32_to_cpu(attention_sb->attn_bits),
1783                mm_le32_to_cpu(attention_sb->attn_bits_ack),
1784                *attn_bits,
1785                *attn_ack);
1786 }
1787 
1788 
1789 static u32_t lm_dq_attn_everest_processing(lm_device_t *pdev)
1790 {
1791     u32_t val,valc;
1792     val=REG_RD(pdev,DORQ_REG_DORQ_INT_STS);
1793     // TODO add defines here
1794     DbgMessage(pdev, FATAL, "DB hw attention 0x%x\n",val);
1795     if (val) {
1796         pdev->vars.dq_int_status_cnt++;
1797         if (val & DORQ_DORQ_INT_STS_REG_DB_DISCARD)
1798         {
1799             // DORQ discard attention
1800             pdev->vars.dq_int_status_discard_cnt++;//DbgBreakIfAll(1);
1801         }
1802         if (val & DORQ_DORQ_INT_STS_REG_TYPE_VAL_ERR)
1803         {
1804             // DORQ VF type validation error attention
1805             pdev->vars.dq_int_status_vf_val_err_cnt++;//DbgBreakIfAll(1);
1806             pdev->vars.dq_vf_type_val_err_fid = REG_RD(pdev,DORQ_REG_VF_TYPE_VAL_ERR_FID);
1807             pdev->vars.dq_vf_type_val_err_mcid = REG_RD(pdev,DORQ_REG_VF_TYPE_VAL_ERR_MCID);
1808         }
1809     }
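         // DORQ_REG_DORQ_INT_STS_CLR is presumably read-to-clear; reading it below
         // clears the latched DORQ interrupt status and the value itself is discarded.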
1810     valc = REG_RD(pdev,DORQ_REG_DORQ_INT_STS_CLR);
1811     return val;
1812 }
1813 
1814 void lm_handle_deassertion_processing(lm_device_t *pdev, u16_t deassertion_proc_flgs)
1815 {
1816     lm_status_t lm_status                     = LM_STATUS_SUCCESS;
1817     u32_t  val                                = 0;
1818     u32_t  port_reg_name                      = 0;
1819     u8_t   index                              = 0;
1820     u8_t   i                                  = 0;
1821     u32_t  mask_val                           = 0;
1822     u32_t  attn_sig_af_inv_arr[MAX_ATTN_REGS] = {0};
1823     u32_t  group_mask_arr[MAX_ATTN_REGS]      = {0};
1824     u32_t  mask_arr_val[MAX_ATTN_REGS]        = {0};
1825     u32_t  dq_int_sts, cfc_int_sts;
1826 
1827     DbgBreakIf(!pdev);
1828     DbgMessage(pdev, INFORM, "lm_handle_deassertion_processing: deassertion_proc_flgs:%d\n", deassertion_proc_flgs);
1829 
1830 
1831     //acquire split lock for attention signals handling
1832     acquire_split_alr(pdev);
1833 
1834     lm_read_attn_regs(pdev, attn_sig_af_inv_arr, ARRSIZE(attn_sig_af_inv_arr));
1835 
1836     if (lm_recoverable_error(pdev, attn_sig_af_inv_arr,ARRSIZE(attn_sig_af_inv_arr)))
1837     {
1838         DbgMessage(pdev, WARNer, "Starting lm recover flow ");
1839         lm_status = mm_er_initiate_recovery(pdev);
1840         if (lm_status == LM_STATUS_SUCCESS)
1841         {
1842             /* Continue only on success... */
1843             /* Disable HW interrupts */
1844             lm_disable_int(pdev);
1845 
1846             release_split_alr(pdev);
1847             /* In case of recoverable error don't handle attention so that
1848             * other functions get this parity as well.
1849             */
1850             return;
1851         }
1852         DbgMessage(pdev, WARNer, "mm_er_initiate_recovery returned status %d ", lm_status);
1853 
1854         /* Recovery failed... we'll keep going, and eventually hit
1855          * the attention and assert...
1856          */
1857     }
1858 
1859     // For all deasserted groups, pass over the entire attn_bits after the inverter, and if they
1860     // are members of that particular group, treat each one of them accordingly.
1861     for (index = 0; index < ARRSIZE(pdev->vars.attn_groups_output); index++)
1862     {
1863         if (deassertion_proc_flgs & (1 << index))
1864         {
1865             for (i = 0; i < ARRSIZE(group_mask_arr); i++)
1866             {
1867                 group_mask_arr[i] = pdev->vars.attn_groups_output[index].attn_sig_dword[i];
1868             }
1869 
1870             DbgMessage(pdev, WARN, "lm_handle_deassertion_processing: group #%d got attention on it!\n", index);
1871             DbgMessage(pdev, WARN, "lm_handle_deassertion_processing: mask1:0x%x, mask2:0x%x, mask3:0x%x, mask4:0x%x,mask5:0x%x\n",
1872                        group_mask_arr[0],
1873                        group_mask_arr[1],
1874                        group_mask_arr[2],
1875                        group_mask_arr[3],
1876                        group_mask_arr[4]);
1877             DbgMessage(pdev, WARN, "lm_handle_deassertion_processing: attn1:0x%x, attn2:0x%x, attn3:0x%x, attn4:0x%x,attn5:0x%x\n",
1878                        attn_sig_af_inv_arr[0],
1879                        attn_sig_af_inv_arr[1],
1880                        attn_sig_af_inv_arr[2],
1881                        attn_sig_af_inv_arr[3],
1882                        attn_sig_af_inv_arr[4]);
1883 
1884             if (attn_sig_af_inv_arr[3] & EVEREST_GEN_ATTN_IN_USE_MASK & group_mask_arr[3])
1885             {
1886                 lm_gen_attn_everest_processing(pdev, attn_sig_af_inv_arr[3]);
1887             }
1888 
1889             // DQ attn
1890             if (attn_sig_af_inv_arr[1] & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT & group_mask_arr[1])
1891             {
1892                 dq_int_sts = lm_dq_attn_everest_processing(pdev);
1893             }
1894             // CFC attn
1895             if (attn_sig_af_inv_arr[2] & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT & group_mask_arr[2])
1896             {
1897                 cfc_int_sts = lm_cfc_attn_everest_processing(pdev);
1898             }
1899             // PXP attn
1900             if (attn_sig_af_inv_arr[2] & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT & group_mask_arr[2])
1901             {
1902                 lm_pxp_attn_everest_processing(pdev);
1903             }
1904             // SPIO 5 bit in register 0
1905             if (attn_sig_af_inv_arr[0] & AEU_INPUTS_ATTN_BITS_SPIO5 & group_mask_arr[0])
1906             {
1907                 lm_spio5_attn_everest_processing(pdev);
1908             }
1909 
1910             // GPIO3 bits in register 0
1911             if (attn_sig_af_inv_arr[0] & pdev->vars.link.aeu_int_mask & group_mask_arr[0])
1912             {
1913                 // Handle it only for PMF
1914                 if (IS_PMF(pdev))
1915                 {
1916                     MM_ACQUIRE_PHY_LOCK(pdev);
1917                     PHY_HW_LOCK(pdev);
1918                     elink_handle_module_detect_int(&pdev->params.link);
1919                     PHY_HW_UNLOCK(pdev);
1920                     MM_RELEASE_PHY_LOCK(pdev);
1921                 }
1922             }
1923 
1924             //TODO: attribute each attention signal that arrived and is a member of a group, and give it its own
1925             // specific treatment. Later, for each attention, "clean the hw block" via its INT_STS_CLR register.
1926 
1927             // Check for latched attn signals
1928             if (attn_sig_af_inv_arr[3] & EVEREST_LATCHED_ATTN_IN_USE_MASK & group_mask_arr[3])
1929             {
1930                 lm_latch_attn_everest_processing(pdev, attn_sig_af_inv_arr[3]);
1931             }
1932 
1933             // general hw block attention
1934             i = 0;
1935             mask_arr_val[i] = attn_sig_af_inv_arr[i] & HW_INTERRUT_ASSERT_SET_0 & group_mask_arr[i];
1936             i = 1;
1937             mask_arr_val[i] = attn_sig_af_inv_arr[i] & HW_INTERRUT_ASSERT_SET_1 & group_mask_arr[i];
1938             i = 2;
1939             mask_arr_val[i] = attn_sig_af_inv_arr[i] & HW_INTERRUT_ASSERT_SET_2 & group_mask_arr[i];
1940             i = 4;
1941             mask_arr_val[i] = attn_sig_af_inv_arr[i] & HW_INTERRUT_ASSERT_SET_4 & group_mask_arr[i];
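                 // Note: attention register 3 is skipped here; its general and latched
                 // attention bits were already handled above.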
1942 
1943             if (mask_arr_val[2] & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
1944                 pdev->vars.pxp_hw_interrupts_cnt++;
1945             }
1946 
1947             if ( (mask_arr_val[0]) ||
1948                  (mask_arr_val[1] & ~AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) ||
1949                  (mask_arr_val[2] & ~(AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT | AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT)) ||
1950                  (mask_arr_val[4]) )
1951             {
1952                 DbgMessage(pdev, FATAL, "hw block attention:\n");
1953                 DbgMessage(pdev, FATAL, "0: 0x%08x\n", mask_arr_val[0]);
1954                 DbgMessage(pdev, FATAL, "1: 0x%08x\n", mask_arr_val[1]);
1955                 DbgMessage(pdev, FATAL, "2: 0x%08x\n", mask_arr_val[2]);
1956                 DbgMessage(pdev, FATAL, "4: 0x%08x\n", mask_arr_val[4]);
1957                 DbgBreakIfAll(1);
1958             }
1959             // general hw block mem prty
1960             i = 0;
1961             mask_arr_val[i] = attn_sig_af_inv_arr[i] & HW_PRTY_ASSERT_SET_0 & group_mask_arr[i];
1962             i = 1;
1963             mask_arr_val[i] = attn_sig_af_inv_arr[i] & HW_PRTY_ASSERT_SET_1 & group_mask_arr[i];
1964             i = 2;
1965             mask_arr_val[i] = attn_sig_af_inv_arr[i] & HW_PRTY_ASSERT_SET_2 & group_mask_arr[i];
1966             i = 4;
1967             mask_arr_val[i] = attn_sig_af_inv_arr[i] & HW_PRTY_ASSERT_SET_4 & group_mask_arr[i];
1968 
1969             if ( (mask_arr_val[0]) ||
1970                  (mask_arr_val[1]) ||
1971                  (mask_arr_val[2]) ||
1972                  (mask_arr_val[4]) )
1973             {
1974                 DbgMessage(pdev, FATAL, "hw block parity attention\n");
1975                 DbgMessage(pdev, FATAL, "0: 0x%08x\n", mask_arr_val[0]);
1976                 DbgMessage(pdev, FATAL, "1: 0x%08x\n", mask_arr_val[1]);
1977                 DbgMessage(pdev, FATAL, "2: 0x%08x\n", mask_arr_val[2]);
1978                 DbgMessage(pdev, FATAL, "4: 0x%08x\n", mask_arr_val[4]);
1979                 DbgBreakIfAll(1);
1980             }
1981         }
1982     }
1983 
1984     //release split lock
1985     release_split_alr(pdev);
1986 
1987     //TODO: the attn_ack bits to clear must be passed with '0'
1988     //val = deassertion_proc_flgs;
1989     val = ~deassertion_proc_flgs;
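         // e.g. deassertion_proc_flgs == 0x0003 gives val == 0xFFFFFFFC, so only
         // attention bits 0 and 1 are written as '0' (i.e. cleared) below.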
1990     // attention bits clear
1991     if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC)
1992     {
1993         REG_WR(pdev,  HC_REG_COMMAND_REG + PORT_ID(pdev)*32 + COMMAND_REG_ATTN_BITS_CLR,val);
1994     }
1995     else
1996     {
1997         u32_t cmd_addr = IGU_CMD_ATTN_BIT_CLR_UPPER;
1998 
1999         if (INTR_BLK_ACCESS(pdev) == INTR_BLK_ACCESS_IGUMEM)
2000         {
2001             REG_WR(pdev, BAR_IGU_INTMEM + cmd_addr*8, val);
2002         }
2003         else
2004         {
2005             struct igu_ctrl_reg cmd_ctrl;
2006             u8_t                igu_func_id = 0;
2007 
2008             /* GRC ACCESS: */
2009             /* Write the Data, then the control */
2010              /* [18:12] - FID (if VF - [18] = 0; [17:12] = VF number; if PF - [18] = 1; [17:14] = 0; [13:12] = PF number) */
2011             igu_func_id = IGU_FUNC_ID(pdev);
2012             cmd_ctrl.ctrl_data =
2013                 ((cmd_addr << IGU_CTRL_REG_ADDRESS_SHIFT) |
2014                  (igu_func_id << IGU_CTRL_REG_FID_SHIFT) |
2015                  (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT));
2016 
2017             REG_WR(pdev, IGU_REG_COMMAND_REG_32LSB_DATA, val);
2018             REG_WR(pdev, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl.ctrl_data);
2019         }
2020     }
2021 
2022     // unmask only the appropriate attention output signals from the configured routing and unifier logic toward the IGU.
2023     // This is for driver/chip sync, so that we eventually return to the '00' monitored state
2024     // in both the leading & trailing latch.
2025     // Unmask non-hard-wired dynamic groups only.
2026 
2027     DbgBreakIf(~pdev->vars.attn_state & deassertion_proc_flgs);
2028 
2029     //unmask relevant AEU attn lines
2030     //             mask  deassert_flgs  new mask
2031     //legal:        0       0       ->    0
2032     //              0       1       ->    1
2033     //              1       0       ->    1
2034     //ASSERT:       1       1 -> this won't change us thanks to the |
2035 
2036     port_reg_name = PORT_ID(pdev) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0;
2037 
2038     lm_hw_lock(pdev, HW_LOCK_RESOURCE_PORT0_ATT_MASK + PORT_ID(pdev), TRUE);
2039 
2040     mask_val = REG_RD(pdev, port_reg_name);
2041 
2042     pdev->vars.aeu_mask_attn_func = mask_val & 0xff;
2043 
2044     DbgMessage(pdev, INFORM, "lm_handle_deassertion_processing: BEFORE: aeu_mask_attn_func:0x%x\n", pdev->vars.aeu_mask_attn_func);
2045     //changed from XOR to OR for safety
2046     pdev->vars.aeu_mask_attn_func |= (deassertion_proc_flgs & 0xff);
2047 
2048     DbgMessage(pdev, INFORM, "lm_handle_deassertion_processing: AFTER : aeu_mask_attn_func:0x%x\n", pdev->vars.aeu_mask_attn_func);
2049 
2050     REG_WR(pdev, port_reg_name, pdev->vars.aeu_mask_attn_func);
2051     lm_hw_unlock(pdev, HW_LOCK_RESOURCE_PORT0_ATT_MASK + PORT_ID(pdev));
2052     //update the attn bits states
2053     //            state  deassert_flgs  new state
2054     //legal:        0       0       ->    0
2055     //              1       0       ->    1
2056     //              1       1       ->    0
2057     //ASSERT:       0       1 -> this won't change our state thanks to & ~ !
2058     DbgMessage(pdev, INFORM, "lm_handle_deassertion_processing: BEFORE: attn_state:0x%x\n", pdev->vars.attn_state);
2059 
2060     //changed from XOR to : AND ~ for safety
2061     pdev->vars.attn_state &= ~deassertion_proc_flgs;
2062 
2063     DbgMessage(pdev, INFORM, "lm_handle_deassertion_processing: AFTER : attn_state:0x%x\n", pdev->vars.attn_state);
2064 }
2065