1 /*******************************************************************************
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  * Copyright 2014 QLogic Corporation
22  * The contents of this file are subject to the terms of the
23  * QLogic End User License (the "License").
24  * You may not use this file except in compliance with the License.
25  *
26  * You can obtain a copy of the License at
27  * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
28  * QLogic_End_User_Software_License.txt
29  * See the License for the specific language governing permissions
30  * and limitations under the License.
31  *
32  *
33  * Module Description:
34  *
35  *
36  * History:
37  *    11/26/07 Alon Elhanani    Inception.
38  ******************************************************************************/
39 
40 #include "lm5710.h"
41 #include "license.h"
42 #include "mcp_shmem.h"
43 #include "debug.h"
44 
45 #define MCP_EMUL_TIMEOUT 200000    /* 200 ms (in us) */
46 #define MCP_TIMEOUT      5000000   /* 5 seconds (in us) */
47 #define MCP_ONE_TIMEOUT  100000    /* 100 ms (in us) */
48 
49 /**
50  * Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
51  * depending on the HW type.
52  *
53  * @param pdev
54  */
55 static __inline void lm_mcp_wait_one (
56     IN  struct _lm_device_t * pdev
57     )
58 {
59     /* special handling for emulation and FPGA,
60        wait 10 times longer */
61     if (CHIP_REV_IS_SLOW(pdev)) {
62         mm_wait(pdev, MCP_ONE_TIMEOUT*10);
63     } else {
64         mm_wait(pdev, MCP_ONE_TIMEOUT);
65     }
66 }
67 
68 
69 #if !defined(b710)
70 
71 /**
72  * Prepare CLP to MCP reset.
73  *
74  * @param pdev Device handle
75  * @param magic_val Old value of `magic' bit.
76  */
77 void lm_clp_reset_prep(
78     IN  struct _lm_device_t * pdev,
79     OUT u32_t               * magic_val
80     )
81 {
82     u32_t val = 0;
83     u32_t offset;
84 
85 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
86 
87     ASSERT_STATIC(sizeof(struct mf_cfg) % sizeof(u32_t) == 0);
88 
89     /* Do some magic... */
90     offset = OFFSETOF(mf_cfg_t, shared_mf_config.clp_mb);
91     LM_MFCFG_READ(pdev, offset, &val);
92     *magic_val = val & SHARED_MF_CLP_MAGIC;
93     LM_MFCFG_WRITE(pdev, offset, val | SHARED_MF_CLP_MAGIC);
94 }
95 
96 /**
97  * Restore the value of the `magic' bit.
98  *
99  * @param pdev Device handle.
100  * @param magic_val Old value of the `magic' bit.
101  */
102 void lm_clp_reset_done(
103     IN  struct _lm_device_t * pdev,
104     IN  u32_t                 magic_val
105     )
106 {
107     u32_t val = 0;
108     u32_t offset;
109 
110     /* Restore the `magic' bit value... */
111     offset = OFFSETOF(mf_cfg_t, shared_mf_config.clp_mb);
112     LM_MFCFG_READ(pdev, offset, &val);
113     LM_MFCFG_WRITE(pdev, offset, (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
114 }
115 
116 #endif // !b710
117 
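/*
 * Illustrative note (a sketch of how the pair is intended to be used,
 * matching lm_reset_mcp_prep/lm_reset_mcp_comp below): the `magic' bit is
 * saved before the MCP is reset and restored once the reset completes, e.g.:
 *
 *     u32_t magic_val;
 *     lm_clp_reset_prep(pdev, &magic_val);
 *     // ... reset the MCP ...
 *     lm_clp_reset_done(pdev, magic_val);
 */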
118 u8_t lm_is_mcp_detected(
119     IN struct _lm_device_t *pdev
120     )
121 {
122     return pdev->hw_info.mcp_detected;
123 }
124 
125 /**
126  * @Description
127  *      Prepares for MCP reset: saves the CLP configuration
128  *      aside so that it can be restored later.
129  *
130  * @param pdev
131  * @param magic_val Old value of 'magic' bit.
132  */
133 lm_status_t lm_reset_mcp_prep(lm_device_t *pdev, u32_t * magic_val)
134 {
135     u32_t shmem;
136     u32_t validity_offset;
137 
138     /* Set `magic' bit in order to save MF config */
139     if (!CHIP_IS_E1(pdev))
140     {
141         lm_clp_reset_prep(pdev, magic_val);
142     }
143 
144     /* Get shmem offset */
145     shmem = REG_RD(pdev, MISC_REG_SHARED_MEM_ADDR);
146     validity_offset = OFFSETOF(shmem_region_t, validity_map[0]);
147 
148     /* Clear validity map flags */
149     if( shmem > 0 )
150     {
151         REG_WR(pdev, shmem + validity_offset, 0);
152     }
153 
154     return LM_STATUS_SUCCESS;
155 }
156 
157 lm_status_t lm_reset_mcp_comp(lm_device_t *pdev, u32_t magic_val)
158 {
159     lm_status_t lm_status         = LM_STATUS_SUCCESS;
160     u32_t       shmem_sig_timeout = 0;
161     u32_t       validity_offset   = 0;
162     u32_t       shmem             = 0;
163     u32_t       val               = 0;
164     u32_t       cnt               = 0;
165 
166 #ifdef _VBD_CMD_
167     return LM_STATUS_SUCCESS;
168 #endif
169 
170     /* Get shmem offset */
171     shmem = REG_RD(pdev, MISC_REG_SHARED_MEM_ADDR);
172     if( shmem == 0 ) {
173         DbgMessage(pdev, FATAL, "Shmem 0 return failure\n");
174         lm_status = LM_STATUS_FAILURE;
175         goto exit_lbl;
176     }
177 
178     ASSERT_STATIC(0 != MCP_ONE_TIMEOUT);
179 
180     if (CHIP_REV_IS_EMUL(pdev))
181         shmem_sig_timeout = MCP_EMUL_TIMEOUT / MCP_ONE_TIMEOUT; // 200ms
182     else
183         shmem_sig_timeout = MCP_TIMEOUT / MCP_ONE_TIMEOUT; // 5sec
184 
185     validity_offset = OFFSETOF(shmem_region_t, validity_map[0]);
186 
187     /* Wait for MCP to come up */
188     for(cnt = 0; cnt < shmem_sig_timeout; cnt++)
189     {
190         /* TBD: it's best to check the validity map of the last port; currently checks port 0. */
191         val = REG_RD(pdev, shmem + validity_offset);
192         DbgMessage(pdev, INFORM, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem, shmem + validity_offset, val);
193 
194         /* check that shared memory is valid. */
195         if((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) ==
196            (SHR_MEM_VALIDITY_DEV_INFO|SHR_MEM_VALIDITY_MB)) {
197             break;
198         }
199 
200         lm_mcp_wait_one(pdev);
201     }
202 
203     DbgMessage(pdev, INFORM , "Cnt=%d Shmem validity map 0x%x\n",cnt, val);
204 
205     /* Check that shared memory is valid. This indicates that MCP is up. */
206     if((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
207        (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
208     {
209         DbgMessage(pdev, FATAL, "Shmem signature not present. MCP is not up !!\n");
210         lm_status = LM_STATUS_FAILURE;
211         goto exit_lbl;
212     }
213 
214 exit_lbl:
215 
216     if (!CHIP_IS_E1(pdev))
217     {
218         /* Restore `magic' bit value */
219         lm_clp_reset_done(pdev, magic_val);
220     }
221 
222     return lm_status;
223 }
224 
225 lm_status_t lm_reset_mcp(
226     IN struct _lm_device_t *pdev
227     )
228 {
229 
230     u32_t magic_val = 0;
231     u32_t val, retries=0;
232     lm_status_t lm_status = LM_STATUS_SUCCESS;
233 
234     DbgMessage(pdev, VERBOSE, "Entered lm_reset_mcp\n");
235 
236     lm_reset_mcp_prep(pdev, &magic_val);
237 
238     /* wait up to 3 seconds to acquire all locks; reset the MCP afterwards regardless */
239     do {
240          REG_WR(pdev, MISC_REG_DRIVER_CONTROL_15 + 4, 0xffffffff);
241          val = REG_RD(pdev, MISC_REG_DRIVER_CONTROL_15);
242          mm_wait(pdev, 1);
243     } while ((val != 0xffffffff) && (++retries < 3000000));
244 
245     /* Reset the MCP */
246     REG_WR(pdev, GRCBASE_MISC+ MISC_REGISTERS_RESET_REG_2_CLEAR,
247          MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE  |
248          MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B      |
249          MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU        |
250          MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE);
251 
252     /* release the locks taken */
253     REG_WR(pdev, MISC_REG_DRIVER_CONTROL_15, 0xffffffff);
254 
255     mm_wait(pdev, 100000);
256 
257     // No need to wait a minimum time here, since lm_reset_mcp_comp
258     // returns only when the MCP is ready.
259     lm_status = lm_reset_mcp_comp(pdev, magic_val);
260 
261     return lm_status;
262 }
263 
264 //acquire split MCP access lock register
265 lm_status_t
266 acquire_split_alr(
267     lm_device_t *pdev)
268 {
269     lm_status_t lm_status;
270     u32_t j, cnt;
271     u32_t val_wr, val_rd;
272 
273     DbgMessage(pdev, INFORM, "acquire_split_alr() - %d START!\n", FUNC_ID(pdev) );
274 
275     //Adjust timeout for our emulation needs
276     cnt = 30000 * 100;
277     val_wr = 1UL << 31;
278     val_rd = 0;
279 
280     //acquire lock using mcpr_access_lock SPLIT register
281 
282     for(j = 0; j < cnt*10; j++)
283     {
284         REG_WR(pdev,  GRCBASE_MCP + 0x9c, val_wr);
285         val_rd = REG_RD(pdev,  GRCBASE_MCP + 0x9c);
286         if (val_rd & (1UL << 31))
287         {
288             break;
289         }
290 
291         mm_wait(pdev, 5);
292     }
293 
294     if(val_rd & (1UL << 31))
295     {
296         lm_status = LM_STATUS_SUCCESS;
297     }
298     else
299     {
300         DbgBreakMsg("Cannot get access to nvram interface.\n");
301 
302         lm_status = LM_STATUS_BUSY;
303     }
304 
305     DbgMessage(pdev, INFORM, "acquire_split_alr() - %d END!\n", FUNC_ID(pdev) );
306 
307     return lm_status;
308 }
309 
310 //Release split MCP access lock register
311 void
312 release_split_alr(
313     lm_device_t *pdev)
314 {
315     u32_t val = 0;
316 
317     DbgMessage(pdev, INFORM, "release_split_alr() - %d START!\n", FUNC_ID(pdev) );
318 
319     //This is only a sanity check, can remove later in free build.
320     val= REG_RD(pdev, GRCBASE_MCP + 0x9c);
321     DbgBreakIf(!(val & (1L << 31)));
322 
323     val = 0;
324 
325     //release mcpr_access_lock SPLIT register
326     REG_WR(pdev,  GRCBASE_MCP + 0x9c, val);
327     DbgMessage(pdev, INFORM, "release_split_alr() - %d END!\n", FUNC_ID(pdev) );
328 } /* release_nvram_lock */
329 
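/*
 * Illustrative usage sketch (not part of the original driver): the split MCP
 * access lock register (GRCBASE_MCP + 0x9c) is expected to be held only for
 * the duration of the protected access and always released, e.g.:
 *
 *     if (LM_STATUS_SUCCESS == acquire_split_alr(pdev))
 *     {
 *         // ... access the shared MCP/NVRAM resource here ...
 *         release_split_alr(pdev);
 *     }
 */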
330 /*******************************************************************************
331  * Description:
332  *         sends the MCP a driver-pulse keepalive via the shared memory mailbox
333  * Return:
334  ******************************************************************************/
335 lm_status_t lm_send_driver_pulse( lm_device_t* pdev )
336 {
337     u32_t        msg_code   = 0;
338     u32_t        drv_pulse  = 0;
339     u32_t        mcp_pulse  = 0;
340 
341     if CHK_NULL(pdev)
342     {
343         return LM_STATUS_INVALID_PARAMETER ;
344     }
345 
346     if GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP)
347     {
348         return LM_STATUS_SUCCESS ;
349     }
350 
351     ++pdev->vars.drv_pulse_wr_seq;
352     msg_code = pdev->vars.drv_pulse_wr_seq & DRV_PULSE_SEQ_MASK;
353     if (GET_FLAGS(pdev->params.test_mode, TEST_MODE_DRIVER_PULSE_ALWAYS_ALIVE)
354         || IS_DRIVER_PULSE_ALWAYS_ALIVE(pdev))
355     {
356         SET_FLAGS( msg_code, DRV_PULSE_ALWAYS_ALIVE ) ;
357     }
358 
359     drv_pulse = msg_code;
360 
361     LM_SHMEM_WRITE(pdev,
362                    OFFSETOF(shmem_region_t,
363                    func_mb[FUNC_MAILBOX_ID(pdev)].drv_pulse_mb),msg_code);
364     LM_SHMEM_READ(pdev,
365                   OFFSETOF(shmem_region_t,
366                   func_mb[FUNC_MAILBOX_ID(pdev)].mcp_pulse_mb),
367                   &mcp_pulse);
368 
369     mcp_pulse&= MCP_PULSE_SEQ_MASK ;
370     /* The delta between driver pulse and mcp response
371      * should be 1 (before mcp response) or 0 (after mcp response)
372     */
373     if ((drv_pulse != mcp_pulse) &&
374         (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK)))
375     {
376         DbgMessage(pdev, FATAL, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", drv_pulse, mcp_pulse );
377         return LM_STATUS_FAILURE ;
378     }
379     DbgMessage(pdev, INFORMi , "Sent driver pulse cmd to MCP\n");
380     return LM_STATUS_SUCCESS ;
381 }
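/*
 * Worked example (illustrative, not from the original source): if the driver
 * writes drv_pulse = 0x0005, the MCP eventually echoes it in mcp_pulse_mb.
 * Before the MCP responds, mcp_pulse is still 0x0004 (delta 1); after it
 * responds, mcp_pulse is 0x0005 (delta 0).  Any other delta makes the check
 * above fail with LM_STATUS_FAILURE.
 */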
382 /*******************************************************************************
383  * Description:
384  *         Set driver pulse to MCP to always alive
385  * Return:
386  ******************************************************************************/
387 void lm_driver_pulse_always_alive(struct _lm_device_t* pdev)
388 {
389     if CHK_NULL(pdev)
390     {
391         return;
392     }
393     if GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP)
394     {
395         return ;
396     }
397     // Set the driver pulse to always-alive
398     LM_SHMEM_WRITE( pdev,
399                     OFFSETOF(shmem_region_t,
400                     func_mb[FUNC_MAILBOX_ID(pdev)].drv_pulse_mb),
401                     DRV_PULSE_ALWAYS_ALIVE );
402 }
403 // entry that represents a function in the loader object
404 typedef struct _lm_loader_func_entry_t
405 {
406     u8_t b_loaded ;   // has this function been loaded
407 } lm_loader_func_entry_t ;
408 // global object that represents the MCP loader state - should be one per chip (board)
409 typedef struct _lm_loader_path_obj_t
410 {
411     u32_t*                   lock_ctx ;               // reserved - lock object context (currently not in use)
412     lm_loader_func_entry_t   func_arr[E1H_FUNC_MAX] ; // array of function entries
413 } lm_loader_path_obj_t ;
414 
415 typedef struct _lm_loader_obj_t
416 {
417     u8_t                     lock_owner ;             // function that currently holds the lock (1-based, 0 = unlocked)
418     lm_loader_path_obj_t path_arr[MAX_PATH_NUM] ;
419 } lm_loader_obj_t ;
420 
421 lm_loader_obj_t g_lm_loader  = {0};
422 
423 // TRUE if the function is first on the port
424 #define LM_LOADER_IS_FIRST_ON_PORT(_pdev,_path_idx,_port_idx) \
425  ( (FALSE == g_lm_loader.path_arr[_path_idx].func_arr[_port_idx+0].b_loaded) && \
426    (FALSE == g_lm_loader.path_arr[_path_idx].func_arr[_port_idx+2].b_loaded) && \
427    (FALSE == g_lm_loader.path_arr[_path_idx].func_arr[_port_idx+4].b_loaded) && \
428    (FALSE == g_lm_loader.path_arr[_path_idx].func_arr[_port_idx+6].b_loaded) )
429 
430 // TRUE if the function is last on the port
431 #define LM_LOADER_IS_LAST_ON_PORT(_pdev,_path_idx,_port_idx) \
432   ( ( ( FUNC_ID(_pdev) == (_port_idx+0) ) ? TRUE : (FALSE == g_lm_loader.path_arr[_path_idx].func_arr[(_port_idx+0)].b_loaded) ) && \
433     ( ( FUNC_ID(_pdev) == (_port_idx+2) ) ? TRUE : (FALSE == g_lm_loader.path_arr[_path_idx].func_arr[(_port_idx+2)].b_loaded) ) && \
434     ( ( FUNC_ID(_pdev) == (_port_idx+4) ) ? TRUE : (FALSE == g_lm_loader.path_arr[_path_idx].func_arr[(_port_idx+4)].b_loaded) ) && \
435     ( ( FUNC_ID(_pdev) == (_port_idx+6) ) ? TRUE : (_port_idx == 0)?(FALSE == g_lm_loader.path_arr[_path_idx].func_arr[6].b_loaded):(FALSE == g_lm_loader.path_arr[_path_idx].func_arr[7].b_loaded) ) )
436 
437 
438 #define LM_LOADER_IS_FIRST_ON_COMMON(_pdev,_path_idx) (LM_LOADER_IS_FIRST_ON_PORT(_pdev,_path_idx,0) && LM_LOADER_IS_FIRST_ON_PORT(_pdev,_path_idx,1))
439 #define LM_LOADER_IS_LAST_ON_COMMON(_pdev,_path_idx)  (LM_LOADER_IS_LAST_ON_PORT(_pdev,_path_idx,0)  && LM_LOADER_IS_LAST_ON_PORT(_pdev,_path_idx,1))
440 
441 #define LM_LOADER_IS_FIRST_ON_CHIP(_pdev) (LM_LOADER_IS_FIRST_ON_COMMON(_pdev,0) && LM_LOADER_IS_FIRST_ON_COMMON(_pdev,1))
442 #define LM_LOADER_IS_LAST_ON_CHIP(_pdev)  (LM_LOADER_IS_LAST_ON_COMMON(_pdev,0)  && LM_LOADER_IS_LAST_ON_COMMON(_pdev,1))
443 
444 // Accessed only with lock!
445 // TRUE if any function currently holds the loader lock
446 #define LM_LOADER_IS_LOCKED(_chip_idx) ( (FALSE != g_lm_loader.lock_owner) )
447 
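/*
 * Worked example (illustrative assumption: an 8-function device with
 * functions 0,2,4,6 on port 0 and 1,3,5,7 on port 1):
 *  - LM_LOADER_IS_FIRST_ON_PORT(pdev, path, 0) is TRUE only while none of
 *    functions 0,2,4,6 on that path is marked b_loaded.
 *  - LM_LOADER_IS_LAST_ON_PORT(pdev, path, 0) skips the calling function
 *    itself, so it is TRUE when every *other* function on the port is
 *    already unloaded.
 *  - The COMMON/CHIP variants apply the port check to both ports and to
 *    both paths, respectively.
 */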
448 /*
449  *Function Name:lm_loader_opcode_to_mcp_msg
450  *
451  *Parameters:
452  *      b_lock - TRUE for the lock (request) message, FALSE for the unlock (done) message
453  *Description:
454  *      LM_LOADER_OPCODE_XXX-->DRV_MSG_CODE_XXX
455  *Returns:
456  *
457  */
458 static u32_t lm_loader_opcode_to_mcp_msg( lm_loader_opcode opcode, u8_t b_lock )
459 {
460     u32_t mcp_msg = 0xffffffff ;
461 
462     switch(opcode)
463     {
464     case LM_LOADER_OPCODE_LOAD:
465         mcp_msg = b_lock ? DRV_MSG_CODE_LOAD_REQ : DRV_MSG_CODE_LOAD_DONE ;
466         break;
467     case LM_LOADER_OPCODE_UNLOAD_WOL_EN:
468         mcp_msg = b_lock ? DRV_MSG_CODE_UNLOAD_REQ_WOL_EN : DRV_MSG_CODE_UNLOAD_DONE ;
469         break;
470     case LM_LOADER_OPCODE_UNLOAD_WOL_DIS:
471         mcp_msg = b_lock ? DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS : DRV_MSG_CODE_UNLOAD_DONE ;
472         break;
473     case LM_LOADER_OPCODE_UNLOAD_WOL_MCP:
474         mcp_msg = b_lock ? DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP : DRV_MSG_CODE_UNLOAD_DONE ;
475         break;
476     default:
477         DbgBreakIf(1) ;
478         break;
479     }
480     return mcp_msg ;
481 }
482 /*
483  *Function Name:mcp_resp_to_lm_loader_resp
484  *
485  *Parameters:
486  *
487  *Description:
488  *      Translates mcp response to loader response FW_MSG_CODE_DRV_XXX->LM_LOADER_RESPONSE_XX
489  *Returns:
490  *
491  */
492 lm_loader_response mcp_resp_to_lm_loader_resp( u32_t mcp_resp )
493 {
494     lm_loader_response resp = LM_LOADER_RESPONSE_INVALID ;
495     switch(mcp_resp)
496     {
497     case FW_MSG_CODE_DRV_LOAD_COMMON:
498         resp = LM_LOADER_RESPONSE_LOAD_COMMON ;
499         break;
500     case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
501         resp = LM_LOADER_RESPONSE_LOAD_COMMON_CHIP ;
502         break;
503     case FW_MSG_CODE_DRV_LOAD_PORT:
504         resp = LM_LOADER_RESPONSE_LOAD_PORT ;
505         break;
506     case FW_MSG_CODE_DRV_LOAD_FUNCTION:
507         resp = LM_LOADER_RESPONSE_LOAD_FUNCTION ;
508         break;
509     case FW_MSG_CODE_DRV_UNLOAD_COMMON:
510         resp = LM_LOADER_RESPONSE_UNLOAD_COMMON ;
511         break;
512     case FW_MSG_CODE_DRV_UNLOAD_PORT:
513         resp = LM_LOADER_RESPONSE_UNLOAD_PORT ;
514         break;
515     case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
516         resp = LM_LOADER_RESPONSE_UNLOAD_FUNCTION ;
517         break;
518     case FW_MSG_CODE_DRV_LOAD_DONE:
519         resp = LM_LOADER_RESPONSE_LOAD_DONE ;
520         break;
521     case FW_MSG_CODE_DRV_UNLOAD_DONE:
522         resp = LM_LOADER_RESPONSE_UNLOAD_DONE ;
523         break;
524     default:
525         DbgMessage(NULL, FATAL, "mcp_resp=0x%x\n", mcp_resp );
526         DbgBreakIf(1) ;
527         break;
528     }
529     return resp ;
530 }
531 // TBD - should it be the only indication??
532 #define IS_MCP_ON(_pdev) ( TEST_MODE_NO_MCP != GET_FLAGS(_pdev->params.test_mode, TEST_MODE_NO_MCP ) )
533 
534 /*
535  *Function Name:lm_loader_lock
536  *
537  *Parameters:
538  *
539  *Description:
540  *     sync loading/unloading of port/function
541  *Returns:
542  *
543  */
544 lm_loader_response lm_loader_lock( lm_device_t* pdev, lm_loader_opcode opcode )
545 {
546     u32_t              mcp_msg        = 0;
547     u32_t              param          = 0;
548     u32_t              fw_resp        = 0;
549     lm_loader_response resp           = LM_LOADER_RESPONSE_INVALID ;
550     lm_status_t        lm_status      = LM_STATUS_SUCCESS ;
551     u32_t              wait_cnt       = 0;
552     u32_t              wait_cnt_limit = 5000;
553     const u32_t        feature_flags  = mm_get_feature_flags( pdev );
554     const u8_t         is_suspend     = opcode & LM_LOADER_OPCODE_UNLOAD_SUSPEND;
555 
556     opcode &= LM_LOADER_OPCODE_MASK;
557     if( IS_MCP_ON(pdev) )
558     {
559         mcp_msg = lm_loader_opcode_to_mcp_msg( opcode, TRUE ) ;
560 
561         // in case it is load (and not unload)
562         // send mfw LFA param
563         if ( DRV_MSG_CODE_LOAD_REQ == mcp_msg )
564         {
565             SET_FLAGS(param, DRV_MSG_CODE_LOAD_REQ_WITH_LFA );
566 
567             // in case BFS, set FORCE_LFA flag on
568             if( GET_FLAGS( feature_flags, FEATURE_ETH_BOOTMODE_PXE )   ||
569                 GET_FLAGS( feature_flags, FEATURE_ETH_BOOTMODE_ISCSI ) ||
570                 GET_FLAGS( feature_flags, FEATURE_ETH_BOOTMODE_FCOE ) )
571             {
572                 SET_FLAGS( param, DRV_MSG_CODE_LOAD_REQ_FORCE_LFA );
573             }
574 
575         }
576         else if (is_suspend)
577         {
578             SET_FLAGS( param, DRV_MSG_CODE_UNLOAD_NON_D3_POWER ); //temporary
579         }
580 
581         //we do this with no locks because acquiring the loader lock may take a long time (e.g. in case another function takes a
582         //long time to initialize we will only get a response from the MCP when it's done). We don't need a lock because interrupts
583         //are disabled at this point and we won't get any IOCTLs.
584         lm_status = lm_mcp_cmd_send_recieve_non_atomic( pdev, lm_mcp_mb_header, mcp_msg, param, MCP_CMD_DEFAULT_TIMEOUT, &fw_resp ) ;
585         if ( LM_STATUS_SUCCESS == lm_status )
586         {
587             resp = mcp_resp_to_lm_loader_resp(  fw_resp ) ;
588             pdev->vars.b_in_init_reset_flow = TRUE;
589         }
590     }
591     else // MCP_SIM
592     {
593         if( ERR_IF(PORT_ID(pdev) > 1) || ERR_IF(( FUNC_ID(pdev)) >= ARRSIZE(g_lm_loader.path_arr[PATH_ID(pdev)].func_arr)) )
594         {
595             DbgBreakMsg("Invalid PORT_ID/FUNC_ID\n");
596             return resp ;
597         }
598         do
599         {
600             MM_ACQUIRE_LOADER_LOCK();
601             if( LM_LOADER_IS_LOCKED(PATH_ID(pdev)) )
602             {
603                 MM_RELEASE_LOADER_LOCK();
604                 mm_wait(pdev,20) ;
605                 DbgBreakIfAll( ++wait_cnt > wait_cnt_limit ) ;
606             }
607             else
608             {
609                 // we'll release the lock when we finish the work
610                 break;
611             }
612         }while(1) ;
613         // Verify that no one holds the lock; if someone does, it's a bug!
614         DbgBreakIf( 0 != g_lm_loader.lock_owner ) ;
615 
616         // mark our current function id as owner
617         g_lm_loader.lock_owner = FUNC_ID(pdev)+1 ;
618 
619         switch( opcode )
620         {
621         case LM_LOADER_OPCODE_LOAD:
622             if( LM_LOADER_IS_FIRST_ON_CHIP(pdev) )
623             {
624                 resp = LM_LOADER_RESPONSE_LOAD_COMMON_CHIP;
625             }
626             else if( LM_LOADER_IS_FIRST_ON_COMMON(pdev,PATH_ID(pdev)) )
627             {
628                 resp = LM_LOADER_RESPONSE_LOAD_COMMON ;
629             }
630             else if( LM_LOADER_IS_FIRST_ON_PORT( pdev, PATH_ID(pdev), PORT_ID(pdev) ) )
631             {
632                 resp = LM_LOADER_RESPONSE_LOAD_PORT ;
633             }
634             else
635             {
636                 resp = LM_LOADER_RESPONSE_LOAD_FUNCTION ;
637             }
638             break;
639         case LM_LOADER_OPCODE_UNLOAD_WOL_EN:
640         case LM_LOADER_OPCODE_UNLOAD_WOL_DIS:
641         case LM_LOADER_OPCODE_UNLOAD_WOL_MCP:
642             if( LM_LOADER_IS_LAST_ON_COMMON(pdev,PATH_ID(pdev)) )
643             {
644                 resp = LM_LOADER_RESPONSE_UNLOAD_COMMON ;
645             }
646             else if( LM_LOADER_IS_LAST_ON_PORT( pdev, PATH_ID(pdev), PORT_ID(pdev) ) )
647             {
648                 resp = LM_LOADER_RESPONSE_UNLOAD_PORT ;
649             }
650             else
651             {
652                 resp = LM_LOADER_RESPONSE_UNLOAD_FUNCTION ;
653             }
654             break;
655         default:
656             DbgBreakIf(1) ;
657             break;
658         }  // switch
659         pdev->vars.b_in_init_reset_flow = TRUE;
660         MM_RELEASE_LOADER_LOCK();
661     } // MCP_SIM
662     return resp ;
663 }
664 /*
665  *Function Name:lm_loader_unlock
666  *
667  *Parameters:
668  *
669  *Description:
670  *      sync loading/unloading of port/function
671  *Returns:
672  *
673  */
674 lm_loader_response lm_loader_unlock( struct _lm_device_t *pdev, lm_loader_opcode opcode, OPTIONAL const u32_t* IN p_param )
675 {
676     u32_t              mcp_msg     = 0 ;
677     u32_t              param       = p_param ? (*p_param) : 0 ;
678     lm_loader_response resp        = LM_LOADER_RESPONSE_INVALID ;
679     u32_t              fw_resp     = 0 ;
680     lm_status_t        lm_status   = LM_STATUS_SUCCESS ;
681     u8_t               b_new_state = 0xff ;
682     if CHK_NULL(pdev)
683     {
684         return resp ;
685     }
686     opcode &= LM_LOADER_OPCODE_MASK;
687     if( IS_MCP_ON(pdev) )
688     {
689         mcp_msg   = lm_loader_opcode_to_mcp_msg( opcode, FALSE );
690         //we do this with no locks because acquiring the loader lock may take a long time (e.g. in case another function takes a
691         //long time to initialize we will only get a response from the MCP when it's done). We don't need a lock because interrupts
692         //are disabled at this point and we won't get any IOCTLs.
693         lm_status = lm_mcp_cmd_send_recieve_non_atomic(pdev, lm_mcp_mb_header, mcp_msg, param, MCP_CMD_DEFAULT_TIMEOUT, &fw_resp ) ;
694         if ( LM_STATUS_SUCCESS == lm_status )
695         {
696             resp = mcp_resp_to_lm_loader_resp( fw_resp ) ;
697             pdev->vars.b_in_init_reset_flow = FALSE;
698         }
699     }
700     else // MCP_SIM
701     {
702         MM_ACQUIRE_LOADER_LOCK();
703 
704         // Verify current function id is the owner
705         DbgBreakIf( g_lm_loader.lock_owner != FUNC_ID(pdev)+1 ) ;
706 
707         switch( opcode )
708         {
709         case LM_LOADER_OPCODE_LOAD:
710             b_new_state = TRUE ;
711             resp        = LM_LOADER_RESPONSE_LOAD_DONE ;
712             break;
713         case LM_LOADER_OPCODE_UNLOAD_WOL_EN:
714         case LM_LOADER_OPCODE_UNLOAD_WOL_DIS:
715         case LM_LOADER_OPCODE_UNLOAD_WOL_MCP:
716             b_new_state = FALSE  ;
717             resp        = LM_LOADER_RESPONSE_UNLOAD_DONE ;
718             break;
719         default:
720             DbgBreakIf(1) ;
721             break;
722         }  // switch
723         // verify the new state differs from the current one
724         DbgBreakIf(g_lm_loader.path_arr[PATH_ID(pdev)].func_arr[FUNC_ID(pdev)].b_loaded == b_new_state);
725 
726         // assign new state
727         g_lm_loader.path_arr[PATH_ID(pdev)].func_arr[FUNC_ID(pdev)].b_loaded = b_new_state ;
728 
729         // mark we don't own the lock anymore
730         g_lm_loader.lock_owner = FALSE ;
731 
732         pdev->vars.b_in_init_reset_flow = FALSE;
733         MM_RELEASE_LOADER_LOCK();
734     } // MCP_SIM
735     return resp ;
736 }
737 
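/*
 * Illustrative load-flow sketch (assumption: a simplified pseudo-flow, not a
 * verbatim copy of the driver's init path).  The lock/unlock pair brackets
 * the hardware init so that only the first function performs the common
 * initialization:
 *
 *     lm_loader_response resp = lm_loader_lock(pdev, LM_LOADER_OPCODE_LOAD);
 *     if (LM_LOADER_RESPONSE_LOAD_COMMON_CHIP == resp ||
 *         LM_LOADER_RESPONSE_LOAD_COMMON == resp)
 *     {
 *         // ... chip-common initialization ...
 *     }
 *     // ... port/function initialization ...
 *     lm_loader_unlock(pdev, LM_LOADER_OPCODE_LOAD, NULL);
 */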
738 /* Used for simulating an MCP reset where the MCP no longer knows the state of the loaded drivers... */
739 void lm_loader_reset ( struct _lm_device_t *pdev )
740 {
741     mm_memset(&g_lm_loader, 0, sizeof(g_lm_loader));
742 }
743 
744 /*
745  *Function Name:lm_mcp_cmd_init
746  *
747  *Parameters:
748  *
749  *Description:
750  *      initialize the mailbox sequence numbers and verify the bootcode version
751  *Returns:
752  *
753  */
754 lm_status_t lm_mcp_cmd_init( struct _lm_device_t *pdev)
755 {
756     u32_t val        = 0 ;
757     u32_t bc_rev     = 0 ;
758     u32_t offset     = 0 ;
759     u8_t  func_mb_id = 0;
760 
761     DbgMessage(pdev, INFORMi , "### mcp_cmd_init\n");
762 
763     if CHK_NULL(pdev)
764     {
765         return LM_STATUS_FAILURE ;
766     }
767 
768     // we are in NO_MCP mode - nothing to do
769     if( 0 != GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP ) )
770     {
771         return LM_STATUS_SUCCESS ;
772     }
773 
774     // validate bootcode version
775     bc_rev = LM_GET_BC_REV_MAJOR(pdev);
776 
777     if (bc_rev < BC_REV_SUPPORTED)
778     {
779         DbgMessage(pdev, FATAL,"bc version is less than the required 0x%x (actual 0x%x).\n", BC_REV_SUPPORTED, bc_rev );
780         DbgBreakMsg("Please upgrade the bootcode version.\n");
781         // TODO add event log
782         return LM_STATUS_INVALID_PARAMETER;
783     }
784 
785     // enable optic module verification according to BC version
786     if (bc_rev >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL)
787     {
788         SET_FLAGS(pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY);
789     }
790 
791     if (bc_rev >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL)
792     {
793         SET_FLAGS(pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY);
794     }
795 
796     if (bc_rev >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED)
797     {
798         SET_FLAGS(pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX);
799     }
800 
801     if (bc_rev >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED)
802     {
803         SET_FLAGS(pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED);
804     }
805 
806     if (bc_rev >= REQ_BC_VER_4_MT_SUPPORTED)
807     {
808         SET_FLAGS(pdev->params.link.feature_config_flags, ELINK_FEATURE_CONFIG_MT_SUPPORT);
809     }
810 
811     // regular MCP mode
812     func_mb_id = pdev->params.pfunc_mb_id;
813 
814     // read first seq number from shared memory
815     offset = OFFSETOF(shmem_region_t, func_mb[func_mb_id].drv_mb_header);
816     LM_SHMEM_READ(pdev, offset, &val);
817     pdev->vars.fw_wr_seq = (u16_t)(val & DRV_MSG_SEQ_NUMBER_MASK);
818 
819     // read current mcp_pulse value
820     offset = OFFSETOF(shmem_region_t,func_mb[func_mb_id].mcp_pulse_mb) ;
821     LM_SHMEM_READ(pdev, offset ,&val);
822     pdev->vars.drv_pulse_wr_seq = (u16_t)(val & MCP_PULSE_SEQ_MASK);
823 
824     return LM_STATUS_SUCCESS;
825 }
826 
827 lm_status_t lm_mcp_set_mf_bw(struct _lm_device_t *pdev, IN u8_t min_bw, IN u8_t max_bw)
828 {
829     u32_t       minmax_param    = 0;
830     u32_t       resp            = 0;
831     lm_status_t lm_status       = LM_STATUS_SUCCESS;
832     const u32_t bc_rev          = LM_GET_BC_REV_MAJOR(pdev);
833 
834     //if in no MCP mode, don't do anything
835     if(!lm_is_mcp_detected(pdev))
836     {
837         DbgMessage(pdev, WARNmi, "No MCP detected.\n");
838         return LM_STATUS_SUCCESS;
839     }
840     //if bootcode is less than REQ_BC_VER_4_SET_MF_BW, fail
841     if( bc_rev < REQ_BC_VER_4_SET_MF_BW )
842     {
843         DbgMessage(pdev, WARNmi, "Invalid bootcode version.\n");
844         return LM_STATUS_INVALID_PARAMETER;
845     }
846     //if not E2 or not MF mode, fail
847     if(CHIP_IS_E1x(pdev) || !IS_MULTI_VNIC(pdev))
848     {
849         DbgMessage(pdev, WARNmi, "Device is E1/E1.5 or in SF mode.\n");
850         return LM_STATUS_INVALID_PARAMETER;
851     }
852     //if the parameters are not valid, fail
853     if (max_bw > 100)
854     {
855         DbgMessage(pdev, WARNmi, "Invalid parameters.\n");
856         return LM_STATUS_INVALID_PARAMETER;
857     }
858     //build MCP command parameter from min_bw/max_bw
859     //we use FUNC_MF_CFG_MIN_BW_SHIFT because the param structure is supposed to
860     //be equivalent for this opcode and for the DCC opcode, but there is no define
861     //for this opcode.
862     ASSERT_STATIC(FUNC_MF_CFG_MIN_BW_MASK == DRV_MSG_CODE_SET_MF_BW_MIN_MASK);
863     ASSERT_STATIC(FUNC_MF_CFG_MAX_BW_MASK == DRV_MSG_CODE_SET_MF_BW_MAX_MASK);
864     minmax_param =  (min_bw << FUNC_MF_CFG_MIN_BW_SHIFT)|
865                     (max_bw << FUNC_MF_CFG_MAX_BW_SHIFT);
866 
867     //call lm_mcp_cmd_send_recieve with DRV_MSG_CODE_SET_MF_BW opcode and the parameter
868     lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_SET_MF_BW, minmax_param, MCP_CMD_DEFAULT_TIMEOUT, &resp);
869 
870     //make sure that the response is FW_MSG_CODE_SET_MF_BW_SENT
871     if(resp != FW_MSG_CODE_SET_MF_BW_SENT)
872     {
873         DbgBreakIf(resp != FW_MSG_CODE_SET_MF_BW_SENT);
874         return LM_STATUS_FAILURE;
875     }
876 
877     //return what lm_mcp_cmd_send_recieve returned
878     return lm_status;
879 }
880 
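/*
 * Worked example (illustrative): with FUNC_MF_CFG_MIN_BW_SHIFT == 0 and
 * FUNC_MF_CFG_MAX_BW_SHIFT == 16 (the values assumed here for the shared MF
 * config layout), min_bw = 10 and max_bw = 90 would yield
 * minmax_param = (10 << 0) | (90 << 16) = 0x005A000A.
 */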
881 /*
882  *Function Name:lm_mcp_cmd_send
883  *
884  *Parameters:
885  *
886  *Description:
887  *      write a driver command (with sequence number) to the MCP mailbox in shared memory
888  *Returns:
889  *
890  */
891 lm_status_t lm_mcp_cmd_send( struct _lm_device_t *pdev, lm_mcp_mb_type mb_type, u32_t drv_msg, u32_t param )
892 {
893     u16_t*     p_seq      = NULL ;
894     u32_t      offset     = 0 ;
895     u32_t      drv_mask   = 0 ;
896     const u8_t func_mb_id = pdev->params.pfunc_mb_id;
897 
898     DbgMessage(pdev, INFORMi , "### mcp_cmd_send mb_type=0x%x drv_msg=0x%x param=0x%x\n", mb_type, drv_msg, param );
899 
900     // we are in NO_MCP mode - nothing to do
901     if( 0 != GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP ) )
902     {
903         return LM_STATUS_SUCCESS ;
904     }
905 
906     switch( mb_type )
907     {
908     case lm_mcp_mb_header:
909         p_seq      = &pdev->vars.fw_wr_seq ;
910         drv_mask   = DRV_MSG_SEQ_NUMBER_MASK ;
911         offset     = OFFSETOF(shmem_region_t, func_mb[func_mb_id].drv_mb_header) ;
912         /* Write the parameter to the mcp */
913         if (p_seq)
914         {
915             LM_SHMEM_WRITE(pdev,OFFSETOF(shmem_region_t, func_mb[func_mb_id].drv_mb_param),param);
916         }
917         break;
918 
919     case lm_mcp_mb_pulse:
920         p_seq      = &pdev->vars.drv_pulse_wr_seq ;
921         drv_mask   = DRV_PULSE_SEQ_MASK ;
922         offset     = OFFSETOF(shmem_region_t, func_mb[func_mb_id].mcp_pulse_mb) ;
923         break;
924     case lm_mcp_mb_param:
925     default:
926         break;
927     }
928 
929     if CHK_NULL( p_seq )
930     {
931         return LM_STATUS_INVALID_PARAMETER ;
932     }
933 
934     // increment sequence
935     ++(*p_seq);
936 
937     // prepare message
938     drv_msg |= ( (*p_seq) & drv_mask );
939 
940     LM_SHMEM_WRITE(pdev,offset,drv_msg);
941 
942     DbgMessage(pdev, INFORMi , "mcp_cmd_send: Sent driver cmd 0x%x to MCP\n", drv_msg);
943 
944     return LM_STATUS_SUCCESS ;
945 }
946 
947 /*
948  *Function Name:lm_mcp_cmd_response
949  *
950  *Parameters:
951  *              TBD - add timeout value
952  *              assumption - only one request can be outstanding at a time
953  *              assumption - only one request can be sent simultaneously
954  *Returns:
955  *
956  */
957 lm_status_t lm_mcp_cmd_response( struct _lm_device_t *pdev,
958                                  lm_mcp_mb_type       mcp_mb_type,
959                                  u32_t                drv_msg,
960                                  u32_t                timeout,
961                                  OUT u32_t*           p_fw_resp )
962 {
963     u16_t*      p_seq      = NULL ;
964     u32_t       offset     = 0 ;
965     u32_t       drv_mask   = 0 ;
966     u32_t       fw_mask    = 0 ;
967     u32_t       cnt        = 0 ;
968     u32_t       wait_itr   = 0 ;
969     u32_t       resp_mask  = 0xffffffff ;
970     lm_status_t lm_status  = LM_STATUS_SUCCESS ;
971     const u8_t  func_mb_id = pdev->params.pfunc_mb_id;
972 
973     UNREFERENCED_PARAMETER_(timeout);
974 
975     DbgMessage(pdev, INFORMi, "### mcp_cmd_response mb_type=0x%x drv_msg=0x%x\n", mcp_mb_type, drv_msg );
976 
977     if ( CHK_NULL(p_fw_resp) )
978     {
979         return LM_STATUS_FAILURE ;
980     }
981 
982     switch( mcp_mb_type )
983     {
984     case lm_mcp_mb_header:
985         p_seq      = &pdev->vars.fw_wr_seq ;
986         drv_mask   = DRV_MSG_SEQ_NUMBER_MASK ;
987         fw_mask    = FW_MSG_SEQ_NUMBER_MASK ;
988         resp_mask  = FW_MSG_CODE_MASK ;
989         offset     = OFFSETOF(shmem_region_t, func_mb[func_mb_id].fw_mb_header) ;
990         break;
991 
992         // TBD - is it needed ??
993     case lm_mcp_mb_pulse:
994         p_seq      = &pdev->vars.drv_pulse_wr_seq ;
995         drv_mask   = DRV_PULSE_SEQ_MASK ;
996         fw_mask    = MCP_PULSE_SEQ_MASK ;
997         offset     = OFFSETOF(shmem_region_t, func_mb[func_mb_id].mcp_pulse_mb) ;
998         break;
999 
1000     case lm_mcp_mb_param:
1001     default:
1002         break;
1003     }
1004 
1005     if CHK_NULL( p_seq )
1006     {
1007         return LM_STATUS_INVALID_PARAMETER ;
1008     }
1009 
1010     lm_status = LM_STATUS_TIMEOUT ;
1011 
1012     // Wait for reply 5 sec per unloading function
1013     //TODO exponential back off
1014     wait_itr = 240 * FW_ACK_NUM_OF_POLL * PORT_MAX * (u32_t)(IS_MULTI_VNIC(pdev) ? MAX_VNIC_NUM : 1);
1015     for(cnt = 0; cnt < wait_itr; cnt++)
1016     {
1017         mm_wait(pdev, FW_ACK_POLL_TIME_MS * 50);
1018 
1019         LM_SHMEM_READ(pdev, offset, p_fw_resp);
1020 
1021         if(( (*p_fw_resp) & fw_mask) == ( (*p_seq) & drv_mask))
1022         {
1023             lm_status = LM_STATUS_SUCCESS ;
1024             break;
1025         }
1026     }
1027 
1028     *p_fw_resp = (*p_fw_resp & resp_mask);
1029 
1030     return lm_status ;
1031 }
1032 
1033 lm_status_t lm_mcp_cmd_send_recieve_non_atomic( struct _lm_device_t *pdev,
1034                                              lm_mcp_mb_type       mcp_mb_type,
1035                                              u32_t                drv_msg,
1036                                              u32_t                param,
1037                                              u32_t                timeout,
1038                                              OUT u32_t*           p_fw_resp )
1039 {
1040     lm_status_t lm_status = LM_STATUS_FAILURE;
1041     u32_t       val       = 0;
1042 
1043     lm_status = lm_mcp_cmd_send( pdev, mcp_mb_type, drv_msg, param) ;
1044 
1045     if( LM_STATUS_SUCCESS != lm_status )
1046     {
1047         val = lm_mcp_check(pdev);
1048         DbgMessage(pdev, FATAL, "mcp_cmd_send_and_recieve: mcp_cmd_send drv_msg=0x%x failed. lm_status=0x%x mcp_check=0x%x\n", drv_msg, lm_status, val);
1049         DbgBreakMsg("mcp_cmd_send_and_recieve: mcp_cmd_send failed!\n");
1050         return lm_status;
1051     }
1052 
1053     DbgMessage(pdev, INFORMi , "mcp_cmd_send_and_recieve: Sent driver cmd=0x%x to MCP\n",  drv_msg );
1054 
1055     lm_status = lm_mcp_cmd_response( pdev, mcp_mb_type, drv_msg, timeout, p_fw_resp ) ;
1056 
1057     if( LM_STATUS_SUCCESS != lm_status )
1058     {
1059         val = lm_mcp_check(pdev);
1060         DbgMessage(pdev, FATAL, "mcp_cmd_send_and_recieve: mcp_cmd_response drv_msg=0x%x failed. lm_status=0x%x mcp_check=0x%x\n", drv_msg, lm_status, val);
1061         DbgBreakMsg("mcp_cmd_send_and_recieve: mcp_cmd_response failed!\n");
1062         return lm_status;
1063     }
1064 
1065     DbgMessage(pdev, INFORMi , "mcp_cmd_send_and_recieve: Got response 0x%x from MCP\n", *p_fw_resp );
1066 
1067     return LM_STATUS_SUCCESS;
1068 }
1069 
1070 /*
1071  *Function Name:lm_mcp_cmd_send_recieve
1072  *
1073  *Parameters:
1074  *
1075  *Description:
1076  *
1077  *Returns: lm_status_t
1078  *
1079  */
1080 lm_status_t lm_mcp_cmd_send_recieve( struct _lm_device_t *pdev,
1081                                      lm_mcp_mb_type       mcp_mb_type,
1082                                      u32_t                drv_msg,
1083                                      u32_t                param,
1084                                      u32_t                timeout,
1085                                      OUT u32_t*           p_fw_resp )
1086 {
1087     lm_status_t lm_status = LM_STATUS_SUCCESS ;
1088 
1089     MM_ACQUIRE_MCP_LOCK(pdev);
1090 
1091     lm_status = lm_mcp_cmd_send_recieve_non_atomic(pdev, mcp_mb_type, drv_msg, param, timeout, p_fw_resp);
1092 
1093     MM_RELEASE_MCP_LOCK(pdev);
1094 
1095     return lm_status ;
1096 }
1097 
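/*
 * Illustrative usage sketch (shown only to demonstrate the mailbox
 * handshake; the actual load flow above uses the non-atomic variant): the
 * caller supplies an opcode and a parameter, and receives the masked
 * FW_MSG_CODE_* response back:
 *
 *     u32_t fw_resp = 0;
 *     lm_status_t rc = lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header,
 *                                              DRV_MSG_CODE_LOAD_REQ, 0,
 *                                              MCP_CMD_DEFAULT_TIMEOUT,
 *                                              &fw_resp);
 *     if (LM_STATUS_SUCCESS == rc)
 *     {
 *         // fw_resp now holds e.g. FW_MSG_CODE_DRV_LOAD_PORT
 *     }
 */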
1098 
1099 // Check whether the MCP program counter is advancing. If it is stuck, return the stuck value; if it is advancing, return 0.
1100 u32_t lm_mcp_check( struct _lm_device_t *pdev)
1101 {
1102     static u32_t const offset = MCP_REG_MCPR_CPU_PROGRAM_COUNTER ;
1103     u32_t              reg    = 0 ;
1104     u32_t              i      = 0 ;
1105 
1106     reg = REG_RD(pdev, offset);
1107 
1108     for( i = 0; i<4; i++ )
1109     {
1110         if( REG_RD(pdev, offset) != reg )
1111         {
1112             return 0; // OK
1113         }
1114     }
1115     return reg; // the MCP is hung at this program counter value!
1116 }
1117 
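/*
 * Illustrative usage (assumption: called from an error/diagnostic path, as
 * done in lm_mcp_cmd_send_recieve_non_atomic above):
 *
 *     u32_t stuck_pc = lm_mcp_check(pdev);
 *     if (stuck_pc != 0)
 *     {
 *         DbgMessage(pdev, FATAL, "MCP stuck at PC 0x%x\n", stuck_pc);
 *     }
 */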
1118 /**lm_mcp_cli_idx_to_drv_cap_flag
1119  * Get the flag to set in drv_capabilities_flag for a given LM
1120  * client.
1121  *
1122  * @param cli_id the LM client index.
1123  *
1124  * @return u32_t the appropriate flag for cli_id, or 0 if there
1125  *         is no matching flag.
1126  */
1127 static u32_t lm_mcp_cli_idx_to_drv_cap_flag(IN const lm_cli_idx_t cli_id)
1128 {
1129     switch(cli_id)
1130     {
1131     case LM_CLI_IDX_NDIS:
1132         return DRV_FLAGS_CAPABILITIES_LOADED_L2;
1133     case LM_CLI_IDX_ISCSI:
1134         return DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
1135     case LM_CLI_IDX_FCOE:
1136         return DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
1137     case LM_CLI_IDX_MAX://may happen for UM clients that have no matching LM client, such as diag.
1138         return 0;
1139     case LM_CLI_IDX_FWD://fallthrough - this client has no bind/unbind flow and no matching UM client
1140     case LM_CLI_IDX_OOO://fallthrough - this client has no bind/unbind flow and no matching UM client
1141     default:
1142         DbgBreakMsg("Invalid client type");
1143         return 0;
1144     }
1145 }
1146 
1147 void lm_mcp_indicate_client_imp(struct _lm_device_t *pdev, IN const lm_cli_idx_t cli_id, IN const u8_t b_bind )
1148 {
1149     const u32_t drv_cap_client = lm_mcp_cli_idx_to_drv_cap_flag(cli_id);
1150     const u32_t func_mb_id = FUNC_MAILBOX_ID(pdev);
1151     const u32_t shmem_offset = OFFSETOF(shmem2_region_t, drv_capabilities_flag[func_mb_id]);
1152     u32_t       drv_cap_shmem  = 0;
1153 
1154     if (CHIP_IS_E1x(pdev) ||
1155         !LM_SHMEM2_HAS(pdev, drv_capabilities_flag))
1156     {
1157         return;
1158     }
1159 
1160     if (0 == drv_cap_client)
1161     {
1162         //this is a client that does not require updating the SHMEM
1163         return;
1164     }
1165 
1166     LM_SHMEM2_READ(pdev, shmem_offset, &drv_cap_shmem);
1167 
1168     if( b_bind )
1169     {
1170         SET_FLAGS( drv_cap_shmem, drv_cap_client );
1171     }
1172     else
1173     {
1174         RESET_FLAGS( drv_cap_shmem, drv_cap_client );
1175     }
1176 
1177     LM_SHMEM2_WRITE(pdev, shmem_offset, drv_cap_shmem);
1178 }
1179 
1180 void lm_mcp_indicate_client_bind(struct _lm_device_t *pdev, IN const lm_cli_idx_t cli_id)
1181 {
1182     lm_mcp_indicate_client_imp(pdev, cli_id, TRUE);
1183 }
1184 
1185 void lm_mcp_indicate_client_unbind(struct _lm_device_t *pdev, IN const lm_cli_idx_t cli_id)
1186 {
1187     lm_mcp_indicate_client_imp(pdev, cli_id, FALSE);
1188 }
1189
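/*
 * Illustrative usage (assumption: called from the per-protocol bind/unbind
 * flows in the upper driver layers):
 *
 *     lm_mcp_indicate_client_bind(pdev, LM_CLI_IDX_ISCSI);   // on storage bind
 *     // ...
 *     lm_mcp_indicate_client_unbind(pdev, LM_CLI_IDX_ISCSI); // on storage unbind
 */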