xref: /linux/drivers/scsi/bfa/bfa_ioc.c (revision 12871a0bd67dd4db4418e1daafcd46e9d329ef10)
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17 
18 #include "bfad_drv.h"
19 #include "bfa_ioc.h"
20 #include "bfi_ctreg.h"
21 #include "bfa_defs.h"
22 #include "bfa_defs_svc.h"
23 
24 BFA_TRC_FILE(CNA, IOC);
25 
26 /*
27  * IOC local definitions
28  */
29 #define BFA_IOC_TOV		3000	/* msecs */
30 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
31 #define BFA_IOC_HB_TOV		500	/* msecs */
32 #define BFA_IOC_HWINIT_MAX	5
33 #define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
34 
35 #define bfa_ioc_timer_start(__ioc)					\
36 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
37 			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
38 #define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
39 
40 #define bfa_hb_timer_start(__ioc)					\
41 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
42 			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
43 #define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
44 
45 #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
46 
47 /*
48  * ASIC specific macros: see bfa_ioc_cb.c and bfa_ioc_ct.c for details.
49  */
50 
51 #define bfa_ioc_firmware_lock(__ioc)			\
52 			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
53 #define bfa_ioc_firmware_unlock(__ioc)			\
54 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
55 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
56 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
57 #define bfa_ioc_notify_fail(__ioc)              \
58 			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
59 #define bfa_ioc_sync_start(__ioc)               \
60 			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
61 #define bfa_ioc_sync_join(__ioc)                \
62 			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
63 #define bfa_ioc_sync_leave(__ioc)               \
64 			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
65 #define bfa_ioc_sync_ack(__ioc)                 \
66 			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
67 #define bfa_ioc_sync_complete(__ioc)            \
68 			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
69 
70 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
71 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
72 			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
73 
74 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
75 
76 /*
77  * forward declarations
78  */
79 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
80 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
81 static void bfa_ioc_timeout(void *ioc);
82 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
83 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
84 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
85 static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
86 static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
87 static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
88 static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
89 static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
90 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
91 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
92 static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
93 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
94 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
95 
96 
97 /*
98  * IOC state machine definitions/declarations
99  */
100 enum ioc_event {
101 	IOC_E_RESET		= 1,	/*  IOC reset request		*/
102 	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
103 	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
104 	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
105 	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
106 	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
107 	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
108 	IOC_E_INITFAILED	= 8,	/*  init fail notice by iocpf sm */
109 	IOC_E_PFFAILED		= 9,	/*  failure notice by iocpf sm	*/
110 	IOC_E_HBFAIL		= 10,	/*  heartbeat failure		*/
111 	IOC_E_HWERROR		= 11,	/*  hardware error interrupt	*/
112 	IOC_E_TIMEOUT		= 12,	/*  timeout			*/
113 };
114 
115 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
116 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
123 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
124 
125 static struct bfa_sm_table_s ioc_sm_table[] = {
126 	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
127 	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
128 	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
129 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
130 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
131 	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
132 	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
133 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
134 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
135 };
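
/*
 * How the state machine plumbing above is driven (a hedged sketch, not
 * part of the driver): each bfa_fsm_state_decl() expands to a state
 * handler plus an entry function, bfa_fsm_set_state() records the new
 * handler and runs its entry action, and bfa_fsm_send_event()
 * dispatches an event to the current handler.  The bfa_sm_table_s
 * arrays map a handler back to its external state enum; the reverse
 * lookup is assumed to work roughly as below.
 */
#if 0	/* illustrative only, not compiled */
static int
ioc_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
{
	int	i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;	/* e.g. bfa_ioc_sm_op -> BFA_IOC_OPERATIONAL */
}
#endif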
136 
137 /*
138  * IOCPF state machine definitions/declarations
139  */
140 
141 #define bfa_iocpf_timer_start(__ioc)					\
142 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
143 			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
144 #define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
145 
146 #define bfa_iocpf_recovery_timer_start(__ioc)				\
147 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
148 			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
149 
150 #define bfa_sem_timer_start(__ioc)					\
151 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
152 			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
153 #define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
154 
155 /*
156  * Forward declarations for the iocpf state machine
157  */
158 static void bfa_iocpf_timeout(void *ioc_arg);
159 static void bfa_iocpf_sem_timeout(void *ioc_arg);
160 
161 /*
162  * IOCPF state machine events
163  */
164 enum iocpf_event {
165 	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
166 	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
167 	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
168 	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
169 	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
170 	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
171 	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
172 	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
173 	IOCPF_E_GETATTRFAIL	= 9,	/*  getattr fail notice by ioc sm */
174 	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
175 	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
176 };
177 
178 /*
179  * IOCPF states
180  */
181 enum bfa_iocpf_state {
182 	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
183 	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
184 	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
185 	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
186 	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF initialization failed */
187 	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
188 	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
189 	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
190 	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
191 };
192 
193 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
194 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
195 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
196 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
197 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
198 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
199 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
200 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
201 						enum iocpf_event);
202 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
203 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
204 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
205 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
206 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
207 						enum iocpf_event);
208 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
209 
210 static struct bfa_sm_table_s iocpf_sm_table[] = {
211 	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
212 	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
213 	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
214 	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
215 	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
216 	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
217 	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
218 	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
219 	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
220 	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
221 	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
222 	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
223 	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
224 	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
225 };
226 
227 /*
228  * IOC State Machine
229  */
230 
231 /*
232  * Beginning state. IOC uninit state.
233  */
234 
235 static void
236 bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
237 {
238 }
239 
240 /*
241  * IOC is in uninit state.
242  */
243 static void
244 bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
245 {
246 	bfa_trc(ioc, event);
247 
248 	switch (event) {
249 	case IOC_E_RESET:
250 		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
251 		break;
252 
253 	default:
254 		bfa_sm_fault(ioc, event);
255 	}
256 }
257 /*
258  * Reset entry actions -- initialize state machine
259  */
260 static void
261 bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
262 {
263 	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
264 }
265 
266 /*
267  * IOC is in reset state.
268  */
269 static void
270 bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
271 {
272 	bfa_trc(ioc, event);
273 
274 	switch (event) {
275 	case IOC_E_ENABLE:
276 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
277 		break;
278 
279 	case IOC_E_DISABLE:
280 		bfa_ioc_disable_comp(ioc);
281 		break;
282 
283 	case IOC_E_DETACH:
284 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
285 		break;
286 
287 	default:
288 		bfa_sm_fault(ioc, event);
289 	}
290 }
291 
292 
293 static void
294 bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
295 {
296 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
297 }
298 
299 /*
300  * Host IOC function is being enabled, awaiting response from firmware.
301  * Semaphore is acquired.
302  */
303 static void
304 bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
305 {
306 	bfa_trc(ioc, event);
307 
308 	switch (event) {
309 	case IOC_E_ENABLED:
310 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
311 		break;
312 
313 	case IOC_E_PFFAILED:
314 		/* !!! fall through !!! */
315 	case IOC_E_HWERROR:
316 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
317 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
318 		if (event != IOC_E_PFFAILED)
319 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
320 		break;
321 
322 	case IOC_E_DISABLE:
323 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
324 		break;
325 
326 	case IOC_E_DETACH:
327 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
328 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
329 		break;
330 
331 	case IOC_E_ENABLE:
332 		break;
333 
334 	default:
335 		bfa_sm_fault(ioc, event);
336 	}
337 }
338 
339 
340 static void
341 bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
342 {
343 	bfa_ioc_timer_start(ioc);
344 	bfa_ioc_send_getattr(ioc);
345 }
346 
347 /*
348  * IOC configuration in progress. Timer is active.
349  */
350 static void
351 bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
352 {
353 	bfa_trc(ioc, event);
354 
355 	switch (event) {
356 	case IOC_E_FWRSP_GETATTR:
357 		bfa_ioc_timer_stop(ioc);
358 		bfa_ioc_check_attr_wwns(ioc);
359 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
360 		break;
361 
363 	case IOC_E_PFFAILED:
364 	case IOC_E_HWERROR:
365 		bfa_ioc_timer_stop(ioc);
366 		/* !!! fall through !!! */
367 	case IOC_E_TIMEOUT:
368 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
369 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
370 		if (event != IOC_E_PFFAILED)
371 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
372 		break;
373 
374 	case IOC_E_DISABLE:
375 		bfa_ioc_timer_stop(ioc);
376 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
377 		break;
378 
379 	case IOC_E_ENABLE:
380 		break;
381 
382 	default:
383 		bfa_sm_fault(ioc, event);
384 	}
385 }
386 
387 
388 static void
389 bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
390 {
391 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
392 
393 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
394 	bfa_ioc_hb_monitor(ioc);
395 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
396 }
397 
398 static void
399 bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
400 {
401 	bfa_trc(ioc, event);
402 
403 	switch (event) {
404 	case IOC_E_ENABLE:
405 		break;
406 
407 	case IOC_E_DISABLE:
408 		bfa_hb_timer_stop(ioc);
409 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
410 		break;
411 
412 	case IOC_E_PFFAILED:
413 	case IOC_E_HWERROR:
414 		bfa_hb_timer_stop(ioc);
415 		/* !!! fall through !!! */
416 	case IOC_E_HBFAIL:
417 		bfa_ioc_fail_notify(ioc);
418 
419 		if (ioc->iocpf.auto_recover)
420 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
421 		else
422 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
423 
424 		if (event != IOC_E_PFFAILED)
425 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
426 		break;
427 
428 	default:
429 		bfa_sm_fault(ioc, event);
430 	}
431 }
432 
433 
434 static void
435 bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
436 {
437 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
438 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
439 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
440 }
441 
442 /*
443  * IOC is being disabled
444  */
445 static void
446 bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
447 {
448 	bfa_trc(ioc, event);
449 
450 	switch (event) {
451 	case IOC_E_DISABLED:
452 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
453 		break;
454 
455 	case IOC_E_HWERROR:
456 		/*
457 		 * No state change.  Will move to disabled state
458 		 * after iocpf sm completes failure processing and
459 		 * moves to disabled state.
460 		 */
461 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
462 		break;
463 
464 	default:
465 		bfa_sm_fault(ioc, event);
466 	}
467 }
468 
469 /*
470  * IOC disable completion entry.
471  */
472 static void
473 bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
474 {
475 	bfa_ioc_disable_comp(ioc);
476 }
477 
478 static void
479 bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
480 {
481 	bfa_trc(ioc, event);
482 
483 	switch (event) {
484 	case IOC_E_ENABLE:
485 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
486 		break;
487 
488 	case IOC_E_DISABLE:
489 		ioc->cbfn->disable_cbfn(ioc->bfa);
490 		break;
491 
492 	case IOC_E_DETACH:
493 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
494 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
495 		break;
496 
497 	default:
498 		bfa_sm_fault(ioc, event);
499 	}
500 }
501 
502 
503 static void
504 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
505 {
506 	bfa_trc(ioc, 0);
507 }
508 
509 /*
510  * Hardware initialization retry.
511  */
512 static void
513 bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
514 {
515 	bfa_trc(ioc, event);
516 
517 	switch (event) {
518 	case IOC_E_ENABLED:
519 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
520 		break;
521 
522 	case IOC_E_PFFAILED:
523 	case IOC_E_HWERROR:
524 		/*
525 		 * Initialization retry failed.
526 		 */
527 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
528 		if (event != IOC_E_PFFAILED)
529 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
530 		break;
531 
532 	case IOC_E_INITFAILED:
533 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
534 		break;
535 
536 	case IOC_E_ENABLE:
537 		break;
538 
539 	case IOC_E_DISABLE:
540 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
541 		break;
542 
543 	case IOC_E_DETACH:
544 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
545 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
546 		break;
547 
548 	default:
549 		bfa_sm_fault(ioc, event);
550 	}
551 }
552 
553 
554 static void
555 bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
556 {
557 	bfa_trc(ioc, 0);
558 }
559 
560 /*
561  * IOC failure.
562  */
563 static void
564 bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
565 {
566 	bfa_trc(ioc, event);
567 
568 	switch (event) {
570 	case IOC_E_ENABLE:
571 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
572 		break;
573 
574 	case IOC_E_DISABLE:
575 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
576 		break;
577 
578 	case IOC_E_DETACH:
579 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
580 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
581 		break;
582 
583 	case IOC_E_HWERROR:
584 		/*
585 		 * HB failure notification, ignore.
586 		 */
587 		break;
588 	default:
589 		bfa_sm_fault(ioc, event);
590 	}
591 }
592 
593 /*
594  * IOCPF State Machine
595  */
596 
597 /*
598  * Reset entry actions -- initialize state machine
599  */
600 static void
601 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
602 {
603 	iocpf->retry_count = 0;
604 	iocpf->auto_recover = bfa_auto_recover;
605 }
606 
607 /*
608  * Beginning state. IOC is in reset state.
609  */
610 static void
611 bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
612 {
613 	struct bfa_ioc_s *ioc = iocpf->ioc;
614 
615 	bfa_trc(ioc, event);
616 
617 	switch (event) {
618 	case IOCPF_E_ENABLE:
619 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
620 		break;
621 
622 	case IOCPF_E_STOP:
623 		break;
624 
625 	default:
626 		bfa_sm_fault(ioc, event);
627 	}
628 }
629 
630 /*
631  * Semaphore should be acquired for version check.
632  */
633 static void
634 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
635 {
636 	bfa_ioc_hw_sem_get(iocpf->ioc);
637 }
638 
639 /*
640  * Awaiting h/w semaphore to continue with version check.
641  */
642 static void
643 bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
644 {
645 	struct bfa_ioc_s *ioc = iocpf->ioc;
646 
647 	bfa_trc(ioc, event);
648 
649 	switch (event) {
650 	case IOCPF_E_SEMLOCKED:
651 		if (bfa_ioc_firmware_lock(ioc)) {
652 			if (bfa_ioc_sync_start(ioc)) {
653 				iocpf->retry_count = 0;
654 				bfa_ioc_sync_join(ioc);
655 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
656 			} else {
657 				bfa_ioc_firmware_unlock(ioc);
658 				writel(1, ioc->ioc_regs.ioc_sem_reg);
659 				bfa_sem_timer_start(ioc);
660 			}
661 		} else {
662 			writel(1, ioc->ioc_regs.ioc_sem_reg);
663 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
664 		}
665 		break;
666 
667 	case IOCPF_E_DISABLE:
668 		bfa_sem_timer_stop(ioc);
669 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
670 		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
671 		break;
672 
673 	case IOCPF_E_STOP:
674 		bfa_sem_timer_stop(ioc);
675 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
676 		break;
677 
678 	default:
679 		bfa_sm_fault(ioc, event);
680 	}
681 }
682 
683 /*
684  * Notify enable completion callback.
685  */
686 static void
687 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
688 {
689 	/*
690 	 * Call only the first time sm enters fwmismatch state.
691 	 */
692 	if (iocpf->retry_count == 0)
693 		bfa_ioc_pf_fwmismatch(iocpf->ioc);
694 
695 	iocpf->retry_count++;
696 	bfa_iocpf_timer_start(iocpf->ioc);
697 }
698 
699 /*
700  * Awaiting firmware version match.
701  */
702 static void
703 bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
704 {
705 	struct bfa_ioc_s *ioc = iocpf->ioc;
706 
707 	bfa_trc(ioc, event);
708 
709 	switch (event) {
710 	case IOCPF_E_TIMEOUT:
711 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
712 		break;
713 
714 	case IOCPF_E_DISABLE:
715 		bfa_iocpf_timer_stop(ioc);
716 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
717 		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
718 		break;
719 
720 	case IOCPF_E_STOP:
721 		bfa_iocpf_timer_stop(ioc);
722 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
723 		break;
724 
725 	default:
726 		bfa_sm_fault(ioc, event);
727 	}
728 }
729 
730 /*
731  * Request for semaphore.
732  */
733 static void
734 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
735 {
736 	bfa_ioc_hw_sem_get(iocpf->ioc);
737 }
738 
739 /*
740  * Awaiting semaphore for h/w initialization.
741  */
742 static void
743 bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
744 {
745 	struct bfa_ioc_s *ioc = iocpf->ioc;
746 
747 	bfa_trc(ioc, event);
748 
749 	switch (event) {
750 	case IOCPF_E_SEMLOCKED:
751 		if (bfa_ioc_sync_complete(ioc)) {
752 			bfa_ioc_sync_join(ioc);
753 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
754 		} else {
755 			writel(1, ioc->ioc_regs.ioc_sem_reg);
756 			bfa_sem_timer_start(ioc);
757 		}
758 		break;
759 
760 	case IOCPF_E_DISABLE:
761 		bfa_sem_timer_stop(ioc);
762 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
763 		break;
764 
765 	default:
766 		bfa_sm_fault(ioc, event);
767 	}
768 }
769 
770 static void
771 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
772 {
773 	bfa_iocpf_timer_start(iocpf->ioc);
774 	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
775 }
776 
777 /*
778  * Hardware is being initialized. Interrupts are enabled.
779  * Holding hardware semaphore lock.
780  */
781 static void
782 bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
783 {
784 	struct bfa_ioc_s *ioc = iocpf->ioc;
785 
786 	bfa_trc(ioc, event);
787 
788 	switch (event) {
789 	case IOCPF_E_FWREADY:
790 		bfa_iocpf_timer_stop(ioc);
791 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
792 		break;
793 
794 	case IOCPF_E_INITFAIL:
795 		bfa_iocpf_timer_stop(ioc);
796 		/*
797 		 * !!! fall through !!!
798 		 */
799 
800 	case IOCPF_E_TIMEOUT:
801 		writel(1, ioc->ioc_regs.ioc_sem_reg);
802 		if (event == IOCPF_E_TIMEOUT)
803 			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
804 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
805 		break;
806 
807 	case IOCPF_E_DISABLE:
808 		bfa_iocpf_timer_stop(ioc);
809 		bfa_ioc_sync_leave(ioc);
810 		writel(1, ioc->ioc_regs.ioc_sem_reg);
811 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
812 		break;
813 
814 	default:
815 		bfa_sm_fault(ioc, event);
816 	}
817 }
818 
819 static void
820 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
821 {
822 	bfa_iocpf_timer_start(iocpf->ioc);
823 	bfa_ioc_send_enable(iocpf->ioc);
824 }
825 
826 /*
827  * Host IOC function is being enabled, awaiting response from firmware.
828  * Semaphore is acquired.
829  */
830 static void
831 bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
832 {
833 	struct bfa_ioc_s *ioc = iocpf->ioc;
834 
835 	bfa_trc(ioc, event);
836 
837 	switch (event) {
838 	case IOCPF_E_FWRSP_ENABLE:
839 		bfa_iocpf_timer_stop(ioc);
840 		writel(1, ioc->ioc_regs.ioc_sem_reg);
841 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
842 		break;
843 
844 	case IOCPF_E_INITFAIL:
845 		bfa_iocpf_timer_stop(ioc);
846 		/*
847 		 * !!! fall through !!!
848 		 */
849 
850 	case IOCPF_E_TIMEOUT:
851 		writel(1, ioc->ioc_regs.ioc_sem_reg);
852 		if (event == IOCPF_E_TIMEOUT)
853 			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
854 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
855 		break;
856 
857 	case IOCPF_E_DISABLE:
858 		bfa_iocpf_timer_stop(ioc);
859 		writel(1, ioc->ioc_regs.ioc_sem_reg);
860 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
861 		break;
862 
863 	case IOCPF_E_FWREADY:
864 		bfa_ioc_send_enable(ioc);
865 		break;
866 
867 	default:
868 		bfa_sm_fault(ioc, event);
869 	}
870 }
871 
872 static void
873 bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
874 {
875 	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
876 }
877 
878 static void
879 bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
880 {
881 	struct bfa_ioc_s *ioc = iocpf->ioc;
882 
883 	bfa_trc(ioc, event);
884 
885 	switch (event) {
886 	case IOCPF_E_DISABLE:
887 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
888 		break;
889 
890 	case IOCPF_E_GETATTRFAIL:
891 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
892 		break;
893 
894 	case IOCPF_E_FAIL:
895 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
896 		break;
897 
898 	case IOCPF_E_FWREADY:
899 		if (bfa_ioc_is_operational(ioc)) {
900 			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
901 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
902 		} else {
903 			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
904 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
905 		}
906 		break;
907 
908 	default:
909 		bfa_sm_fault(ioc, event);
910 	}
911 }
912 
913 static void
914 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
915 {
916 	bfa_iocpf_timer_start(iocpf->ioc);
917 	bfa_ioc_send_disable(iocpf->ioc);
918 }
919 
920 /*
921  * IOC is being disabled
922  */
923 static void
924 bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
925 {
926 	struct bfa_ioc_s *ioc = iocpf->ioc;
927 
928 	bfa_trc(ioc, event);
929 
930 	switch (event) {
931 	case IOCPF_E_FWRSP_DISABLE:
932 	case IOCPF_E_FWREADY:
933 		bfa_iocpf_timer_stop(ioc);
934 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
935 		break;
936 
937 	case IOCPF_E_FAIL:
938 		bfa_iocpf_timer_stop(ioc);
939 		/*
940 		 * !!! fall through !!!
941 		 */
942 
943 	case IOCPF_E_TIMEOUT:
944 		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
945 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
946 		break;
947 
948 	case IOCPF_E_FWRSP_ENABLE:
949 		break;
950 
951 	default:
952 		bfa_sm_fault(ioc, event);
953 	}
954 }
955 
956 static void
957 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
958 {
959 	bfa_ioc_hw_sem_get(iocpf->ioc);
960 }
961 
962 /*
963  * IOC hb ack request is being removed.
964  */
965 static void
966 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
967 {
968 	struct bfa_ioc_s *ioc = iocpf->ioc;
969 
970 	bfa_trc(ioc, event);
971 
972 	switch (event) {
973 	case IOCPF_E_SEMLOCKED:
974 		bfa_ioc_sync_leave(ioc);
975 		writel(1, ioc->ioc_regs.ioc_sem_reg);
976 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
977 		break;
978 
979 	case IOCPF_E_FAIL:
980 		break;
981 
982 	default:
983 		bfa_sm_fault(ioc, event);
984 	}
985 }
986 
987 /*
988  * IOC disable completion entry.
989  */
990 static void
991 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
992 {
993 	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
994 }
995 
996 static void
997 bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
998 {
999 	struct bfa_ioc_s *ioc = iocpf->ioc;
1000 
1001 	bfa_trc(ioc, event);
1002 
1003 	switch (event) {
1004 	case IOCPF_E_ENABLE:
1005 		iocpf->retry_count = 0;
1006 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1007 		break;
1008 
1009 	case IOCPF_E_STOP:
1010 		bfa_ioc_firmware_unlock(ioc);
1011 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1012 		break;
1013 
1014 	default:
1015 		bfa_sm_fault(ioc, event);
1016 	}
1017 }
1018 
1019 static void
1020 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1021 {
1022 	bfa_ioc_hw_sem_get(iocpf->ioc);
1023 }
1024 
1025 /*
1026  * Hardware initialization failed.
1027  */
1028 static void
1029 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1030 {
1031 	struct bfa_ioc_s *ioc = iocpf->ioc;
1032 
1033 	bfa_trc(ioc, event);
1034 
1035 	switch (event) {
1036 	case IOCPF_E_SEMLOCKED:
1037 		bfa_ioc_notify_fail(ioc);
1038 		bfa_ioc_sync_ack(ioc);
1039 		iocpf->retry_count++;
1040 		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
1041 			bfa_ioc_sync_leave(ioc);
1042 			writel(1, ioc->ioc_regs.ioc_sem_reg);
1043 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1044 		} else {
1045 			if (bfa_ioc_sync_complete(ioc))
1046 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1047 			else {
1048 				writel(1, ioc->ioc_regs.ioc_sem_reg);
1049 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1050 			}
1051 		}
1052 		break;
1053 
1054 	case IOCPF_E_DISABLE:
1055 		bfa_sem_timer_stop(ioc);
1056 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1057 		break;
1058 
1059 	case IOCPF_E_STOP:
1060 		bfa_sem_timer_stop(ioc);
1061 		bfa_ioc_firmware_unlock(ioc);
1062 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1063 		break;
1064 
1065 	case IOCPF_E_FAIL:
1066 		break;
1067 
1068 	default:
1069 		bfa_sm_fault(ioc, event);
1070 	}
1071 }
1072 
1073 static void
1074 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1075 {
1076 	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
1077 }
1078 
1079 /*
1080  * Hardware initialization failed.
1081  */
1082 static void
1083 bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1084 {
1085 	struct bfa_ioc_s *ioc = iocpf->ioc;
1086 
1087 	bfa_trc(ioc, event);
1088 
1089 	switch (event) {
1090 	case IOCPF_E_DISABLE:
1091 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1092 		break;
1093 
1094 	case IOCPF_E_STOP:
1095 		bfa_ioc_firmware_unlock(ioc);
1096 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1097 		break;
1098 
1099 	default:
1100 		bfa_sm_fault(ioc, event);
1101 	}
1102 }
1103 
1104 static void
1105 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1106 {
1107 	/*
1108 	 * Mark IOC as failed in hardware and stop firmware.
1109 	 */
1110 	bfa_ioc_lpu_stop(iocpf->ioc);
1111 
1112 	/*
1113 	 * Flush any queued up mailbox requests.
1114 	 */
1115 	bfa_ioc_mbox_hbfail(iocpf->ioc);
1116 
1117 	bfa_ioc_hw_sem_get(iocpf->ioc);
1118 }
1119 
1120 static void
1121 bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1122 {
1123 	struct bfa_ioc_s *ioc = iocpf->ioc;
1124 
1125 	bfa_trc(ioc, event);
1126 
1127 	switch (event) {
1128 	case IOCPF_E_SEMLOCKED:
1129 		iocpf->retry_count = 0;
1130 		bfa_ioc_sync_ack(ioc);
1131 		bfa_ioc_notify_fail(ioc);
1132 		if (!iocpf->auto_recover) {
1133 			bfa_ioc_sync_leave(ioc);
1134 			writel(1, ioc->ioc_regs.ioc_sem_reg);
1135 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1136 		} else {
1137 			if (bfa_ioc_sync_complete(ioc))
1138 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1139 			else {
1140 				writel(1, ioc->ioc_regs.ioc_sem_reg);
1141 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1142 			}
1143 		}
1144 		break;
1145 
1146 	case IOCPF_E_DISABLE:
1147 		bfa_sem_timer_stop(ioc);
1148 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1149 		break;
1150 
1151 	case IOCPF_E_FAIL:
1152 		break;
1153 
1154 	default:
1155 		bfa_sm_fault(ioc, event);
1156 	}
1157 }
1158 
1159 static void
1160 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1161 {
1162 }
1163 
1164 /*
1165  * IOC is in failed state.
1166  */
1167 static void
1168 bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1169 {
1170 	struct bfa_ioc_s *ioc = iocpf->ioc;
1171 
1172 	bfa_trc(ioc, event);
1173 
1174 	switch (event) {
1175 	case IOCPF_E_DISABLE:
1176 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1177 		break;
1178 
1179 	default:
1180 		bfa_sm_fault(ioc, event);
1181 	}
1182 }
1183 
1184 /*
1185  *  BFA IOC private functions
1186  */
1187 
1188 static void
1189 bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1190 {
1191 	struct list_head			*qe;
1192 	struct bfa_ioc_hbfail_notify_s	*notify;
1193 
1194 	ioc->cbfn->disable_cbfn(ioc->bfa);
1195 
1196 	/*
1197 	 * Notify common modules registered for notification.
1198 	 */
1199 	list_for_each(qe, &ioc->hb_notify_q) {
1200 		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
1201 		notify->cbfn(notify->cbarg);
1202 	}
1203 }
1204 
1205 bfa_boolean_t
1206 bfa_ioc_sem_get(void __iomem *sem_reg)
1207 {
1208 	u32 r32;
1209 	int cnt = 0;
1210 #define BFA_SEM_SPINCNT	3000
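	/* worst case spin: BFA_SEM_SPINCNT iterations * 2 usec udelay() ~= 6 msec */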
1211 
1212 	r32 = readl(sem_reg);
1213 
1214 	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
1215 		cnt++;
1216 		udelay(2);
1217 		r32 = readl(sem_reg);
1218 	}
1219 
1220 	if (r32 == 0)
1221 		return BFA_TRUE;
1222 
1223 	WARN_ON(cnt >= BFA_SEM_SPINCNT);
1224 	return BFA_FALSE;
1225 }
1226 
1227 static void
1228 bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1229 {
1230 	u32	r32;
1231 
1232 	/*
1233 	 * First read to the semaphore register will return 0, subsequent reads
1234 	 * will return 1. Semaphore is released by writing 1 to the register
1235 	 */
1236 	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1237 	if (r32 == 0) {
1238 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1239 		return;
1240 	}
1241 
1242 	bfa_sem_timer_start(ioc);
1243 }
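
/*
 * Usage sketch for the semaphore protocol described above (illustrative
 * only): a read that returns 0 means the semaphore was acquired; it is
 * released by writing 1 back, as done throughout this file.
 */
#if 0	/* illustrative only, not compiled */
	if (readl(ioc->ioc_regs.ioc_sem_reg) == 0) {
		/* semaphore acquired -- touch shared h/w state here */
		writel(1, ioc->ioc_regs.ioc_sem_reg);	/* release */
	} else {
		/* busy -- retry after BFA_IOC_HWSEM_TOV msecs */
		bfa_sem_timer_start(ioc);
	}
#endif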
1244 
1245 /*
1246  * Initialize LPU local memory (aka secondary memory / SRAM)
1247  */
1248 static void
1249 bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1250 {
1251 	u32	pss_ctl;
1252 	int		i;
1253 #define PSS_LMEM_INIT_TIME  10000
1254 
1255 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1256 	pss_ctl &= ~__PSS_LMEM_RESET;
1257 	pss_ctl |= __PSS_LMEM_INIT_EN;
1258 
1259 	/*
1260 	 * i2c workaround: 12.5 kHz clock
1261 	 */
1262 	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1263 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1264 
1265 	/*
1266 	 * wait for memory initialization to be complete
1267 	 */
1268 	i = 0;
1269 	do {
1270 		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1271 		i++;
1272 	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1273 
1274 	/*
1275 	 * If memory initialization is not successful, IOC timeout will catch
1276 	 * such failures.
1277 	 */
1278 	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1279 	bfa_trc(ioc, pss_ctl);
1280 
1281 	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1282 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1283 }
1284 
1285 static void
1286 bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1287 {
1288 	u32	pss_ctl;
1289 
1290 	/*
1291 	 * Take processor out of reset.
1292 	 */
1293 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1294 	pss_ctl &= ~__PSS_LPU0_RESET;
1295 
1296 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1297 }
1298 
1299 static void
1300 bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1301 {
1302 	u32	pss_ctl;
1303 
1304 	/*
1305 	 * Put processors in reset.
1306 	 */
1307 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1308 	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1309 
1310 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1311 }
1312 
1313 /*
1314  * Get driver and firmware versions.
1315  */
1316 void
1317 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1318 {
1319 	u32	pgnum, pgoff;
1320 	u32	loff = 0;
1321 	int		i;
1322 	u32	*fwsig = (u32 *) fwhdr;
1323 
1324 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1325 	pgoff = PSS_SMEM_PGOFF(loff);
1326 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1327 
1328 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1329 	     i++) {
1330 		fwsig[i] =
1331 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1332 		loff += sizeof(u32);
1333 	}
1334 }
1335 
1336 /*
1337  * Returns TRUE if same.
1338  */
1339 bfa_boolean_t
1340 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1341 {
1342 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
1343 	int i;
1344 
1345 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1346 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1347 
1348 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1349 		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
1350 			bfa_trc(ioc, i);
1351 			bfa_trc(ioc, fwhdr->md5sum[i]);
1352 			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1353 			return BFA_FALSE;
1354 		}
1355 	}
1356 
1357 	bfa_trc(ioc, fwhdr->md5sum[0]);
1358 	return BFA_TRUE;
1359 }
1360 
1361 /*
1362  * Return true if current running version is valid. Firmware signature and
1363  * execution context (driver/bios) must match.
1364  */
1365 static bfa_boolean_t
1366 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1367 {
1368 	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1369 
1370 	bfa_ioc_fwver_get(ioc, &fwhdr);
1371 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1372 		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1373 
1374 	if (fwhdr.signature != drv_fwhdr->signature) {
1375 		bfa_trc(ioc, fwhdr.signature);
1376 		bfa_trc(ioc, drv_fwhdr->signature);
1377 		return BFA_FALSE;
1378 	}
1379 
1380 	if (swab32(fwhdr.param) != boot_env) {
1381 		bfa_trc(ioc, fwhdr.param);
1382 		bfa_trc(ioc, boot_env);
1383 		return BFA_FALSE;
1384 	}
1385 
1386 	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1387 }
1388 
1389 /*
1390  * Conditionally flush any pending message from firmware at start.
1391  */
1392 static void
1393 bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1394 {
1395 	u32	r32;
1396 
1397 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1398 	if (r32)
1399 		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1400 }
1401 
1402 static void
1403 bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1404 {
1405 	enum bfi_ioc_state ioc_fwstate;
1406 	bfa_boolean_t fwvalid;
1407 	u32 boot_type;
1408 	u32 boot_env;
1409 
1410 	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1411 
1412 	if (force)
1413 		ioc_fwstate = BFI_IOC_UNINIT;
1414 
1415 	bfa_trc(ioc, ioc_fwstate);
1416 
1417 	boot_type = BFI_BOOT_TYPE_NORMAL;
1418 	boot_env = BFI_BOOT_LOADER_OS;
1419 
1420 	/*
1421 	 * check if firmware is valid
1422 	 */
1423 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1424 		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1425 
1426 	if (!fwvalid) {
1427 		bfa_ioc_boot(ioc, boot_type, boot_env);
1428 		return;
1429 	}
1430 
1431 	/*
1432 	 * If hardware initialization is in progress (initialized by other IOC),
1433 	 * just wait for an initialization completion interrupt.
1434 	 */
1435 	if (ioc_fwstate == BFI_IOC_INITING) {
1436 		ioc->cbfn->reset_cbfn(ioc->bfa);
1437 		return;
1438 	}
1439 
1440 	/*
1441 	 * If IOC function is disabled and firmware version is same,
1442 	 * just re-enable IOC.
1443 	 *
1444 	 * In the option ROM case the IOC must not be in operational state;
1445 	 * with converged adapters the IOC will already be operational when
1446 	 * the second driver instance is loaded.
1447 	 */
1448 	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1449 
1450 		/*
1451 		 * When using MSI-X any pending firmware ready event should
1452 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
1453 		 */
1454 		bfa_ioc_msgflush(ioc);
1455 		ioc->cbfn->reset_cbfn(ioc->bfa);
1456 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1457 		return;
1458 	}
1459 
1460 	/*
1461 	 * Initialize the h/w for any other states.
1462 	 */
1463 	bfa_ioc_boot(ioc, boot_type, boot_env);
1464 }
1465 
1466 static void
1467 bfa_ioc_timeout(void *ioc_arg)
1468 {
1469 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1470 
1471 	bfa_trc(ioc, 0);
1472 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1473 }
1474 
1475 void
1476 bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1477 {
1478 	u32 *msgp = (u32 *) ioc_msg;
1479 	u32 i;
1480 
1481 	bfa_trc(ioc, msgp[0]);
1482 	bfa_trc(ioc, len);
1483 
1484 	WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1485 
1486 	/*
1487 	 * first write msg to mailbox registers
1488 	 */
1489 	for (i = 0; i < len / sizeof(u32); i++)
1490 		writel(cpu_to_le32(msgp[i]),
1491 			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1492 
1493 	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1494 		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1495 
1496 	/*
1497 	 * write 1 to mailbox CMD to trigger LPU event
1498 	 */
1499 	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1500 	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);	/* flush posted write */
1501 }
1502 
1503 static void
1504 bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1505 {
1506 	struct bfi_ioc_ctrl_req_s enable_req;
1507 	struct timeval tv;
1508 
1509 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1510 		    bfa_ioc_portid(ioc));
1511 	enable_req.ioc_class = ioc->ioc_mc;
1512 	do_gettimeofday(&tv);
1513 	enable_req.tv_sec = cpu_to_be32(tv.tv_sec);	/* host to wire order */
1514 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1515 }
1516 
1517 static void
1518 bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1519 {
1520 	struct bfi_ioc_ctrl_req_s disable_req;
1521 
1522 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1523 		    bfa_ioc_portid(ioc));
1524 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1525 }
1526 
1527 static void
1528 bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1529 {
1530 	struct bfi_ioc_getattr_req_s	attr_req;
1531 
1532 	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1533 		    bfa_ioc_portid(ioc));
1534 	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1535 	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1536 }
1537 
1538 static void
1539 bfa_ioc_hb_check(void *cbarg)
1540 {
1541 	struct bfa_ioc_s  *ioc = cbarg;
1542 	u32	hb_count;
1543 
1544 	hb_count = readl(ioc->ioc_regs.heartbeat);
1545 	if (ioc->hb_count == hb_count) {
1546 		bfa_ioc_recover(ioc);
1547 		return;
1548 	}
1549 	ioc->hb_count = hb_count;
1551 
1552 	bfa_ioc_mbox_poll(ioc);
1553 	bfa_hb_timer_start(ioc);
1554 }
1555 
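/*
 * Start heartbeat monitoring: sample the firmware-incremented heartbeat
 * counter now, then re-check every BFA_IOC_HB_TOV msecs in
 * bfa_ioc_hb_check() above; a counter that has not advanced since the
 * previous sample triggers recovery.
 */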
1556 static void
1557 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1558 {
1559 	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1560 	bfa_hb_timer_start(ioc);
1561 }
1562 
1563 /*
1564  *	Initiate a full firmware download.
1565  */
1566 static void
1567 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1568 		    u32 boot_env)
1569 {
1570 	u32 *fwimg;
1571 	u32 pgnum, pgoff;
1572 	u32 loff = 0;
1573 	u32 chunkno = 0;
1574 	u32 i;
1575 
1576 	/*
1577 	 * Initialize LMEM first before code download
1578 	 */
1579 	bfa_ioc_lmem_init(ioc);
1580 
1581 	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1582 	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1583 
1584 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1585 	pgoff = PSS_SMEM_PGOFF(loff);
1586 
1587 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1588 
1589 	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1590 
1591 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1592 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1593 			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1594 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1595 		}
1596 
1597 		/*
1598 		 * write smem
1599 		 */
1600 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1601 			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1602 
1603 		loff += sizeof(u32);
1604 
1605 		/*
1606 		 * handle page offset wrap around
1607 		 */
1608 		loff = PSS_SMEM_PGOFF(loff);
1609 		if (loff == 0) {
1610 			pgnum++;
1611 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1612 		}
1613 	}
1614 
1615 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1616 			ioc->ioc_regs.host_page_num_fn);
1617 
1618 	/*
1619 	 * Set boot type and boot param at the end.
1620 	 */
1621 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1622 			swab32(boot_type));
1623 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
1624 			swab32(boot_env));
1625 }
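
/*
 * Worked example of the SMEM page-window arithmetic above (hedged; the
 * actual window size is whatever PSS_SMEM_PGOFF() masks to on this
 * ASIC).  Assuming a hypothetical 32 KB window: after the word at local
 * offset 0x7ffc is written, loff advances to 0x8000, PSS_SMEM_PGOFF()
 * wraps it to 0, pgnum is bumped and rewritten to host_page_num_fn, so
 * the next word lands at the start of the following page.  Chunking is
 * analogous: when i crosses a BFA_IOC_FLASH_CHUNK_NO() boundary, the
 * next chunk of the image is fetched via bfa_cb_image_get_chunk().
 */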
1626 
1627 
1628 /*
1629  * Update BFA configuration from firmware configuration.
1630  */
1631 static void
1632 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1633 {
1634 	struct bfi_ioc_attr_s	*attr = ioc->attr;
1635 
1636 	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1637 	attr->card_type     = be32_to_cpu(attr->card_type);
1638 	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1639 
1640 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1641 }
1642 
1643 /*
1644  * Attach time initialization of mbox logic.
1645  */
1646 static void
1647 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1648 {
1649 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1650 	int	mc;
1651 
1652 	INIT_LIST_HEAD(&mod->cmd_q);
1653 	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1654 		mod->mbhdlr[mc].cbfn = NULL;
1655 		mod->mbhdlr[mc].cbarg = ioc->bfa;
1656 	}
1657 }
1658 
1659 /*
1660  * Mbox poll timer -- restarts any pending mailbox requests.
1661  */
1662 static void
1663 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1664 {
1665 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1666 	struct bfa_mbox_cmd_s		*cmd;
1667 	u32			stat;
1668 
1669 	/*
1670 	 * If no command pending, do nothing
1671 	 */
1672 	if (list_empty(&mod->cmd_q))
1673 		return;
1674 
1675 	/*
1676 	 * If previous command is not yet fetched by firmware, do nothing
1677 	 */
1678 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1679 	if (stat)
1680 		return;
1681 
1682 	/*
1683 	 * Enqueue command to firmware.
1684 	 */
1685 	bfa_q_deq(&mod->cmd_q, &cmd);
1686 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1687 }
1688 
1689 /*
1690  * Cleanup any pending requests.
1691  */
1692 static void
1693 bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1694 {
1695 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1696 	struct bfa_mbox_cmd_s		*cmd;
1697 
1698 	while (!list_empty(&mod->cmd_q))
1699 		bfa_q_deq(&mod->cmd_q, &cmd);
1700 }
1701 
1702 /*
1703  * Read data from SMEM to host through PCI memmap
1704  *
1705  * @param[in]	ioc	memory for IOC
1706  * @param[in]	tbuf	app memory to store data from smem
1707  * @param[in]	soff	smem offset
1708  * @param[in]	sz	size of smem in bytes
1709  */
1710 static bfa_status_t
1711 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1712 {
1713 	u32 pgnum, loff;
1714 	__be32 r32;
1715 	int i, len;
1716 	u32 *buf = tbuf;
1717 
1718 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1719 	loff = PSS_SMEM_PGOFF(soff);
1720 	bfa_trc(ioc, pgnum);
1721 	bfa_trc(ioc, loff);
1722 	bfa_trc(ioc, sz);
1723 
1724 	/*
1725 	 *  Hold semaphore to serialize pll init and fwtrc.
1726 	 */
1727 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1728 		bfa_trc(ioc, 0);
1729 		return BFA_STATUS_FAILED;
1730 	}
1731 
1732 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1733 
1734 	len = sz/sizeof(u32);
1735 	bfa_trc(ioc, len);
1736 	for (i = 0; i < len; i++) {
1737 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1738 		buf[i] = be32_to_cpu(r32);
1739 		loff += sizeof(u32);
1740 
1741 		/*
1742 		 * handle page offset wrap around
1743 		 */
1744 		loff = PSS_SMEM_PGOFF(loff);
1745 		if (loff == 0) {
1746 			pgnum++;
1747 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1748 		}
1749 	}
1750 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1751 			ioc->ioc_regs.host_page_num_fn);
1752 	/*
1753 	 *  release semaphore.
1754 	 */
1755 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1756 
1757 	bfa_trc(ioc, pgnum);
1758 	return BFA_STATUS_OK;
1759 }
1760 
1761 /*
1762  * Clear SMEM data from host through PCI memmap
1763  *
1764  * @param[in]	ioc	memory for IOC
1765  * @param[in]	soff	smem offset
1766  * @param[in]	sz	size of smem in bytes
1767  */
1768 static bfa_status_t
1769 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1770 {
1771 	int i, len;
1772 	u32 pgnum, loff;
1773 
1774 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1775 	loff = PSS_SMEM_PGOFF(soff);
1776 	bfa_trc(ioc, pgnum);
1777 	bfa_trc(ioc, loff);
1778 	bfa_trc(ioc, sz);
1779 
1780 	/*
1781 	 *  Hold semaphore to serialize pll init and fwtrc.
1782 	 */
1783 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1784 		bfa_trc(ioc, 0);
1785 		return BFA_STATUS_FAILED;
1786 	}
1787 
1788 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1789 
1790 	len = sz/sizeof(u32); /* len in words */
1791 	bfa_trc(ioc, len);
1792 	for (i = 0; i < len; i++) {
1793 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1794 		loff += sizeof(u32);
1795 
1796 		/*
1797 		 * handle page offset wrap around
1798 		 */
1799 		loff = PSS_SMEM_PGOFF(loff);
1800 		if (loff == 0) {
1801 			pgnum++;
1802 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1803 		}
1804 	}
1805 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1806 			ioc->ioc_regs.host_page_num_fn);
1807 
1808 	/*
1809 	 *  release semaphore.
1810 	 */
1811 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1812 	bfa_trc(ioc, pgnum);
1813 	return BFA_STATUS_OK;
1814 }
1815 
1816 static void
1817 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1818 {
1819 	struct list_head		*qe;
1820 	struct bfa_ioc_hbfail_notify_s	*notify;
1821 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1822 
1823 	/*
1824 	 * Notify driver and common modules registered for notification.
1825 	 */
1826 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
1827 	list_for_each(qe, &ioc->hb_notify_q) {
1828 		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
1829 		notify->cbfn(notify->cbarg);
1830 	}
1831 
1832 	bfa_ioc_debug_save_ftrc(ioc);
1833 
1834 	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1835 		"Heart Beat of IOC has failed\n");
1837 }
1838 
1839 static void
1840 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1841 {
1842 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1843 	/*
1844 	 * Provide enable completion callback.
1845 	 */
1846 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1847 	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1848 		"Running firmware version is incompatible "
1849 		"with the driver version\n");
1850 }
1851 
1852 bfa_status_t
1853 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1854 {
1856 	/*
1857 	 *  Hold semaphore so that nobody can access the chip during init.
1858 	 */
1859 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1860 
1861 	bfa_ioc_pll_init_asic(ioc);
1862 
1863 	ioc->pllinit = BFA_TRUE;
1864 	/*
1865 	 *  release semaphore.
1866 	 */
1867 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1868 
1869 	return BFA_STATUS_OK;
1870 }
1871 
1872 /*
1873  * Interface used by diag module to do firmware boot with memory test
1874  * as the entry vector.
1875  */
1876 void
1877 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
1878 {
1879 	void __iomem *rb;
1880 
1881 	bfa_ioc_stats(ioc, ioc_boots);
1882 
1883 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1884 		return;
1885 
1886 	/*
1887 	 * Initialize IOC state of all functions on a chip reset.
1888 	 */
1889 	rb = ioc->pcidev.pci_bar_kva;
1890 	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1891 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1892 		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1893 	} else {
1894 		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1895 		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1896 	}
1897 
1898 	bfa_ioc_msgflush(ioc);
1899 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
1900 
1901 	/*
1902 	 * Enable interrupts just before starting LPU
1903 	 */
1904 	ioc->cbfn->reset_cbfn(ioc->bfa);
1905 	bfa_ioc_lpu_start(ioc);
1906 }
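
/*
 * A hedged example of the two boot flavors wired through boot_type
 * above (illustrative only): normal operation downloads the regular
 * image, while the diag module can request the memory-test entry
 * vector instead.
 */
#if 0	/* illustrative only, not compiled */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, BFI_BOOT_LOADER_OS);
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_MEMTEST, BFI_BOOT_LOADER_OS);
#endif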
1907 
1908 /*
1909  * Enable/disable IOC failure auto recovery.
1910  */
1911 void
1912 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
1913 {
1914 	bfa_auto_recover = auto_recover;
1915 }
1916 
1917 
1918 
1919 bfa_boolean_t
1920 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
1921 {
1922 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1923 }
1924 
1925 bfa_boolean_t
1926 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
1927 {
1928 	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
1929 
1930 	return ((r32 != BFI_IOC_UNINIT) &&
1931 		(r32 != BFI_IOC_INITING) &&
1932 		(r32 != BFI_IOC_MEMTEST));
1933 }
1934 
1935 void
1936 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
1937 {
1938 	__be32	*msgp = mbmsg;
1939 	u32	r32;
1940 	int		i;
1941 
1942 	/*
1943 	 * read the MBOX msg
1944 	 */
1945 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1946 	     i++) {
1947 		r32 = readl(ioc->ioc_regs.lpu_mbox +
1948 				   i * sizeof(u32));
1949 		msgp[i] = cpu_to_be32(r32);
1950 	}
1951 
1952 	/*
1953 	 * turn off mailbox interrupt by clearing mailbox status
1954 	 */
1955 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1956 	readl(ioc->ioc_regs.lpu_mbox_cmd);
1957 }
1958 
1959 void
1960 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1961 {
1962 	union bfi_ioc_i2h_msg_u	*msg;
1963 	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
1964 
1965 	msg = (union bfi_ioc_i2h_msg_u *) m;
1966 
1967 	bfa_ioc_stats(ioc, ioc_isrs);
1968 
1969 	switch (msg->mh.msg_id) {
1970 	case BFI_IOC_I2H_HBEAT:
1971 		break;
1972 
1973 	case BFI_IOC_I2H_READY_EVENT:
1974 		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1975 		break;
1976 
1977 	case BFI_IOC_I2H_ENABLE_REPLY:
1978 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1979 		break;
1980 
1981 	case BFI_IOC_I2H_DISABLE_REPLY:
1982 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1983 		break;
1984 
1985 	case BFI_IOC_I2H_GETATTR_REPLY:
1986 		bfa_ioc_getattr_reply(ioc);
1987 		break;
1988 
1989 	default:
1990 		bfa_trc(ioc, msg->mh.msg_id);
1991 		WARN_ON(1);
1992 	}
1993 }
1994 
1995 /*
1996  * IOC attach time initialization and setup.
1997  *
1998  * @param[in]	ioc	memory for IOC
1999  * @param[in]	bfa	driver instance structure
2000  */
2001 void
2002 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2003 	       struct bfa_timer_mod_s *timer_mod)
2004 {
2005 	ioc->bfa	= bfa;
2006 	ioc->cbfn	= cbfn;
2007 	ioc->timer_mod	= timer_mod;
2008 	ioc->fcmode	= BFA_FALSE;
2009 	ioc->pllinit	= BFA_FALSE;
2010 	ioc->dbg_fwsave_once = BFA_TRUE;
2011 	ioc->iocpf.ioc	= ioc;
2012 
2013 	bfa_ioc_mbox_attach(ioc);
2014 	INIT_LIST_HEAD(&ioc->hb_notify_q);
2015 
2016 	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2017 	bfa_fsm_send_event(ioc, IOC_E_RESET);
2018 }
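
/*
 * Typical IOC bring-up ordering from a driver probe path (a hedged
 * sketch assembled from the entry points in this file; error handling,
 * the surrounding driver structures, and the message class chosen are
 * assumptions for illustration):
 */
#if 0	/* illustrative only, not compiled */
	bfa_ioc_attach(ioc, bfa, &ioc_cbfn, &timer_mod);
	bfa_ioc_pci_init(ioc, pcidev, BFI_MC_IOCFC);
	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
	bfa_ioc_debug_memclaim(ioc, dbg_fwsave);	/* before enable */
	bfa_ioc_enable(ioc);	/* completes via cbfn->enable_cbfn() */
#endif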
2019 
2020 /*
2021  * Driver detach time IOC cleanup.
2022  */
2023 void
2024 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2025 {
2026 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2027 }
2028 
2029 /*
2030  * Setup IOC PCI properties.
2031  *
2032  * @param[in]	pcidev	PCI device information for this IOC
2033  */
2034 void
2035 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2036 		 enum bfi_mclass mc)
2037 {
2038 	ioc->ioc_mc	= mc;
2039 	ioc->pcidev	= *pcidev;
2040 	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
2041 	ioc->cna	= ioc->ctdev && !ioc->fcmode;
2042 
2043 	/*
2044 	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2045 	 */
2046 	if (ioc->ctdev)
2047 		bfa_ioc_set_ct_hwif(ioc);
2048 	else
2049 		bfa_ioc_set_cb_hwif(ioc);
2050 
2051 	bfa_ioc_map_port(ioc);
2052 	bfa_ioc_reg_init(ioc);
2053 }
2054 
2055 /*
2056  * Initialize IOC dma memory
2057  *
2058  * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2059  * @param[in]	dm_pa	physical address of IOC dma memory
2060  */
2061 void
2062 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2063 {
2064 	/*
2065 	 * dma memory for firmware attribute
2066 	 */
2067 	ioc->attr_dma.kva = dm_kva;
2068 	ioc->attr_dma.pa = dm_pa;
2069 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2070 }
2071 
2072 void
2073 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2074 {
2075 	bfa_ioc_stats(ioc, ioc_enables);
2076 	ioc->dbg_fwsave_once = BFA_TRUE;
2077 
2078 	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2079 }
2080 
2081 void
2082 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2083 {
2084 	bfa_ioc_stats(ioc, ioc_disables);
2085 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2086 }
2087 
2088 
2089 /*
2090  * Initialize memory for saving firmware trace. The driver must initialize
2091  * trace memory before calling bfa_ioc_enable().
2092  */
2093 void
2094 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2095 {
2096 	ioc->dbg_fwsave	    = dbg_fwsave;
2097 	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
2098 }
2099 
2100 /*
2101  * Register mailbox message handler functions
2102  *
2103  * @param[in]	ioc		IOC instance
2104  * @param[in]	mcfuncs		message class handler functions
2105  */
2106 void
2107 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2108 {
2109 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2110 	int				mc;
2111 
2112 	for (mc = 0; mc < BFI_MC_MAX; mc++)
2113 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2114 }
2115 
2116 /*
2117  * Register mailbox message handler function, to be called by common modules
2118  */
2119 void
2120 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2121 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2122 {
2123 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2124 
2125 	mod->mbhdlr[mc].cbfn	= cbfn;
2126 	mod->mbhdlr[mc].cbarg	= cbarg;
2127 }
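/*
 * Illustrative sketch (not compiled into the driver): registering a
 * per-class handler with bfa_ioc_mbox_regisr(). The handler signature
 * matches the dispatch in bfa_ioc_mbox_isr() below; the port handler
 * and the use of BFI_MC_PORT are hypothetical examples.
 */
#if 0
static void
example_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
	/* decode m->mh.msg_id and handle the event */
}

static void
example_register(struct bfa_ioc_s *ioc, void *port)
{
	bfa_ioc_mbox_regisr(ioc, BFI_MC_PORT, example_port_isr, port);
}
#endif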
2128 
2129 /*
 * Queue a mailbox command request to firmware. If the mailbox is busy,
 * the command is queued and sent later by the poll timer. It is the
 * caller's responsibility to serialize access.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
2135  */
2136 void
2137 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2138 {
2139 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2140 	u32			stat;
2141 
2142 	/*
2143 	 * If a previous command is pending, queue new command
2144 	 */
2145 	if (!list_empty(&mod->cmd_q)) {
2146 		list_add_tail(&cmd->qe, &mod->cmd_q);
2147 		return;
2148 	}
2149 
2150 	/*
2151 	 * If mailbox is busy, queue command for poll timer
2152 	 */
2153 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2154 	if (stat) {
2155 		list_add_tail(&cmd->qe, &mod->cmd_q);
2156 		return;
2157 	}
2158 
2159 	/*
2160 	 * mailbox is free -- queue command to firmware
2161 	 */
2162 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2163 }
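/*
 * Illustrative sketch (not compiled into the driver): building and
 * queueing a mailbox command, mirroring bfa_ioc_send_fwsync() further
 * below. Note that cmd may be linked onto the pending queue, so in
 * general it must remain valid until the poll timer sends it.
 */
#if 0
static void
example_send_dbg_sync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;	/* see lifetime note above */
	struct bfi_ioc_ctrl_req_s *req =
			(struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_queue(ioc, &cmd);
}
#endif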
2164 
2165 /*
2166  * Handle mailbox interrupts
2167  */
2168 void
2169 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2170 {
2171 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2172 	struct bfi_mbmsg_s		m;
2173 	int				mc;
2174 
2175 	bfa_ioc_msgget(ioc, &m);
2176 
2177 	/*
2178 	 * Treat IOC message class as special.
2179 	 */
2180 	mc = m.mh.msg_class;
2181 	if (mc == BFI_MC_IOC) {
2182 		bfa_ioc_isr(ioc, &m);
2183 		return;
2184 	}
2185 
	/* mbhdlr[] has BFI_MC_MAX entries; ignore out-of-range classes */
	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;
2188 
2189 	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2190 }
2191 
2192 void
2193 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2194 {
2195 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2196 }
2197 
2198 void
2199 bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
2200 {
2201 	ioc->fcmode  = BFA_TRUE;
2202 	ioc->port_id = bfa_ioc_pcifn(ioc);
2203 }
2204 
2205 /*
2206  * return true if IOC is disabled
2207  */
2208 bfa_boolean_t
2209 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2210 {
2211 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2212 		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2213 }
2214 
2215 /*
 * return true if the running IOC firmware does not match the driver's.
2217  */
2218 bfa_boolean_t
2219 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2220 {
2221 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2222 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2223 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2224 }
2225 
2226 #define bfa_ioc_state_disabled(__sm)		\
2227 	(((__sm) == BFI_IOC_UNINIT) ||		\
2228 	 ((__sm) == BFI_IOC_INITING) ||		\
2229 	 ((__sm) == BFI_IOC_HWINIT) ||		\
2230 	 ((__sm) == BFI_IOC_DISABLED) ||	\
2231 	 ((__sm) == BFI_IOC_FAIL) ||		\
2232 	 ((__sm) == BFI_IOC_CFG_DISABLED))
2233 
2234 /*
2235  * Check if adapter is disabled -- both IOCs should be in a disabled
2236  * state.
2237  */
2238 bfa_boolean_t
2239 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2240 {
2241 	u32	ioc_state;
2242 	void __iomem *rb = ioc->pcidev.pci_bar_kva;
2243 
2244 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2245 		return BFA_FALSE;
2246 
2247 	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
2248 	if (!bfa_ioc_state_disabled(ioc_state))
2249 		return BFA_FALSE;
2250 
2251 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2252 		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
2253 		if (!bfa_ioc_state_disabled(ioc_state))
2254 			return BFA_FALSE;
2255 	}
2256 
2257 	return BFA_TRUE;
2258 }
2259 
2260 /*
2261  * Reset IOC fwstate registers.
2262  */
2263 void
2264 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2265 {
2266 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2267 	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2268 }
2269 
#define BFA_MFG_NAME "Brocade"

void
2272 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2273 			 struct bfa_adapter_attr_s *ad_attr)
2274 {
2275 	struct bfi_ioc_attr_s	*ioc_attr;
2276 
2277 	ioc_attr = ioc->attr;
2278 
2279 	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2280 	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2281 	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2282 	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2283 	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2284 		      sizeof(struct bfa_mfg_vpd_s));
2285 
2286 	ad_attr->nports = bfa_ioc_get_nports(ioc);
2287 	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2288 
2289 	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2290 	/* For now, model descr uses same model string */
2291 	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2292 
2293 	ad_attr->card_type = ioc_attr->card_type;
2294 	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2295 
2296 	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2297 		ad_attr->prototype = 1;
2298 	else
2299 		ad_attr->prototype = 0;
2300 
2301 	ad_attr->pwwn = ioc->attr->pwwn;
2302 	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2303 
2304 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2305 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2306 	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2307 	ad_attr->asic_rev = ioc_attr->asic_rev;
2308 
2309 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2310 
2311 	ad_attr->cna_capable = ioc->cna;
2312 	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
2313 				!ad_attr->is_mezz;
2314 }
2315 
2316 enum bfa_ioc_type_e
2317 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2318 {
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;

	/* default to LL; warn if the message class is unexpected */
	WARN_ON(ioc->ioc_mc != BFI_MC_LL);
	return BFA_IOC_TYPE_LL;
2329 }
2330 
2331 void
2332 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2333 {
2334 	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2335 	memcpy((void *)serial_num,
2336 			(void *)ioc->attr->brcd_serialnum,
2337 			BFA_ADAPTER_SERIAL_NUM_LEN);
2338 }
2339 
2340 void
2341 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2342 {
2343 	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2344 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2345 }
2346 
2347 void
2348 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2349 {
2350 	WARN_ON(!chip_rev);
2351 
2352 	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2353 
	snprintf(chip_rev, BFA_IOC_CHIP_REV_LEN, "Rev-%c",
		 ioc->attr->asic_rev);
2360 }
2361 
2362 void
2363 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2364 {
2365 	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2366 	memcpy(optrom_ver, ioc->attr->optrom_version,
2367 		      BFA_VERSION_LEN);
2368 }
2369 
2370 void
2371 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2372 {
2373 	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2374 	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2375 }
2376 
2377 void
2378 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2379 {
2380 	struct bfi_ioc_attr_s	*ioc_attr;
2381 
2382 	WARN_ON(!model);
2383 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2384 
2385 	ioc_attr = ioc->attr;
2386 
2387 	/*
2388 	 * model name
2389 	 */
2390 	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2391 		BFA_MFG_NAME, ioc_attr->card_type);
2392 }
2393 
2394 enum bfa_ioc_state
2395 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2396 {
2397 	enum bfa_iocpf_state iocpf_st;
2398 	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2399 
2400 	if (ioc_st == BFA_IOC_ENABLING ||
2401 		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2402 
2403 		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2404 
2405 		switch (iocpf_st) {
2406 		case BFA_IOCPF_SEMWAIT:
2407 			ioc_st = BFA_IOC_SEMWAIT;
2408 			break;
2409 
2410 		case BFA_IOCPF_HWINIT:
2411 			ioc_st = BFA_IOC_HWINIT;
2412 			break;
2413 
2414 		case BFA_IOCPF_FWMISMATCH:
2415 			ioc_st = BFA_IOC_FWMISMATCH;
2416 			break;
2417 
2418 		case BFA_IOCPF_FAIL:
2419 			ioc_st = BFA_IOC_FAIL;
2420 			break;
2421 
2422 		case BFA_IOCPF_INITFAIL:
2423 			ioc_st = BFA_IOC_INITFAIL;
2424 			break;
2425 
2426 		default:
2427 			break;
2428 		}
2429 	}
2430 
2431 	return ioc_st;
2432 }
2433 
2434 void
2435 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2436 {
2437 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2438 
2439 	ioc_attr->state = bfa_ioc_get_state(ioc);
2440 	ioc_attr->port_id = ioc->port_id;
2441 
2442 	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2443 
2444 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2445 
2446 	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2447 	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2448 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2449 }
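/*
 * Illustrative sketch (not compiled into the driver): a caller querying
 * the consolidated IOC attributes, e.g. from an ioctl path. The stack
 * buffer and pr_info() reporting are hypothetical.
 */
#if 0
static void
example_report_ioc(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_attr_s attr;

	bfa_ioc_get_attr(ioc, &attr);
	pr_info("ioc port %d: state %d type %d chip %s\n", attr.port_id,
		attr.state, attr.ioc_type, attr.pci_attr.chip_rev);
}
#endif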
2450 
2451 mac_t
2452 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2453 {
2454 	/*
2455 	 * Check the IOC type and return the appropriate MAC
2456 	 */
2457 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2458 		return ioc->attr->fcoe_mac;
2459 	else
2460 		return ioc->attr->mac;
2461 }
2462 
2463 mac_t
2464 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2465 {
2466 	mac_t	m;
2467 
2468 	m = ioc->attr->mfg_mac;
2469 	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2470 		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2471 	else
2472 		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2473 			bfa_ioc_pcifn(ioc));
2474 
2475 	return m;
2476 }
2477 
2478 bfa_boolean_t
2479 bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
2480 {
2481 	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
2482 }
2483 
2484 /*
2485  * Retrieve saved firmware trace from a prior IOC failure.
2486  */
2487 bfa_status_t
2488 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2489 {
2490 	int	tlen;
2491 
2492 	if (ioc->dbg_fwsave_len == 0)
2493 		return BFA_STATUS_ENOFSAVE;
2494 
2495 	tlen = *trclen;
2496 	if (tlen > ioc->dbg_fwsave_len)
2497 		tlen = ioc->dbg_fwsave_len;
2498 
2499 	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2500 	*trclen = tlen;
2501 	return BFA_STATUS_OK;
2502 }
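/*
 * Illustrative sketch (not compiled into the driver): fetching the
 * trace saved by bfa_ioc_debug_save_ftrc() after a failure. trclen is
 * in/out: pass in the buffer size, get back the bytes copied.
 */
#if 0
static bfa_status_t
example_fetch_fwsave(struct bfa_ioc_s *ioc, void *buf, int buflen)
{
	bfa_status_t status;
	int tlen = buflen;

	status = bfa_ioc_debug_fwsave(ioc, buf, &tlen);
	if (status == BFA_STATUS_OK)
		pr_info("fwsave: copied %d bytes\n", tlen);
	return status;
}
#endif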
2503 
2504 
2505 /*
 * Read the current firmware trace from IOC shared memory (smem).
2507  */
2508 bfa_status_t
2509 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2510 {
2511 	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2512 	int tlen;
2513 	bfa_status_t status;
2514 
2515 	bfa_trc(ioc, *trclen);
2516 
2517 	tlen = *trclen;
2518 	if (tlen > BFA_DBG_FWTRC_LEN)
2519 		tlen = BFA_DBG_FWTRC_LEN;
2520 
2521 	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2522 	*trclen = tlen;
2523 	return status;
2524 }
2525 
2526 static void
2527 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2528 {
2529 	struct bfa_mbox_cmd_s cmd;
2530 	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2531 
2532 	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2533 		    bfa_ioc_portid(ioc));
2534 	req->ioc_class = ioc->ioc_mc;
2535 	bfa_ioc_mbox_queue(ioc, &cmd);
2536 }
2537 
2538 static void
2539 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2540 {
2541 	u32 fwsync_iter = 1000;
2542 
2543 	bfa_ioc_send_fwsync(ioc);
2544 
2545 	/*
	 * After sending the fw sync mbox command, wait for it to
	 * take effect. We do not wait for a response because
	 *    1. the fw_sync mbox cmd has no response, and
	 *    2. even if it did, interrupts might not be enabled
	 *	 when this function is called.
	 * So just keep checking whether any mbox cmd is pending and,
	 * after a reasonable number of iterations, proceed anyway.
	 * It is possible that the fw has crashed and the mbox command
	 * will never be acknowledged.
2555 	 */
2556 	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2557 		fwsync_iter--;
2558 }
2559 
2560 /*
2561  * Dump firmware smem
2562  */
2563 bfa_status_t
2564 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2565 				u32 *offset, int *buflen)
2566 {
2567 	u32 loff;
2568 	int dlen;
2569 	bfa_status_t status;
2570 	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2571 
2572 	if (*offset >= smem_len) {
2573 		*offset = *buflen = 0;
2574 		return BFA_STATUS_EINVAL;
2575 	}
2576 
2577 	loff = *offset;
2578 	dlen = *buflen;
2579 
2580 	/*
	 * On the first smem read, sync smem before proceeding;
	 * there is no need to sync again before each chunk.
2583 	 */
2584 	if (loff == 0)
2585 		bfa_ioc_fwsync(ioc);
2586 
2587 	if ((loff + dlen) >= smem_len)
2588 		dlen = smem_len - loff;
2589 
2590 	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2591 
2592 	if (status != BFA_STATUS_OK) {
2593 		*offset = *buflen = 0;
2594 		return status;
2595 	}
2596 
2597 	*offset += dlen;
2598 
2599 	if (*offset >= smem_len)
2600 		*offset = 0;
2601 
2602 	*buflen = dlen;
2603 
2604 	return status;
2605 }
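/*
 * Illustrative sketch (not compiled into the driver): dumping all of
 * firmware smem in chunks. *offset advances on each call and wraps to
 * zero once the end of smem is reached; *buflen returns the bytes
 * actually read. The chunk buffer and its size are hypothetical.
 */
#if 0
static void
example_dump_fwcore(struct bfa_ioc_s *ioc, void *chunk, int chunksz)
{
	u32 off = 0;
	int len;

	do {
		len = chunksz;
		if (bfa_ioc_debug_fwcore(ioc, chunk, &off, &len) !=
		    BFA_STATUS_OK)
			break;
		/* consume len bytes from chunk here */
	} while (off != 0);
}
#endif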
2606 
2607 /*
2608  * Firmware statistics
2609  */
2610 bfa_status_t
2611 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2612 {
	u32 loff = BFI_IOC_FWSTATS_OFF +
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2615 	int tlen;
2616 	bfa_status_t status;
2617 
2618 	if (ioc->stats_busy) {
2619 		bfa_trc(ioc, ioc->stats_busy);
2620 		return BFA_STATUS_DEVBUSY;
2621 	}
2622 	ioc->stats_busy = BFA_TRUE;
2623 
2624 	tlen = sizeof(struct bfa_fw_stats_s);
2625 	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2626 
2627 	ioc->stats_busy = BFA_FALSE;
2628 	return status;
2629 }
2630 
2631 bfa_status_t
2632 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2633 {
	u32 loff = BFI_IOC_FWSTATS_OFF +
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2636 	int tlen;
2637 	bfa_status_t status;
2638 
2639 	if (ioc->stats_busy) {
2640 		bfa_trc(ioc, ioc->stats_busy);
2641 		return BFA_STATUS_DEVBUSY;
2642 	}
2643 	ioc->stats_busy = BFA_TRUE;
2644 
2645 	tlen = sizeof(struct bfa_fw_stats_s);
2646 	status = bfa_ioc_smem_clr(ioc, loff, tlen);
2647 
2648 	ioc->stats_busy = BFA_FALSE;
2649 	return status;
2650 }
2651 
2652 /*
2653  * Save firmware trace if configured.
2654  */
2655 static void
2656 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2657 {
2658 	int		tlen;
2659 
2660 	if (ioc->dbg_fwsave_once) {
2661 		ioc->dbg_fwsave_once = BFA_FALSE;
2662 		if (ioc->dbg_fwsave_len) {
2663 			tlen = ioc->dbg_fwsave_len;
2664 			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2665 		}
2666 	}
2667 }
2668 
2669 /*
2670  * Firmware failure detected. Start recovery actions.
2671  */
2672 static void
2673 bfa_ioc_recover(struct bfa_ioc_s *ioc)
2674 {
2675 	bfa_ioc_stats(ioc, ioc_hbfails);
2676 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2677 }
2678 
/*
 * Validate WWNs in the retrieved IOC attributes. LL IOCs carry no
 * WWNs, and no checks are currently implemented for the other IOC
 * types, so this is effectively a placeholder.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
2685 
2686 /*
2687  *  BFA IOC PF private functions
2688  */
2689 static void
2690 bfa_iocpf_timeout(void *ioc_arg)
2691 {
2692 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2693 
2694 	bfa_trc(ioc, 0);
2695 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2696 }
2697 
2698 static void
2699 bfa_iocpf_sem_timeout(void *ioc_arg)
2700 {
2701 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2702 
2703 	bfa_ioc_hw_sem_get(ioc);
2704 }
2705 
2706 /*
2707  *  bfa timer function
2708  */
2709 void
2710 bfa_timer_beat(struct bfa_timer_mod_s *mod)
2711 {
2712 	struct list_head *qh = &mod->timer_q;
2713 	struct list_head *qe, *qe_next;
2714 	struct bfa_timer_s *elem;
2715 	struct list_head timedout_q;
2716 
2717 	INIT_LIST_HEAD(&timedout_q);
2718 
2719 	qe = bfa_q_next(qh);
2720 
2721 	while (qe != qh) {
2722 		qe_next = bfa_q_next(qe);
2723 
2724 		elem = (struct bfa_timer_s *) qe;
2725 		if (elem->timeout <= BFA_TIMER_FREQ) {
2726 			elem->timeout = 0;
2727 			list_del(&elem->qe);
2728 			list_add_tail(&elem->qe, &timedout_q);
2729 		} else {
2730 			elem->timeout -= BFA_TIMER_FREQ;
2731 		}
2732 
2733 		qe = qe_next;	/* go to next elem */
2734 	}
2735 
2736 	/*
2737 	 * Pop all the timeout entries
2738 	 */
2739 	while (!list_empty(&timedout_q)) {
2740 		bfa_q_deq(&timedout_q, &elem);
2741 		elem->timercb(elem->arg);
2742 	}
2743 }
2744 
2745 /*
2746  * Should be called with lock protection
2747  */
2748 void
2749 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2750 		    void (*timercb) (void *), void *arg, unsigned int timeout)
{
	WARN_ON(timercb == NULL);
2754 	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
2755 
2756 	timer->timeout = timeout;
2757 	timer->timercb = timercb;
2758 	timer->arg = arg;
2759 
2760 	list_add_tail(&timer->qe, &mod->timer_q);
2761 }
2762 
2763 /*
2764  * Should be called with lock protection
2765  */
2766 void
2767 bfa_timer_stop(struct bfa_timer_s *timer)
2768 {
2769 	WARN_ON(list_empty(&timer->qe));
2770 
2771 	list_del(&timer->qe);
2772 }
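/*
 * Illustrative sketch (not compiled into the driver): arming a one-shot
 * timer against a timer module. bfa_timer_beat() must be invoked
 * periodically (every BFA_TIMER_FREQ msecs) for timeouts to fire; the
 * callback and the 2x timeout value below are hypothetical.
 */
#if 0
static void
example_timeout_cb(void *arg)
{
	/* expiry action; the timer was dequeued before this callback */
}

static void
example_arm_timer(struct bfa_timer_mod_s *mod, struct bfa_timer_s *tmr,
		  void *arg)
{
	bfa_timer_begin(mod, tmr, example_timeout_cb, arg,
			2 * BFA_TIMER_FREQ);
}
#endif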
2773