/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/**
 * IOC local definitions
 */

/**
 * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			 u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/**
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
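
/*
 * Note: each entry above pairs a state-handler function with its external
 * BFA_IOC_* state code. A minimal sketch of how such a table is typically
 * consumed (assuming a lookup helper along the lines of bfa_sm_to_state(),
 * which is not defined in this file):
 *
 *	enum bfa_ioc_state
 *	bfa_ioc_get_state(struct bfa_ioc *ioc)
 *	{
 *		// walk ioc_sm_table[] until the current handler matches,
 *		// then report the corresponding BFA_IOC_* code
 *		return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
 *	}
 */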

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/**
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};

/**
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/**
 * IOC State Machine
 */

/**
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/**
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Send get-attribute request to firmware and arm the IOC timer.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC hardware failure.
 */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Notify enable completion callback
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/**
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/**
 * @brief
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/**
 * BFA IOC private functions
 */

/**
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return true;

	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}
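
/*
 * Usage sketch for the raw semaphore helpers above -- a hedged example,
 * mirroring how bfa_ioc_pll_init() later in this file brackets chip access
 * (the register name ioc_init_sem_reg comes from struct bfa_ioc):
 *
 *	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		// ... touch chip registers while holding the semaphore ...
 *		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */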

static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_UNINIT)
		return;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
		return;

	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32	r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/**
 * Get driver and firmware versions.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32	pgnum;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if the driver and firmware versions are the same.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
			return false;
	}

	return true;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

/**
 * @img ioc_init_logic.jpg
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}

/**
 * @brief
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
					ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			+ BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_ENV_OFF)));
}

static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * BFA ioc enable reply by firmware
 */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
			u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}

/**
 * @brief
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop  = ntohl(attr->adapter_prop);
	attr->card_type     = ntohl(attr->card_type);
	attr->maxfrsize	    = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/**
	 * Give a callback to the client, indicating that the command is sent
	 */
	if (cmd->cbfn) {
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
}

/**
 * IOCPF to IOC interface
 */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}

/**
 * IOC public
 */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;
	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
		u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}

static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int	i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return false;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
				i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return true;
}

static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->fcmode	= false;
	ioc->pllinit	= false;
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

	/* Done with detach, empty the notify_q. */
	INIT_LIST_HEAD(&ioc->notify_q);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		 enum bfi_pcifn_class clscode)
{
	ioc->clscode	= clscode;
	ioc->pcidev	= *pcidev;

	/**
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode  = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case PCI_DEVICE_ID_BROCADE_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
			pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
			ioc->fcmode = true;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		BUG_ON(1);
	}

	/**
	 * Set asic specific interfaces.
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_nw_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_nw_ioc_set_ct2_hwif(ioc);
		bfa_nw_ioc_ct2_poweron(ioc);
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}

void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}

/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
			bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	cmd->cbfn = cbfn;
	cmd->cbarg = cbarg;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	return false;
}
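
/*
 * Caller-side sketch for bfa_nw_ioc_mbox_queue() -- illustrative only;
 * my_msg, my_done and my_arg are hypothetical names. The caller owns the
 * bfa_mbox_cmd storage and must serialize queueing as noted above:
 *
 *	struct bfa_mbox_cmd cmd;
 *
 *	memcpy(cmd.msg, &my_msg, sizeof(my_msg));	// message to firmware
 *	if (bfa_nw_ioc_mbox_queue(ioc, &cmd, my_done, my_arg))
 *		;	// mailbox busy: queued, my_done() runs when sent
 *	// on false, the command went straight to the mailbox registers
 */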

/**
 * Handle mailbox interrupts
 */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int				mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/**
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/**
	 * Try to send pending mailbox commands
	 */
	bfa_ioc_mbox_poll(ioc);
}

void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/**
 * Return true if IOC is disabled
 */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
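
/*
 * Editorial usage sketch: a teardown path that cannot block on a callback
 * can pair bfa_nw_ioc_disable() with this predicate. The 10 ms poll
 * interval is illustrative, not from the original driver; <linux/delay.h>
 * is assumed.
 */
static void __maybe_unused
example_disable_and_wait(struct bfa_ioc *ioc)
{
	bfa_nw_ioc_disable(ioc);

	/* disable completes asynchronously via the IOC state machine */
	while (!bfa_nw_ioc_is_disabled(ioc))
		msleep(10);
}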

/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}
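
/*
 * Editorial usage sketch: enlisting for IOC events. bfa_ioc_notify_init()
 * is assumed from bfa_ioc.h at this revision; "example_ioc_event_cbfn"
 * and "example_enlist" are hypothetical. The notify object must live as
 * long as the registration.
 */
static void __maybe_unused
example_ioc_event_cbfn(void *cbarg, enum bfa_ioc_event event)
{
	/* react to enable/disable/failure events for this consumer */
}

static void __maybe_unused
example_enlist(struct bfa_ioc *ioc, struct bfa_ioc_notify *notify,
	       void *cbarg)
{
	bfa_ioc_notify_init(notify, example_ioc_event_cbfn, cbarg);
	bfa_nw_ioc_notify_register(ioc, notify);
}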

#define BFA_MFG_NAME "Brocade"

static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, the model description uses the same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}

static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy(serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!chip_rev);

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	/* e.g. "Rev-A" for ASIC revision 'A' */
	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!model);
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}

static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}
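
/*
 * Editorial note: bfa_ioc_get_state() reports a two-level view. When the
 * top-level IOC FSM is in a transitional or failed state, the finer-grained
 * IOCPF (PF bootstrap) FSM state is substituted, so callers see e.g.
 * SEMWAIT or FWMISMATCH rather than a generic ENABLING.
 */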

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
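
/*
 * Editorial usage sketch: a query path such as an ethtool handler. The
 * attribute block is heap-allocated because struct bfa_ioc_attr is large;
 * "example_query_attr" is hypothetical and <linux/slab.h> is assumed.
 */
static void __maybe_unused
example_query_attr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_attr *attr = kzalloc(sizeof(*attr), GFP_KERNEL);

	if (!attr)
		return;

	bfa_nw_ioc_get_attr(ioc, attr);
	pr_info("IOC state %d model %s\n", attr->state,
		attr->adapter_attr.model);
	kfree(attr);
}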

/**
 * WWN public
 */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	/* WWN checks do not apply to Ethernet (LL) IOCs; currently a no-op */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}

/**
 * @dg hal_iocpf_pvt BFA IOC PF private functions
 * @{
 */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc  *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_nw_iocpf_timeout(ioc);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}
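
/*
 * Editorial note on the polling arithmetic above: poll_time advances by
 * BFA_IOC_POLL_TOV on every timer tick until it reaches BFA_IOC_TOV.
 * Assuming the values defined in bfa_ioc.h at this revision (200 ms and
 * 3000 ms respectively -- verify against the header), the firmware state
 * register is sampled roughly 3000 / 200 = 15 times before the wait is
 * treated as a timeout.
 */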
2474