/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
#include "bfi.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	((__sm) == BFI_IOC_INITING) ||		\
	((__sm) == BFI_IOC_HWINIT) ||		\
	((__sm) == BFI_IOC_DISABLED) ||		\
	((__sm) == BFI_IOC_FAIL) ||		\
	((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
				struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
				struct bfa_ioc_s *ioc,
				struct bfi_ioc_image_hdr_s *base_fwhdr);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*  IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
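
/*
 * Note: bfa_fsm_state_decl() (see bfa_cs.h) expands each line above into
 * two static prototypes -- the event handler and its entry action.
 * Roughly (illustrative sketch, not the exact macro text):
 *
 *	static void bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc,
 *					enum ioc_event event);
 *	static void bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc);
 */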

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
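
/*
 * ioc_sm_table maps a state handler back to its externally visible
 * enum bfa_ioc_state. A typical lookup (sketch; the helper lives in
 * bfa_cs.h) is:
 *
 *	enum bfa_ioc_state st =
 *		bfa_sm_to_state(ioc_sm_table, (bfa_sm_t)ioc->fsm);
 */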

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
	case IOC_E_HWFAILED:
		/*
		 * HB failure / HW error notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* Ignore - already in hwfail state */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	bfa_ioc_ownership_reset(iocpf->ioc);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 *  BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}
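
/*
 * Note: with BFA_SEM_SPINCNT polls of 2 usecs each, the busy-wait above
 * gives up after roughly 6 msecs before reporting the semaphore busy.
 */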

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
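
/*
 * The h/w semaphore is read-to-acquire: the read itself claims the lock
 * when it returns 0, and writing 1 releases it; an all-ones readback
 * means the PCI mapping is dead and is escalated as IOCPF_E_SEM_ERROR.
 * Illustrative sequence:
 *
 *	r32 = readl(sem_reg);	// 0 => acquired, 1 => busy
 *	...critical section...
 *	writel(1, sem_reg);	// release
 */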

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
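
/*
 * Note: the init-done poll above is bounded by iteration count
 * (PSS_LMEM_INIT_TIME) rather than wall-clock time; if LMEM never
 * signals completion, the IOC enable timeout catches it later.
 */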

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
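
/*
 * SMEM is accessed through a paged window: PSS_SMEM_PGNUM() selects the
 * page programmed into host_page_num_fn and PSS_SMEM_PGOFF() yields the
 * offset within it, so the header is read one 32-bit word at a time.
 */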

/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
		struct bfi_ioc_image_hdr_s *smem_fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/*
	 * If smem is incompatible or old, driver should not work with it.
	 */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return BFA_FALSE;
	}

	/*
	 * If flash has a better f/w than smem, do not work with smem.
	 * If smem f/w == flash f/w, work with it (smem f/w is not old or
	 * incompatible at this point).
	 * If flash is old or incompatible, work with smem only if
	 * smem f/w == driver f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
		return BFA_FALSE;
	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
		return BFA_TRUE;
	} else {
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			BFA_TRUE : BFA_FALSE;
	}
}
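
/*
 * Informal decision table for bfa_ioc_fwver_cmp() above:
 *
 *	smem OLD/INCOMP vs driver	-> FALSE
 *	flash BETTER than smem		-> FALSE (prefer reboot from flash)
 *	flash SAME as smem		-> TRUE
 *	flash OLD/INCOMP		-> TRUE iff smem == driver
 */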

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

static bfa_boolean_t
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
				struct bfi_ioc_image_hdr_s *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return BFA_FALSE;

	return BFA_TRUE;
}

/*
 * Returns TRUE if major, minor and maintenance versions are the same.
 * If patch versions are also the same, the MD5 checksums must match too.
 */
static bfa_boolean_t
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
	}

	return BFA_TRUE;
}

static bfa_boolean_t
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return BFA_FALSE;

	return BFA_TRUE;
}

static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
		fwhdr->fwver.build == 0)
		return BFA_TRUE;

	return BFA_FALSE;
}

/*
 * Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better.
 */
static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;

	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * GA takes priority over internal builds of the same patch stream.
	 * At this point major minor maint and patch numbers are same.
	 */

	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * All version numbers are equal; the MD5 check was already done
	 * as part of the compatibility check above.
	 */
	return BFI_IOC_IMG_VER_SAME;
}
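
/*
 * Worked example (illustrative version numbers): with a GA base image
 * 3.2.3.0 (phase 0, build 0, per fwhdr_is_ga()), a 3.2.3.0 candidate
 * with a non-zero phase or build compares as BFI_IOC_IMG_VER_OLD, while
 * a 3.2.3.1 candidate compares as BFI_IOC_IMG_VER_BETTER on the patch
 * check alone.
 */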

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

bfa_status_t
bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
				u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
			struct bfi_ioc_image_hdr_s *base_fwhdr)
{
	struct bfi_ioc_image_hdr_s *flash_fwhdr;
	bfa_status_t status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}


/*
 * Invalidate fwver signature
 */
bfa_status_t
bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	enum bfi_ioc_state ioc_fwstate;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (!bfa_ioc_state_disabled(ioc_fwstate))
		return BFA_STATUS_ADAPTER_ENABLED;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);

	return BFA_STATUS_OK;
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}
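
/*
 * Decision summary for bfa_ioc_hwinit() (informal): UNINIT or invalid
 * f/w -> (re)boot firmware and poll for init completion; INITING ->
 * another function is already initializing, just poll; DISABLED or OP
 * with matching f/w -> flush any stale ready event and reuse the
 * running firmware.
 */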

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
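
/*
 * Typical caller sketch (see bfa_ioc_send_enable() below): build a bfi
 * request header with bfi_h2i_set(), fill in the payload, then pass the
 * message and its size to bfa_ioc_mbox_send(); the trailing readl()
 * flushes the doorbell write before returning.
 */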

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}

/*
 *	Initiate a full firmware download.
 */
static bfa_status_t
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	bfa_status_t status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
		boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	bfa_trc(ioc, fwimg_size);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);

			if (boot_env == BFI_FWBOOT_ENV_OS &&
				boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
		boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
			swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
			swab32(boot_env));
	return BFA_STATUS_OK;
}
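
/*
 * Chunk bookkeeping above (informal): the image is consumed one 32-bit
 * word at a time; BFA_IOC_FLASH_CHUNK_NO(i) maps word index i to its
 * BFI_FLASH_CHUNK_SZ_WORDS-sized chunk, and a fresh chunk is fetched
 * (from flash or from the driver image) whenever that number changes.
 */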
1967 
1968 
1969 /*
1970  * Update BFA configuration from firmware configuration.
1971  */
1972 static void
1973 bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1974 {
1975 	struct bfi_ioc_attr_s	*attr = ioc->attr;
1976 
1977 	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1978 	attr->card_type     = be32_to_cpu(attr->card_type);
1979 	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1980 	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
1981 	attr->mfg_year	= be16_to_cpu(attr->mfg_year);
1982 
1983 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1984 }
1985 
1986 /*
1987  * Attach time initialization of mbox logic.
1988  */
1989 static void
1990 bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1991 {
1992 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1993 	int	mc;
1994 
1995 	INIT_LIST_HEAD(&mod->cmd_q);
1996 	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1997 		mod->mbhdlr[mc].cbfn = NULL;
1998 		mod->mbhdlr[mc].cbarg = ioc->bfa;
1999 	}
2000 }
2001 
2002 /*
2003  * Mbox poll timer -- restarts any pending mailbox requests.
2004  */
2005 static void
2006 bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
2007 {
2008 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2009 	struct bfa_mbox_cmd_s		*cmd;
2010 	u32			stat;
2011 
2012 	/*
2013 	 * If no command pending, do nothing
2014 	 */
2015 	if (list_empty(&mod->cmd_q))
2016 		return;
2017 
2018 	/*
2019 	 * If previous command is not yet fetched by firmware, do nothing
2020 	 */
2021 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2022 	if (stat)
2023 		return;
2024 
2025 	/*
2026 	 * Dequeue the next pending command and send it to firmware.
2027 	 */
2028 	bfa_q_deq(&mod->cmd_q, &cmd);
2029 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2030 }
2031 
2032 /*
2033  * Cleanup any pending requests.
2034  */
2035 static void
2036 bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2037 {
2038 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2039 	struct bfa_mbox_cmd_s		*cmd;
2040 
2041 	while (!list_empty(&mod->cmd_q))
2042 		bfa_q_deq(&mod->cmd_q, &cmd);
2043 }
2044 
2045 /*
2046  * Read data from SMEM to host through PCI memmap
2047  *
2048  * @param[in]	ioc	memory for IOC
2049  * @param[out]	tbuf	app memory to store data read from smem
2050  * @param[in]	soff	smem offset
2051  * @param[in]	sz	read size in bytes
2052  */
2053 static bfa_status_t
2054 bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2055 {
2056 	u32 pgnum, loff;
2057 	__be32 r32;
2058 	int i, len;
2059 	u32 *buf = tbuf;
2060 
2061 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2062 	loff = PSS_SMEM_PGOFF(soff);
2063 	bfa_trc(ioc, pgnum);
2064 	bfa_trc(ioc, loff);
2065 	bfa_trc(ioc, sz);
2066 
2067 	/*
2068 	 *  Hold semaphore to serialize pll init and fwtrc.
2069 	 */
2070 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2071 		bfa_trc(ioc, 0);
2072 		return BFA_STATUS_FAILED;
2073 	}
2074 
2075 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2076 
2077 	len = sz/sizeof(u32);
2078 	bfa_trc(ioc, len);
2079 	for (i = 0; i < len; i++) {
2080 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2081 		buf[i] = swab32(r32);
2082 		loff += sizeof(u32);
2083 
2084 		/*
2085 		 * handle page offset wrap around
2086 		 */
2087 		loff = PSS_SMEM_PGOFF(loff);
2088 		if (loff == 0) {
2089 			pgnum++;
2090 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2091 		}
2092 	}
2093 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2094 			ioc->ioc_regs.host_page_num_fn);
2095 	/*
2096 	 *  release semaphore.
2097 	 */
2098 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2099 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2100 
2101 	bfa_trc(ioc, pgnum);
2102 	return BFA_STATUS_OK;
2103 }
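
/*
 * The smem access pattern shared by the firmware download and smem
 * read/clear paths is the same: a linear smem offset is split into a
 * page number and a page offset, and the page register is rewritten
 * whenever the offset wraps. An illustrative sketch (not part of the
 * driver; buf, soff and nwords are hypothetical caller-owned values):
 *
 *	u32 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
 *	u32 loff = PSS_SMEM_PGOFF(soff);
 *	int i;
 *
 *	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 *	for (i = 0; i < nwords; i++) {
 *		buf[i] = swab32(bfa_mem_read(ioc->ioc_regs.smem_page_start,
 *					     loff));
 *		loff = PSS_SMEM_PGOFF(loff + sizeof(u32));
 *		if (loff == 0)
 *			writel(++pgnum, ioc->ioc_regs.host_page_num_fn);
 *	}
 */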
2104 
2105 /*
2106  * Clear SMEM data from host through PCI memmap
2107  *
2108  * @param[in]	ioc	memory for IOC
2109  * @param[in]	soff	smem offset
2110  * @param[in]	sz	size of smem in bytes
2111  */
2112 static bfa_status_t
2113 bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2114 {
2115 	int i, len;
2116 	u32 pgnum, loff;
2117 
2118 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2119 	loff = PSS_SMEM_PGOFF(soff);
2120 	bfa_trc(ioc, pgnum);
2121 	bfa_trc(ioc, loff);
2122 	bfa_trc(ioc, sz);
2123 
2124 	/*
2125 	 *  Hold semaphore to serialize pll init and fwtrc.
2126 	 */
2127 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2128 		bfa_trc(ioc, 0);
2129 		return BFA_STATUS_FAILED;
2130 	}
2131 
2132 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2133 
2134 	len = sz/sizeof(u32); /* len in words */
2135 	bfa_trc(ioc, len);
2136 	for (i = 0; i < len; i++) {
2137 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2138 		loff += sizeof(u32);
2139 
2140 		/*
2141 		 * handle page offset wrap around
2142 		 */
2143 		loff = PSS_SMEM_PGOFF(loff);
2144 		if (loff == 0) {
2145 			pgnum++;
2146 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2147 		}
2148 	}
2149 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2150 			ioc->ioc_regs.host_page_num_fn);
2151 
2152 	/*
2153 	 *  release semaphore.
2154 	 */
2155 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2156 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2157 	bfa_trc(ioc, pgnum);
2158 	return BFA_STATUS_OK;
2159 }
2160 
2161 static void
2162 bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2163 {
2164 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2165 
2166 	/*
2167 	 * Notify driver and common modules registered for notification.
2168 	 */
2169 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
2170 	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2171 
2172 	bfa_ioc_debug_save_ftrc(ioc);
2173 
2174 	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2175 		"Heart Beat of IOC has failed\n");
2176 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2178 }
2179 
2180 static void
2181 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2182 {
2183 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2184 	/*
2185 	 * Provide enable completion callback.
2186 	 */
2187 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2188 	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2189 		"Running firmware version is incompatible "
2190 		"with the driver version\n");
2191 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2192 }
2193 
2194 bfa_status_t
2195 bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2196 {
2198 	/*
2199 	 *  Hold semaphore so that nobody can access the chip during init.
2200 	 */
2201 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2202 
2203 	bfa_ioc_pll_init_asic(ioc);
2204 
2205 	ioc->pllinit = BFA_TRUE;
2206 
2207 	/*
2208 	 * Initialize LMEM
2209 	 */
2210 	bfa_ioc_lmem_init(ioc);
2211 
2212 	/*
2213 	 *  release semaphore.
2214 	 */
2215 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2216 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2217 
2218 	return BFA_STATUS_OK;
2219 }
2220 
2221 /*
2222  * Download and boot the firmware. Also used by the diag module to boot
2223  * with memory test as the entry vector.
2224  */
2225 bfa_status_t
2226 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2227 {
2228 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
2229 	bfa_status_t status;
2230 	bfa_ioc_stats(ioc, ioc_boots);
2231 
2232 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2233 		return BFA_STATUS_FAILED;
2234 
2235 	if (boot_env == BFI_FWBOOT_ENV_OS &&
2236 		boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2237 
2238 		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2239 			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2240 
2241 		/*
2242 		 * Boot from flash only if the flash f/w is better than
2243 		 * the driver f/w; otherwise push the driver's firmware.
2244 		 */
2245 		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2246 						BFI_IOC_IMG_VER_BETTER)
2247 			boot_type = BFI_FWBOOT_TYPE_FLASH;
2248 	}
2249 
2250 	/*
2251 	 * Initialize IOC state of all functions on a chip reset.
2252 	 */
2253 	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2254 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2255 		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2256 	} else {
2257 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2258 		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2259 	}
2260 
2261 	bfa_ioc_msgflush(ioc);
2262 	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2263 	if (status == BFA_STATUS_OK)
2264 		bfa_ioc_lpu_start(ioc);
2265 	else {
2266 		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2267 		bfa_iocpf_timeout(ioc);
2268 	}
2269 	return status;
2270 }
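
/*
 * Illustrative usage sketch (not part of the driver): the diag path
 * boots with the memory test entry vector, while normal bring-up uses
 * BFI_FWBOOT_TYPE_NORMAL and lets bfa_ioc_boot() switch to the flash
 * image when it is better than the driver image:
 *
 *	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_MEMTEST,
 *			 BFI_FWBOOT_ENV_OS) != BFA_STATUS_OK)
 *		return BFA_STATUS_FAILED;
 */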
2271 
2272 /*
2273  * Enable/disable IOC failure auto recovery.
2274  */
2275 void
2276 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2277 {
2278 	bfa_auto_recover = auto_recover;
2279 }
2280 
2283 bfa_boolean_t
2284 bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2285 {
2286 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2287 }
2288 
2289 bfa_boolean_t
2290 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2291 {
2292 	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2293 
2294 	return ((r32 != BFI_IOC_UNINIT) &&
2295 		(r32 != BFI_IOC_INITING) &&
2296 		(r32 != BFI_IOC_MEMTEST));
2297 }
2298 
2299 bfa_boolean_t
2300 bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2301 {
2302 	__be32	*msgp = mbmsg;
2303 	u32	r32;
2304 	int		i;
2305 
2306 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2307 	if ((r32 & 1) == 0)
2308 		return BFA_FALSE;
2309 
2310 	/*
2311 	 * read the MBOX msg
2312 	 */
2313 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2314 	     i++) {
2315 		r32 = readl(ioc->ioc_regs.lpu_mbox +
2316 				   i * sizeof(u32));
2317 		msgp[i] = cpu_to_be32(r32);
2318 	}
2319 
2320 	/*
2321 	 * turn off mailbox interrupt by clearing mailbox status
2322 	 */
2323 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2324 	readl(ioc->ioc_regs.lpu_mbox_cmd);
2325 
2326 	return BFA_TRUE;
2327 }
2328 
2329 void
2330 bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2331 {
2332 	union bfi_ioc_i2h_msg_u	*msg;
2333 	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2334 
2335 	msg = (union bfi_ioc_i2h_msg_u *) m;
2336 
2337 	bfa_ioc_stats(ioc, ioc_isrs);
2338 
2339 	switch (msg->mh.msg_id) {
2340 	case BFI_IOC_I2H_HBEAT:
2341 		break;
2342 
2343 	case BFI_IOC_I2H_ENABLE_REPLY:
2344 		ioc->port_mode = ioc->port_mode_cfg =
2345 				(enum bfa_mode_s)msg->fw_event.port_mode;
2346 		ioc->ad_cap_bm = msg->fw_event.cap_bm;
2347 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2348 		break;
2349 
2350 	case BFI_IOC_I2H_DISABLE_REPLY:
2351 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2352 		break;
2353 
2354 	case BFI_IOC_I2H_GETATTR_REPLY:
2355 		bfa_ioc_getattr_reply(ioc);
2356 		break;
2357 
2358 	default:
2359 		bfa_trc(ioc, msg->mh.msg_id);
2360 		WARN_ON(1);
2361 	}
2362 }
2363 
2364 /*
2365  * IOC attach time initialization and setup.
2366  *
2367  * @param[in]	ioc	memory for IOC
2368  * @param[in]	bfa	driver instance structure
2369  */
2370 void
2371 bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2372 	       struct bfa_timer_mod_s *timer_mod)
2373 {
2374 	ioc->bfa	= bfa;
2375 	ioc->cbfn	= cbfn;
2376 	ioc->timer_mod	= timer_mod;
2377 	ioc->fcmode	= BFA_FALSE;
2378 	ioc->pllinit	= BFA_FALSE;
2379 	ioc->dbg_fwsave_once = BFA_TRUE;
2380 	ioc->iocpf.ioc	= ioc;
2381 
2382 	bfa_ioc_mbox_attach(ioc);
2383 	INIT_LIST_HEAD(&ioc->notify_q);
2384 
2385 	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2386 	bfa_fsm_send_event(ioc, IOC_E_RESET);
2387 }
2388 
2389 /*
2390  * Driver detach time IOC cleanup.
2391  */
2392 void
2393 bfa_ioc_detach(struct bfa_ioc_s *ioc)
2394 {
2395 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2396 	INIT_LIST_HEAD(&ioc->notify_q);
2397 }
2398 
2399 /*
2400  * Setup IOC PCI properties.
2401  *
2402  * @param[in]	pcidev	PCI device information for this IOC
2403  */
2404 void
2405 bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2406 		enum bfi_pcifn_class clscode)
2407 {
2408 	ioc->clscode	= clscode;
2409 	ioc->pcidev	= *pcidev;
2410 
2411 	/*
2412 	 * Initialize IOC and device personality
2413 	 */
2414 	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2415 	ioc->asic_mode  = BFI_ASIC_MODE_FC;
2416 
2417 	switch (pcidev->device_id) {
2418 	case BFA_PCI_DEVICE_ID_FC_8G1P:
2419 	case BFA_PCI_DEVICE_ID_FC_8G2P:
2420 		ioc->asic_gen = BFI_ASIC_GEN_CB;
2421 		ioc->fcmode = BFA_TRUE;
2422 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2423 		ioc->ad_cap_bm = BFA_CM_HBA;
2424 		break;
2425 
2426 	case BFA_PCI_DEVICE_ID_CT:
2427 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2428 		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2429 		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2430 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2431 		ioc->ad_cap_bm = BFA_CM_CNA;
2432 		break;
2433 
2434 	case BFA_PCI_DEVICE_ID_CT_FC:
2435 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2436 		ioc->fcmode = BFA_TRUE;
2437 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2438 		ioc->ad_cap_bm = BFA_CM_HBA;
2439 		break;
2440 
2441 	case BFA_PCI_DEVICE_ID_CT2:
2442 	case BFA_PCI_DEVICE_ID_CT2_QUAD:
2443 		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2444 		if (clscode == BFI_PCIFN_CLASS_FC &&
2445 		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2446 			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2447 			ioc->fcmode = BFA_TRUE;
2448 			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2449 			ioc->ad_cap_bm = BFA_CM_HBA;
2450 		} else {
2451 			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2452 			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2453 			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2454 				ioc->port_mode =
2455 				ioc->port_mode_cfg = BFA_MODE_CNA;
2456 				ioc->ad_cap_bm = BFA_CM_CNA;
2457 			} else {
2458 				ioc->port_mode =
2459 				ioc->port_mode_cfg = BFA_MODE_NIC;
2460 				ioc->ad_cap_bm = BFA_CM_NIC;
2461 			}
2462 		}
2463 		break;
2464 
2465 	default:
2466 		WARN_ON(1);
2467 	}
2468 
2469 	/*
2470 	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2471 	 */
2472 	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2473 		bfa_ioc_set_cb_hwif(ioc);
2474 	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2475 		bfa_ioc_set_ct_hwif(ioc);
2476 	else {
2477 		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2478 		bfa_ioc_set_ct2_hwif(ioc);
2479 		bfa_ioc_ct2_poweron(ioc);
2480 	}
2481 
2482 	bfa_ioc_map_port(ioc);
2483 	bfa_ioc_reg_init(ioc);
2484 }
2485 
2486 /*
2487  * Initialize IOC dma memory
2488  *
2489  * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2490  * @param[in]	dm_pa	physical address of IOC dma memory
2491  */
2492 void
2493 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2494 {
2495 	/*
2496 	 * dma memory for firmware attribute
2497 	 */
2498 	ioc->attr_dma.kva = dm_kva;
2499 	ioc->attr_dma.pa = dm_pa;
2500 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2501 }
2502 
2503 void
2504 bfa_ioc_enable(struct bfa_ioc_s *ioc)
2505 {
2506 	bfa_ioc_stats(ioc, ioc_enables);
2507 	ioc->dbg_fwsave_once = BFA_TRUE;
2508 
2509 	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2510 }
2511 
2512 void
2513 bfa_ioc_disable(struct bfa_ioc_s *ioc)
2514 {
2515 	bfa_ioc_stats(ioc, ioc_disables);
2516 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2517 }
2518 
2519 void
2520 bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2521 {
2522 	ioc->dbg_fwsave_once = BFA_TRUE;
2523 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2524 }
2525 
2526 /*
2527  * Initialize memory for saving firmware trace. Driver must initialize
2528  * trace memory before calling bfa_ioc_enable().
2529  */
2530 void
2531 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2532 {
2533 	ioc->dbg_fwsave	    = dbg_fwsave;
2534 	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2535 }
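
/*
 * Illustrative attach-time ordering sketch (not part of the driver).
 * dbg_fwsave_mem is a hypothetical buffer of at least BFA_DBG_FWTRC_LEN
 * bytes; the trace memory must be claimed before the IOC is enabled:
 *
 *	bfa_ioc_attach(ioc, bfa, &ioc_cbfn, &timer_mod);
 *	bfa_ioc_pci_init(ioc, &pcidev, clscode);
 *	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_ioc_debug_memclaim(ioc, dbg_fwsave_mem);
 *	bfa_ioc_enable(ioc);
 */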
2536 
2537 /*
2538  * Register mailbox message handler functions
2539  *
2540  * @param[in]	ioc		IOC instance
2541  * @param[in]	mcfuncs		message class handler functions
2542  */
2543 void
2544 bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2545 {
2546 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2547 	int				mc;
2548 
2549 	for (mc = 0; mc < BFI_MC_MAX; mc++)
2550 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2551 }
2552 
2553 /*
2554  * Register mailbox message handler function, to be called by common modules
2555  */
2556 void
2557 bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2558 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2559 {
2560 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2561 
2562 	mod->mbhdlr[mc].cbfn	= cbfn;
2563 	mod->mbhdlr[mc].cbarg	= cbarg;
2564 }
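
/*
 * Illustrative registration sketch (not part of the driver): a common
 * module registers its receive handler and context for one message
 * class, as the SFP and ASIC block modules later in this file do:
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
 */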
2565 
2566 /*
2567  * Queue a mailbox command request to firmware. The command is queued
2568  * internally if the mailbox is busy; the caller must serialize requests.
2569  *
2570  * @param[in]	ioc	IOC instance
2571  * @param[in]	cmd	Mailbox command
2572  */
2573 void
2574 bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2575 {
2576 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2577 	u32			stat;
2578 
2579 	/*
2580 	 * If a previous command is pending, queue new command
2581 	 */
2582 	if (!list_empty(&mod->cmd_q)) {
2583 		list_add_tail(&cmd->qe, &mod->cmd_q);
2584 		return;
2585 	}
2586 
2587 	/*
2588 	 * If mailbox is busy, queue command for poll timer
2589 	 */
2590 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2591 	if (stat) {
2592 		list_add_tail(&cmd->qe, &mod->cmd_q);
2593 		return;
2594 	}
2595 
2596 	/*
2597 	 * mailbox is free -- send command to firmware
2598 	 */
2599 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2600 }
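
/*
 * Illustrative mailbox usage sketch (not part of the driver): build an
 * H2I message inside a caller-owned command buffer, then queue it. The
 * command is sent immediately if the mailbox is free, otherwise it is
 * held on cmd_q until bfa_ioc_mbox_poll() can send it:
 *
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *)cmd->msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, cmd);
 */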
2601 
2602 /*
2603  * Handle mailbox interrupts
2604  */
2605 void
2606 bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2607 {
2608 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2609 	struct bfi_mbmsg_s		m;
2610 	int				mc;
2611 
2612 	if (bfa_ioc_msgget(ioc, &m)) {
2613 		/*
2614 		 * Treat IOC message class as special.
2615 		 */
2616 		mc = m.mh.msg_class;
2617 		if (mc == BFI_MC_IOC) {
2618 			bfa_ioc_isr(ioc, &m);
2619 			return;
2620 		}
2621 
2622 		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2623 			return;
2624 
2625 		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2626 	}
2627 
2628 	bfa_ioc_lpu_read_stat(ioc);
2629 
2630 	/*
2631 	 * Try to send pending mailbox commands
2632 	 */
2633 	bfa_ioc_mbox_poll(ioc);
2634 }
2635 
2636 void
2637 bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2638 {
2639 	bfa_ioc_stats(ioc, ioc_hbfails);
2640 	ioc->stats.hb_count = ioc->hb_count;
2641 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2642 }
2643 
2644 /*
2645  * return true if IOC is disabled
2646  */
2647 bfa_boolean_t
2648 bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2649 {
2650 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2651 		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2652 }
2653 
2654 /*
2655  * return true if IOC firmware is different.
2656  */
2657 bfa_boolean_t
2658 bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2659 {
2660 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2661 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2662 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2663 }
2664 
2665 /*
2666  * Check if adapter is disabled -- both IOCs should be in a disabled
2667  * state.
2668  */
2669 bfa_boolean_t
2670 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2671 {
2672 	u32	ioc_state;
2673 
2674 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2675 		return BFA_FALSE;
2676 
2677 	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2678 	if (!bfa_ioc_state_disabled(ioc_state))
2679 		return BFA_FALSE;
2680 
2681 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2682 		ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc);
2683 		if (!bfa_ioc_state_disabled(ioc_state))
2684 			return BFA_FALSE;
2685 	}
2686 
2687 	return BFA_TRUE;
2688 }
2689 
2690 /*
2691  * Reset IOC fwstate registers.
2692  */
2693 void
2694 bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2695 {
2696 	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2697 	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2698 }
2699 
2700 #define BFA_MFG_NAME "Brocade"
2701 void
2702 bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2703 			 struct bfa_adapter_attr_s *ad_attr)
2704 {
2705 	struct bfi_ioc_attr_s	*ioc_attr;
2706 
2707 	ioc_attr = ioc->attr;
2708 
2709 	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2710 	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2711 	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2712 	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2713 	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2714 		      sizeof(struct bfa_mfg_vpd_s));
2715 
2716 	ad_attr->nports = bfa_ioc_get_nports(ioc);
2717 	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2718 
2719 	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2720 	/* For now, model descr uses same model string */
2721 	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2722 
2723 	ad_attr->card_type = ioc_attr->card_type;
2724 	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2725 
2726 	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2727 		ad_attr->prototype = 1;
2728 	else
2729 		ad_attr->prototype = 0;
2730 
2731 	ad_attr->pwwn = ioc->attr->pwwn;
2732 	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2733 
2734 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2735 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2736 	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2737 	ad_attr->asic_rev = ioc_attr->asic_rev;
2738 
2739 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2740 
2741 	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2742 	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2743 				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2744 	ad_attr->mfg_day = ioc_attr->mfg_day;
2745 	ad_attr->mfg_month = ioc_attr->mfg_month;
2746 	ad_attr->mfg_year = ioc_attr->mfg_year;
2747 	memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2748 }
2749 
2750 enum bfa_ioc_type_e
2751 bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2752 {
2753 	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2754 		return BFA_IOC_TYPE_LL;
2755 
2756 	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2757 
2758 	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2759 		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2760 }
2761 
2762 void
2763 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2764 {
2765 	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2766 	memcpy((void *)serial_num,
2767 			(void *)ioc->attr->brcd_serialnum,
2768 			BFA_ADAPTER_SERIAL_NUM_LEN);
2769 }
2770 
2771 void
2772 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2773 {
2774 	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2775 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2776 }
2777 
2778 void
2779 bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2780 {
2781 	WARN_ON(!chip_rev);
2782 
2783 	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2784 
2785 	chip_rev[0] = 'R';
2786 	chip_rev[1] = 'e';
2787 	chip_rev[2] = 'v';
2788 	chip_rev[3] = '-';
2789 	chip_rev[4] = ioc->attr->asic_rev;
2790 	chip_rev[5] = '\0';
2791 }
2792 
2793 void
2794 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2795 {
2796 	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2797 	memcpy(optrom_ver, ioc->attr->optrom_version,
2798 		      BFA_VERSION_LEN);
2799 }
2800 
2801 void
2802 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2803 {
2804 	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2805 	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2806 }
2807 
2808 void
2809 bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2810 {
2811 	struct bfi_ioc_attr_s	*ioc_attr;
2812 	u8 nports = bfa_ioc_get_nports(ioc);
2813 
2814 	WARN_ON(!model);
2815 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2816 
2817 	ioc_attr = ioc->attr;
2818 
2819 	if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2820 		(!bfa_mfg_is_mezz(ioc_attr->card_type)))
2821 		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2822 			BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2823 	else
2824 		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2825 			BFA_MFG_NAME, ioc_attr->card_type);
2826 }
2827 
2828 enum bfa_ioc_state
2829 bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2830 {
2831 	enum bfa_iocpf_state iocpf_st;
2832 	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2833 
2834 	if (ioc_st == BFA_IOC_ENABLING ||
2835 		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2836 
2837 		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2838 
2839 		switch (iocpf_st) {
2840 		case BFA_IOCPF_SEMWAIT:
2841 			ioc_st = BFA_IOC_SEMWAIT;
2842 			break;
2843 
2844 		case BFA_IOCPF_HWINIT:
2845 			ioc_st = BFA_IOC_HWINIT;
2846 			break;
2847 
2848 		case BFA_IOCPF_FWMISMATCH:
2849 			ioc_st = BFA_IOC_FWMISMATCH;
2850 			break;
2851 
2852 		case BFA_IOCPF_FAIL:
2853 			ioc_st = BFA_IOC_FAIL;
2854 			break;
2855 
2856 		case BFA_IOCPF_INITFAIL:
2857 			ioc_st = BFA_IOC_INITFAIL;
2858 			break;
2859 
2860 		default:
2861 			break;
2862 		}
2863 	}
2864 
2865 	return ioc_st;
2866 }
2867 
2868 void
2869 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2870 {
2871 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2872 
2873 	ioc_attr->state = bfa_ioc_get_state(ioc);
2874 	ioc_attr->port_id = bfa_ioc_portid(ioc);
2875 	ioc_attr->port_mode = ioc->port_mode;
2876 	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2877 	ioc_attr->cap_bm = ioc->ad_cap_bm;
2878 
2879 	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2880 
2881 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2882 
2883 	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2884 	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2885 	ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2886 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2887 }
2888 
2889 mac_t
2890 bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2891 {
2892 	/*
2893 	 * Check the IOC type and return the appropriate MAC
2894 	 */
2895 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2896 		return ioc->attr->fcoe_mac;
2897 	else
2898 		return ioc->attr->mac;
2899 }
2900 
2901 mac_t
2902 bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2903 {
2904 	mac_t	m;
2905 
2906 	m = ioc->attr->mfg_mac;
2907 	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2908 		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2909 	else
2910 		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2911 			bfa_ioc_pcifn(ioc));
2912 
2913 	return m;
2914 }
2915 
2916 /*
2917  * Send AEN notification
2918  */
2919 void
2920 bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2921 {
2922 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2923 	struct bfa_aen_entry_s	*aen_entry;
2924 	enum bfa_ioc_type_e ioc_type;
2925 
2926 	bfad_get_aen_entry(bfad, aen_entry);
2927 	if (!aen_entry)
2928 		return;
2929 
2930 	ioc_type = bfa_ioc_get_type(ioc);
2931 	switch (ioc_type) {
2932 	case BFA_IOC_TYPE_FC:
2933 		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2934 		break;
2935 	case BFA_IOC_TYPE_FCoE:
2936 		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2937 		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2938 		break;
2939 	case BFA_IOC_TYPE_LL:
2940 		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2941 		break;
2942 	default:
2943 		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2944 		break;
2945 	}
2946 
2947 	/* Send the AEN notification */
2948 	aen_entry->aen_data.ioc.ioc_type = ioc_type;
2949 	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2950 				  BFA_AEN_CAT_IOC, event);
2951 }
2952 
2953 /*
2954  * Retrieve saved firmware trace from a prior IOC failure.
2955  */
2956 bfa_status_t
2957 bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2958 {
2959 	int	tlen;
2960 
2961 	if (ioc->dbg_fwsave_len == 0)
2962 		return BFA_STATUS_ENOFSAVE;
2963 
2964 	tlen = *trclen;
2965 	if (tlen > ioc->dbg_fwsave_len)
2966 		tlen = ioc->dbg_fwsave_len;
2967 
2968 	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2969 	*trclen = tlen;
2970 	return BFA_STATUS_OK;
2971 }
2972 
2974 /*
2975  * Retrieve the current firmware trace, read directly from smem.
2976  */
2977 bfa_status_t
2978 bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2979 {
2980 	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2981 	int tlen;
2982 	bfa_status_t status;
2983 
2984 	bfa_trc(ioc, *trclen);
2985 
2986 	tlen = *trclen;
2987 	if (tlen > BFA_DBG_FWTRC_LEN)
2988 		tlen = BFA_DBG_FWTRC_LEN;
2989 
2990 	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2991 	*trclen = tlen;
2992 	return status;
2993 }
2994 
2995 static void
2996 bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2997 {
2998 	struct bfa_mbox_cmd_s cmd;
2999 	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
3000 
3001 	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
3002 		    bfa_ioc_portid(ioc));
3003 	req->clscode = cpu_to_be16(ioc->clscode);
3004 	bfa_ioc_mbox_queue(ioc, &cmd);
3005 }
3006 
3007 static void
3008 bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
3009 {
3010 	u32 fwsync_iter = 1000;
3011 
3012 	bfa_ioc_send_fwsync(ioc);
3013 
3014 	/*
3015 	 * After sending a fw sync mbox command wait for it to
3016 	 * take effect.  We will not wait for a response because
3017 	 *    1. fw_sync mbox cmd doesn't have a response.
3018 	 *    2. Even if we implement that,  interrupts might not
3019 	 *	 be enabled when we call this function.
3020 	 * So, just keep checking if any mbox cmd is pending, and
3021 	 * after waiting for a reasonable amount of time, go ahead.
3022 	 * It is possible that fw has crashed and the mbox command
3023 	 * is never acknowledged.
3024 	 */
3025 	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3026 		fwsync_iter--;
3027 }
3028 
3029 /*
3030  * Dump firmware smem
3031  */
3032 bfa_status_t
3033 bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3034 				u32 *offset, int *buflen)
3035 {
3036 	u32 loff;
3037 	int dlen;
3038 	bfa_status_t status;
3039 	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3040 
3041 	if (*offset >= smem_len) {
3042 		*offset = *buflen = 0;
3043 		return BFA_STATUS_EINVAL;
3044 	}
3045 
3046 	loff = *offset;
3047 	dlen = *buflen;
3048 
3049 	/*
3050 	 * On the first smem read, sync with the firmware before proceeding.
3051 	 * There is no need to sync before reading every chunk.
3052 	 */
3053 	if (loff == 0)
3054 		bfa_ioc_fwsync(ioc);
3055 
3056 	if ((loff + dlen) >= smem_len)
3057 		dlen = smem_len - loff;
3058 
3059 	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3060 
3061 	if (status != BFA_STATUS_OK) {
3062 		*offset = *buflen = 0;
3063 		return status;
3064 	}
3065 
3066 	*offset += dlen;
3067 
3068 	if (*offset >= smem_len)
3069 		*offset = 0;
3070 
3071 	*buflen = dlen;
3072 
3073 	return status;
3074 }
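
/*
 * Illustrative sketch of a full smem dump (not part of the driver):
 * call bfa_ioc_debug_fwcore() repeatedly until the offset wraps back
 * to zero, copying each len-byte chunk out of buf between calls; buf
 * and buflen are hypothetical caller-owned values:
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = buflen;
 *		if (bfa_ioc_debug_fwcore(ioc, buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *	} while (off != 0);
 */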
3075 
3076 /*
3077  * Firmware statistics
3078  */
3079 bfa_status_t
3080 bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3081 {
3082 	u32 loff = BFI_IOC_FWSTATS_OFF + \
3083 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3084 	int tlen;
3085 	bfa_status_t status;
3086 
3087 	if (ioc->stats_busy) {
3088 		bfa_trc(ioc, ioc->stats_busy);
3089 		return BFA_STATUS_DEVBUSY;
3090 	}
3091 	ioc->stats_busy = BFA_TRUE;
3092 
3093 	tlen = sizeof(struct bfa_fw_stats_s);
3094 	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3095 
3096 	ioc->stats_busy = BFA_FALSE;
3097 	return status;
3098 }
3099 
3100 bfa_status_t
3101 bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3102 {
3103 	u32 loff = BFI_IOC_FWSTATS_OFF + \
3104 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3105 	int tlen;
3106 	bfa_status_t status;
3107 
3108 	if (ioc->stats_busy) {
3109 		bfa_trc(ioc, ioc->stats_busy);
3110 		return BFA_STATUS_DEVBUSY;
3111 	}
3112 	ioc->stats_busy = BFA_TRUE;
3113 
3114 	tlen = sizeof(struct bfa_fw_stats_s);
3115 	status = bfa_ioc_smem_clr(ioc, loff, tlen);
3116 
3117 	ioc->stats_busy = BFA_FALSE;
3118 	return status;
3119 }
3120 
3121 /*
3122  * Save firmware trace if configured.
3123  */
3124 void
3125 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3126 {
3127 	int		tlen;
3128 
3129 	if (ioc->dbg_fwsave_once) {
3130 		ioc->dbg_fwsave_once = BFA_FALSE;
3131 		if (ioc->dbg_fwsave_len) {
3132 			tlen = ioc->dbg_fwsave_len;
3133 			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3134 		}
3135 	}
3136 }
3137 
3138 /*
3139  * Firmware failure detected. Start recovery actions.
3140  */
3141 static void
3142 bfa_ioc_recover(struct bfa_ioc_s *ioc)
3143 {
3144 	bfa_ioc_stats(ioc, ioc_hbfails);
3145 	ioc->stats.hb_count = ioc->hb_count;
3146 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3147 }
3148 
3149 /*
3150  *  BFA IOC PF private functions
3151  */
3152 static void
3153 bfa_iocpf_timeout(void *ioc_arg)
3154 {
3155 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3156 
3157 	bfa_trc(ioc, 0);
3158 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3159 }
3160 
3161 static void
3162 bfa_iocpf_sem_timeout(void *ioc_arg)
3163 {
3164 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3165 
3166 	bfa_ioc_hw_sem_get(ioc);
3167 }
3168 
3169 static void
3170 bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3171 {
3172 	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3173 
3174 	bfa_trc(ioc, fwstate);
3175 
3176 	if (fwstate == BFI_IOC_DISABLED) {
3177 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3178 		return;
3179 	}
3180 
3181 	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
3182 		bfa_iocpf_timeout(ioc);
3183 	else {
3184 		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3185 		bfa_iocpf_poll_timer_start(ioc);
3186 	}
3187 }
3188 
3189 static void
3190 bfa_iocpf_poll_timeout(void *ioc_arg)
3191 {
3192 	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3193 
3194 	bfa_ioc_poll_fwinit(ioc);
3195 }
3196 
3197 /*
3198  *  bfa timer function
3199  */
3200 void
3201 bfa_timer_beat(struct bfa_timer_mod_s *mod)
3202 {
3203 	struct list_head *qh = &mod->timer_q;
3204 	struct list_head *qe, *qe_next;
3205 	struct bfa_timer_s *elem;
3206 	struct list_head timedout_q;
3207 
3208 	INIT_LIST_HEAD(&timedout_q);
3209 
3210 	qe = bfa_q_next(qh);
3211 
3212 	while (qe != qh) {
3213 		qe_next = bfa_q_next(qe);
3214 
3215 		elem = (struct bfa_timer_s *) qe;
3216 		if (elem->timeout <= BFA_TIMER_FREQ) {
3217 			elem->timeout = 0;
3218 			list_del(&elem->qe);
3219 			list_add_tail(&elem->qe, &timedout_q);
3220 		} else {
3221 			elem->timeout -= BFA_TIMER_FREQ;
3222 		}
3223 
3224 		qe = qe_next;	/* go to next elem */
3225 	}
3226 
3227 	/*
3228 	 * Pop all the timeout entries
3229 	 */
3230 	while (!list_empty(&timedout_q)) {
3231 		bfa_q_deq(&timedout_q, &elem);
3232 		elem->timercb(elem->arg);
3233 	}
3234 }
3235 
3236 /*
3237  * Should be called with lock protection
3238  */
3239 void
3240 bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3241 		    void (*timercb) (void *), void *arg, unsigned int timeout)
3242 {
3244 	WARN_ON(timercb == NULL);
3245 	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3246 
3247 	timer->timeout = timeout;
3248 	timer->timercb = timercb;
3249 	timer->arg = arg;
3250 
3251 	list_add_tail(&timer->qe, &mod->timer_q);
3252 }
3253 
3254 /*
3255  * Should be called with lock protection
3256  */
3257 void
3258 bfa_timer_stop(struct bfa_timer_s *timer)
3259 {
3260 	WARN_ON(list_empty(&timer->qe));
3261 
3262 	list_del(&timer->qe);
3263 }
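
/*
 * Illustrative sketch (not part of the driver): the driver arranges a
 * periodic tick of BFA_TIMER_FREQ msecs and drives the timer module
 * under the same lock that protects bfa_timer_begin()/bfa_timer_stop().
 * bfad_lock()/bfad_unlock() are hypothetical lock helpers:
 *
 *	bfad_lock(bfad);
 *	bfa_timer_beat(&bfa->timer_mod);
 *	bfad_unlock(bfad);
 */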
3264 
3265 /*
3266  *	ASIC block related
3267  */
3268 static void
3269 bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3270 {
3271 	struct bfa_ablk_cfg_inst_s *cfg_inst;
3272 	int i, j;
3273 	u16	be16;
3274 
3275 	for (i = 0; i < BFA_ABLK_MAX; i++) {
3276 		cfg_inst = &cfg->inst[i];
3277 		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3278 			be16 = cfg_inst->pf_cfg[j].pers;
3279 			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3280 			be16 = cfg_inst->pf_cfg[j].num_qpairs;
3281 			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3282 			be16 = cfg_inst->pf_cfg[j].num_vectors;
3283 			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3284 			be16 = cfg_inst->pf_cfg[j].bw_min;
3285 			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3286 			be16 = cfg_inst->pf_cfg[j].bw_max;
3287 			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3288 		}
3289 	}
3290 }
3291 
3292 static void
3293 bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3294 {
3295 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3296 	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3297 	bfa_ablk_cbfn_t cbfn;
3298 
3299 	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3300 	bfa_trc(ablk->ioc, msg->mh.msg_id);
3301 
3302 	switch (msg->mh.msg_id) {
3303 	case BFI_ABLK_I2H_QUERY:
3304 		if (rsp->status == BFA_STATUS_OK) {
3305 			memcpy(ablk->cfg, ablk->dma_addr.kva,
3306 				sizeof(struct bfa_ablk_cfg_s));
3307 			bfa_ablk_config_swap(ablk->cfg);
3308 			ablk->cfg = NULL;
3309 		}
3310 		break;
3311 
3312 	case BFI_ABLK_I2H_ADPT_CONFIG:
3313 	case BFI_ABLK_I2H_PORT_CONFIG:
3314 		/* update config port mode */
3315 		ablk->ioc->port_mode_cfg = rsp->port_mode;
3316 		/* fall through */
3317 	case BFI_ABLK_I2H_PF_DELETE:
3318 	case BFI_ABLK_I2H_PF_UPDATE:
3319 	case BFI_ABLK_I2H_OPTROM_ENABLE:
3320 	case BFI_ABLK_I2H_OPTROM_DISABLE:
3321 		/* No-op */
3322 		break;
3323 
3324 	case BFI_ABLK_I2H_PF_CREATE:
3325 		*(ablk->pcifn) = rsp->pcifn;
3326 		ablk->pcifn = NULL;
3327 		break;
3328 
3329 	default:
3330 		WARN_ON(1);
3331 	}
3332 
3333 	ablk->busy = BFA_FALSE;
3334 	if (ablk->cbfn) {
3335 		cbfn = ablk->cbfn;
3336 		ablk->cbfn = NULL;
3337 		cbfn(ablk->cbarg, rsp->status);
3338 	}
3339 }
3340 
3341 static void
3342 bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3343 {
3344 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3345 
3346 	bfa_trc(ablk->ioc, event);
3347 
3348 	switch (event) {
3349 	case BFA_IOC_E_ENABLED:
3350 		WARN_ON(ablk->busy != BFA_FALSE);
3351 		break;
3352 
3353 	case BFA_IOC_E_DISABLED:
3354 	case BFA_IOC_E_FAILED:
3355 		/* Fail any pending requests */
3356 		ablk->pcifn = NULL;
3357 		if (ablk->busy) {
3358 			if (ablk->cbfn)
3359 				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3360 			ablk->cbfn = NULL;
3361 			ablk->busy = BFA_FALSE;
3362 		}
3363 		break;
3364 
3365 	default:
3366 		WARN_ON(1);
3367 		break;
3368 	}
3369 }
3370 
3371 u32
3372 bfa_ablk_meminfo(void)
3373 {
3374 	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3375 }
3376 
3377 void
3378 bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3379 {
3380 	ablk->dma_addr.kva = dma_kva;
3381 	ablk->dma_addr.pa  = dma_pa;
3382 }
3383 
3384 void
3385 bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3386 {
3387 	ablk->ioc = ioc;
3388 
3389 	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3390 	bfa_q_qe_init(&ablk->ioc_notify);
3391 	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3392 	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3393 }
3394 
3395 bfa_status_t
3396 bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3397 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3398 {
3399 	struct bfi_ablk_h2i_query_s *m;
3400 
3401 	WARN_ON(!ablk_cfg);
3402 
3403 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3404 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3405 		return BFA_STATUS_IOC_FAILURE;
3406 	}
3407 
3408 	if (ablk->busy) {
3409 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3410 		return  BFA_STATUS_DEVBUSY;
3411 	}
3412 
3413 	ablk->cfg = ablk_cfg;
3414 	ablk->cbfn  = cbfn;
3415 	ablk->cbarg = cbarg;
3416 	ablk->busy  = BFA_TRUE;
3417 
3418 	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3419 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3420 		    bfa_ioc_portid(ablk->ioc));
3421 	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3422 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3423 
3424 	return BFA_STATUS_OK;
3425 }
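
/*
 * Illustrative usage sketch (not part of the driver): the query runs
 * asynchronously; the DMA-backed configuration is byte-swapped and
 * copied into the caller's bfa_ablk_cfg_s before the callback runs.
 * my_ablk_query_done, my_cfg and my_comp are hypothetical:
 *
 *	static void my_ablk_query_done(void *cbarg, bfa_status_t status)
 *	{
 *		struct completion *comp = cbarg;
 *
 *		complete(comp);
 *	}
 *
 *	bfa_ablk_query(ablk, &my_cfg, my_ablk_query_done, &my_comp);
 *	wait_for_completion(&my_comp);
 */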
3426 
3427 bfa_status_t
3428 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3429 		u8 port, enum bfi_pcifn_class personality,
3430 		u16 bw_min, u16 bw_max,
3431 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3432 {
3433 	struct bfi_ablk_h2i_pf_req_s *m;
3434 
3435 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3436 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3437 		return BFA_STATUS_IOC_FAILURE;
3438 	}
3439 
3440 	if (ablk->busy) {
3441 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3442 		return  BFA_STATUS_DEVBUSY;
3443 	}
3444 
3445 	ablk->pcifn = pcifn;
3446 	ablk->cbfn = cbfn;
3447 	ablk->cbarg = cbarg;
3448 	ablk->busy  = BFA_TRUE;
3449 
3450 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3451 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3452 		    bfa_ioc_portid(ablk->ioc));
3453 	m->pers = cpu_to_be16((u16)personality);
3454 	m->bw_min = cpu_to_be16(bw_min);
3455 	m->bw_max = cpu_to_be16(bw_max);
3456 	m->port = port;
3457 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3458 
3459 	return BFA_STATUS_OK;
3460 }
3461 
3462 bfa_status_t
3463 bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3464 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3465 {
3466 	struct bfi_ablk_h2i_pf_req_s *m;
3467 
3468 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3469 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3470 		return BFA_STATUS_IOC_FAILURE;
3471 	}
3472 
3473 	if (ablk->busy) {
3474 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3475 		return  BFA_STATUS_DEVBUSY;
3476 	}
3477 
3478 	ablk->cbfn  = cbfn;
3479 	ablk->cbarg = cbarg;
3480 	ablk->busy  = BFA_TRUE;
3481 
3482 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3483 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3484 		    bfa_ioc_portid(ablk->ioc));
3485 	m->pcifn = (u8)pcifn;
3486 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3487 
3488 	return BFA_STATUS_OK;
3489 }
3490 
3491 bfa_status_t
3492 bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3493 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3494 {
3495 	struct bfi_ablk_h2i_cfg_req_s *m;
3496 
3497 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3498 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3499 		return BFA_STATUS_IOC_FAILURE;
3500 	}
3501 
3502 	if (ablk->busy) {
3503 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3504 		return  BFA_STATUS_DEVBUSY;
3505 	}
3506 
3507 	ablk->cbfn  = cbfn;
3508 	ablk->cbarg = cbarg;
3509 	ablk->busy  = BFA_TRUE;
3510 
3511 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3512 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3513 		    bfa_ioc_portid(ablk->ioc));
3514 	m->mode = (u8)mode;
3515 	m->max_pf = (u8)max_pf;
3516 	m->max_vf = (u8)max_vf;
3517 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3518 
3519 	return BFA_STATUS_OK;
3520 }
3521 
3522 bfa_status_t
3523 bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3524 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3525 {
3526 	struct bfi_ablk_h2i_cfg_req_s *m;
3527 
3528 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3529 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3530 		return BFA_STATUS_IOC_FAILURE;
3531 	}
3532 
3533 	if (ablk->busy) {
3534 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3535 		return  BFA_STATUS_DEVBUSY;
3536 	}
3537 
3538 	ablk->cbfn  = cbfn;
3539 	ablk->cbarg = cbarg;
3540 	ablk->busy  = BFA_TRUE;
3541 
3542 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3543 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3544 		bfa_ioc_portid(ablk->ioc));
3545 	m->port = (u8)port;
3546 	m->mode = (u8)mode;
3547 	m->max_pf = (u8)max_pf;
3548 	m->max_vf = (u8)max_vf;
3549 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3550 
3551 	return BFA_STATUS_OK;
3552 }
3553 
3554 bfa_status_t
3555 bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3556 		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3557 {
3558 	struct bfi_ablk_h2i_pf_req_s *m;
3559 
3560 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3561 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3562 		return BFA_STATUS_IOC_FAILURE;
3563 	}
3564 
3565 	if (ablk->busy) {
3566 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3567 		return  BFA_STATUS_DEVBUSY;
3568 	}
3569 
3570 	ablk->cbfn  = cbfn;
3571 	ablk->cbarg = cbarg;
3572 	ablk->busy  = BFA_TRUE;
3573 
3574 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3575 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3576 		bfa_ioc_portid(ablk->ioc));
3577 	m->pcifn = (u8)pcifn;
3578 	m->bw_min = cpu_to_be16(bw_min);
3579 	m->bw_max = cpu_to_be16(bw_max);
3580 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3581 
3582 	return BFA_STATUS_OK;
3583 }
3584 
3585 bfa_status_t
3586 bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3587 {
3588 	struct bfi_ablk_h2i_optrom_s *m;
3589 
3590 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3591 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3592 		return BFA_STATUS_IOC_FAILURE;
3593 	}
3594 
3595 	if (ablk->busy) {
3596 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3597 		return  BFA_STATUS_DEVBUSY;
3598 	}
3599 
3600 	ablk->cbfn  = cbfn;
3601 	ablk->cbarg = cbarg;
3602 	ablk->busy  = BFA_TRUE;
3603 
3604 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3605 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3606 		bfa_ioc_portid(ablk->ioc));
3607 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3608 
3609 	return BFA_STATUS_OK;
3610 }
3611 
3612 bfa_status_t
3613 bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3614 {
3615 	struct bfi_ablk_h2i_optrom_s *m;
3616 
3617 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3618 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3619 		return BFA_STATUS_IOC_FAILURE;
3620 	}
3621 
3622 	if (ablk->busy) {
3623 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3624 		return  BFA_STATUS_DEVBUSY;
3625 	}
3626 
3627 	ablk->cbfn  = cbfn;
3628 	ablk->cbarg = cbarg;
3629 	ablk->busy  = BFA_TRUE;
3630 
3631 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3632 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3633 		bfa_ioc_portid(ablk->ioc));
3634 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3635 
3636 	return BFA_STATUS_OK;
3637 }
3638 
3639 /*
3640  *	SFP module specific
3641  */
3642 
3643 /* forward declarations */
3644 static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3645 static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3646 static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3647 				enum bfa_port_speed portspeed);
3648 
3649 static void
3650 bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3651 {
3652 	bfa_trc(sfp, sfp->lock);
3653 	if (sfp->cbfn)
3654 		sfp->cbfn(sfp->cbarg, sfp->status);
3655 	sfp->lock = 0;
3656 	sfp->cbfn = NULL;
3657 }
3658 
3659 static void
3660 bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3661 {
3662 	bfa_trc(sfp, sfp->portspeed);
3663 	if (sfp->media) {
3664 		bfa_sfp_media_get(sfp);
3665 		if (sfp->state_query_cbfn)
3666 			sfp->state_query_cbfn(sfp->state_query_cbarg,
3667 					sfp->status);
3668 		sfp->media = NULL;
3669 	}
3670 
3671 	if (sfp->portspeed) {
3672 		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3673 		if (sfp->state_query_cbfn)
3674 			sfp->state_query_cbfn(sfp->state_query_cbarg,
3675 					sfp->status);
3676 		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3677 	}
3678 
3679 	sfp->state_query_lock = 0;
3680 	sfp->state_query_cbfn = NULL;
3681 }
3682 
3683 /*
3684  *	IOC event handler.
3685  */
3686 static void
3687 bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3688 {
3689 	struct bfa_sfp_s *sfp = sfp_arg;
3690 
3691 	bfa_trc(sfp, event);
3692 	bfa_trc(sfp, sfp->lock);
3693 	bfa_trc(sfp, sfp->state_query_lock);
3694 
3695 	switch (event) {
3696 	case BFA_IOC_E_DISABLED:
3697 	case BFA_IOC_E_FAILED:
3698 		if (sfp->lock) {
3699 			sfp->status = BFA_STATUS_IOC_FAILURE;
3700 			bfa_cb_sfp_show(sfp);
3701 		}
3702 
3703 		if (sfp->state_query_lock) {
3704 			sfp->status = BFA_STATUS_IOC_FAILURE;
3705 			bfa_cb_sfp_state_query(sfp);
3706 		}
3707 		break;
3708 
3709 	default:
3710 		break;
3711 	}
3712 }
3713 
3714 /*
3715  * Post SFP State Change Notification (SCN) events to AEN
3716  */
3717 static void
3718 bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3719 {
3720 	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3721 	struct bfa_aen_entry_s  *aen_entry;
3722 	enum bfa_port_aen_event aen_evt = 0;
3723 
3724 	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3725 		      ((u64)rsp->event));
3726 
3727 	bfad_get_aen_entry(bfad, aen_entry);
3728 	if (!aen_entry)
3729 		return;
3730 
3731 	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3732 	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3733 	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3734 
3735 	switch (rsp->event) {
3736 	case BFA_SFP_SCN_INSERTED:
3737 		aen_evt = BFA_PORT_AEN_SFP_INSERT;
3738 		break;
3739 	case BFA_SFP_SCN_REMOVED:
3740 		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3741 		break;
3742 	case BFA_SFP_SCN_FAILED:
3743 		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3744 		break;
3745 	case BFA_SFP_SCN_UNSUPPORT:
3746 		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3747 		break;
3748 	case BFA_SFP_SCN_POM:
3749 		aen_evt = BFA_PORT_AEN_SFP_POM;
3750 		aen_entry->aen_data.port.level = rsp->pomlvl;
3751 		break;
3752 	default:
3753 		bfa_trc(sfp, rsp->event);
3754 		WARN_ON(1);
3755 	}
3756 
3757 	/* Send the AEN notification */
3758 	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3759 				  BFA_AEN_CAT_PORT, aen_evt);
3760 }
3761 
3762 /*
3763  *	SFP get data send
3764  */
3765 static void
3766 bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3767 {
3768 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3769 
3770 	bfa_trc(sfp, req->memtype);
3771 
3772 	/* build host command */
3773 	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3774 			bfa_ioc_portid(sfp->ioc));
3775 
3776 	/* send mbox cmd */
3777 	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3778 }
3779 
3780 /*
3781  *	SFP is valid, read sfp data
3782  */
3783 static void
3784 bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3785 {
3786 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3787 
3788 	WARN_ON(sfp->lock != 0);
3789 	bfa_trc(sfp, sfp->state);
3790 
3791 	sfp->lock = 1;
3792 	sfp->memtype = memtype;
3793 	req->memtype = memtype;
3794 
3795 	/* Setup SG list */
3796 	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3797 
3798 	bfa_sfp_getdata_send(sfp);
3799 }
3800 
3801 /*
3802  *	SFP scn handler
3803  */
3804 static void
3805 bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3806 {
3807 	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3808 
3809 	switch (rsp->event) {
3810 	case BFA_SFP_SCN_INSERTED:
3811 		sfp->state = BFA_SFP_STATE_INSERTED;
3812 		sfp->data_valid = 0;
3813 		bfa_sfp_scn_aen_post(sfp, rsp);
3814 		break;
3815 	case BFA_SFP_SCN_REMOVED:
3816 		sfp->state = BFA_SFP_STATE_REMOVED;
3817 		sfp->data_valid = 0;
3818 		bfa_sfp_scn_aen_post(sfp, rsp);
3819 		break;
3820 	case BFA_SFP_SCN_FAILED:
3821 		sfp->state = BFA_SFP_STATE_FAILED;
3822 		sfp->data_valid = 0;
3823 		bfa_sfp_scn_aen_post(sfp, rsp);
3824 		break;
3825 	case BFA_SFP_SCN_UNSUPPORT:
3826 		sfp->state = BFA_SFP_STATE_UNSUPPORT;
3827 		bfa_sfp_scn_aen_post(sfp, rsp);
3828 		if (!sfp->lock)
3829 			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3830 		break;
3831 	case BFA_SFP_SCN_POM:
3832 		bfa_sfp_scn_aen_post(sfp, rsp);
3833 		break;
3834 	case BFA_SFP_SCN_VALID:
3835 		sfp->state = BFA_SFP_STATE_VALID;
3836 		if (!sfp->lock)
3837 			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3838 		break;
3839 	default:
3840 		bfa_trc(sfp, rsp->event);
3841 		WARN_ON(1);
3842 	}
3843 }
3844 
3845 /*
3846  * SFP show complete
3847  */
3848 static void
3849 bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3850 {
3851 	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3852 
3853 	if (!sfp->lock) {
3854 		/*
3855 		 * receiving response after ioc failure
3856 		 */
3857 		bfa_trc(sfp, sfp->lock);
3858 		return;
3859 	}
3860 
3861 	bfa_trc(sfp, rsp->status);
3862 	if (rsp->status == BFA_STATUS_OK) {
3863 		sfp->data_valid = 1;
3864 		if (sfp->state == BFA_SFP_STATE_VALID)
3865 			sfp->status = BFA_STATUS_OK;
3866 		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3867 			sfp->status = BFA_STATUS_SFP_UNSUPP;
3868 		else
3869 			bfa_trc(sfp, sfp->state);
3870 	} else {
3871 		sfp->data_valid = 0;
3872 		sfp->status = rsp->status;
3873 		/* sfpshow shouldn't change sfp state */
3874 	}
3875 
3876 	bfa_trc(sfp, sfp->memtype);
3877 	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3878 		bfa_trc(sfp, sfp->data_valid);
3879 		if (sfp->data_valid) {
3880 			u32	size = sizeof(struct sfp_mem_s);
3881 			u8 *des = (u8 *) &(sfp->sfpmem);
3882 			memcpy(des, sfp->dbuf_kva, size);
3883 		}
3884 		/*
3885 		 * Queue completion callback.
3886 		 */
3887 		bfa_cb_sfp_show(sfp);
3888 	} else
3889 		sfp->lock = 0;
3890 
3891 	bfa_trc(sfp, sfp->state_query_lock);
3892 	if (sfp->state_query_lock) {
3893 		sfp->state = rsp->state;
3894 		/* Complete callback */
3895 		bfa_cb_sfp_state_query(sfp);
3896 	}
3897 }
3898 
3899 /*
3900  *	SFP query fw sfp state
3901  */
3902 static void
3903 bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3904 {
3905 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3906 
3907 	/* Should not be doing query if not in _INIT state */
3908 	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3909 	WARN_ON(sfp->state_query_lock != 0);
3910 	bfa_trc(sfp, sfp->state);
3911 
3912 	sfp->state_query_lock = 1;
3913 	req->memtype = 0;
3914 
3915 	if (!sfp->lock)
3916 		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3917 }
3918 
3919 static void
3920 bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3921 {
3922 	enum bfa_defs_sfp_media_e *media = sfp->media;
3923 
3924 	*media = BFA_SFP_MEDIA_UNKNOWN;
3925 
3926 	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3927 		*media = BFA_SFP_MEDIA_UNSUPPORT;
3928 	else if (sfp->state == BFA_SFP_STATE_VALID) {
3929 		union sfp_xcvr_e10g_code_u e10g;
3930 		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3931 		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3932 				(sfpmem->srlid_base.xcvr[5] >> 1);
3933 
3934 		e10g.b = sfpmem->srlid_base.xcvr[0];
3935 		bfa_trc(sfp, e10g.b);
3936 		bfa_trc(sfp, xmtr_tech);
3937 		/* check fc transmitter tech */
3938 		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3939 		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3940 		    (xmtr_tech & SFP_XMTR_TECH_CA))
3941 			*media = BFA_SFP_MEDIA_CU;
3942 		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3943 			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3944 			*media = BFA_SFP_MEDIA_EL;
3945 		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3946 			 (xmtr_tech & SFP_XMTR_TECH_LC))
3947 			*media = BFA_SFP_MEDIA_LW;
3948 		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3949 			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3950 			 (xmtr_tech & SFP_XMTR_TECH_SA))
3951 			*media = BFA_SFP_MEDIA_SW;
3952 		/* Check 10G Ethernet Compliance code */
3953 		else if (e10g.r.e10g_sr)
3954 			*media = BFA_SFP_MEDIA_SW;
3955 		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3956 			*media = BFA_SFP_MEDIA_LW;
3957 		else if (e10g.r.e10g_unall)
3958 			*media = BFA_SFP_MEDIA_UNKNOWN;
3959 		else
3960 			bfa_trc(sfp, 0);
3961 	} else
3962 		bfa_trc(sfp, sfp->state);
3963 }
3964 
3965 static bfa_status_t
3966 bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3967 {
3968 	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3969 	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3970 	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3971 	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3972 
3973 	if (portspeed == BFA_PORT_SPEED_10GBPS) {
3974 		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3975 			return BFA_STATUS_OK;
3976 		else {
3977 			bfa_trc(sfp, e10g.b);
3978 			return BFA_STATUS_UNSUPP_SPEED;
3979 		}
3980 	}
3981 	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3982 	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3983 	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3984 	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3985 	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3986 		return BFA_STATUS_OK;
3987 	else {
3988 		bfa_trc(sfp, portspeed);
3989 		bfa_trc(sfp, fc3.b);
3990 		bfa_trc(sfp, e10g.b);
3991 		return BFA_STATUS_UNSUPP_SPEED;
3992 	}
3993 }
3994 
3995 /*
3996  *	SFP hmbox handler
3997  */
3998 void
3999 bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
4000 {
4001 	struct bfa_sfp_s *sfp = sfparg;
4002 
4003 	switch (msg->mh.msg_id) {
4004 	case BFI_SFP_I2H_SHOW:
4005 		bfa_sfp_show_comp(sfp, msg);
4006 		break;
4007 
4008 	case BFI_SFP_I2H_SCN:
4009 		bfa_sfp_scn(sfp, msg);
4010 		break;
4011 
4012 	default:
4013 		bfa_trc(sfp, msg->mh.msg_id);
4014 		WARN_ON(1);
4015 	}
4016 }
4017 
4018 /*
4019  *	Return DMA memory needed by sfp module.
4020  */
4021 u32
4022 bfa_sfp_meminfo(void)
4023 {
4024 	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4025 }
4026 
4027 /*
4028  *	Attach virtual and physical memory for SFP.
4029  */
4030 void
4031 bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4032 		struct bfa_trc_mod_s *trcmod)
4033 {
4034 	sfp->dev = dev;
4035 	sfp->ioc = ioc;
4036 	sfp->trcmod = trcmod;
4037 
4038 	sfp->cbfn = NULL;
4039 	sfp->cbarg = NULL;
4040 	sfp->sfpmem = NULL;
4041 	sfp->lock = 0;
4042 	sfp->data_valid = 0;
4043 	sfp->state = BFA_SFP_STATE_INIT;
4044 	sfp->state_query_lock = 0;
4045 	sfp->state_query_cbfn = NULL;
4046 	sfp->state_query_cbarg = NULL;
4047 	sfp->media = NULL;
4048 	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4049 	sfp->is_elb = BFA_FALSE;
4050 
4051 	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4052 	bfa_q_qe_init(&sfp->ioc_notify);
4053 	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4054 	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4055 }
4056 
4057 /*
4058  *	Claim Memory for SFP
4059  */
4060 void
4061 bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4062 {
4063 	sfp->dbuf_kva   = dm_kva;
4064 	sfp->dbuf_pa    = dm_pa;
4065 	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4066 
4067 	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4068 	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4069 }
4070 
4071 /*
4072  * Show SFP eeprom content
4073  *
4074  * @param[in] sfp   - bfa sfp module
4075  *
4076  * @param[out] sfpmem - sfp eeprom data
4077  *
4078  */
4079 bfa_status_t
4080 bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4081 		bfa_cb_sfp_t cbfn, void *cbarg)
4082 {
4084 	if (!bfa_ioc_is_operational(sfp->ioc)) {
4085 		bfa_trc(sfp, 0);
4086 		return BFA_STATUS_IOC_NON_OP;
4087 	}
4088 
4089 	if (sfp->lock) {
4090 		bfa_trc(sfp, 0);
4091 		return BFA_STATUS_DEVBUSY;
4092 	}
4093 
4094 	sfp->cbfn = cbfn;
4095 	sfp->cbarg = cbarg;
4096 	sfp->sfpmem = sfpmem;
4097 
4098 	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4099 	return BFA_STATUS_OK;
4100 }
4101 
4102 /*
4103  * Return SFP Media type
4104  *
4105  * @param[in] sfp   - bfa sfp module
4106  *
4107  * @param[out] media - SFP media type
4108  *
4109  */
4110 bfa_status_t
4111 bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4112 		bfa_cb_sfp_t cbfn, void *cbarg)
4113 {
4114 	if (!bfa_ioc_is_operational(sfp->ioc)) {
4115 		bfa_trc(sfp, 0);
4116 		return BFA_STATUS_IOC_NON_OP;
4117 	}
4118 
4119 	sfp->media = media;
4120 	if (sfp->state == BFA_SFP_STATE_INIT) {
4121 		if (sfp->state_query_lock) {
4122 			bfa_trc(sfp, 0);
4123 			return BFA_STATUS_DEVBUSY;
4124 		} else {
4125 			sfp->state_query_cbfn = cbfn;
4126 			sfp->state_query_cbarg = cbarg;
4127 			bfa_sfp_state_query(sfp);
4128 			return BFA_STATUS_SFP_NOT_READY;
4129 		}
4130 	}
4131 
4132 	bfa_sfp_media_get(sfp);
4133 	return BFA_STATUS_OK;
4134 }
4135 
4136 /*
4137  * Check if the user-set port speed is allowed by the SFP
4138  *
4139  * @param[in] sfp   - bfa sfp module
4140  * @param[in] portspeed - port speed from user
4141  *
4142  */
4143 bfa_status_t
4144 bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4145 		bfa_cb_sfp_t cbfn, void *cbarg)
4146 {
4147 	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4148 
4149 	if (!bfa_ioc_is_operational(sfp->ioc))
4150 		return BFA_STATUS_IOC_NON_OP;
4151 
4152 	/* For Mezz cards, all speeds are allowed */
4153 	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4154 		return BFA_STATUS_OK;
4155 
4156 	/* Check SFP state */
4157 	sfp->portspeed = portspeed;
4158 	if (sfp->state == BFA_SFP_STATE_INIT) {
4159 		if (sfp->state_query_lock) {
4160 			bfa_trc(sfp, 0);
4161 			return BFA_STATUS_DEVBUSY;
4162 		} else {
4163 			sfp->state_query_cbfn = cbfn;
4164 			sfp->state_query_cbarg = cbarg;
4165 			bfa_sfp_state_query(sfp);
4166 			return BFA_STATUS_SFP_NOT_READY;
4167 		}
4168 	}
4169 
4170 	if (sfp->state == BFA_SFP_STATE_REMOVED ||
4171 	    sfp->state == BFA_SFP_STATE_FAILED) {
4172 		bfa_trc(sfp, sfp->state);
4173 		return BFA_STATUS_NO_SFP_DEV;
4174 	}
4175 
4176 	if (sfp->state == BFA_SFP_STATE_INSERTED) {
4177 		bfa_trc(sfp, sfp->state);
4178 		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
4179 	}
4180 
4181 	/* For eloopback, all speeds are allowed */
4182 	if (sfp->is_elb)
4183 		return BFA_STATUS_OK;
4184 
4185 	return bfa_sfp_speed_valid(sfp, portspeed);
4186 }
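/*
 * To summarize the checks above: in INIT state the SFP state is queried
 * first (BFA_STATUS_SFP_NOT_READY), REMOVED/FAILED map to
 * BFA_STATUS_NO_SFP_DEV, INSERTED (still reading the module) maps to
 * BFA_STATUS_DEVBUSY; otherwise the requested speed is validated against
 * the transceiver capability codes in bfa_sfp_speed_valid().
 */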
4187 
4188 /*
4189  *	Flash module specific
4190  */
4191 
4192 /*
4193  * The FLASH DMA buffer should be big enough to hold both the MFG block
4194  * and the ASIC block (64KB) at the same time, and should also be
4195  * 2KB-aligned so that a write segment does not cross a sector boundary.
4196  */
4197 #define BFA_FLASH_SEG_SZ	2048
4198 #define BFA_FLASH_DMA_BUF_SZ	\
4199 	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
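/*
 * Illustrative arithmetic, assuming the usual power-of-two round-up,
 * i.e. BFA_ROUNDUP(_l, _s) == (((_l) + (_s) - 1) & ~((_s) - 1)): if
 * sizeof(struct bfa_mfg_block_s) were 0x100, the buffer size would be
 * BFA_ROUNDUP(0x10000 + 0x100, 2048) = 0x10800 bytes, a whole number
 * of 2KB segments, so no write segment straddles a sector boundary.
 */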
4200 
4201 static void
4202 bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4203 			int inst, int type)
4204 {
4205 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4206 	struct bfa_aen_entry_s  *aen_entry;
4207 
4208 	bfad_get_aen_entry(bfad, aen_entry);
4209 	if (!aen_entry)
4210 		return;
4211 
4212 	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4213 	aen_entry->aen_data.audit.partition_inst = inst;
4214 	aen_entry->aen_data.audit.partition_type = type;
4215 
4216 	/* Send the AEN notification */
4217 	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4218 				  BFA_AEN_CAT_AUDIT, event);
4219 }
4220 
4221 static void
4222 bfa_flash_cb(struct bfa_flash_s *flash)
4223 {
4224 	flash->op_busy = 0;
4225 	if (flash->cbfn)
4226 		flash->cbfn(flash->cbarg, flash->status);
4227 }
4228 
4229 static void
4230 bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4231 {
4232 	struct bfa_flash_s	*flash = cbarg;
4233 
4234 	bfa_trc(flash, event);
4235 	switch (event) {
4236 	case BFA_IOC_E_DISABLED:
4237 	case BFA_IOC_E_FAILED:
4238 		if (flash->op_busy) {
4239 			flash->status = BFA_STATUS_IOC_FAILURE;
4240 			flash->cbfn(flash->cbarg, flash->status);
4241 			flash->op_busy = 0;
4242 		}
4243 		break;
4244 
4245 	default:
4246 		break;
4247 	}
4248 }
4249 
4250 /*
4251  * Send flash attribute query request.
4252  *
4253  * @param[in] cbarg - callback argument
4254  */
4255 static void
4256 bfa_flash_query_send(void *cbarg)
4257 {
4258 	struct bfa_flash_s *flash = cbarg;
4259 	struct bfi_flash_query_req_s *msg =
4260 			(struct bfi_flash_query_req_s *) flash->mb.msg;
4261 
4262 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4263 		bfa_ioc_portid(flash->ioc));
4264 	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4265 		flash->dbuf_pa);
4266 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4267 }
4268 
4269 /*
4270  * Send flash write request.
4271  *
4272  * @param[in] flash - flash structure
4273  */
4274 static void
4275 bfa_flash_write_send(struct bfa_flash_s *flash)
4276 {
4277 	struct bfi_flash_write_req_s *msg =
4278 			(struct bfi_flash_write_req_s *) flash->mb.msg;
4279 	u32	len;
4280 
4281 	msg->type = cpu_to_be32(flash->type);
4282 	msg->instance = flash->instance;
4283 	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4284 	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4285 		flash->residue : BFA_FLASH_DMA_BUF_SZ;
4286 	msg->length = cpu_to_be32(len);
4287 
4288 	/* indicate if it's the last msg of the whole write operation */
4289 	msg->last = (len == flash->residue) ? 1 : 0;
4290 
4291 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4292 			bfa_ioc_portid(flash->ioc));
4293 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4294 	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4295 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4296 
4297 	flash->residue -= len;
4298 	flash->offset += len;
4299 }
4300 
4301 /*
4302  * Send flash read request.
4303  *
4304  * @param[in] cbarg - callback argument
4305  */
4306 static void
4307 bfa_flash_read_send(void *cbarg)
4308 {
4309 	struct bfa_flash_s *flash = cbarg;
4310 	struct bfi_flash_read_req_s *msg =
4311 			(struct bfi_flash_read_req_s *) flash->mb.msg;
4312 	u32	len;
4313 
4314 	msg->type = cpu_to_be32(flash->type);
4315 	msg->instance = flash->instance;
4316 	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4317 	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4318 			flash->residue : BFA_FLASH_DMA_BUF_SZ;
4319 	msg->length = cpu_to_be32(len);
4320 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4321 		bfa_ioc_portid(flash->ioc));
4322 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4323 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4324 }
4325 
4326 /*
4327  * Send flash erase request.
4328  *
4329  * @param[in] cbarg - callback argument
4330  */
4331 static void
4332 bfa_flash_erase_send(void *cbarg)
4333 {
4334 	struct bfa_flash_s *flash = cbarg;
4335 	struct bfi_flash_erase_req_s *msg =
4336 			(struct bfi_flash_erase_req_s *) flash->mb.msg;
4337 
4338 	msg->type = cpu_to_be32(flash->type);
4339 	msg->instance = flash->instance;
4340 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4341 			bfa_ioc_portid(flash->ioc));
4342 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4343 }
4344 
4345 /*
4346  * Process flash response messages upon receiving interrupts.
4347  *
4348  * @param[in] flasharg - flash structure
4349  * @param[in] msg - message structure
4350  */
4351 static void
4352 bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4353 {
4354 	struct bfa_flash_s *flash = flasharg;
4355 	u32	status;
4356 
4357 	union {
4358 		struct bfi_flash_query_rsp_s *query;
4359 		struct bfi_flash_erase_rsp_s *erase;
4360 		struct bfi_flash_write_rsp_s *write;
4361 		struct bfi_flash_read_rsp_s *read;
4362 		struct bfi_flash_event_s *event;
4363 		struct bfi_mbmsg_s   *msg;
4364 	} m;
4365 
4366 	m.msg = msg;
4367 	bfa_trc(flash, msg->mh.msg_id);
4368 
4369 	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4370 		/* receiving response after ioc failure */
4371 		bfa_trc(flash, 0x9999);
4372 		return;
4373 	}
4374 
4375 	switch (msg->mh.msg_id) {
4376 	case BFI_FLASH_I2H_QUERY_RSP:
4377 		status = be32_to_cpu(m.query->status);
4378 		bfa_trc(flash, status);
4379 		if (status == BFA_STATUS_OK) {
4380 			u32	i;
4381 			struct bfa_flash_attr_s *attr, *f;
4382 
4383 			attr = (struct bfa_flash_attr_s *) flash->ubuf;
4384 			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4385 			attr->status = be32_to_cpu(f->status);
4386 			attr->npart = be32_to_cpu(f->npart);
4387 			bfa_trc(flash, attr->status);
4388 			bfa_trc(flash, attr->npart);
4389 			for (i = 0; i < attr->npart; i++) {
4390 				attr->part[i].part_type =
4391 					be32_to_cpu(f->part[i].part_type);
4392 				attr->part[i].part_instance =
4393 					be32_to_cpu(f->part[i].part_instance);
4394 				attr->part[i].part_off =
4395 					be32_to_cpu(f->part[i].part_off);
4396 				attr->part[i].part_size =
4397 					be32_to_cpu(f->part[i].part_size);
4398 				attr->part[i].part_len =
4399 					be32_to_cpu(f->part[i].part_len);
4400 				attr->part[i].part_status =
4401 					be32_to_cpu(f->part[i].part_status);
4402 			}
4403 		}
4404 		flash->status = status;
4405 		bfa_flash_cb(flash);
4406 		break;
4407 	case BFI_FLASH_I2H_ERASE_RSP:
4408 		status = be32_to_cpu(m.erase->status);
4409 		bfa_trc(flash, status);
4410 		flash->status = status;
4411 		bfa_flash_cb(flash);
4412 		break;
4413 	case BFI_FLASH_I2H_WRITE_RSP:
4414 		status = be32_to_cpu(m.write->status);
4415 		bfa_trc(flash, status);
4416 		if (status != BFA_STATUS_OK || flash->residue == 0) {
4417 			flash->status = status;
4418 			bfa_flash_cb(flash);
4419 		} else {
4420 			bfa_trc(flash, flash->offset);
4421 			bfa_flash_write_send(flash);
4422 		}
4423 		break;
4424 	case BFI_FLASH_I2H_READ_RSP:
4425 		status = be32_to_cpu(m.read->status);
4426 		bfa_trc(flash, status);
4427 		if (status != BFA_STATUS_OK) {
4428 			flash->status = status;
4429 			bfa_flash_cb(flash);
4430 		} else {
4431 			u32 len = be32_to_cpu(m.read->length);
4432 			bfa_trc(flash, flash->offset);
4433 			bfa_trc(flash, len);
4434 			memcpy(flash->ubuf + flash->offset,
4435 				flash->dbuf_kva, len);
4436 			flash->residue -= len;
4437 			flash->offset += len;
4438 			if (flash->residue == 0) {
4439 				flash->status = status;
4440 				bfa_flash_cb(flash);
4441 			} else
4442 				bfa_flash_read_send(flash);
4443 		}
4444 		break;
4445 	case BFI_FLASH_I2H_BOOT_VER_RSP:
4446 		break;
4447 	case BFI_FLASH_I2H_EVENT:
4448 		status = be32_to_cpu(m.event->status);
4449 		bfa_trc(flash, status);
4450 		if (status == BFA_STATUS_BAD_FWCFG)
4451 			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4452 		else if (status == BFA_STATUS_INVALID_VENDOR) {
4453 			u32 param;
4454 			param = be32_to_cpu(m.event->param);
4455 			bfa_trc(flash, param);
4456 			bfa_ioc_aen_post(flash->ioc,
4457 				BFA_IOC_AEN_INVALID_VENDOR);
4458 		}
4459 		break;
4460 
4461 	default:
4462 		WARN_ON(1);
4463 	}
4464 }
4465 
4466 /*
4467  * Flash memory info API.
4468  *
4469  * @param[in] mincfg - minimal cfg variable
4470  */
4471 u32
4472 bfa_flash_meminfo(bfa_boolean_t mincfg)
4473 {
4474 	/* min driver doesn't need flash */
4475 	if (mincfg)
4476 		return 0;
4477 	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4478 }
4479 
4480 /*
4481  * Flash attach API.
4482  *
4483  * @param[in] flash - flash structure
4484  * @param[in] ioc  - ioc structure
4485  * @param[in] dev  - device structure
4486  * @param[in] trcmod - trace module
4487  * @param[in] mincfg - minimal cfg variable
4488  */
4489 void
4490 bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4491 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4492 {
4493 	flash->ioc = ioc;
4494 	flash->trcmod = trcmod;
4495 	flash->cbfn = NULL;
4496 	flash->cbarg = NULL;
4497 	flash->op_busy = 0;
4498 
4499 	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4500 	bfa_q_qe_init(&flash->ioc_notify);
4501 	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4502 	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4503 
4504 	/* min driver doesn't need flash */
4505 	if (mincfg) {
4506 		flash->dbuf_kva = NULL;
4507 		flash->dbuf_pa = 0;
4508 	}
4509 }
4510 
4511 /*
4512  * Claim memory for flash
4513  *
4514  * @param[in] flash - flash structure
4515  * @param[in] dm_kva - pointer to virtual memory address
4516  * @param[in] dm_pa - physical memory address
4517  * @param[in] mincfg - minimal cfg variable
4518  */
4519 void
4520 bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4521 		bfa_boolean_t mincfg)
4522 {
4523 	if (mincfg)
4524 		return;
4525 
4526 	flash->dbuf_kva = dm_kva;
4527 	flash->dbuf_pa = dm_pa;
4528 	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4529 	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4530 	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4531 }
4532 
4533 /*
4534  * Get flash attribute.
4535  *
4536  * @param[in] flash - flash structure
4537  * @param[in] attr - flash attribute structure
4538  * @param[in] cbfn - callback function
4539  * @param[in] cbarg - callback argument
4540  *
4541  * Return status.
4542  */
4543 bfa_status_t
4544 bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4545 		bfa_cb_flash_t cbfn, void *cbarg)
4546 {
4547 	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4548 
4549 	if (!bfa_ioc_is_operational(flash->ioc))
4550 		return BFA_STATUS_IOC_NON_OP;
4551 
4552 	if (flash->op_busy) {
4553 		bfa_trc(flash, flash->op_busy);
4554 		return BFA_STATUS_DEVBUSY;
4555 	}
4556 
4557 	flash->op_busy = 1;
4558 	flash->cbfn = cbfn;
4559 	flash->cbarg = cbarg;
4560 	flash->ubuf = (u8 *) attr;
4561 	bfa_flash_query_send(flash);
4562 
4563 	return BFA_STATUS_OK;
4564 }
4565 
4566 /*
4567  * Erase flash partition.
4568  *
4569  * @param[in] flash - flash structure
4570  * @param[in] type - flash partition type
4571  * @param[in] instance - flash partition instance
4572  * @param[in] cbfn - callback function
4573  * @param[in] cbarg - callback argument
4574  *
4575  * Return status.
4576  */
4577 bfa_status_t
4578 bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4579 		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4580 {
4581 	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4582 	bfa_trc(flash, type);
4583 	bfa_trc(flash, instance);
4584 
4585 	if (!bfa_ioc_is_operational(flash->ioc))
4586 		return BFA_STATUS_IOC_NON_OP;
4587 
4588 	if (flash->op_busy) {
4589 		bfa_trc(flash, flash->op_busy);
4590 		return BFA_STATUS_DEVBUSY;
4591 	}
4592 
4593 	flash->op_busy = 1;
4594 	flash->cbfn = cbfn;
4595 	flash->cbarg = cbarg;
4596 	flash->type = type;
4597 	flash->instance = instance;
4598 
4599 	bfa_flash_erase_send(flash);
4600 	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4601 				instance, type);
4602 	return BFA_STATUS_OK;
4603 }
4604 
4605 /*
4606  * Update flash partition.
4607  *
4608  * @param[in] flash - flash structure
4609  * @param[in] type - flash partition type
4610  * @param[in] instance - flash partition instance
4611  * @param[in] buf - update data buffer
4612  * @param[in] len - data buffer length
4613  * @param[in] offset - offset relative to the partition starting address
4614  * @param[in] cbfn - callback function
4615  * @param[in] cbarg - callback argument
4616  *
4617  * Return status.
4618  */
4619 bfa_status_t
4620 bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4621 		u8 instance, void *buf, u32 len, u32 offset,
4622 		bfa_cb_flash_t cbfn, void *cbarg)
4623 {
4624 	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4625 	bfa_trc(flash, type);
4626 	bfa_trc(flash, instance);
4627 	bfa_trc(flash, len);
4628 	bfa_trc(flash, offset);
4629 
4630 	if (!bfa_ioc_is_operational(flash->ioc))
4631 		return BFA_STATUS_IOC_NON_OP;
4632 
4633 	/*
4634 	 * 'len' must be on a word (4-byte) boundary
4635 	 * 'offset' must be on a sector (16KB) boundary
4636 	 */
4637 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4638 		return BFA_STATUS_FLASH_BAD_LEN;
4639 
4640 	if (type == BFA_FLASH_PART_MFG)
4641 		return BFA_STATUS_EINVAL;
4642 
4643 	if (flash->op_busy) {
4644 		bfa_trc(flash, flash->op_busy);
4645 		return BFA_STATUS_DEVBUSY;
4646 	}
4647 
4648 	flash->op_busy = 1;
4649 	flash->cbfn = cbfn;
4650 	flash->cbarg = cbarg;
4651 	flash->type = type;
4652 	flash->instance = instance;
4653 	flash->residue = len;
4654 	flash->offset = 0;
4655 	flash->addr_off = offset;
4656 	flash->ubuf = buf;
4657 
4658 	bfa_flash_write_send(flash);
4659 	return BFA_STATUS_OK;
4660 }
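/*
 * Example usage (a minimal sketch; 'my_flash_cb', 'img' and 'img_len'
 * are hypothetical): 'len' must be 4-byte aligned and 'offset'
 * 16KB-aligned, per the checks above.
 *
 *	static void my_flash_cb(void *cbarg, bfa_status_t status)
 *	{
 *		if (status != BFA_STATUS_OK)
 *			pr_err("flash update failed: %d\n", status);
 *	}
 *
 *	status = bfa_flash_update_part(flash, BFA_FLASH_PART_DRV, 0,
 *				img, img_len, 0, my_flash_cb, NULL);
 */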
4661 
4662 /*
4663  * Read flash partition.
4664  *
4665  * @param[in] flash - flash structure
4666  * @param[in] type - flash partition type
4667  * @param[in] instance - flash partition instance
4668  * @param[in] buf - read data buffer
4669  * @param[in] len - data buffer length
4670  * @param[in] offset - offset relative to the partition starting address
4671  * @param[in] cbfn - callback function
4672  * @param[in] cbarg - callback argument
4673  *
4674  * Return status.
4675  */
4676 bfa_status_t
4677 bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4678 		u8 instance, void *buf, u32 len, u32 offset,
4679 		bfa_cb_flash_t cbfn, void *cbarg)
4680 {
4681 	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4682 	bfa_trc(flash, type);
4683 	bfa_trc(flash, instance);
4684 	bfa_trc(flash, len);
4685 	bfa_trc(flash, offset);
4686 
4687 	if (!bfa_ioc_is_operational(flash->ioc))
4688 		return BFA_STATUS_IOC_NON_OP;
4689 
4690 	/*
4691 	 * 'len' must be on a word (4-byte) boundary
4692 	 * 'offset' must be on a sector (16KB) boundary
4693 	 */
4694 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4695 		return BFA_STATUS_FLASH_BAD_LEN;
4696 
4697 	if (flash->op_busy) {
4698 		bfa_trc(flash, flash->op_busy);
4699 		return BFA_STATUS_DEVBUSY;
4700 	}
4701 
4702 	flash->op_busy = 1;
4703 	flash->cbfn = cbfn;
4704 	flash->cbarg = cbarg;
4705 	flash->type = type;
4706 	flash->instance = instance;
4707 	flash->residue = len;
4708 	flash->offset = 0;
4709 	flash->addr_off = offset;
4710 	flash->ubuf = buf;
4711 	bfa_flash_read_send(flash);
4712 
4713 	return BFA_STATUS_OK;
4714 }
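/*
 * For an in-tree caller see bfa_dconf_sm_uninit() below, which reads
 * the BFA_FLASH_PART_DRV partition via bfa_flash_read_part() with
 * bfa_dconf_init_cb() as the completion callback.
 */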
4715 
4716 /*
4717  *	DIAG module specific
4718  */
4719 
4720 #define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
4721 #define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
4722 
4723 /* IOC event handler */
4724 static void
4725 bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4726 {
4727 	struct bfa_diag_s *diag = diag_arg;
4728 
4729 	bfa_trc(diag, event);
4730 	bfa_trc(diag, diag->block);
4731 	bfa_trc(diag, diag->fwping.lock);
4732 	bfa_trc(diag, diag->tsensor.lock);
4733 
4734 	switch (event) {
4735 	case BFA_IOC_E_DISABLED:
4736 	case BFA_IOC_E_FAILED:
4737 		if (diag->fwping.lock) {
4738 			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4739 			diag->fwping.cbfn(diag->fwping.cbarg,
4740 					diag->fwping.status);
4741 			diag->fwping.lock = 0;
4742 		}
4743 
4744 		if (diag->tsensor.lock) {
4745 			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4746 			diag->tsensor.cbfn(diag->tsensor.cbarg,
4747 					   diag->tsensor.status);
4748 			diag->tsensor.lock = 0;
4749 		}
4750 
4751 		if (diag->block) {
4752 			if (diag->timer_active) {
4753 				bfa_timer_stop(&diag->timer);
4754 				diag->timer_active = 0;
4755 			}
4756 
4757 			diag->status = BFA_STATUS_IOC_FAILURE;
4758 			diag->cbfn(diag->cbarg, diag->status);
4759 			diag->block = 0;
4760 		}
4761 		break;
4762 
4763 	default:
4764 		break;
4765 	}
4766 }
4767 
4768 static void
4769 bfa_diag_memtest_done(void *cbarg)
4770 {
4771 	struct bfa_diag_s *diag = cbarg;
4772 	struct bfa_ioc_s  *ioc = diag->ioc;
4773 	struct bfa_diag_memtest_result *res = diag->result;
4774 	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
4775 	u32	pgnum, pgoff, i;
4776 
4777 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4778 	pgoff = PSS_SMEM_PGOFF(loff);
4779 
4780 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4781 
4782 	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4783 			 sizeof(u32)); i++) {
4784 		/* read test result from smem */
4785 		*((u32 *) res + i) =
4786 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4787 		loff += sizeof(u32);
4788 	}
4789 
4790 	/* Reset IOC fwstates to BFI_IOC_UNINIT */
4791 	bfa_ioc_reset_fwstate(ioc);
4792 
4793 	res->status = swab32(res->status);
4794 	bfa_trc(diag, res->status);
4795 
4796 	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4797 		diag->status = BFA_STATUS_OK;
4798 	else {
4799 		diag->status = BFA_STATUS_MEMTEST_FAILED;
4800 		res->addr = swab32(res->addr);
4801 		res->exp = swab32(res->exp);
4802 		res->act = swab32(res->act);
4803 		res->err_status = swab32(res->err_status);
4804 		res->err_status1 = swab32(res->err_status1);
4805 		res->err_addr = swab32(res->err_addr);
4806 		bfa_trc(diag, res->addr);
4807 		bfa_trc(diag, res->exp);
4808 		bfa_trc(diag, res->act);
4809 		bfa_trc(diag, res->err_status);
4810 		bfa_trc(diag, res->err_status1);
4811 		bfa_trc(diag, res->err_addr);
4812 	}
4813 	diag->timer_active = 0;
4814 	diag->cbfn(diag->cbarg, diag->status);
4815 	diag->block = 0;
4816 }
4817 
4818 /*
4819  * Firmware ping
4820  */
4821 
4822 /*
4823  * Perform DMA test directly
4824  */
4825 static void
4826 diag_fwping_send(struct bfa_diag_s *diag)
4827 {
4828 	struct bfi_diag_fwping_req_s *fwping_req;
4829 	u32	i;
4830 
4831 	bfa_trc(diag, diag->fwping.dbuf_pa);
4832 
4833 	/* fill DMA area with pattern */
4834 	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4835 		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4836 
4837 	/* Fill mbox msg */
4838 	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4839 
4840 	/* Setup SG list */
4841 	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4842 			diag->fwping.dbuf_pa);
4843 	/* Set up dma count */
4844 	fwping_req->count = cpu_to_be32(diag->fwping.count);
4845 	/* Set up data pattern */
4846 	fwping_req->data = diag->fwping.data;
4847 
4848 	/* build host command */
4849 	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4850 		bfa_ioc_portid(diag->ioc));
4851 
4852 	/* send mbox cmd */
4853 	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4854 }
4855 
4856 static void
4857 diag_fwping_comp(struct bfa_diag_s *diag,
4858 		 struct bfi_diag_fwping_rsp_s *diag_rsp)
4859 {
4860 	u32	rsp_data = diag_rsp->data;
4861 	u8	rsp_dma_status = diag_rsp->dma_status;
4862 
4863 	bfa_trc(diag, rsp_data);
4864 	bfa_trc(diag, rsp_dma_status);
4865 
4866 	if (rsp_dma_status == BFA_STATUS_OK) {
4867 		u32	i, pat;
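		/* the check assumes fw inverts the pattern on each pass */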
4868 		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4869 			diag->fwping.data;
4870 		/* Check mbox data */
4871 		if (diag->fwping.data != rsp_data) {
4872 			bfa_trc(diag, rsp_data);
4873 			diag->fwping.result->dmastatus =
4874 					BFA_STATUS_DATACORRUPTED;
4875 			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4876 			diag->fwping.cbfn(diag->fwping.cbarg,
4877 					diag->fwping.status);
4878 			diag->fwping.lock = 0;
4879 			return;
4880 		}
4881 		/* Check dma pattern */
4882 		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4883 			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4884 				bfa_trc(diag, i);
4885 				bfa_trc(diag, pat);
4886 				bfa_trc(diag,
4887 					*((u32 *)diag->fwping.dbuf_kva + i));
4888 				diag->fwping.result->dmastatus =
4889 						BFA_STATUS_DATACORRUPTED;
4890 				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4891 				diag->fwping.cbfn(diag->fwping.cbarg,
4892 						diag->fwping.status);
4893 				diag->fwping.lock = 0;
4894 				return;
4895 			}
4896 		}
4897 		diag->fwping.result->dmastatus = BFA_STATUS_OK;
4898 		diag->fwping.status = BFA_STATUS_OK;
4899 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4900 		diag->fwping.lock = 0;
4901 	} else {
4902 		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4903 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4904 		diag->fwping.lock = 0;
4905 	}
4906 }
4907 
4908 /*
4909  * Temperature Sensor
4910  */
4911 
4912 static void
4913 diag_tempsensor_send(struct bfa_diag_s *diag)
4914 {
4915 	struct bfi_diag_ts_req_s *msg;
4916 
4917 	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4918 	bfa_trc(diag, msg->temp);
4919 	/* build host command */
4920 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4921 		bfa_ioc_portid(diag->ioc));
4922 	/* send mbox cmd */
4923 	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4924 }
4925 
4926 static void
4927 diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4928 {
4929 	if (!diag->tsensor.lock) {
4930 		/* receiving response after ioc failure */
4931 		bfa_trc(diag, diag->tsensor.lock);
4932 		return;
4933 	}
4934 
4935 	/*
4936 	 * The ASIC junction tempsensor is a register read operation,
4937 	 * so it always returns OK.
4938 	 */
4939 	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4940 	diag->tsensor.temp->ts_junc = rsp->ts_junc;
4941 	diag->tsensor.temp->ts_brd = rsp->ts_brd;
4942 
4943 	if (rsp->ts_brd) {
4944 		/* tsensor.temp->status is brd_temp status */
4945 		diag->tsensor.temp->status = rsp->status;
4946 		if (rsp->status == BFA_STATUS_OK) {
4947 			diag->tsensor.temp->brd_temp =
4948 				be16_to_cpu(rsp->brd_temp);
4949 		} else
4950 			diag->tsensor.temp->brd_temp = 0;
4951 	}
4952 
4953 	bfa_trc(diag, rsp->status);
4954 	bfa_trc(diag, rsp->ts_junc);
4955 	bfa_trc(diag, rsp->temp);
4956 	bfa_trc(diag, rsp->ts_brd);
4957 	bfa_trc(diag, rsp->brd_temp);
4958 
4959 	/* tsensor status is always good because we always have the junction temp */
4960 	diag->tsensor.status = BFA_STATUS_OK;
4961 	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4962 	diag->tsensor.lock = 0;
4963 }
4964 
4965 /*
4966  *	LED Test command
4967  */
4968 static void
4969 diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4970 {
4971 	struct bfi_diag_ledtest_req_s  *msg;
4972 
4973 	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4974 	/* build host command */
4975 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4976 			bfa_ioc_portid(diag->ioc));
4977 
4978 	/*
4979 	 * convert the freq from N blinks per 10 sec to the
4980 	 * crossbow ontime value. We do it here because a division is needed.
4981 	 */
4982 	if (ledtest->freq)
4983 		ledtest->freq = 500 / ledtest->freq;
4984 
4985 	if (ledtest->freq == 0)
4986 		ledtest->freq = 1;
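	/*
	 * Worked example: freq = 10 blinks per 10 sec gives an ontime
	 * value of 500 / 10 = 50; a freq above 500 would divide down
	 * to 0 and is clamped to 1 above.
	 */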
4987 
4988 	bfa_trc(diag, ledtest->freq);
4989 	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4990 	msg->cmd = (u8) ledtest->cmd;
4991 	msg->color = (u8) ledtest->color;
4992 	msg->portid = bfa_ioc_portid(diag->ioc);
4993 	msg->led = ledtest->led;
4994 	msg->freq = cpu_to_be16(ledtest->freq);
4995 
4996 	/* send mbox cmd */
4997 	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4998 }
4999 
5000 static void
5001 diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
5002 {
5003 	bfa_trc(diag, diag->ledtest.lock);
5004 	diag->ledtest.lock = BFA_FALSE;
5005 	/* no bfa_cb_queue is needed because driver is not waiting */
5006 }
5007 
5008 /*
5009  * Port beaconing
5010  */
5011 static void
5012 diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
5013 {
5014 	struct bfi_diag_portbeacon_req_s *msg;
5015 
5016 	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
5017 	/* build host command */
5018 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
5019 		bfa_ioc_portid(diag->ioc));
5020 	msg->beacon = beacon;
5021 	msg->period = cpu_to_be32(sec);
5022 	/* send mbox cmd */
5023 	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
5024 }
5025 
5026 static void
5027 diag_portbeacon_comp(struct bfa_diag_s *diag)
5028 {
5029 	bfa_trc(diag, diag->beacon.state);
5030 	diag->beacon.state = BFA_FALSE;
5031 	if (diag->cbfn_beacon)
5032 		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
5033 }
5034 
5035 /*
5036  *	Diag mbox handler
5037  */
5038 void
5039 bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5040 {
5041 	struct bfa_diag_s *diag = diagarg;
5042 
5043 	switch (msg->mh.msg_id) {
5044 	case BFI_DIAG_I2H_PORTBEACON:
5045 		diag_portbeacon_comp(diag);
5046 		break;
5047 	case BFI_DIAG_I2H_FWPING:
5048 		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5049 		break;
5050 	case BFI_DIAG_I2H_TEMPSENSOR:
5051 		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5052 		break;
5053 	case BFI_DIAG_I2H_LEDTEST:
5054 		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5055 		break;
5056 	default:
5057 		bfa_trc(diag, msg->mh.msg_id);
5058 		WARN_ON(1);
5059 	}
5060 }
5061 
5062 /*
5063  * Gen RAM Test
5064  *
5065  *   @param[in] *diag           - diag data struct
5066  *   @param[in] *memtest        - mem test params input from upper layer,
5067  *   @param[in] pattern         - mem test pattern
5068  *   @param[in] *result         - mem test result
5069  *   @param[in] cbfn            - mem test callback function
5070  *   @param[in] cbarg           - callback function arg
5071  *
5072  *   @param[out]
5073  */
5074 bfa_status_t
5075 bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5076 		u32 pattern, struct bfa_diag_memtest_result *result,
5077 		bfa_cb_diag_t cbfn, void *cbarg)
5078 {
5079 	u32	memtest_tov;
5080 
5081 	bfa_trc(diag, pattern);
5082 
5083 	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5084 		return BFA_STATUS_ADAPTER_ENABLED;
5085 
5086 	/* check to see if there is another destructive diag cmd running */
5087 	if (diag->block) {
5088 		bfa_trc(diag, diag->block);
5089 		return BFA_STATUS_DEVBUSY;
5090 	} else
5091 		diag->block = 1;
5092 
5093 	diag->result = result;
5094 	diag->cbfn = cbfn;
5095 	diag->cbarg = cbarg;
5096 
5097 	/* download memtest code and take LPU0 out of reset */
5098 	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5099 
5100 	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5101 		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5102 	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5103 			bfa_diag_memtest_done, diag, memtest_tov);
5104 	diag->timer_active = 1;
5105 	return BFA_STATUS_OK;
5106 }
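/*
 * Example usage (a minimal sketch; 'memtest_params' and 'diag_cb' are
 * the caller's). The adapter must already be disabled, or
 * BFA_STATUS_ADAPTER_ENABLED is returned:
 *
 *	struct bfa_diag_memtest_result res;
 *
 *	status = bfa_diag_memtest(diag, &memtest_params, 0xa5a5a5a5,
 *				  &res, diag_cb, NULL);
 */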
5107 
5108 /*
5109  * DIAG firmware ping command
5110  *
5111  *   @param[in] *diag           - diag data struct
5112  *   @param[in] cnt             - dma loop count for testing PCIE
5113  *   @param[in] data            - data pattern to pass in fw
5114  *   @param[in] *result         - pointer to bfa_diag_fwping_result_t data struct
5115  *   @param[in] cbfn            - callback function
5116  *   @param[in] *cbarg          - callback function arg
5117  *
5118  *   @param[out]
5119  */
5120 bfa_status_t
5121 bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5122 		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5123 		void *cbarg)
5124 {
5125 	bfa_trc(diag, cnt);
5126 	bfa_trc(diag, data);
5127 
5128 	if (!bfa_ioc_is_operational(diag->ioc))
5129 		return BFA_STATUS_IOC_NON_OP;
5130 
5131 	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
5132 	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
5133 		return BFA_STATUS_CMD_NOTSUPP;
5134 
5135 	/* check to see if there is another destructive diag cmd running */
5136 	if (diag->block || diag->fwping.lock) {
5137 		bfa_trc(diag, diag->block);
5138 		bfa_trc(diag, diag->fwping.lock);
5139 		return BFA_STATUS_DEVBUSY;
5140 	}
5141 
5142 	/* Initialization */
5143 	diag->fwping.lock = 1;
5144 	diag->fwping.cbfn = cbfn;
5145 	diag->fwping.cbarg = cbarg;
5146 	diag->fwping.result = result;
5147 	diag->fwping.data = data;
5148 	diag->fwping.count = cnt;
5149 
5150 	/* Init test results */
5151 	diag->fwping.result->data = 0;
5152 	diag->fwping.result->status = BFA_STATUS_OK;
5153 
5154 	/* kick off the first ping */
5155 	diag_fwping_send(diag);
5156 	return BFA_STATUS_OK;
5157 }
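/*
 * Example usage (a minimal sketch; 'diag_cb' and 'result' are the
 * caller's): ping the firmware ten times with pattern 0xdeadbeef; the
 * DMA and mbox data checks complete through the callback:
 *
 *	status = bfa_diag_fwping(diag, 10, 0xdeadbeef, &result,
 *				 diag_cb, NULL);
 */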
5158 
5159 /*
5160  * Read Temperature Sensor
5161  *
5162  *   @param[in] *diag           - diag data struct
5163  *   @param[in] *result         - pointer to bfa_diag_temp_t data struct
5164  *   @param[in] cbfn            - callback function
5165  *   @param[in] *cbarg          - callback function arg
5166  *
5167  *   @param[out]
5168  */
5169 bfa_status_t
5170 bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5171 		struct bfa_diag_results_tempsensor_s *result,
5172 		bfa_cb_diag_t cbfn, void *cbarg)
5173 {
5174 	/* check to see if there is a destructive diag cmd running */
5175 	if (diag->block || diag->tsensor.lock) {
5176 		bfa_trc(diag, diag->block);
5177 		bfa_trc(diag, diag->tsensor.lock);
5178 		return BFA_STATUS_DEVBUSY;
5179 	}
5180 
5181 	if (!bfa_ioc_is_operational(diag->ioc))
5182 		return BFA_STATUS_IOC_NON_OP;
5183 
5184 	/* Init diag mod params */
5185 	diag->tsensor.lock = 1;
5186 	diag->tsensor.temp = result;
5187 	diag->tsensor.cbfn = cbfn;
5188 	diag->tsensor.cbarg = cbarg;
5189 	diag->tsensor.status = BFA_STATUS_OK;
5190 
5191 	/* Send msg to fw */
5192 	diag_tempsensor_send(diag);
5193 
5194 	return BFA_STATUS_OK;
5195 }
5196 
5197 /*
5198  * LED Test command
5199  *
5200  *   @param[in] *diag           - diag data struct
5201  *   @param[in] *ledtest        - pointer to ledtest data structure
5202  *
5203  *   @param[out]
5204  */
5205 bfa_status_t
5206 bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5207 {
5208 	bfa_trc(diag, ledtest->cmd);
5209 
5210 	if (!bfa_ioc_is_operational(diag->ioc))
5211 		return BFA_STATUS_IOC_NON_OP;
5212 
5213 	if (diag->beacon.state)
5214 		return BFA_STATUS_BEACON_ON;
5215 
5216 	if (diag->ledtest.lock)
5217 		return BFA_STATUS_LEDTEST_OP;
5218 
5219 	/* Send msg to fw */
5220 	diag->ledtest.lock = BFA_TRUE;
5221 	diag_ledtest_send(diag, ledtest);
5222 
5223 	return BFA_STATUS_OK;
5224 }
5225 
5226 /*
5227  * Port beaconing command
5228  *
5229  *   @param[in] *diag           - diag data struct
5230  *   @param[in] beacon          - port beaconing 1:ON   0:OFF
5231  *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
5232  *   @param[in] sec             - beaconing duration in seconds
5233  *
5234  *   @param[out]
5235  */
5236 bfa_status_t
5237 bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5238 		bfa_boolean_t link_e2e_beacon, uint32_t sec)
5239 {
5240 	bfa_trc(diag, beacon);
5241 	bfa_trc(diag, link_e2e_beacon);
5242 	bfa_trc(diag, sec);
5243 
5244 	if (!bfa_ioc_is_operational(diag->ioc))
5245 		return BFA_STATUS_IOC_NON_OP;
5246 
5247 	if (diag->ledtest.lock)
5248 		return BFA_STATUS_LEDTEST_OP;
5249 
5250 	if (diag->beacon.state && beacon)       /* beacon already on */
5251 		return BFA_STATUS_BEACON_ON;
5252 
5253 	diag->beacon.state	= beacon;
5254 	diag->beacon.link_e2e	= link_e2e_beacon;
5255 	if (diag->cbfn_beacon)
5256 		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5257 
5258 	/* Send msg to fw */
5259 	diag_portbeacon_send(diag, beacon, sec);
5260 
5261 	return BFA_STATUS_OK;
5262 }
5263 
5264 /*
5265  * Return DMA memory needed by diag module.
5266  */
5267 u32
5268 bfa_diag_meminfo(void)
5269 {
5270 	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5271 }
5272 
5273 /*
5274  *	Attach virtual and physical memory for Diag.
5275  */
5276 void
5277 bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5278 	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5279 {
5280 	diag->dev = dev;
5281 	diag->ioc = ioc;
5282 	diag->trcmod = trcmod;
5283 
5284 	diag->block = 0;
5285 	diag->cbfn = NULL;
5286 	diag->cbarg = NULL;
5287 	diag->result = NULL;
5288 	diag->cbfn_beacon = cbfn_beacon;
5289 
5290 	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5291 	bfa_q_qe_init(&diag->ioc_notify);
5292 	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5293 	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5294 }
5295 
5296 void
5297 bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5298 {
5299 	diag->fwping.dbuf_kva = dm_kva;
5300 	diag->fwping.dbuf_pa = dm_pa;
5301 	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5302 }
5303 
5304 /*
5305  *	PHY module specific
5306  */
5307 #define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
5308 #define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
5309 
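/*
 * Convert 'sz' bytes of big-endian 32-bit words in 'ibuf' into host
 * order in 'obuf', e.g. bfa_phy_ntoh32((u32 *)attr, (u32 *)dbuf_kva,
 * sizeof(struct bfa_phy_attr_s)) as done in bfa_phy_intr() below.
 * 'sz' is assumed to be a multiple of 4.
 */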
5310 static void
5311 bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5312 {
5313 	int i, m = sz >> 2;
5314 
5315 	for (i = 0; i < m; i++)
5316 		obuf[i] = be32_to_cpu(ibuf[i]);
5317 }
5318 
5319 static bfa_boolean_t
5320 bfa_phy_present(struct bfa_phy_s *phy)
5321 {
5322 	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5323 }
5324 
5325 static void
5326 bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5327 {
5328 	struct bfa_phy_s *phy = cbarg;
5329 
5330 	bfa_trc(phy, event);
5331 
5332 	switch (event) {
5333 	case BFA_IOC_E_DISABLED:
5334 	case BFA_IOC_E_FAILED:
5335 		if (phy->op_busy) {
5336 			phy->status = BFA_STATUS_IOC_FAILURE;
5337 			phy->cbfn(phy->cbarg, phy->status);
5338 			phy->op_busy = 0;
5339 		}
5340 		break;
5341 
5342 	default:
5343 		break;
5344 	}
5345 }
5346 
5347 /*
5348  * Send phy attribute query request.
5349  *
5350  * @param[in] cbarg - callback argument
5351  */
5352 static void
5353 bfa_phy_query_send(void *cbarg)
5354 {
5355 	struct bfa_phy_s *phy = cbarg;
5356 	struct bfi_phy_query_req_s *msg =
5357 			(struct bfi_phy_query_req_s *) phy->mb.msg;
5358 
5359 	msg->instance = phy->instance;
5360 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5361 		bfa_ioc_portid(phy->ioc));
5362 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5363 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5364 }
5365 
5366 /*
5367  * Send phy write request.
5368  *
5369  * @param[in] cbarg - callback argument
5370  */
5371 static void
5372 bfa_phy_write_send(void *cbarg)
5373 {
5374 	struct bfa_phy_s *phy = cbarg;
5375 	struct bfi_phy_write_req_s *msg =
5376 			(struct bfi_phy_write_req_s *) phy->mb.msg;
5377 	u32	len;
5378 	u16	*buf, *dbuf;
5379 	int	i, sz;
5380 
5381 	msg->instance = phy->instance;
5382 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5383 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5384 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5385 	msg->length = cpu_to_be32(len);
5386 
5387 	/* indicate if it's the last msg of the whole write operation */
5388 	msg->last = (len == phy->residue) ? 1 : 0;
5389 
5390 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5391 		bfa_ioc_portid(phy->ioc));
5392 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5393 
5394 	buf = (u16 *) (phy->ubuf + phy->offset);
5395 	dbuf = (u16 *)phy->dbuf_kva;
5396 	sz = len >> 1;
5397 	for (i = 0; i < sz; i++)
5398 		dbuf[i] = cpu_to_be16(buf[i]);	/* stage user data in DMA buf */
5399 
5400 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5401 
5402 	phy->residue -= len;
5403 	phy->offset += len;
5404 }
5405 
5406 /*
5407  * Send phy read request.
5408  *
5409  * @param[in] cbarg - callback argument
5410  */
5411 static void
5412 bfa_phy_read_send(void *cbarg)
5413 {
5414 	struct bfa_phy_s *phy = cbarg;
5415 	struct bfi_phy_read_req_s *msg =
5416 			(struct bfi_phy_read_req_s *) phy->mb.msg;
5417 	u32	len;
5418 
5419 	msg->instance = phy->instance;
5420 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5421 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5422 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5423 	msg->length = cpu_to_be32(len);
5424 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5425 		bfa_ioc_portid(phy->ioc));
5426 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5427 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5428 }
5429 
5430 /*
5431  * Send phy stats request.
5432  *
5433  * @param[in] cbarg - callback argument
5434  */
5435 static void
5436 bfa_phy_stats_send(void *cbarg)
5437 {
5438 	struct bfa_phy_s *phy = cbarg;
5439 	struct bfi_phy_stats_req_s *msg =
5440 			(struct bfi_phy_stats_req_s *) phy->mb.msg;
5441 
5442 	msg->instance = phy->instance;
5443 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5444 		bfa_ioc_portid(phy->ioc));
5445 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5446 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5447 }
5448 
5449 /*
5450  * Phy memory info API.
5451  *
5452  * @param[in] mincfg - minimal cfg variable
5453  */
5454 u32
5455 bfa_phy_meminfo(bfa_boolean_t mincfg)
5456 {
5457 	/* min driver doesn't need phy */
5458 	if (mincfg)
5459 		return 0;
5460 
5461 	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5462 }
5463 
5464 /*
5465  * Phy attach API.
5466  *
5467  * @param[in] phy - phy structure
5468  * @param[in] ioc  - ioc structure
5469  * @param[in] dev  - device structure
5470  * @param[in] trcmod - trace module
5471  * @param[in] mincfg - minimal cfg variable
5472  */
5473 void
5474 bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5475 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5476 {
5477 	phy->ioc = ioc;
5478 	phy->trcmod = trcmod;
5479 	phy->cbfn = NULL;
5480 	phy->cbarg = NULL;
5481 	phy->op_busy = 0;
5482 
5483 	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5484 	bfa_q_qe_init(&phy->ioc_notify);
5485 	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5486 	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5487 
5488 	/* min driver doesn't need phy */
5489 	if (mincfg) {
5490 		phy->dbuf_kva = NULL;
5491 		phy->dbuf_pa = 0;
5492 	}
5493 }
5494 
5495 /*
5496  * Claim memory for phy
5497  *
5498  * @param[in] phy - phy structure
5499  * @param[in] dm_kva - pointer to virtual memory address
5500  * @param[in] dm_pa - physical memory address
5501  * @param[in] mincfg - minimal cfg variable
5502  */
5503 void
5504 bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5505 		bfa_boolean_t mincfg)
5506 {
5507 	if (mincfg)
5508 		return;
5509 
5510 	phy->dbuf_kva = dm_kva;
5511 	phy->dbuf_pa = dm_pa;
5512 	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5513 	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5514 	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5515 }
5516 
5517 bfa_boolean_t
5518 bfa_phy_busy(struct bfa_ioc_s *ioc)
5519 {
5520 	void __iomem	*rb;
5521 
5522 	rb = bfa_ioc_bar0(ioc);
5523 	return readl(rb + BFA_PHY_LOCK_STATUS);
5524 }
5525 
5526 /*
5527  * Get phy attribute.
5528  *
5529  * @param[in] phy - phy structure
5530  * @param[in] attr - phy attribute structure
5531  * @param[in] cbfn - callback function
5532  * @param[in] cbarg - callback argument
5533  *
5534  * Return status.
5535  */
5536 bfa_status_t
5537 bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5538 		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5539 {
5540 	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5541 	bfa_trc(phy, instance);
5542 
5543 	if (!bfa_phy_present(phy))
5544 		return BFA_STATUS_PHY_NOT_PRESENT;
5545 
5546 	if (!bfa_ioc_is_operational(phy->ioc))
5547 		return BFA_STATUS_IOC_NON_OP;
5548 
5549 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5550 		bfa_trc(phy, phy->op_busy);
5551 		return BFA_STATUS_DEVBUSY;
5552 	}
5553 
5554 	phy->op_busy = 1;
5555 	phy->cbfn = cbfn;
5556 	phy->cbarg = cbarg;
5557 	phy->instance = instance;
5558 	phy->ubuf = (uint8_t *) attr;
5559 	bfa_phy_query_send(phy);
5560 
5561 	return BFA_STATUS_OK;
5562 }
5563 
5564 /*
5565  * Get phy stats.
5566  *
5567  * @param[in] phy - phy structure
5568  * @param[in] instance - phy image instance
5569  * @param[in] stats - pointer to phy stats
5570  * @param[in] cbfn - callback function
5571  * @param[in] cbarg - callback argument
5572  *
5573  * Return status.
5574  */
5575 bfa_status_t
5576 bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5577 		struct bfa_phy_stats_s *stats,
5578 		bfa_cb_phy_t cbfn, void *cbarg)
5579 {
5580 	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5581 	bfa_trc(phy, instance);
5582 
5583 	if (!bfa_phy_present(phy))
5584 		return BFA_STATUS_PHY_NOT_PRESENT;
5585 
5586 	if (!bfa_ioc_is_operational(phy->ioc))
5587 		return BFA_STATUS_IOC_NON_OP;
5588 
5589 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5590 		bfa_trc(phy, phy->op_busy);
5591 		return BFA_STATUS_DEVBUSY;
5592 	}
5593 
5594 	phy->op_busy = 1;
5595 	phy->cbfn = cbfn;
5596 	phy->cbarg = cbarg;
5597 	phy->instance = instance;
5598 	phy->ubuf = (u8 *) stats;
5599 	bfa_phy_stats_send(phy);
5600 
5601 	return BFA_STATUS_OK;
5602 }
5603 
5604 /*
5605  * Update phy image.
5606  *
5607  * @param[in] phy - phy structure
5608  * @param[in] instance - phy image instance
5609  * @param[in] buf - update data buffer
5610  * @param[in] len - data buffer length
5611  * @param[in] offset - offset relative to starting address
5612  * @param[in] cbfn - callback function
5613  * @param[in] cbarg - callback argument
5614  *
5615  * Return status.
5616  */
5617 bfa_status_t
5618 bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5619 		void *buf, u32 len, u32 offset,
5620 		bfa_cb_phy_t cbfn, void *cbarg)
5621 {
5622 	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5623 	bfa_trc(phy, instance);
5624 	bfa_trc(phy, len);
5625 	bfa_trc(phy, offset);
5626 
5627 	if (!bfa_phy_present(phy))
5628 		return BFA_STATUS_PHY_NOT_PRESENT;
5629 
5630 	if (!bfa_ioc_is_operational(phy->ioc))
5631 		return BFA_STATUS_IOC_NON_OP;
5632 
5633 	/* 'len' must be on a word (4-byte) boundary */
5634 	if (!len || (len & 0x03))
5635 		return BFA_STATUS_FAILED;
5636 
5637 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5638 		bfa_trc(phy, phy->op_busy);
5639 		return BFA_STATUS_DEVBUSY;
5640 	}
5641 
5642 	phy->op_busy = 1;
5643 	phy->cbfn = cbfn;
5644 	phy->cbarg = cbarg;
5645 	phy->instance = instance;
5646 	phy->residue = len;
5647 	phy->offset = 0;
5648 	phy->addr_off = offset;
5649 	phy->ubuf = buf;
5650 
5651 	bfa_phy_write_send(phy);
5652 	return BFA_STATUS_OK;
5653 }
5654 
5655 /*
5656  * Read phy image.
5657  *
5658  * @param[in] phy - phy structure
5659  * @param[in] instance - phy image instance
5660  * @param[in] buf - read data buffer
5661  * @param[in] len - data buffer length
5662  * @param[in] offset - offset relative to starting address
5663  * @param[in] cbfn - callback function
5664  * @param[in] cbarg - callback argument
5665  *
5666  * Return status.
5667  */
5668 bfa_status_t
5669 bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5670 		void *buf, u32 len, u32 offset,
5671 		bfa_cb_phy_t cbfn, void *cbarg)
5672 {
5673 	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5674 	bfa_trc(phy, instance);
5675 	bfa_trc(phy, len);
5676 	bfa_trc(phy, offset);
5677 
5678 	if (!bfa_phy_present(phy))
5679 		return BFA_STATUS_PHY_NOT_PRESENT;
5680 
5681 	if (!bfa_ioc_is_operational(phy->ioc))
5682 		return BFA_STATUS_IOC_NON_OP;
5683 
5684 	/* 'len' must be in word (4-byte) boundary */
5685 	if (!len || (len & 0x03))
5686 		return BFA_STATUS_FAILED;
5687 
5688 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5689 		bfa_trc(phy, phy->op_busy);
5690 		return BFA_STATUS_DEVBUSY;
5691 	}
5692 
5693 	phy->op_busy = 1;
5694 	phy->cbfn = cbfn;
5695 	phy->cbarg = cbarg;
5696 	phy->instance = instance;
5697 	phy->residue = len;
5698 	phy->offset = 0;
5699 	phy->addr_off = offset;
5700 	phy->ubuf = buf;
5701 	bfa_phy_read_send(phy);
5702 
5703 	return BFA_STATUS_OK;
5704 }
5705 
5706 /*
5707  * Process phy response messages upon receiving interrupts.
5708  *
5709  * @param[in] phyarg - phy structure
5710  * @param[in] msg - message structure
5711  */
5712 void
5713 bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5714 {
5715 	struct bfa_phy_s *phy = phyarg;
5716 	u32	status;
5717 
5718 	union {
5719 		struct bfi_phy_query_rsp_s *query;
5720 		struct bfi_phy_stats_rsp_s *stats;
5721 		struct bfi_phy_write_rsp_s *write;
5722 		struct bfi_phy_read_rsp_s *read;
5723 		struct bfi_mbmsg_s   *msg;
5724 	} m;
5725 
5726 	m.msg = msg;
5727 	bfa_trc(phy, msg->mh.msg_id);
5728 
5729 	if (!phy->op_busy) {
5730 		/* receiving response after ioc failure */
5731 		bfa_trc(phy, 0x9999);
5732 		return;
5733 	}
5734 
5735 	switch (msg->mh.msg_id) {
5736 	case BFI_PHY_I2H_QUERY_RSP:
5737 		status = be32_to_cpu(m.query->status);
5738 		bfa_trc(phy, status);
5739 
5740 		if (status == BFA_STATUS_OK) {
5741 			struct bfa_phy_attr_s *attr =
5742 				(struct bfa_phy_attr_s *) phy->ubuf;
5743 			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5744 					sizeof(struct bfa_phy_attr_s));
5745 			bfa_trc(phy, attr->status);
5746 			bfa_trc(phy, attr->length);
5747 		}
5748 
5749 		phy->status = status;
5750 		phy->op_busy = 0;
5751 		if (phy->cbfn)
5752 			phy->cbfn(phy->cbarg, phy->status);
5753 		break;
5754 	case BFI_PHY_I2H_STATS_RSP:
5755 		status = be32_to_cpu(m.stats->status);
5756 		bfa_trc(phy, status);
5757 
5758 		if (status == BFA_STATUS_OK) {
5759 			struct bfa_phy_stats_s *stats =
5760 				(struct bfa_phy_stats_s *) phy->ubuf;
5761 			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5762 				sizeof(struct bfa_phy_stats_s));
5763 			bfa_trc(phy, stats->status);
5764 		}
5765 
5766 		phy->status = status;
5767 		phy->op_busy = 0;
5768 		if (phy->cbfn)
5769 			phy->cbfn(phy->cbarg, phy->status);
5770 		break;
5771 	case BFI_PHY_I2H_WRITE_RSP:
5772 		status = be32_to_cpu(m.write->status);
5773 		bfa_trc(phy, status);
5774 
5775 		if (status != BFA_STATUS_OK || phy->residue == 0) {
5776 			phy->status = status;
5777 			phy->op_busy = 0;
5778 			if (phy->cbfn)
5779 				phy->cbfn(phy->cbarg, phy->status);
5780 		} else {
5781 			bfa_trc(phy, phy->offset);
5782 			bfa_phy_write_send(phy);
5783 		}
5784 		break;
5785 	case BFI_PHY_I2H_READ_RSP:
5786 		status = be32_to_cpu(m.read->status);
5787 		bfa_trc(phy, status);
5788 
5789 		if (status != BFA_STATUS_OK) {
5790 			phy->status = status;
5791 			phy->op_busy = 0;
5792 			if (phy->cbfn)
5793 				phy->cbfn(phy->cbarg, phy->status);
5794 		} else {
5795 			u32 len = be32_to_cpu(m.read->length);
5796 			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5797 			u16 *dbuf = (u16 *)phy->dbuf_kva;
5798 			int i, sz = len >> 1;
5799 
5800 			bfa_trc(phy, phy->offset);
5801 			bfa_trc(phy, len);
5802 
5803 			for (i = 0; i < sz; i++)
5804 				buf[i] = be16_to_cpu(dbuf[i]);
5805 
5806 			phy->residue -= len;
5807 			phy->offset += len;
5808 
5809 			if (phy->residue == 0) {
5810 				phy->status = status;
5811 				phy->op_busy = 0;
5812 				if (phy->cbfn)
5813 					phy->cbfn(phy->cbarg, phy->status);
5814 			} else
5815 				bfa_phy_read_send(phy);
5816 		}
5817 		break;
5818 	default:
5819 		WARN_ON(1);
5820 	}
5821 }
5822 
5823 /*
5824  *	DCONF module specific
5825  */
5826 
5827 BFA_MODULE(dconf);
5828 
5829 /*
5830  * DCONF state machine events
5831  */
5832 enum bfa_dconf_event {
5833 	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
5834 	BFA_DCONF_SM_FLASH_COMP		= 2,	/* flash read/write completed */
5835 	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
5836 	BFA_DCONF_SM_TIMEOUT		= 4,	/* update timer expiry */
5837 	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
5838 	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
5839 };
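/*
 * State transitions, summarized from the handlers below:
 *
 *	uninit --INIT--> flash_read --FLASH_COMP/TIMEOUT--> ready
 *	ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *	dirty/sync --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
 *	dirty/sync --IOCDISABLE--> iocdown_dirty --INIT--> dirty
 */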
5840 
5841 /* forward declaration of DCONF state machine */
5842 static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5843 				enum bfa_dconf_event event);
5844 static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5845 				enum bfa_dconf_event event);
5846 static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5847 				enum bfa_dconf_event event);
5848 static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5849 				enum bfa_dconf_event event);
5850 static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5851 				enum bfa_dconf_event event);
5852 static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5853 				enum bfa_dconf_event event);
5854 static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5855 				enum bfa_dconf_event event);
5856 
5857 static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5858 static void bfa_dconf_timer(void *cbarg);
5859 static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5860 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5861 
5862 /*
5863  * Beginning state of dconf module. Waiting for an event to start.
5864  */
5865 static void
5866 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5867 {
5868 	bfa_status_t bfa_status;
5869 	bfa_trc(dconf->bfa, event);
5870 
5871 	switch (event) {
5872 	case BFA_DCONF_SM_INIT:
5873 		if (dconf->min_cfg) {
5874 			bfa_trc(dconf->bfa, dconf->min_cfg);
5875 			bfa_fsm_send_event(&dconf->bfa->iocfc,
5876 					IOCFC_E_DCONF_DONE);
5877 			return;
5878 		}
5879 		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5880 		bfa_timer_start(dconf->bfa, &dconf->timer,
5881 			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5882 		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5883 					BFA_FLASH_PART_DRV, dconf->instance,
5884 					dconf->dconf,
5885 					sizeof(struct bfa_dconf_s), 0,
5886 					bfa_dconf_init_cb, dconf->bfa);
5887 		if (bfa_status != BFA_STATUS_OK) {
5888 			bfa_timer_stop(&dconf->timer);
5889 			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5890 			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5891 			return;
5892 		}
5893 		break;
5894 	case BFA_DCONF_SM_EXIT:
5895 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
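		/* fall through */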
5896 	case BFA_DCONF_SM_IOCDISABLE:
5897 	case BFA_DCONF_SM_WR:
5898 	case BFA_DCONF_SM_FLASH_COMP:
5899 		break;
5900 	default:
5901 		bfa_sm_fault(dconf->bfa, event);
5902 	}
5903 }
5904 
5905 /*
5906  * Read flash for dconf entries and make a call back to the driver once done.
5907  */
5908 static void
5909 bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5910 			enum bfa_dconf_event event)
5911 {
5912 	bfa_trc(dconf->bfa, event);
5913 
5914 	switch (event) {
5915 	case BFA_DCONF_SM_FLASH_COMP:
5916 		bfa_timer_stop(&dconf->timer);
5917 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5918 		break;
5919 	case BFA_DCONF_SM_TIMEOUT:
5920 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5921 		bfa_ioc_suspend(&dconf->bfa->ioc);
5922 		break;
5923 	case BFA_DCONF_SM_EXIT:
5924 		bfa_timer_stop(&dconf->timer);
5925 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5926 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5927 		break;
5928 	case BFA_DCONF_SM_IOCDISABLE:
5929 		bfa_timer_stop(&dconf->timer);
5930 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5931 		break;
5932 	default:
5933 		bfa_sm_fault(dconf->bfa, event);
5934 	}
5935 }
5936 
5937 /*
5938  * DCONF Module is in ready state. Has completed the initialization.
5939  */
5940 static void
5941 bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5942 {
5943 	bfa_trc(dconf->bfa, event);
5944 
5945 	switch (event) {
5946 	case BFA_DCONF_SM_WR:
5947 		bfa_timer_start(dconf->bfa, &dconf->timer,
5948 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5949 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5950 		break;
5951 	case BFA_DCONF_SM_EXIT:
5952 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5953 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5954 		break;
5955 	case BFA_DCONF_SM_INIT:
5956 	case BFA_DCONF_SM_IOCDISABLE:
5957 		break;
5958 	default:
5959 		bfa_sm_fault(dconf->bfa, event);
5960 	}
5961 }
5962 
5963 /*
5964  * entries are dirty, write back to the flash.
5965  */
5966 
5967 static void
5968 bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5969 {
5970 	bfa_trc(dconf->bfa, event);
5971 
5972 	switch (event) {
5973 	case BFA_DCONF_SM_TIMEOUT:
5974 		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5975 		bfa_dconf_flash_write(dconf);
5976 		break;
5977 	case BFA_DCONF_SM_WR:
5978 		bfa_timer_stop(&dconf->timer);
5979 		bfa_timer_start(dconf->bfa, &dconf->timer,
5980 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5981 		break;
5982 	case BFA_DCONF_SM_EXIT:
5983 		bfa_timer_stop(&dconf->timer);
5984 		bfa_timer_start(dconf->bfa, &dconf->timer,
5985 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5986 		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5987 		bfa_dconf_flash_write(dconf);
5988 		break;
5989 	case BFA_DCONF_SM_FLASH_COMP:
5990 		break;
5991 	case BFA_DCONF_SM_IOCDISABLE:
5992 		bfa_timer_stop(&dconf->timer);
5993 		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5994 		break;
5995 	default:
5996 		bfa_sm_fault(dconf->bfa, event);
5997 	}
5998 }
5999 
/*
 * Sync the dconf entries to the flash.
 */
static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_IOCDISABLE:
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_timer_stop(&dconf->timer);
		/* fall through */
	case BFA_DCONF_SM_TIMEOUT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_FLASH_COMP:
		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
		break;
	case BFA_DCONF_SM_WR:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
			enum bfa_dconf_event event)
{
	bfa_trc(dconf->bfa, event);

	switch (event) {
	case BFA_DCONF_SM_INIT:
		bfa_timer_start(dconf->bfa, &dconf->timer,
			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
		break;
	case BFA_DCONF_SM_EXIT:
		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
		break;
	case BFA_DCONF_SM_IOCDISABLE:
		break;
	default:
		bfa_sm_fault(dconf->bfa, event);
	}
}

/*
 * Compute and return the memory needed by the DCONF module.
 */
static void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);

	if (cfg->drvcfg.min_cfg)
		bfa_mem_kva_setup(meminfo, dconf_kva,
				sizeof(struct bfa_dconf_hdr_s));
	else
		bfa_mem_kva_setup(meminfo, dconf_kva,
				sizeof(struct bfa_dconf_s));
}

static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	dconf->bfad = bfad;
	dconf->bfa = bfa;
	dconf->instance = bfa->ioc.port_id;
	bfa_trc(bfa, dconf->instance);

	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
	if (cfg->drvcfg.min_cfg) {
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
		dconf->min_cfg = BFA_TRUE;
	} else {
		dconf->min_cfg = BFA_FALSE;
		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
	}

	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}

static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
	struct bfa_s *bfa = arg;
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	if (status == BFA_STATUS_OK) {
		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
		/* re-stamp a stale header so later writes store a valid one */
		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
	}
	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}

void
bfa_dconf_modinit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}

static void
bfa_dconf_start(struct bfa_s *bfa)
{
}

static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}

static void
bfa_dconf_timer(void *cbarg)
{
	struct bfa_dconf_mod_s *dconf = cbarg;

	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}

static void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}

static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}

static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
	bfa_status_t bfa_status;

	bfa_trc(dconf->bfa, 0);

	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
				BFA_FLASH_PART_DRV, dconf->instance,
				dconf->dconf, sizeof(struct bfa_dconf_s), 0,
				bfa_dconf_cbfn, dconf);
	WARN_ON(bfa_status != BFA_STATUS_OK);
	bfa_trc(dconf->bfa, bfa_status);

	return bfa_status;
}

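/*
 * Note: bfa_dconf_flash_write() above is asynchronous. bfa_flash_update_part()
 * only queues the flash update; bfa_dconf_cbfn() below runs on completion and
 * feeds the BFA_DCONF_SM_FLASH_COMP event back into the state machine.
 */
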
bfa_status_t
bfa_dconf_update(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_trc(dconf->bfa, 0);
	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
		return BFA_STATUS_FAILED;

	if (dconf->min_cfg) {
		bfa_trc(dconf->bfa, dconf->min_cfg);
		return BFA_STATUS_FAILED;
	}

	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
	return BFA_STATUS_OK;
}

static void
bfa_dconf_cbfn(void *arg, bfa_status_t status)
{
	struct bfa_dconf_mod_s *dconf = arg;

	WARN_ON(status);
	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}

void
bfa_dconf_modexit(struct bfa_s *bfa)
{
	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
}

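/*
 * Illustrative driver-side lifecycle (sketch only; error handling, locking
 * and the surrounding bfad code are omitted and not part of this file):
 *
 *	bfa_dconf_attach(bfa, bfad, cfg, pcidev);
 *	bfa_dconf_modinit(bfa);			// kicks off the flash read
 *	...
 *	if (bfa_dconf_update(bfa) != BFA_STATUS_OK)
 *		;				// IOC down or min-cfg mode
 *	...
 *	bfa_dconf_modexit(bfa);			// final sync, then uninit
 */
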
/*
 * FRU specific functions
 */

#define BFA_FRU_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200

static void
bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_fru_s *fru = cbarg;

	bfa_trc(fru, event);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (fru->op_busy) {
			fru->status = BFA_STATUS_IOC_FAILURE;
			fru->cbfn(fru->cbarg, fru->status);
			fru->op_busy = 0;
		}
		break;

	default:
		break;
	}
}

/*
 * Send fru write request.
 *
 * @param[in] cbarg - callback argument
 * @param[in] msg_type - H2I message type
 */
static void
bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
{
	struct bfa_fru_s *fru = cbarg;
	struct bfi_fru_write_req_s *msg =
			(struct bfi_fru_write_req_s *) fru->mb.msg;
	u32 len;

	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
				fru->residue : BFA_FRU_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);

	/*
	 * indicate if it's the last msg of the whole write operation
	 */
	msg->last = (len == fru->residue) ? 1 : 0;

	msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);

	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);

	fru->residue -= len;
	fru->offset += len;
}

/*
 * Send fru read request.
 *
 * @param[in] cbarg - callback argument
 * @param[in] msg_type - H2I message type
 */
static void
bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
{
	struct bfa_fru_s *fru = cbarg;
	struct bfi_fru_read_req_s *msg =
			(struct bfi_fru_read_req_s *) fru->mb.msg;
	u32 len;

	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
				fru->residue : BFA_FRU_DMA_BUF_SZ;
	msg->length = cpu_to_be32(len);
	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
}

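/*
 * Both send helpers above move at most BFA_FRU_DMA_BUF_SZ (8 KB) per mailbox
 * request. fru->residue and fru->offset track the remaining and completed
 * portions of the transfer, and bfa_fru_intr() below issues the next chunk
 * from the response handler until residue reaches zero.
 */
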
/*
 * FRU memory info API.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_fru_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need fru */
	if (mincfg)
		return 0;

	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/*
 * FRU attach API.
 *
 * @param[in] fru - fru structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	fru->ioc = ioc;
	fru->trcmod = trcmod;
	fru->cbfn = NULL;
	fru->cbarg = NULL;
	fru->op_busy = 0;

	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
	bfa_q_qe_init(&fru->ioc_notify);
	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);

	/* min driver doesn't need fru */
	if (mincfg) {
		fru->dbuf_kva = NULL;
		fru->dbuf_pa = 0;
	}
}

/*
 * Claim memory for fru
 *
 * @param[in] fru - fru structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
	bfa_boolean_t mincfg)
{
	if (mincfg)
		return;

	fru->dbuf_kva = dm_kva;
	fru->dbuf_pa = dm_pa;
	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/*
 * Update fru vpd image.
 *
 * @param[in] fru - fru structure
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 * @param[in] trfr_cmpl - transfer complete flag sent with the last msg
 *
 * Return status.
 */
bfa_status_t
bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
		  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
{
	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
	bfa_trc(fru, len);
	bfa_trc(fru, offset);

	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
		return BFA_STATUS_FRU_NOT_PRESENT;

	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
		return BFA_STATUS_CMD_NOTSUPP;

	if (!bfa_ioc_is_operational(fru->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (fru->op_busy) {
		bfa_trc(fru, fru->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fru->op_busy = 1;

	fru->cbfn = cbfn;
	fru->cbarg = cbarg;
	fru->residue = len;
	fru->offset = 0;
	fru->addr_off = offset;
	fru->ubuf = buf;
	fru->trfr_cmpl = trfr_cmpl;

	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);

	return BFA_STATUS_OK;
}

/*
 * Read fru vpd image.
 *
 * @param[in] fru - fru structure
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
		bfa_cb_fru_t cbfn, void *cbarg)
{
	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
	bfa_trc(fru, len);
	bfa_trc(fru, offset);

	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
		return BFA_STATUS_FRU_NOT_PRESENT;

	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
		return BFA_STATUS_CMD_NOTSUPP;

	if (!bfa_ioc_is_operational(fru->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (fru->op_busy) {
		bfa_trc(fru, fru->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fru->op_busy = 1;

	fru->cbfn = cbfn;
	fru->cbarg = cbarg;
	fru->residue = len;
	fru->offset = 0;
	fru->addr_off = offset;
	fru->ubuf = buf;
	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);

	return BFA_STATUS_OK;
}

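/*
 * Illustrative asynchronous usage (sketch only; bfad_vpd_read_cb and the
 * buffer management shown here are hypothetical, not part of this driver):
 *
 *	static void bfad_vpd_read_cb(void *cbarg, bfa_status_t status)
 *	{
 *		// entire VPD image has been copied into the caller's
 *		// buffer (or the read failed with the given status)
 *	}
 *
 *	if (bfa_fruvpd_read(fru, buf, len, 0, bfad_vpd_read_cb, cbarg) ==
 *	    BFA_STATUS_DEVBUSY)
 *		;	// another FRU operation is still in flight
 */
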
/*
 * Get maximum size fru vpd image.
 *
 * @param[in] fru - fru structure
 * @param[out] max_size - maximum size of fru vpd data
 *
 * Return status.
 */
bfa_status_t
bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
{
	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
		return BFA_STATUS_FRU_NOT_PRESENT;

	if (!bfa_ioc_is_operational(fru->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
		fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
	else
		return BFA_STATUS_CMD_NOTSUPP;
	return BFA_STATUS_OK;
}

/*
 * tfru write.
 *
 * @param[in] fru - fru structure
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
	       bfa_cb_fru_t cbfn, void *cbarg)
{
	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
	bfa_trc(fru, len);
	bfa_trc(fru, offset);
	bfa_trc(fru, *((u8 *) buf));

	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
		return BFA_STATUS_FRU_NOT_PRESENT;

	if (!bfa_ioc_is_operational(fru->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (fru->op_busy) {
		bfa_trc(fru, fru->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fru->op_busy = 1;

	fru->cbfn = cbfn;
	fru->cbarg = cbarg;
	fru->residue = len;
	fru->offset = 0;
	fru->addr_off = offset;
	fru->ubuf = buf;

	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);

	return BFA_STATUS_OK;
}

/*
 * tfru read.
 *
 * @param[in] fru - fru structure
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
	      bfa_cb_fru_t cbfn, void *cbarg)
{
	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
	bfa_trc(fru, len);
	bfa_trc(fru, offset);

	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
		return BFA_STATUS_FRU_NOT_PRESENT;

	if (!bfa_ioc_is_operational(fru->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (fru->op_busy) {
		bfa_trc(fru, fru->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fru->op_busy = 1;

	fru->cbfn = cbfn;
	fru->cbarg = cbarg;
	fru->residue = len;
	fru->offset = 0;
	fru->addr_off = offset;
	fru->ubuf = buf;
	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);

	return BFA_STATUS_OK;
}

/*
 * Process fru response messages upon receiving interrupts.
 *
 * @param[in] fruarg - fru structure
 * @param[in] msg - message structure
 */
void
bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_fru_s *fru = fruarg;
	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
	u32 status;

	bfa_trc(fru, msg->mh.msg_id);

	if (!fru->op_busy) {
		/*
		 * receiving response after ioc failure
		 */
		bfa_trc(fru, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_FRUVPD_I2H_WRITE_RSP:
	case BFI_TFRU_I2H_WRITE_RSP:
		status = be32_to_cpu(rsp->status);
		bfa_trc(fru, status);

		if (status != BFA_STATUS_OK || fru->residue == 0) {
			fru->status = status;
			fru->op_busy = 0;
			if (fru->cbfn)
				fru->cbfn(fru->cbarg, fru->status);
		} else {
			bfa_trc(fru, fru->offset);
			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
				bfa_fru_write_send(fru,
					BFI_FRUVPD_H2I_WRITE_REQ);
			else
				bfa_fru_write_send(fru,
					BFI_TFRU_H2I_WRITE_REQ);
		}
		break;
	case BFI_FRUVPD_I2H_READ_RSP:
	case BFI_TFRU_I2H_READ_RSP:
		status = be32_to_cpu(rsp->status);
		bfa_trc(fru, status);

		if (status != BFA_STATUS_OK) {
			fru->status = status;
			fru->op_busy = 0;
			if (fru->cbfn)
				fru->cbfn(fru->cbarg, fru->status);
		} else {
			u32 len = be32_to_cpu(rsp->length);

			bfa_trc(fru, fru->offset);
			bfa_trc(fru, len);

			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
			fru->residue -= len;
			fru->offset += len;

			if (fru->residue == 0) {
				fru->status = status;
				fru->op_busy = 0;
				if (fru->cbfn)
					fru->cbfn(fru->cbarg, fru->status);
			} else {
				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
					bfa_fru_read_send(fru,
						BFI_FRUVPD_H2I_READ_REQ);
				else
					bfa_fru_read_send(fru,
						BFI_TFRU_H2I_READ_REQ);
			}
		}
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * register definitions
 */
#define FLI_CMD_REG			0x0001d000
#define FLI_RDDATA_REG			0x0001d010
#define FLI_ADDR_REG			0x0001d004
#define FLI_DEV_STATUS_REG		0x0001d014

#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */

enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/**
 * @brief hardware error definition
 */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};

/**
 * @brief flash command register data structure
 */
union bfa_flash_cmd_reg_u {
	struct {
#ifdef __BIG_ENDIAN
		u32	act:1;
		u32	rsv:1;
		u32	write_cnt:9;
		u32	read_cnt:9;
		u32	addr_cnt:4;
		u32	cmd:8;
#else
		u32	cmd:8;
		u32	addr_cnt:4;
		u32	read_cnt:9;
		u32	write_cnt:9;
		u32	rsv:1;
		u32	act:1;
#endif
	} r;
	u32	i;
};

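/*
 * Example encoding, as used by bfa_flash_read_start() below: a fast read of
 * 64 bytes is issued with cmd = BFA_FLASH_FAST_READ (0x0b), addr_cnt = 4,
 * read_cnt = 64, write_cnt = 0 and act = 1; bfa_flash_cmd_act_check() then
 * treats a still-set act bit as "command not yet complete".
 */
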
/**
 * @brief flash device status register data structure
 */
union bfa_flash_dev_status_reg_u {
	struct {
#ifdef __BIG_ENDIAN
		u32	rsv:21;
		u32	fifo_cnt:6;
		u32	busy:1;
		u32	init_status:1;
		u32	present:1;
		u32	bad:1;
		u32	good:1;
#else
		u32	good:1;
		u32	bad:1;
		u32	present:1;
		u32	init_status:1;
		u32	busy:1;
		u32	fifo_cnt:6;
		u32	rsv:21;
#endif
	} r;
	u32	i;
};

/**
 * @brief flash address register data structure
 */
union bfa_flash_addr_reg_u {
	struct {
#ifdef __BIG_ENDIAN
		u32	addr:24;
		u32	dummy:8;
#else
		u32	dummy:8;
		u32	addr:24;
#endif
	} r;
	u32	i;
};

/*
 * Flash raw private functions
 */
static void
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
{
	union bfa_flash_cmd_reg_u cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
}

static void
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
	union bfa_flash_addr_reg_u addr;

	addr.r.addr = address & 0x00ffffff;
	addr.r.dummy = 0;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));
}

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
	union bfa_flash_cmd_reg_u cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
}

/**
 * @brief
 * Flush FLI data fifo.
 *
 * @param[in] pci_bar - pci bar address
 *
 * Return 0 on success, negative error number on error.
 */
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
	u32 i;
	union bfa_flash_dev_status_reg_u dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words; drain and discard the data */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		readl(pci_bar + FLI_RDDATA_REG);

	/*
	 * Check the device status. It may take some time.
	 */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
}

/**
 * @brief
 * Read flash status.
 *
 * @param[in] pci_bar - pci bar address
 *
 * Return the status byte on success, negative error number on error.
 */
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
	union bfa_flash_dev_status_reg_u dev_status;
	int status;
	u32 ret_status;
	int i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	if (status)
		return status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	return ret_status;
}

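/*
 * The status byte lands in the most significant byte of the FIFO word, hence
 * the >> 24 above. Bit 0 of that byte is the write-in-progress flag that
 * bfa_flash_read_start() below tests via BFA_FLASH_WIP_MASK.
 */
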
/**
 * @brief
 * Start flash read operation.
 *
 * @param[in] pci_bar - pci bar address
 * @param[in] offset - flash address offset
 * @param[in] len - read data length
 * @param[in] buf - read data buffer
 *
 * Return 0 on success, negative error number on error.
 */
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
			 char *buf)
{
	int status;

	/*
	 * len must be a multiple of 4 and must not exceed the fifo size
	 */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/*
	 * check status
	 */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/*
	 * check if write-in-progress bit is cleared
	 */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
}

/**
 * @brief
 * Check flash read operation.
 *
 * @param[in] pci_bar - pci bar address
 *
 * Return flash device status, 1 if busy, 0 if not.
 */
static u32
bfa_flash_read_check(void __iomem *pci_bar)
{
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;
}

/**
 * @brief
 * End flash read operation.
 *
 * @param[in] pci_bar - pci bar address
 * @param[in] len - read data length
 * @param[in] buf - read data buffer
 */
static void
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
{
	u32 i;

	/*
	 * read data fifo up to 32 words
	 */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);
		*((u32 *) (buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
}

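/*
 * The swab32() above suggests the FLI returns each 32-bit FIFO word with its
 * bytes reversed relative to the flash byte order, so words are byte-swapped
 * as they are copied into the caller's buffer.
 */
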
#define FLASH_BLOCKING_OP_MAX   500
#define FLASH_SEM_LOCK_REG	0x18820

static int
bfa_raw_sem_get(void __iomem *bar)
{
	int	locked;

	locked = readl((bar + FLASH_SEM_LOCK_REG));

	return !locked;
}

bfa_status_t
bfa_flash_sem_get(void __iomem *bar)
{
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;
}

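/*
 * Worst case, bfa_flash_sem_get() retries FLASH_BLOCKING_OP_MAX (500) times
 * with an mdelay(10) between attempts, i.e. it blocks for up to roughly
 * 500 * 10 ms = 5 seconds before giving up with BFA_STATUS_BADFLASH.
 */
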
void
bfa_flash_sem_put(void __iomem *bar)
{
	writel(0, (bar + FLASH_SEM_LOCK_REG));
}

/**
 * @brief
 * Perform flash raw read.
 *
 * @param[in] pci_bar - pci bar address
 * @param[in] offset - flash partition address offset
 * @param[in] buf - read data buffer
 * @param[in] len - read data length
 *
 * Return status.
 */
bfa_status_t
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		       u32 len)
{
	u32 n;
	int status;
	u32 off, l, s, residue, fifo_sz;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		/* read only up to the next fifo-size boundary */
		s = offset + off;
		n = s / fifo_sz;
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
								&buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		/* poll until the fast read completes or the budget runs out */
		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
}
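
/*
 * Worked example of the chunking math in bfa_flash_raw_read() above: with
 * offset = 100 and BFA_FLASH_FIFO_SIZE = 128, the first pass computes
 * s = 100, n = 0, l = 1 * 128 - 100 = 28, so 28 bytes are read to reach the
 * next fifo boundary; every later pass then reads full 128-byte aligned
 * chunks, capped by the remaining residue on the final pass.
 */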