xref: /linux/drivers/scsi/bfa/bfa_svc.c (revision 60e13231561b3a4c5269bfa1ef6c0569ad6f28ec)
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17 
18 #include "bfad_drv.h"
19 #include "bfad_im.h"
20 #include "bfa_plog.h"
21 #include "bfa_cs.h"
22 #include "bfa_modules.h"
23 
24 BFA_TRC_FILE(HAL, FCXP);
25 BFA_MODULE(fcdiag);
26 BFA_MODULE(fcxp);
27 BFA_MODULE(sgpg);
28 BFA_MODULE(lps);
29 BFA_MODULE(fcport);
30 BFA_MODULE(rport);
31 BFA_MODULE(uf);
32 
33 /*
34  * LPS related definitions
35  */
36 #define BFA_LPS_MIN_LPORTS      (1)
37 #define BFA_LPS_MAX_LPORTS      (256)
38 
39 /*
40  * Maximum Vports supported per physical port or vf.
41  */
42 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
43 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
44 
45 
46 /*
47  * FC PORT related definitions
48  */
49 /*
50  * The port is considered disabled if the corresponding physical port or
51  * IOC is explicitly disabled.
52  */
53 #define BFA_PORT_IS_DISABLED(bfa) \
54 	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
55 	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
56 
57 /*
58  * BFA port state machine events
59  */
60 enum bfa_fcport_sm_event {
61 	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
62 	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
63 	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
64 	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
65 	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
66 	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
67 	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event	*/
68 	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
69 	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
70 };
71 
72 /*
73  * BFA port link notification state machine events
74  */
75 
76 enum bfa_fcport_ln_sm_event {
77 	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event	*/
78 	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event	*/
79 	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification	*/
80 };
81 
82 /*
83  * RPORT related definitions
84  */
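
/*
 * The callback macros below pick the rport notification path: when BFA
 * is running in FCS context ((__rp)->bfa->fcs is set) the driver
 * callback is invoked directly, otherwise it is deferred through the
 * hcb callback queue and runs later from completion-handler context.
 */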
85 #define bfa_rport_offline_cb(__rp) do {					\
86 	if ((__rp)->bfa->fcs)						\
87 		bfa_cb_rport_offline((__rp)->rport_drv);      \
88 	else {								\
89 		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
90 				__bfa_cb_rport_offline, (__rp));      \
91 	}								\
92 } while (0)
93 
94 #define bfa_rport_online_cb(__rp) do {					\
95 	if ((__rp)->bfa->fcs)						\
96 		bfa_cb_rport_online((__rp)->rport_drv);      \
97 	else {								\
98 		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
99 				  __bfa_cb_rport_online, (__rp));      \
100 		}							\
101 } while (0)
102 
103 /*
104  * forward declarations of FCXP related functions
105  */
106 static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
107 static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
108 				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
109 static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
110 				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
111 static void	bfa_fcxp_qresume(void *cbarg);
112 static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
113 				struct bfi_fcxp_send_req_s *send_req);
114 
115 /*
116  * forward declarations for LPS functions
117  */
118 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
119 		struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
120 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
121 				struct bfa_iocfc_cfg_s *cfg,
122 				struct bfa_pcidev_s *pcidev);
123 static void bfa_lps_detach(struct bfa_s *bfa);
124 static void bfa_lps_start(struct bfa_s *bfa);
125 static void bfa_lps_stop(struct bfa_s *bfa);
126 static void bfa_lps_iocdisable(struct bfa_s *bfa);
127 static void bfa_lps_login_rsp(struct bfa_s *bfa,
128 				struct bfi_lps_login_rsp_s *rsp);
129 static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
130 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
131 				struct bfi_lps_logout_rsp_s *rsp);
132 static void bfa_lps_reqq_resume(void *lps_arg);
133 static void bfa_lps_free(struct bfa_lps_s *lps);
134 static void bfa_lps_send_login(struct bfa_lps_s *lps);
135 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
136 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
137 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
138 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
139 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
140 
141 /*
142  * forward declaration for LPS state machine
143  */
144 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
145 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
146 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
147 					event);
148 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
149 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
150 					enum bfa_lps_event event);
151 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
152 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
153 					event);
154 
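/*
 * LPS state machine summary (a sketch of the handlers implemented
 * below):
 *
 *	init --LOGIN--> login --FWRSP(OK)--> online
 *	online --LOGOUT--> logout --FWRSP--> init
 *
 * loginwait and logowait stand in for login/logout while the request
 * queue is full; online_n2n_pid_wait likewise waits for queue space
 * before sending the N2N PID. OFFLINE and DELETE from any state return
 * the machine to init, cancelling any pending request-queue wait.
 */
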
155 /*
156  * forward declaration for FC Port functions
157  */
158 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
159 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
160 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
161 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
162 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
163 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
164 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
165 			enum bfa_port_linkstate event, bfa_boolean_t trunk);
166 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
167 				enum bfa_port_linkstate event);
168 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
169 static void bfa_fcport_stats_get_timeout(void *cbarg);
170 static void bfa_fcport_stats_clr_timeout(void *cbarg);
171 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
172 
173 /*
174  * forward declaration for FC PORT state machine
175  */
176 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
177 					enum bfa_fcport_sm_event event);
178 static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
179 					enum bfa_fcport_sm_event event);
180 static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
181 					enum bfa_fcport_sm_event event);
182 static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
183 					enum bfa_fcport_sm_event event);
184 static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
185 					enum bfa_fcport_sm_event event);
186 static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
187 					enum bfa_fcport_sm_event event);
188 static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
189 					enum bfa_fcport_sm_event event);
190 static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
191 					enum bfa_fcport_sm_event event);
192 static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
193 					enum bfa_fcport_sm_event event);
194 static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
195 					enum bfa_fcport_sm_event event);
196 static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
197 					enum bfa_fcport_sm_event event);
198 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
199 					enum bfa_fcport_sm_event event);
200 
201 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
202 					enum bfa_fcport_ln_sm_event event);
203 static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
204 					enum bfa_fcport_ln_sm_event event);
205 static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
206 					enum bfa_fcport_ln_sm_event event);
207 static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
208 					enum bfa_fcport_ln_sm_event event);
209 static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
210 					enum bfa_fcport_ln_sm_event event);
211 static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
212 					enum bfa_fcport_ln_sm_event event);
213 static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
214 					enum bfa_fcport_ln_sm_event event);
215 
216 static struct bfa_sm_table_s hal_port_sm_table[] = {
217 	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
218 	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
219 	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
220 	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
221 	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
222 	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
223 	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
224 	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
225 	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
226 	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
227 	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
228 	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
229 };
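
/*
 * hal_port_sm_table[] maps each state handler to its externally visible
 * bfa_port_state (via BFA_SM()) for bfa_sm_to_state() lookups; note that
 * both bfa_fcport_sm_iocdown and bfa_fcport_sm_iocfail report
 * BFA_PORT_ST_IOCDOWN.
 */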
230 
231 
232 /*
233  * forward declaration for RPORT related functions
234  */
235 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
236 static void		bfa_rport_free(struct bfa_rport_s *rport);
237 static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
238 static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
239 static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
240 static void		__bfa_cb_rport_online(void *cbarg,
241 						bfa_boolean_t complete);
242 static void		__bfa_cb_rport_offline(void *cbarg,
243 						bfa_boolean_t complete);
244 
245 /*
246  * forward declaration for RPORT state machine
247  */
248 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
249 					enum bfa_rport_event event);
250 static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
251 					enum bfa_rport_event event);
252 static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
253 					enum bfa_rport_event event);
254 static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
255 					enum bfa_rport_event event);
256 static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
257 					enum bfa_rport_event event);
258 static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
259 					enum bfa_rport_event event);
260 static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
261 					enum bfa_rport_event event);
262 static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
263 					enum bfa_rport_event event);
264 static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
265 					enum bfa_rport_event event);
266 static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
267 					enum bfa_rport_event event);
268 static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
269 					enum bfa_rport_event event);
270 static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
271 					enum bfa_rport_event event);
272 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
273 					enum bfa_rport_event event);
274 
275 /*
276  * PLOG related definitions
277  */
278 static int
279 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
280 {
281 	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
282 		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
283 		return 1;
284 
285 	if ((pl_rec->log_type == BFA_PL_LOG_TYPE_INT) &&
286 		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
287 		return 1;
288 
289 	return 0;
290 }
291 
292 static u64
293 bfa_get_log_time(void)
294 {
295 	u64 system_time = 0;
296 	struct timeval tv;
297 	do_gettimeofday(&tv);
298 
299 	/* We are interested in seconds only. */
300 	system_time = tv.tv_sec;
301 	return system_time;
302 }
303 
304 static void
305 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
306 {
307 	u16 tail;
308 	struct bfa_plog_rec_s *pl_recp;
309 
310 	if (plog->plog_enabled == 0)
311 		return;
312 
313 	if (plkd_validate_logrec(pl_rec)) {
314 		WARN_ON(1);
315 		return;
316 	}
317 
318 	tail = plog->tail;
319 
320 	pl_recp = &(plog->plog_recs[tail]);
321 
322 	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
323 
324 	pl_recp->tv = bfa_get_log_time();
325 	BFA_PL_LOG_REC_INCR(plog->tail);
326 
327 	if (plog->head == plog->tail)
328 		BFA_PL_LOG_REC_INCR(plog->head);
329 }
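
/*
 * plog_recs[] behaves as a circular log: tail advances for every record
 * added and, once the buffer wraps (head meets tail), head advances as
 * well so the oldest entry is overwritten. Writers therefore never
 * block, and the log always holds the most recent records.
 */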
330 
331 void
332 bfa_plog_init(struct bfa_plog_s *plog)
333 {
334 	memset((char *)plog, 0, sizeof(struct bfa_plog_s));
335 
336 	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
337 	plog->head = plog->tail = 0;
338 	plog->plog_enabled = 1;
339 }
340 
341 void
342 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
343 		enum bfa_plog_eid event,
344 		u16 misc, char *log_str)
345 {
346 	struct bfa_plog_rec_s  lp;
347 
348 	if (plog->plog_enabled) {
349 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
350 		lp.mid = mid;
351 		lp.eid = event;
352 		lp.log_type = BFA_PL_LOG_TYPE_STRING;
353 		lp.misc = misc;
354 		strncpy(lp.log_entry.string_log, log_str,
355 			BFA_PL_STRING_LOG_SZ - 1);
356 		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
357 		bfa_plog_add(plog, &lp);
358 	}
359 }
360 
361 void
362 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
363 		enum bfa_plog_eid event,
364 		u16 misc, u32 *intarr, u32 num_ints)
365 {
366 	struct bfa_plog_rec_s  lp;
367 	u32 i;
368 
369 	if (num_ints > BFA_PL_INT_LOG_SZ)
370 		num_ints = BFA_PL_INT_LOG_SZ;
371 
372 	if (plog->plog_enabled) {
373 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
374 		lp.mid = mid;
375 		lp.eid = event;
376 		lp.log_type = BFA_PL_LOG_TYPE_INT;
377 		lp.misc = misc;
378 
379 		for (i = 0; i < num_ints; i++)
380 			lp.log_entry.int_log[i] = intarr[i];
381 
382 		lp.log_num_ints = (u8) num_ints;
383 
384 		bfa_plog_add(plog, &lp);
385 	}
386 }
387 
388 void
389 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
390 			enum bfa_plog_eid event,
391 			u16 misc, struct fchs_s *fchdr)
392 {
394 	u32	*tmp_int = (u32 *) fchdr;
395 	u32	ints[BFA_PL_INT_LOG_SZ];
396 
397 	if (plog->plog_enabled) {
400 		ints[0] = tmp_int[0];
401 		ints[1] = tmp_int[1];
402 		ints[2] = tmp_int[4];
403 
404 		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
405 	}
406 }
407 
408 void
409 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
410 		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
411 		      u32 pld_w0)
412 {
414 	u32	*tmp_int = (u32 *) fchdr;
415 	u32	ints[BFA_PL_INT_LOG_SZ];
416 
417 	if (plog->plog_enabled) {
420 		ints[0] = tmp_int[0];
421 		ints[1] = tmp_int[1];
422 		ints[2] = tmp_int[4];
423 		ints[3] = pld_w0;
424 
425 		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
426 	}
427 }
428 
429 
430 /*
431  *  fcxp_pvt BFA FCXP private functions
432  */
433 
434 static void
435 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
436 {
437 	u16	i;
438 	struct bfa_fcxp_s *fcxp;
439 
440 	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
441 	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
442 
443 	INIT_LIST_HEAD(&mod->fcxp_free_q);
444 	INIT_LIST_HEAD(&mod->fcxp_active_q);
445 	INIT_LIST_HEAD(&mod->fcxp_unused_q);
446 
447 	mod->fcxp_list = fcxp;
448 
449 	for (i = 0; i < mod->num_fcxps; i++) {
450 		fcxp->fcxp_mod = mod;
451 		fcxp->fcxp_tag = i;
452 
453 		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
454 		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
455 		fcxp->reqq_waiting = BFA_FALSE;
456 
457 		fcxp = fcxp + 1;
458 	}
459 
460 	bfa_mem_kva_curp(mod) = (void *)fcxp;
461 }
462 
463 static void
464 bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
465 		struct bfa_s *bfa)
466 {
467 	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
468 	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
469 	struct bfa_mem_dma_s *seg_ptr;
470 	u16	nsegs, idx, per_seg_fcxp;
471 	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
472 	u32	per_fcxp_sz;
473 
474 	if (num_fcxps == 0)
475 		return;
476 
477 	if (cfg->drvcfg.min_cfg)
478 		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
479 	else
480 		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
481 
482 	/* dma memory */
483 	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
484 	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
485 
486 	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
487 		if (num_fcxps >= per_seg_fcxp) {
488 			num_fcxps -= per_seg_fcxp;
489 			bfa_mem_dma_setup(minfo, seg_ptr,
490 				per_seg_fcxp * per_fcxp_sz);
491 		} else
492 			bfa_mem_dma_setup(minfo, seg_ptr,
493 				num_fcxps * per_fcxp_sz);
494 	}
495 
496 	/* kva memory */
497 	bfa_mem_kva_setup(minfo, fcxp_kva,
498 		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
499 }
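
/*
 * Sizing note for the carve-out above: each fcxp needs two
 * BFA_FCXP_MAX_IBUF_SZ payload buffers under min_cfg, otherwise one
 * IBUF-sized request buffer plus one BFA_FCXP_MAX_LBUF_SZ response
 * buffer. The num_fcxp_reqs * per_fcxp_sz total is split across nsegs
 * DMA segments of at most per_seg_fcxp fcxps each, and a separate kva
 * area holds the bfa_fcxp_s bookkeeping structures.
 */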
500 
501 static void
502 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
503 		struct bfa_pcidev_s *pcidev)
504 {
505 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
506 
507 	mod->bfa = bfa;
508 	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
509 
510 	/*
511 	 * Initialize FCXP request and response payload sizes.
512 	 */
513 	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
514 	if (!cfg->drvcfg.min_cfg)
515 		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
516 
517 	INIT_LIST_HEAD(&mod->wait_q);
518 
519 	claim_fcxps_mem(mod);
520 }
521 
522 static void
523 bfa_fcxp_detach(struct bfa_s *bfa)
524 {
525 }
526 
527 static void
528 bfa_fcxp_start(struct bfa_s *bfa)
529 {
530 }
531 
532 static void
533 bfa_fcxp_stop(struct bfa_s *bfa)
534 {
535 }
536 
537 static void
538 bfa_fcxp_iocdisable(struct bfa_s *bfa)
539 {
540 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
541 	struct bfa_fcxp_s *fcxp;
542 	struct list_head	      *qe, *qen;
543 
544 	/* Enqueue unused fcxp resources to free_q */
545 	list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);
546 
547 	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
548 		fcxp = (struct bfa_fcxp_s *) qe;
549 		if (fcxp->caller == NULL) {
550 			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
551 					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
552 			bfa_fcxp_free(fcxp);
553 		} else {
554 			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
555 			bfa_cb_queue(bfa, &fcxp->hcb_qe,
556 				     __bfa_fcxp_send_cbfn, fcxp);
557 		}
558 	}
559 }
560 
561 static struct bfa_fcxp_s *
562 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
563 {
564 	struct bfa_fcxp_s *fcxp;
565 
566 	bfa_q_deq(&fm->fcxp_free_q, &fcxp);
567 
568 	if (fcxp)
569 		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
570 
571 	return fcxp;
572 }
573 
574 static void
575 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
576 	       struct bfa_s *bfa,
577 	       u8 *use_ibuf,
578 	       u32 *nr_sgles,
579 	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
580 	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
581 	       struct list_head *r_sgpg_q,
582 	       int n_sgles,
583 	       bfa_fcxp_get_sgaddr_t sga_cbfn,
584 	       bfa_fcxp_get_sglen_t sglen_cbfn)
585 {
586 
587 	WARN_ON(bfa == NULL);
588 
589 	bfa_trc(bfa, fcxp->fcxp_tag);
590 
591 	if (n_sgles == 0) {
592 		*use_ibuf = 1;
593 	} else {
594 		WARN_ON(*sga_cbfn == NULL);
595 		WARN_ON(*sglen_cbfn == NULL);
596 
597 		*use_ibuf = 0;
598 		*r_sga_cbfn = sga_cbfn;
599 		*r_sglen_cbfn = sglen_cbfn;
600 
601 		*nr_sgles = n_sgles;
602 
603 		/*
604 		 * alloc required sgpgs
605 		 */
606 		if (n_sgles > BFI_SGE_INLINE)
607 			WARN_ON(1);
608 	}
609 
610 }
611 
612 static void
613 bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
614 	       void *caller, struct bfa_s *bfa, int nreq_sgles,
615 	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
616 	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
617 	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
618 	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
619 {
620 
621 	WARN_ON(bfa == NULL);
622 
623 	bfa_trc(bfa, fcxp->fcxp_tag);
624 
625 	fcxp->caller = caller;
626 
627 	bfa_fcxp_init_reqrsp(fcxp, bfa,
628 		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
629 		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
630 		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
631 
632 	bfa_fcxp_init_reqrsp(fcxp, bfa,
633 		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
634 		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
635 		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
636 
637 }
638 
639 static void
640 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
641 {
642 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
643 	struct bfa_fcxp_wqe_s *wqe;
644 
645 	bfa_q_deq(&mod->wait_q, &wqe);
646 	if (wqe) {
647 		bfa_trc(mod->bfa, fcxp->fcxp_tag);
648 
649 		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
650 			wqe->nrsp_sgles, wqe->req_sga_cbfn,
651 			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
652 			wqe->rsp_sglen_cbfn);
653 
654 		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
655 		return;
656 	}
657 
658 	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
659 	list_del(&fcxp->qe);
660 	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
661 }
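
/*
 * Note the hand-off in bfa_fcxp_put(): a freed fcxp is re-initialized
 * with the parameters of the first waiter on mod->wait_q and handed to
 * it directly, instead of bouncing through fcxp_free_q, so a queued
 * bfa_fcxp_alloc_wait() request is satisfied as soon as any fcxp frees.
 */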
662 
663 static void
664 bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
665 		   bfa_status_t req_status, u32 rsp_len,
666 		   u32 resid_len, struct fchs_s *rsp_fchs)
667 {
668 	/* discarded fcxp completion */
669 }
670 
671 static void
672 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
673 {
674 	struct bfa_fcxp_s *fcxp = cbarg;
675 
676 	if (complete) {
677 		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
678 				fcxp->rsp_status, fcxp->rsp_len,
679 				fcxp->residue_len, &fcxp->rsp_fchs);
680 	} else {
681 		bfa_fcxp_free(fcxp);
682 	}
683 }
684 
685 static void
686 hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
687 {
688 	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
689 	struct bfa_fcxp_s	*fcxp;
690 	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
691 
692 	bfa_trc(bfa, fcxp_tag);
693 
694 	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
695 
696 	/*
697 	 * @todo f/w should not set residue to non-0 when everything
698 	 *	 is received.
699 	 */
700 	if (fcxp_rsp->req_status == BFA_STATUS_OK)
701 		fcxp_rsp->residue_len = 0;
702 	else
703 		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
704 
705 	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
706 
707 	WARN_ON(fcxp->send_cbfn == NULL);
708 
709 	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
710 
711 	if (fcxp->send_cbfn != NULL) {
712 		bfa_trc(mod->bfa, (NULL == fcxp->caller));
713 		if (fcxp->caller == NULL) {
714 			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
715 					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
716 					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
717 			/*
718 			 * fcxp automatically freed on return from the callback
719 			 */
720 			bfa_fcxp_free(fcxp);
721 		} else {
722 			fcxp->rsp_status = fcxp_rsp->req_status;
723 			fcxp->rsp_len = fcxp_rsp->rsp_len;
724 			fcxp->residue_len = fcxp_rsp->residue_len;
725 			fcxp->rsp_fchs = fcxp_rsp->fchs;
726 
727 			bfa_cb_queue(bfa, &fcxp->hcb_qe,
728 					__bfa_fcxp_send_cbfn, fcxp);
729 		}
730 	} else {
731 		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
732 	}
733 }
734 
735 static void
736 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
737 		 struct fchs_s *fchs)
738 {
739 	/*
740 	 * TODO: TX ox_id
741 	 */
742 	if (reqlen > 0) {
743 		if (fcxp->use_ireqbuf) {
744 			u32	pld_w0 =
745 				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
746 
747 			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
748 					BFA_PL_EID_TX,
749 					reqlen + sizeof(struct fchs_s), fchs,
750 					pld_w0);
751 		} else {
752 			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
753 					BFA_PL_EID_TX,
754 					reqlen + sizeof(struct fchs_s),
755 					fchs);
756 		}
757 	} else {
758 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
759 			       reqlen + sizeof(struct fchs_s), fchs);
760 	}
761 }
762 
763 static void
764 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
765 		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
766 {
767 	if (fcxp_rsp->rsp_len > 0) {
768 		if (fcxp->use_irspbuf) {
769 			u32	pld_w0 =
770 				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
771 
772 			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
773 					      BFA_PL_EID_RX,
774 					      (u16) fcxp_rsp->rsp_len,
775 					      &fcxp_rsp->fchs, pld_w0);
776 		} else {
777 			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
778 				       BFA_PL_EID_RX,
779 				       (u16) fcxp_rsp->rsp_len,
780 				       &fcxp_rsp->fchs);
781 		}
782 	} else {
783 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
784 			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
785 	}
786 }
787 
788 /*
789  * Handler to resume sending fcxp when space is available in the CPE queue.
790  */
791 static void
792 bfa_fcxp_qresume(void *cbarg)
793 {
794 	struct bfa_fcxp_s		*fcxp = cbarg;
795 	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
796 	struct bfi_fcxp_send_req_s	*send_req;
797 
798 	fcxp->reqq_waiting = BFA_FALSE;
799 	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
800 	bfa_fcxp_queue(fcxp, send_req);
801 }
802 
803 /*
804  * Queue fcxp send request to firmware.
805  */
806 static void
807 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
808 {
809 	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
810 	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
811 	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
812 	struct bfa_rport_s		*rport = reqi->bfa_rport;
813 
814 	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
815 		    bfa_fn_lpu(bfa));
816 
817 	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
818 	if (rport) {
819 		send_req->rport_fw_hndl = rport->fw_handle;
820 		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
821 		if (send_req->max_frmsz == 0)
822 			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
823 	} else {
824 		send_req->rport_fw_hndl = 0;
825 		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
826 	}
827 
828 	send_req->vf_id = cpu_to_be16(reqi->vf_id);
829 	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
830 	send_req->class = reqi->class;
831 	send_req->rsp_timeout = rspi->rsp_timeout;
832 	send_req->cts = reqi->cts;
833 	send_req->fchs = reqi->fchs;
834 
835 	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
836 	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
837 
838 	/*
839 	 * setup req sgles
840 	 */
841 	if (fcxp->use_ireqbuf == 1) {
842 		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
843 					BFA_FCXP_REQ_PLD_PA(fcxp));
844 	} else {
845 		if (fcxp->nreq_sgles > 0) {
846 			WARN_ON(fcxp->nreq_sgles != 1);
847 			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
848 				fcxp->req_sga_cbfn(fcxp->caller, 0));
849 		} else {
850 			WARN_ON(reqi->req_tot_len != 0);
851 			bfa_alen_set(&send_req->req_alen, 0, 0);
852 		}
853 	}
854 
855 	/*
856 	 * setup rsp sgles
857 	 */
858 	if (fcxp->use_irspbuf == 1) {
859 		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
860 
861 		bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
862 					BFA_FCXP_RSP_PLD_PA(fcxp));
863 	} else {
864 		if (fcxp->nrsp_sgles > 0) {
865 			WARN_ON(fcxp->nrsp_sgles != 1);
866 			bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
867 				fcxp->rsp_sga_cbfn(fcxp->caller, 0));
868 
869 		} else {
870 			WARN_ON(rspi->rsp_maxlen != 0);
871 			bfa_alen_set(&send_req->rsp_alen, 0, 0);
872 		}
873 	}
874 
875 	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
876 
877 	bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
878 
879 	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
880 	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
881 }
882 
883 /*
884  * Allocate an FCXP instance to send a response or to send a request
885  * that has a response. Request/response buffers are allocated by caller.
886  *
887  * @param[in]	bfa		BFA bfa instance
888  * @param[in]	nreq_sgles	Number of SG elements required for request
889  *				buffer. 0, if fcxp internal buffers are used.
890  *				Use bfa_fcxp_get_reqbuf() to get the
891  *				internal req buffer.
892  * @param[in]	nrsp_sgles	Number of SG elements required for response
893  *				buffer. 0, if fcxp internal buffers are used.
894  *				Use bfa_fcxp_get_rspbuf() to get the
895  *				internal rsp buffer.
896  * @param[in]	req_sga_cbfn	function ptr to be called to get a request SG
897  *				address (given the sge index).
898  * @param[in]	req_sglen_cbfn	function ptr to be called to get a request SG
899  *				len (given the sge index).
900  * @param[in]	rsp_sga_cbfn	function ptr to be called to get a response SG
901  *				address (given the sge index).
902  * @param[in]	rsp_sglen_cbfn	function ptr to be called to get a response SG len.
903  *
904  * @return FCXP instance. NULL on failure.
905  */
906 struct bfa_fcxp_s *
907 bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
908 	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
909 	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
910 	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
911 	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
912 {
913 	struct bfa_fcxp_s *fcxp = NULL;
914 
915 	WARN_ON(bfa == NULL);
916 
917 	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
918 	if (fcxp == NULL)
919 		return NULL;
920 
921 	bfa_trc(bfa, fcxp->fcxp_tag);
922 
923 	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
924 			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
925 
926 	return fcxp;
927 }
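
/*
 * Minimal usage sketch using the fcxp internal buffers (nreq_sgles ==
 * nrsp_sgles == 0). Illustrative only: my_send_cbfn and my_cbarg are
 * hypothetical caller-side names, and error handling is omitted.
 *
 *	fcxp = bfa_fcxp_alloc(NULL, bfa, 0, 0, NULL, NULL, NULL, NULL);
 *	if (fcxp == NULL)
 *		return;		(or queue via bfa_fcxp_alloc_wait())
 *	reqbuf = bfa_fcxp_get_reqbuf(fcxp);
 *	... build the request payload in reqbuf ...
 *	bfa_fcxp_send(fcxp, rport, vf_id, lp_tag, BFA_FALSE, FC_CLASS_3,
 *		      reqlen, &fchs, my_send_cbfn, my_cbarg,
 *		      bfa_fcxp_get_maxrsp(bfa), rsp_timeout);
 */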
928 
929 /*
930  * Get the internal request buffer pointer
931  *
932  * @param[in]	fcxp	BFA fcxp pointer
933  *
934  * @return		pointer to the internal request buffer
935  */
936 void *
937 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
938 {
939 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
940 	void	*reqbuf;
941 
942 	WARN_ON(fcxp->use_ireqbuf != 1);
943 	reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
944 				mod->req_pld_sz + mod->rsp_pld_sz);
945 	return reqbuf;
946 }
947 
948 u32
949 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
950 {
951 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
952 
953 	return mod->req_pld_sz;
954 }
955 
956 /*
957  * Get the internal response buffer pointer
958  *
959  * @param[in]	fcxp	BFA fcxp pointer
960  *
961  * @return		pointer to the internal response buffer
962  */
963 void *
964 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
965 {
966 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
967 	void	*fcxp_buf;
968 
969 	WARN_ON(fcxp->use_irspbuf != 1);
970 
971 	fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
972 				mod->req_pld_sz + mod->rsp_pld_sz);
973 
974 	/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
975 	return ((u8 *) fcxp_buf) + mod->req_pld_sz;
976 }
977 
978 /*
979  * Free the BFA FCXP
980  *
981  * @param[in]	fcxp			BFA fcxp pointer
982  *
983  * @return		void
984  */
985 void
986 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
987 {
988 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
989 
990 	WARN_ON(fcxp == NULL);
991 	bfa_trc(mod->bfa, fcxp->fcxp_tag);
992 	bfa_fcxp_put(fcxp);
993 }
994 
995 /*
996  * Send a FCXP request
997  *
998  * @param[in]	fcxp	BFA fcxp pointer
999  * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
1000  * @param[in]	vf_id	virtual Fabric ID
1001  * @param[in]	lp_tag	lport tag
1002  * @param[in]	cts	use Continuous sequence
1003  * @param[in]	cos	fc Class of Service
1004  * @param[in]	reqlen	request length, does not include FCHS length
1005  * @param[in]	fchs	fc Header Pointer. The header content will be copied
1006  *			in by BFA.
1007  *
1008  * @param[in]	cbfn	call back function to be called on receiving
1009  *								the response
1010  * @param[in]	cbarg	arg for cbfn
1011  * @param[in]	rsp_maxlen	maximum expected response length
1012  * @param[in]	rsp_timeout	response timeout
1013  *
1014  * @return		void
1015  */
1016 void
1017 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1018 	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1019 	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1020 	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1021 {
1022 	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
1023 	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
1024 	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
1025 	struct bfi_fcxp_send_req_s	*send_req;
1026 
1027 	bfa_trc(bfa, fcxp->fcxp_tag);
1028 
1029 	/*
1030 	 * setup request/response info
1031 	 */
1032 	reqi->bfa_rport = rport;
1033 	reqi->vf_id = vf_id;
1034 	reqi->lp_tag = lp_tag;
1035 	reqi->class = cos;
1036 	rspi->rsp_timeout = rsp_timeout;
1037 	reqi->cts = cts;
1038 	reqi->fchs = *fchs;
1039 	reqi->req_tot_len = reqlen;
1040 	rspi->rsp_maxlen = rsp_maxlen;
1041 	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1042 	fcxp->send_cbarg = cbarg;
1043 
1044 	/*
1045 	 * If no room in CPE queue, wait for space in request queue
1046 	 */
1047 	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1048 	if (!send_req) {
1049 		bfa_trc(bfa, fcxp->fcxp_tag);
1050 		fcxp->reqq_waiting = BFA_TRUE;
1051 		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1052 		return;
1053 	}
1054 
1055 	bfa_fcxp_queue(fcxp, send_req);
1056 }
1057 
1058 /*
1059  * Abort a BFA FCXP
1060  *
1061  * @param[in]	fcxp	BFA fcxp pointer
1062  *
1063  * @return		void
1064  */
1065 bfa_status_t
1066 bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1067 {
1068 	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1069 	WARN_ON(1);
1070 	return BFA_STATUS_OK;
1071 }
1072 
1073 void
1074 bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1075 	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1076 	       void *caller, int nreq_sgles,
1077 	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1078 	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
1079 	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1080 	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
1081 {
1082 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1083 
1084 	WARN_ON(!list_empty(&mod->fcxp_free_q));
1085 
1086 	wqe->alloc_cbfn = alloc_cbfn;
1087 	wqe->alloc_cbarg = alloc_cbarg;
1088 	wqe->caller = caller;
1089 	wqe->bfa = bfa;
1090 	wqe->nreq_sgles = nreq_sgles;
1091 	wqe->nrsp_sgles = nrsp_sgles;
1092 	wqe->req_sga_cbfn = req_sga_cbfn;
1093 	wqe->req_sglen_cbfn = req_sglen_cbfn;
1094 	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1095 	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1096 
1097 	list_add_tail(&wqe->qe, &mod->wait_q);
1098 }
1099 
1100 void
1101 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1102 {
1103 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1104 
1105 	WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
1106 	list_del(&wqe->qe);
1107 }
1108 
1109 void
1110 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1111 {
1112 	/*
1113 	 * If waiting for room in request queue, cancel reqq wait
1114 	 * and free fcxp.
1115 	 */
1116 	if (fcxp->reqq_waiting) {
1117 		fcxp->reqq_waiting = BFA_FALSE;
1118 		bfa_reqq_wcancel(&fcxp->reqq_wqe);
1119 		bfa_fcxp_free(fcxp);
1120 		return;
1121 	}
1122 
1123 	fcxp->send_cbfn = bfa_fcxp_null_comp;
1124 }
1125 
1126 void
1127 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1128 {
1129 	switch (msg->mhdr.msg_id) {
1130 	case BFI_FCXP_I2H_SEND_RSP:
1131 		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1132 		break;
1133 
1134 	default:
1135 		bfa_trc(bfa, msg->mhdr.msg_id);
1136 		WARN_ON(1);
1137 	}
1138 }
1139 
1140 u32
1141 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1142 {
1143 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1144 
1145 	return mod->rsp_pld_sz;
1146 }
1147 
1148 void
1149 bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1150 {
1151 	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
1152 	struct list_head	*qe;
1153 	int	i;
1154 
1155 	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1156 		bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
1157 		list_add_tail(qe, &mod->fcxp_unused_q);
1158 	}
1159 }
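
/*
 * bfa_fcxp_res_recfg() is invoked when firmware grants fewer fcxp
 * resources (num_fcxp_fw) than were configured: the excess entries are
 * moved from fcxp_free_q to fcxp_unused_q so they can no longer be
 * allocated; bfa_fcxp_iocdisable() splices them back on IOC disable.
 */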
1160 
1161 /*
1162  *  BFA LPS state machine functions
1163  */
1164 
1165 /*
1166  * Init state -- no login
1167  */
1168 static void
1169 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1170 {
1171 	bfa_trc(lps->bfa, lps->bfa_tag);
1172 	bfa_trc(lps->bfa, event);
1173 
1174 	switch (event) {
1175 	case BFA_LPS_SM_LOGIN:
1176 		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1177 			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1178 			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1179 		} else {
1180 			bfa_sm_set_state(lps, bfa_lps_sm_login);
1181 			bfa_lps_send_login(lps);
1182 		}
1183 
1184 		if (lps->fdisc)
1185 			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1186 				BFA_PL_EID_LOGIN, 0, "FDISC Request");
1187 		else
1188 			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1189 				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1190 		break;
1191 
1192 	case BFA_LPS_SM_LOGOUT:
1193 		bfa_lps_logout_comp(lps);
1194 		break;
1195 
1196 	case BFA_LPS_SM_DELETE:
1197 		bfa_lps_free(lps);
1198 		break;
1199 
1200 	case BFA_LPS_SM_RX_CVL:
1201 	case BFA_LPS_SM_OFFLINE:
1202 		break;
1203 
1204 	case BFA_LPS_SM_FWRSP:
1205 		/*
1206 		 * Could happen when the fabric detects a loopback and
1207 		 * discards the lps request. FW will eventually send out
1208 		 * the timeout. Just ignore it.
1209 		 */
1210 		break;
1211 
1212 	default:
1213 		bfa_sm_fault(lps->bfa, event);
1214 	}
1215 }
1216 
1217 /*
1218  * login is in progress -- awaiting response from firmware
1219  */
1220 static void
1221 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1222 {
1223 	bfa_trc(lps->bfa, lps->bfa_tag);
1224 	bfa_trc(lps->bfa, event);
1225 
1226 	switch (event) {
1227 	case BFA_LPS_SM_FWRSP:
1228 		if (lps->status == BFA_STATUS_OK) {
1229 			bfa_sm_set_state(lps, bfa_lps_sm_online);
1230 			if (lps->fdisc)
1231 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1232 					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1233 			else
1234 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1235 					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1236 			/* If N2N, send the assigned PID to FW */
1237 			bfa_trc(lps->bfa, lps->fport);
1238 			bfa_trc(lps->bfa, lps->lp_pid);
1239 
1240 			if (!lps->fport && lps->lp_pid)
1241 				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1242 		} else {
1243 			bfa_sm_set_state(lps, bfa_lps_sm_init);
1244 			if (lps->fdisc)
1245 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1246 					BFA_PL_EID_LOGIN, 0,
1247 					"FDISC Fail (RJT or timeout)");
1248 			else
1249 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1250 					BFA_PL_EID_LOGIN, 0,
1251 					"FLOGI Fail (RJT or timeout)");
1252 		}
1253 		bfa_lps_login_comp(lps);
1254 		break;
1255 
1256 	case BFA_LPS_SM_OFFLINE:
1257 	case BFA_LPS_SM_DELETE:
1258 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1259 		break;
1260 
1261 	case BFA_LPS_SM_SET_N2N_PID:
1262 		bfa_trc(lps->bfa, lps->fport);
1263 		bfa_trc(lps->bfa, lps->lp_pid);
1264 		break;
1265 
1266 	default:
1267 		bfa_sm_fault(lps->bfa, event);
1268 	}
1269 }
1270 
1271 /*
1272  * login pending - awaiting space in request queue
1273  */
1274 static void
1275 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1276 {
1277 	bfa_trc(lps->bfa, lps->bfa_tag);
1278 	bfa_trc(lps->bfa, event);
1279 
1280 	switch (event) {
1281 	case BFA_LPS_SM_RESUME:
1282 		bfa_sm_set_state(lps, bfa_lps_sm_login);
1283 		break;
1284 
1285 	case BFA_LPS_SM_OFFLINE:
1286 	case BFA_LPS_SM_DELETE:
1287 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1288 		bfa_reqq_wcancel(&lps->wqe);
1289 		break;
1290 
1291 	case BFA_LPS_SM_RX_CVL:
1292 		/*
1293 		 * Login was not even sent out; so when getting out
1294 		 * of this state, it will appear like a login retry
1295 		 * after Clear virtual link
1296 		 */
1297 		break;
1298 
1299 	default:
1300 		bfa_sm_fault(lps->bfa, event);
1301 	}
1302 }
1303 
1304 /*
1305  * login complete
1306  */
1307 static void
1308 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1309 {
1310 	bfa_trc(lps->bfa, lps->bfa_tag);
1311 	bfa_trc(lps->bfa, event);
1312 
1313 	switch (event) {
1314 	case BFA_LPS_SM_LOGOUT:
1315 		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1316 			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1317 			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1318 		} else {
1319 			bfa_sm_set_state(lps, bfa_lps_sm_logout);
1320 			bfa_lps_send_logout(lps);
1321 		}
1322 		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1323 			BFA_PL_EID_LOGO, 0, "Logout");
1324 		break;
1325 
1326 	case BFA_LPS_SM_RX_CVL:
1327 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1328 
1329 		/* Let the vport module know about this event */
1330 		bfa_lps_cvl_event(lps);
1331 		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1332 			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1333 		break;
1334 
1335 	case BFA_LPS_SM_SET_N2N_PID:
1336 		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1337 			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
1338 			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1339 		} else
1340 			bfa_lps_send_set_n2n_pid(lps);
1341 		break;
1342 
1343 	case BFA_LPS_SM_OFFLINE:
1344 	case BFA_LPS_SM_DELETE:
1345 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1346 		break;
1347 
1348 	default:
1349 		bfa_sm_fault(lps->bfa, event);
1350 	}
1351 }
1352 
1353 /*
1354  * login complete -- awaiting request queue space to set the N2N PID
1355  */
1356 static void
1357 bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1358 {
1359 	bfa_trc(lps->bfa, lps->bfa_tag);
1360 	bfa_trc(lps->bfa, event);
1361 
1362 	switch (event) {
1363 	case BFA_LPS_SM_RESUME:
1364 		bfa_sm_set_state(lps, bfa_lps_sm_online);
1365 		bfa_lps_send_set_n2n_pid(lps);
1366 		break;
1367 
1368 	case BFA_LPS_SM_LOGOUT:
1369 		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1370 		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1371 			BFA_PL_EID_LOGO, 0, "Logout");
1372 		break;
1373 
1374 	case BFA_LPS_SM_RX_CVL:
1375 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1376 		bfa_reqq_wcancel(&lps->wqe);
1377 
1378 		/* Let the vport module know about this event */
1379 		bfa_lps_cvl_event(lps);
1380 		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1381 			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1382 		break;
1383 
1384 	case BFA_LPS_SM_OFFLINE:
1385 	case BFA_LPS_SM_DELETE:
1386 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1387 		bfa_reqq_wcancel(&lps->wqe);
1388 		break;
1389 
1390 	default:
1391 		bfa_sm_fault(lps->bfa, event);
1392 	}
1393 }
1394 
1395 /*
1396  * logout in progress - awaiting firmware response
1397  */
1398 static void
1399 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1400 {
1401 	bfa_trc(lps->bfa, lps->bfa_tag);
1402 	bfa_trc(lps->bfa, event);
1403 
1404 	switch (event) {
1405 	case BFA_LPS_SM_FWRSP:
1406 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1407 		bfa_lps_logout_comp(lps);
1408 		break;
1409 
1410 	case BFA_LPS_SM_OFFLINE:
1411 	case BFA_LPS_SM_DELETE:
1412 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1413 		break;
1414 
1415 	default:
1416 		bfa_sm_fault(lps->bfa, event);
1417 	}
1418 }
1419 
1420 /*
1421  * logout pending -- awaiting space in request queue
1422  */
1423 static void
1424 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1425 {
1426 	bfa_trc(lps->bfa, lps->bfa_tag);
1427 	bfa_trc(lps->bfa, event);
1428 
1429 	switch (event) {
1430 	case BFA_LPS_SM_RESUME:
1431 		bfa_sm_set_state(lps, bfa_lps_sm_logout);
1432 		bfa_lps_send_logout(lps);
1433 		break;
1434 
1435 	case BFA_LPS_SM_OFFLINE:
1436 	case BFA_LPS_SM_DELETE:
1437 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1438 		bfa_reqq_wcancel(&lps->wqe);
1439 		break;
1440 
1441 	default:
1442 		bfa_sm_fault(lps->bfa, event);
1443 	}
1444 }
1445 
1446 
1447 
1448 /*
1449  *  lps_pvt BFA LPS private functions
1450  */
1451 
1452 /*
1453  * return memory requirement
1454  */
1455 static void
1456 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1457 		struct bfa_s *bfa)
1458 {
1459 	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1460 
1461 	if (cfg->drvcfg.min_cfg)
1462 		bfa_mem_kva_setup(minfo, lps_kva,
1463 			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1464 	else
1465 		bfa_mem_kva_setup(minfo, lps_kva,
1466 			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1467 }
1468 
1469 /*
1470  * bfa module attach at initialization time
1471  */
1472 static void
1473 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1474 	struct bfa_pcidev_s *pcidev)
1475 {
1476 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1477 	struct bfa_lps_s	*lps;
1478 	int			i;
1479 
1481 	if (cfg->drvcfg.min_cfg)
1482 		mod->num_lps = BFA_LPS_MIN_LPORTS;
1483 	else
1484 		mod->num_lps = BFA_LPS_MAX_LPORTS;
1485 	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1486 
1487 	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1488 
1489 	INIT_LIST_HEAD(&mod->lps_free_q);
1490 	INIT_LIST_HEAD(&mod->lps_active_q);
1491 	INIT_LIST_HEAD(&mod->lps_login_q);
1492 
1493 	for (i = 0; i < mod->num_lps; i++, lps++) {
1494 		lps->bfa	= bfa;
1495 		lps->bfa_tag	= (u8) i;
1496 		lps->reqq	= BFA_REQQ_LPS;
1497 		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1498 		list_add_tail(&lps->qe, &mod->lps_free_q);
1499 	}
1500 }
1501 
1502 static void
1503 bfa_lps_detach(struct bfa_s *bfa)
1504 {
1505 }
1506 
1507 static void
1508 bfa_lps_start(struct bfa_s *bfa)
1509 {
1510 }
1511 
1512 static void
1513 bfa_lps_stop(struct bfa_s *bfa)
1514 {
1515 }
1516 
1517 /*
1518  * IOC in disabled state -- consider all lps offline
1519  */
1520 static void
1521 bfa_lps_iocdisable(struct bfa_s *bfa)
1522 {
1523 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1524 	struct bfa_lps_s	*lps;
1525 	struct list_head		*qe, *qen;
1526 
1527 	list_for_each_safe(qe, qen, &mod->lps_active_q) {
1528 		lps = (struct bfa_lps_s *) qe;
1529 		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1530 	}
1531 	list_for_each_safe(qe, qen, &mod->lps_login_q) {
1532 		lps = (struct bfa_lps_s *) qe;
1533 		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1534 	}
1535 	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
1536 }
1537 
1538 /*
1539  * Firmware login response
1540  */
1541 static void
1542 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1543 {
1544 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1545 	struct bfa_lps_s	*lps;
1546 
1547 	WARN_ON(rsp->bfa_tag >= mod->num_lps);
1548 	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1549 
1550 	lps->status = rsp->status;
1551 	switch (rsp->status) {
1552 	case BFA_STATUS_OK:
1553 		lps->fw_tag	= rsp->fw_tag;
1554 		lps->fport	= rsp->f_port;
1555 		if (lps->fport)
1556 			lps->lp_pid = rsp->lp_pid;
1557 		lps->npiv_en	= rsp->npiv_en;
1558 		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
1559 		lps->pr_pwwn	= rsp->port_name;
1560 		lps->pr_nwwn	= rsp->node_name;
1561 		lps->auth_req	= rsp->auth_req;
1562 		lps->lp_mac	= rsp->lp_mac;
1563 		lps->brcd_switch = rsp->brcd_switch;
1564 		lps->fcf_mac	= rsp->fcf_mac;
1565 		lps->pr_bbscn	= rsp->bb_scn;
1566 
1567 		break;
1568 
1569 	case BFA_STATUS_FABRIC_RJT:
1570 		lps->lsrjt_rsn = rsp->lsrjt_rsn;
1571 		lps->lsrjt_expl = rsp->lsrjt_expl;
1572 
1573 		break;
1574 
1575 	case BFA_STATUS_EPROTOCOL:
1576 		lps->ext_status = rsp->ext_status;
1577 
1578 		break;
1579 
1580 	case BFA_STATUS_VPORT_MAX:
1581 		if (rsp->ext_status)
1582 			bfa_lps_no_res(lps, rsp->ext_status);
1583 		break;
1584 
1585 	default:
1586 		/* Nothing to do with other status */
1587 		break;
1588 	}
1589 
1590 	list_del(&lps->qe);
1591 	list_add_tail(&lps->qe, &mod->lps_active_q);
1592 	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1593 }
1594 
1595 static void
1596 bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1597 {
1598 	struct bfa_s		*bfa = first_lps->bfa;
1599 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1600 	struct list_head	*qe, *qe_next;
1601 	struct bfa_lps_s	*lps;
1602 
1603 	bfa_trc(bfa, count);
1604 
1605 	qe = bfa_q_next(first_lps);
1606 
1607 	while (count && qe) {
1608 		qe_next = bfa_q_next(qe);
1609 		lps = (struct bfa_lps_s *)qe;
1610 		bfa_trc(bfa, lps->bfa_tag);
1611 		lps->status = first_lps->status;
1612 		list_del(&lps->qe);
1613 		list_add_tail(&lps->qe, &mod->lps_active_q);
1614 		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1615 		qe = qe_next;
1616 		count--;
1617 	}
1618 }
1619 
1620 /*
1621  * Firmware logout response
1622  */
1623 static void
1624 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1625 {
1626 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1627 	struct bfa_lps_s	*lps;
1628 
1629 	WARN_ON(rsp->bfa_tag >= mod->num_lps);
1630 	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1631 
1632 	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1633 }
1634 
1635 /*
1636  * Firmware received a Clear virtual link request (for FCoE)
1637  */
1638 static void
1639 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1640 {
1641 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1642 	struct bfa_lps_s	*lps;
1643 
1644 	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1645 
1646 	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1647 }
1648 
1649 /*
1650  * Space is available in request queue, resume queueing request to firmware.
1651  */
1652 static void
1653 bfa_lps_reqq_resume(void *lps_arg)
1654 {
1655 	struct bfa_lps_s	*lps = lps_arg;
1656 
1657 	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1658 }
1659 
1660 /*
1661  * lps is freed -- triggered by vport delete
1662  */
1663 static void
1664 bfa_lps_free(struct bfa_lps_s *lps)
1665 {
1666 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
1667 
1668 	lps->lp_pid = 0;
1669 	list_del(&lps->qe);
1670 	list_add_tail(&lps->qe, &mod->lps_free_q);
1671 }
1672 
1673 /*
1674  * send login request to firmware
1675  */
1676 static void
1677 bfa_lps_send_login(struct bfa_lps_s *lps)
1678 {
1679 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
1680 	struct bfi_lps_login_req_s	*m;
1681 
1682 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1683 	WARN_ON(!m);
1684 
1685 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1686 		bfa_fn_lpu(lps->bfa));
1687 
1688 	m->bfa_tag	= lps->bfa_tag;
1689 	m->alpa		= lps->alpa;
1690 	m->pdu_size	= cpu_to_be16(lps->pdusz);
1691 	m->pwwn		= lps->pwwn;
1692 	m->nwwn		= lps->nwwn;
1693 	m->fdisc	= lps->fdisc;
1694 	m->auth_en	= lps->auth_en;
1695 	m->bb_scn	= lps->bb_scn;
1696 
1697 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1698 	list_del(&lps->qe);
1699 	list_add_tail(&lps->qe, &mod->lps_login_q);
1700 }
1701 
1702 /*
1703  * send logout request to firmware
1704  */
1705 static void
1706 bfa_lps_send_logout(struct bfa_lps_s *lps)
1707 {
1708 	struct bfi_lps_logout_req_s *m;
1709 
1710 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1711 	WARN_ON(!m);
1712 
1713 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1714 		bfa_fn_lpu(lps->bfa));
1715 
1716 	m->fw_tag = lps->fw_tag;
1717 	m->port_name = lps->pwwn;
1718 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1719 }
1720 
1721 /*
1722  * send n2n pid set request to firmware
1723  */
1724 static void
1725 bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1726 {
1727 	struct bfi_lps_n2n_pid_req_s *m;
1728 
1729 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1730 	WARN_ON(!m);
1731 
1732 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1733 		bfa_fn_lpu(lps->bfa));
1734 
1735 	m->fw_tag = lps->fw_tag;
1736 	m->lp_pid = lps->lp_pid;
1737 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1738 }
1739 
1740 /*
1741  * Indirect login completion handler for non-fcs
1742  */
1743 static void
1744 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1745 {
1746 	struct bfa_lps_s *lps	= arg;
1747 
1748 	if (!complete)
1749 		return;
1750 
1751 	if (lps->fdisc)
1752 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1753 	else
1754 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1755 }
1756 
1757 /*
1758  * Login completion handler -- direct call for fcs, queue for others
1759  */
1760 static void
1761 bfa_lps_login_comp(struct bfa_lps_s *lps)
1762 {
1763 	if (!lps->bfa->fcs) {
1764 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1765 			lps);
1766 		return;
1767 	}
1768 
1769 	if (lps->fdisc)
1770 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1771 	else
1772 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1773 }
1774 
1775 /*
1776  * Indirect logout completion handler for non-fcs
1777  */
1778 static void
1779 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1780 {
1781 	struct bfa_lps_s *lps	= arg;
1782 
1783 	if (!complete)
1784 		return;
1785 
1786 	if (lps->fdisc)
1787 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1788 }
1789 
1790 /*
1791  * Logout completion handler -- direct call for fcs, queue for others
1792  */
1793 static void
1794 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1795 {
1796 	if (!lps->bfa->fcs) {
1797 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1798 			lps);
1799 		return;
1800 	}
1801 	if (lps->fdisc)
1802 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1803 }
1804 
1805 /*
1806  * Clear virtual link completion handler for non-fcs
1807  */
1808 static void
1809 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1810 {
1811 	struct bfa_lps_s *lps	= arg;
1812 
1813 	if (!complete)
1814 		return;
1815 
1816 	/* Clear virtual link to base port will result in link down */
1817 	if (lps->fdisc)
1818 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1819 }
1820 
1821 /*
1822  * Received Clear virtual link event --direct call for fcs,
1823  * queue for others
1824  */
1825 static void
1826 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1827 {
1828 	if (!lps->bfa->fcs) {
1829 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1830 			lps);
1831 		return;
1832 	}
1833 
1834 	/* Clear virtual link to base port will result in link down */
1835 	if (lps->fdisc)
1836 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1837 }
1838 
1839 
1840 
1841 /*
1842  *  lps_public BFA LPS public functions
1843  */
1844 
1845 u32
1846 bfa_lps_get_max_vport(struct bfa_s *bfa)
1847 {
1848 	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1849 		return BFA_LPS_MAX_VPORTS_SUPP_CT;
1850 	else
1851 		return BFA_LPS_MAX_VPORTS_SUPP_CB;
1852 }
1853 
1854 /*
1855  * Allocate a lport service tag.
1856  */
1857 struct bfa_lps_s  *
1858 bfa_lps_alloc(struct bfa_s *bfa)
1859 {
1860 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1861 	struct bfa_lps_s	*lps = NULL;
1862 
1863 	bfa_q_deq(&mod->lps_free_q, &lps);
1864 
1865 	if (lps == NULL)
1866 		return NULL;
1867 
1868 	list_add_tail(&lps->qe, &mod->lps_active_q);
1869 
1870 	bfa_sm_set_state(lps, bfa_lps_sm_init);
1871 	return lps;
1872 }
1873 
1874 /*
1875  * Free lport service tag. This can be called anytime after an alloc.
1876  * No need to wait for any pending login/logout completions.
1877  */
1878 void
1879 bfa_lps_delete(struct bfa_lps_s *lps)
1880 {
1881 	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1882 }
1883 
1884 /*
1885  * Initiate a lport login.
1886  */
1887 void
1888 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1889 	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
1890 {
1891 	lps->uarg	= uarg;
1892 	lps->alpa	= alpa;
1893 	lps->pdusz	= pdusz;
1894 	lps->pwwn	= pwwn;
1895 	lps->nwwn	= nwwn;
1896 	lps->fdisc	= BFA_FALSE;
1897 	lps->auth_en	= auth_en;
1898 	lps->bb_scn	= bb_scn;
1899 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1900 }
1901 
1902 /*
1903  * Initiate a lport fdisc login.
1904  */
1905 void
1906 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1907 	wwn_t nwwn)
1908 {
1909 	lps->uarg	= uarg;
1910 	lps->alpa	= 0;
1911 	lps->pdusz	= pdusz;
1912 	lps->pwwn	= pwwn;
1913 	lps->nwwn	= nwwn;
1914 	lps->fdisc	= BFA_TRUE;
1915 	lps->auth_en	= BFA_FALSE;
1916 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1917 }
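
/*
 * Typical lport login flow, as a sketch; completion is delivered via
 * bfa_cb_lps_flogi_comp() or bfa_cb_lps_fdisc_comp() depending on which
 * login was initiated:
 *
 *	lps = bfa_lps_alloc(bfa);
 *	bfa_lps_fdisc(lps, uarg, pdusz, pwwn, nwwn);
 *	... firmware response drives the completion callback ...
 *	bfa_lps_fdisclogo(lps);		logout an FDISC session
 *	bfa_lps_delete(lps);		free the tag when done
 */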
1918 
1919 
1920 /*
1921  * Initiate a lport FDISC logout.
1922  */
1923 void
1924 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1925 {
1926 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1927 }
1928 
1929 u8
1930 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1931 {
1932 	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1933 
1934 	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1935 }
1936 
1937 /*
1938  * Return the lport service tag for the given PID.
1939  */
1940 u8
1941 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1942 {
1943 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1944 	struct bfa_lps_s	*lps;
1945 	int			i;
1946 
1947 	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1948 		if (lps->lp_pid == pid)
1949 			return lps->bfa_tag;
1950 	}
1951 
1952 	/* Return base port tag anyway */
1953 	return 0;
1954 }
1955 
1956 
1957 /*
1958  * return port id assigned to the base lport
1959  */
1960 u32
1961 bfa_lps_get_base_pid(struct bfa_s *bfa)
1962 {
1963 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1964 
1965 	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1966 }
1967 
1968 /*
1969  * Set PID in case of n2n (which is assigned during PLOGI)
1970  */
1971 void
1972 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
1973 {
1974 	bfa_trc(lps->bfa, lps->bfa_tag);
1975 	bfa_trc(lps->bfa, n2n_pid);
1976 
1977 	lps->lp_pid = n2n_pid;
1978 	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1979 }
1980 
1981 /*
1982  * LPS firmware message class handler.
1983  */
1984 void
1985 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1986 {
1987 	union bfi_lps_i2h_msg_u	msg;
1988 
1989 	bfa_trc(bfa, m->mhdr.msg_id);
1990 	msg.msg = m;
1991 
1992 	switch (m->mhdr.msg_id) {
1993 	case BFI_LPS_I2H_LOGIN_RSP:
1994 		bfa_lps_login_rsp(bfa, msg.login_rsp);
1995 		break;
1996 
1997 	case BFI_LPS_I2H_LOGOUT_RSP:
1998 		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
1999 		break;
2000 
2001 	case BFI_LPS_I2H_CVL_EVENT:
2002 		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2003 		break;
2004 
2005 	default:
2006 		bfa_trc(bfa, m->mhdr.msg_id);
2007 		WARN_ON(1);
2008 	}
2009 }
2010 
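/*
 * Post a port asynchronous event notification (AEN) to the IM driver.
 */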
2011 static void
2012 bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
2013 {
2014 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2015 	struct bfa_aen_entry_s  *aen_entry;
2016 
2017 	bfad_get_aen_entry(bfad, aen_entry);
2018 	if (!aen_entry)
2019 		return;
2020 
2021 	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
2022 	aen_entry->aen_data.port.pwwn = fcport->pwwn;
2023 
2024 	/* Send the AEN notification */
2025 	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
2026 				  BFA_AEN_CAT_PORT, event);
2027 }
2028 
2029 /*
2030  * FC PORT state machine functions
2031  */
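/*
 * Port is uninitialized; waiting for a start event once the IOC is
 * configured and BFA is started.
 */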
2032 static void
2033 bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2034 			enum bfa_fcport_sm_event event)
2035 {
2036 	bfa_trc(fcport->bfa, event);
2037 
2038 	switch (event) {
2039 	case BFA_FCPORT_SM_START:
2040 		/*
2041 		 * Start event after IOC is configured and BFA is started.
2042 		 */
2043 		fcport->use_flash_cfg = BFA_TRUE;
2044 
2045 		if (bfa_fcport_send_enable(fcport)) {
2046 			bfa_trc(fcport->bfa, BFA_TRUE);
2047 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2048 		} else {
2049 			bfa_trc(fcport->bfa, BFA_FALSE);
2050 			bfa_sm_set_state(fcport,
2051 					bfa_fcport_sm_enabling_qwait);
2052 		}
2053 		break;
2054 
2055 	case BFA_FCPORT_SM_ENABLE:
2056 		/*
2057 		 * Port is persistently configured to be in enabled state. Do
2058 		 * not change state. Port enabling is done when START event is
2059 		 * received.
2060 		 */
2061 		break;
2062 
2063 	case BFA_FCPORT_SM_DISABLE:
2064 		/*
2065 		 * If a port is persistently configured to be disabled, the
2066 		 * first event will be a port disable request.
2067 		 */
2068 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2069 		break;
2070 
2071 	case BFA_FCPORT_SM_HWFAIL:
2072 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2073 		break;
2074 
2075 	default:
2076 		bfa_sm_fault(fcport->bfa, event);
2077 	}
2078 }
2079 
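/*
 * Port enable request is waiting for request queue space.
 */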
2080 static void
2081 bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2082 				enum bfa_fcport_sm_event event)
2083 {
2084 	char pwwn_buf[BFA_STRING_32];
2085 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2086 	bfa_trc(fcport->bfa, event);
2087 
2088 	switch (event) {
2089 	case BFA_FCPORT_SM_QRESUME:
2090 		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2091 		bfa_fcport_send_enable(fcport);
2092 		break;
2093 
2094 	case BFA_FCPORT_SM_STOP:
2095 		bfa_reqq_wcancel(&fcport->reqq_wait);
2096 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2097 		break;
2098 
2099 	case BFA_FCPORT_SM_ENABLE:
2100 		/*
2101 		 * An enable is already in progress.
2102 		 */
2103 		break;
2104 
2105 	case BFA_FCPORT_SM_DISABLE:
2106 		/*
2107 		 * Just send disable request to firmware when room becomes
2108 		 * available in request queue.
2109 		 */
2110 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2111 		bfa_reqq_wcancel(&fcport->reqq_wait);
2112 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2113 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2114 		wwn2str(pwwn_buf, fcport->pwwn);
2115 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2116 			"Base port disabled: WWN = %s\n", pwwn_buf);
2117 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2118 		break;
2119 
2120 	case BFA_FCPORT_SM_LINKUP:
2121 	case BFA_FCPORT_SM_LINKDOWN:
2122 		/*
2123 		 * Possible to get link events when doing back-to-back
2124 		 * enable/disables.
2125 		 */
2126 		break;
2127 
2128 	case BFA_FCPORT_SM_HWFAIL:
2129 		bfa_reqq_wcancel(&fcport->reqq_wait);
2130 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2131 		break;
2132 
2133 	default:
2134 		bfa_sm_fault(fcport->bfa, event);
2135 	}
2136 }
2137 
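/*
 * Port enable request has been sent to firmware; waiting for the
 * response.
 */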
2138 static void
2139 bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2140 						enum bfa_fcport_sm_event event)
2141 {
2142 	char pwwn_buf[BFA_STRING_32];
2143 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2144 	bfa_trc(fcport->bfa, event);
2145 
2146 	switch (event) {
2147 	case BFA_FCPORT_SM_FWRSP:
2148 	case BFA_FCPORT_SM_LINKDOWN:
2149 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2150 		break;
2151 
2152 	case BFA_FCPORT_SM_LINKUP:
2153 		bfa_fcport_update_linkinfo(fcport);
2154 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2155 
2156 		WARN_ON(!fcport->event_cbfn);
2157 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2158 		break;
2159 
2160 	case BFA_FCPORT_SM_ENABLE:
2161 		/*
2162 		 * Already being enabled.
2163 		 */
2164 		break;
2165 
2166 	case BFA_FCPORT_SM_DISABLE:
2167 		if (bfa_fcport_send_disable(fcport))
2168 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2169 		else
2170 			bfa_sm_set_state(fcport,
2171 					 bfa_fcport_sm_disabling_qwait);
2172 
2173 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2174 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2175 		wwn2str(pwwn_buf, fcport->pwwn);
2176 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2177 			"Base port disabled: WWN = %s\n", pwwn_buf);
2178 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2179 		break;
2180 
2181 	case BFA_FCPORT_SM_STOP:
2182 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2183 		break;
2184 
2185 	case BFA_FCPORT_SM_HWFAIL:
2186 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2187 		break;
2188 
2189 	default:
2190 		bfa_sm_fault(fcport->bfa, event);
2191 	}
2192 }
2193 
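/*
 * Port is enabled; link is down.
 */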
2194 static void
2195 bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2196 						enum bfa_fcport_sm_event event)
2197 {
2198 	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2199 	char pwwn_buf[BFA_STRING_32];
2200 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2201 
2202 	bfa_trc(fcport->bfa, event);
2203 
2204 	switch (event) {
2205 	case BFA_FCPORT_SM_LINKUP:
2206 		bfa_fcport_update_linkinfo(fcport);
2207 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2208 		WARN_ON(!fcport->event_cbfn);
2209 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2210 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2211 		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2212 
2213 			bfa_trc(fcport->bfa,
2214 				pevent->link_state.vc_fcf.fcf.fipenabled);
2215 			bfa_trc(fcport->bfa,
2216 				pevent->link_state.vc_fcf.fcf.fipfailed);
2217 
2218 			if (pevent->link_state.vc_fcf.fcf.fipfailed)
2219 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2220 					BFA_PL_EID_FIP_FCF_DISC, 0,
2221 					"FIP FCF Discovery Failed");
2222 			else
2223 				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2224 					BFA_PL_EID_FIP_FCF_DISC, 0,
2225 					"FIP FCF Discovered");
2226 		}
2227 
2228 		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2229 		wwn2str(pwwn_buf, fcport->pwwn);
2230 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2231 			"Base port online: WWN = %s\n", pwwn_buf);
2232 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
2233 
2234 		/* If QoS is enabled and it is not online, send AEN */
2235 		if (fcport->cfg.qos_enabled &&
2236 		    fcport->qos_attr.state != BFA_QOS_ONLINE)
2237 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
2238 		break;
2239 
2240 	case BFA_FCPORT_SM_LINKDOWN:
2241 		/*
2242 		 * Possible to get link down event.
2243 		 */
2244 		break;
2245 
2246 	case BFA_FCPORT_SM_ENABLE:
2247 		/*
2248 		 * Already enabled.
2249 		 */
2250 		break;
2251 
2252 	case BFA_FCPORT_SM_DISABLE:
2253 		if (bfa_fcport_send_disable(fcport))
2254 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2255 		else
2256 			bfa_sm_set_state(fcport,
2257 					 bfa_fcport_sm_disabling_qwait);
2258 
2259 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2260 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2261 		wwn2str(pwwn_buf, fcport->pwwn);
2262 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2263 			"Base port disabled: WWN = %s\n", pwwn_buf);
2264 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2265 		break;
2266 
2267 	case BFA_FCPORT_SM_STOP:
2268 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2269 		break;
2270 
2271 	case BFA_FCPORT_SM_HWFAIL:
2272 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2273 		break;
2274 
2275 	default:
2276 		bfa_sm_fault(fcport->bfa, event);
2277 	}
2278 }
2279 
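/*
 * Port is enabled; link is up.
 */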
2280 static void
2281 bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2282 	enum bfa_fcport_sm_event event)
2283 {
2284 	char pwwn_buf[BFA_STRING_32];
2285 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2286 
2287 	bfa_trc(fcport->bfa, event);
2288 
2289 	switch (event) {
2290 	case BFA_FCPORT_SM_ENABLE:
2291 		/*
2292 		 * Already enabled.
2293 		 */
2294 		break;
2295 
2296 	case BFA_FCPORT_SM_DISABLE:
2297 		if (bfa_fcport_send_disable(fcport))
2298 			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2299 		else
2300 			bfa_sm_set_state(fcport,
2301 					 bfa_fcport_sm_disabling_qwait);
2302 
2303 		bfa_fcport_reset_linkinfo(fcport);
2304 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2305 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2306 				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2307 		wwn2str(pwwn_buf, fcport->pwwn);
2308 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2309 			"Base port offline: WWN = %s\n", pwwn_buf);
2310 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2311 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2312 			"Base port disabled: WWN = %s\n", pwwn_buf);
2313 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
2314 		break;
2315 
2316 	case BFA_FCPORT_SM_LINKDOWN:
2317 		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2318 		bfa_fcport_reset_linkinfo(fcport);
2319 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2320 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2321 				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2322 		wwn2str(pwwn_buf, fcport->pwwn);
2323 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2324 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2325 				"Base port offline: WWN = %s\n", pwwn_buf);
2326 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2327 		} else {
2328 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2329 				"Base port (WWN = %s) "
2330 				"lost fabric connectivity\n", pwwn_buf);
2331 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2332 		}
2333 		break;
2334 
2335 	case BFA_FCPORT_SM_STOP:
2336 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2337 		bfa_fcport_reset_linkinfo(fcport);
2338 		wwn2str(pwwn_buf, fcport->pwwn);
2339 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2340 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2341 				"Base port offline: WWN = %s\n", pwwn_buf);
2342 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2343 		} else {
2344 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2345 				"Base port (WWN = %s) "
2346 				"lost fabric connectivity\n", pwwn_buf);
2347 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2348 		}
2349 		break;
2350 
2351 	case BFA_FCPORT_SM_HWFAIL:
2352 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2353 		bfa_fcport_reset_linkinfo(fcport);
2354 		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2355 		wwn2str(pwwn_buf, fcport->pwwn);
2356 		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
2357 			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2358 				"Base port offline: WWN = %s\n", pwwn_buf);
2359 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2360 		} else {
2361 			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
2362 				"Base port (WWN = %s) "
2363 				"lost fabric connectivity\n", pwwn_buf);
2364 			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2365 		}
2366 		break;
2367 
2368 	default:
2369 		bfa_sm_fault(fcport->bfa, event);
2370 	}
2371 }
2372 
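/*
 * Port disable request is waiting for request queue space.
 */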
2373 static void
2374 bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2375 				 enum bfa_fcport_sm_event event)
2376 {
2377 	bfa_trc(fcport->bfa, event);
2378 
2379 	switch (event) {
2380 	case BFA_FCPORT_SM_QRESUME:
2381 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2382 		bfa_fcport_send_disable(fcport);
2383 		break;
2384 
2385 	case BFA_FCPORT_SM_STOP:
2386 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2387 		bfa_reqq_wcancel(&fcport->reqq_wait);
2388 		break;
2389 
2390 	case BFA_FCPORT_SM_ENABLE:
2391 		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2392 		break;
2393 
2394 	case BFA_FCPORT_SM_DISABLE:
2395 		/*
2396 		 * Already being disabled.
2397 		 */
2398 		break;
2399 
2400 	case BFA_FCPORT_SM_LINKUP:
2401 	case BFA_FCPORT_SM_LINKDOWN:
2402 		/*
2403 		 * Possible to get link events when doing back-to-back
2404 		 * enable/disables.
2405 		 */
2406 		break;
2407 
2408 	case BFA_FCPORT_SM_HWFAIL:
2409 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2410 		bfa_reqq_wcancel(&fcport->reqq_wait);
2411 		break;
2412 
2413 	default:
2414 		bfa_sm_fault(fcport->bfa, event);
2415 	}
2416 }
2417 
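/*
 * An enable was requested while a disable is waiting for queue space;
 * the disable is sent followed by the enable once space resumes.
 */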
2418 static void
2419 bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2420 				 enum bfa_fcport_sm_event event)
2421 {
2422 	bfa_trc(fcport->bfa, event);
2423 
2424 	switch (event) {
2425 	case BFA_FCPORT_SM_QRESUME:
2426 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2427 		bfa_fcport_send_disable(fcport);
2428 		if (bfa_fcport_send_enable(fcport))
2429 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2430 		else
2431 			bfa_sm_set_state(fcport,
2432 					 bfa_fcport_sm_enabling_qwait);
2433 		break;
2434 
2435 	case BFA_FCPORT_SM_STOP:
2436 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2437 		bfa_reqq_wcancel(&fcport->reqq_wait);
2438 		break;
2439 
2440 	case BFA_FCPORT_SM_ENABLE:
2441 		break;
2442 
2443 	case BFA_FCPORT_SM_DISABLE:
2444 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2445 		break;
2446 
2447 	case BFA_FCPORT_SM_LINKUP:
2448 	case BFA_FCPORT_SM_LINKDOWN:
2449 		/*
2450 		 * Possible to get link events when doing back-to-back
2451 		 * enable/disables.
2452 		 */
2453 		break;
2454 
2455 	case BFA_FCPORT_SM_HWFAIL:
2456 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2457 		bfa_reqq_wcancel(&fcport->reqq_wait);
2458 		break;
2459 
2460 	default:
2461 		bfa_sm_fault(fcport->bfa, event);
2462 	}
2463 }
2464 
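/*
 * Port disable request has been sent to firmware; waiting for the
 * response.
 */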
2465 static void
2466 bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2467 						enum bfa_fcport_sm_event event)
2468 {
2469 	char pwwn_buf[BFA_STRING_32];
2470 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2471 	bfa_trc(fcport->bfa, event);
2472 
2473 	switch (event) {
2474 	case BFA_FCPORT_SM_FWRSP:
2475 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2476 		break;
2477 
2478 	case BFA_FCPORT_SM_DISABLE:
2479 		/*
2480 		 * Already being disabled.
2481 		 */
2482 		break;
2483 
2484 	case BFA_FCPORT_SM_ENABLE:
2485 		if (bfa_fcport_send_enable(fcport))
2486 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2487 		else
2488 			bfa_sm_set_state(fcport,
2489 					 bfa_fcport_sm_enabling_qwait);
2490 
2491 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2492 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2493 		wwn2str(pwwn_buf, fcport->pwwn);
2494 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2495 			"Base port enabled: WWN = %s\n", pwwn_buf);
2496 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2497 		break;
2498 
2499 	case BFA_FCPORT_SM_STOP:
2500 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2501 		break;
2502 
2503 	case BFA_FCPORT_SM_LINKUP:
2504 	case BFA_FCPORT_SM_LINKDOWN:
2505 		/*
2506 		 * Possible to get link events when doing back-to-back
2507 		 * enable/disables.
2508 		 */
2509 		break;
2510 
2511 	case BFA_FCPORT_SM_HWFAIL:
2512 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2513 		break;
2514 
2515 	default:
2516 		bfa_sm_fault(fcport->bfa, event);
2517 	}
2518 }
2519 
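/*
 * Port is disabled.
 */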
2520 static void
2521 bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2522 						enum bfa_fcport_sm_event event)
2523 {
2524 	char pwwn_buf[BFA_STRING_32];
2525 	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2526 	bfa_trc(fcport->bfa, event);
2527 
2528 	switch (event) {
2529 	case BFA_FCPORT_SM_START:
2530 		/*
2531 		 * Ignore start event for a port that is disabled.
2532 		 */
2533 		break;
2534 
2535 	case BFA_FCPORT_SM_STOP:
2536 		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2537 		break;
2538 
2539 	case BFA_FCPORT_SM_ENABLE:
2540 		if (bfa_fcport_send_enable(fcport))
2541 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2542 		else
2543 			bfa_sm_set_state(fcport,
2544 					 bfa_fcport_sm_enabling_qwait);
2545 
2546 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2547 				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2548 		wwn2str(pwwn_buf, fcport->pwwn);
2549 		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
2550 			"Base port enabled: WWN = %s\n", pwwn_buf);
2551 		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
2552 		break;
2553 
2554 	case BFA_FCPORT_SM_DISABLE:
2555 		/*
2556 		 * Already disabled.
2557 		 */
2558 		break;
2559 
2560 	case BFA_FCPORT_SM_HWFAIL:
2561 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2562 		break;
2563 
2564 	default:
2565 		bfa_sm_fault(fcport->bfa, event);
2566 	}
2567 }
2568 
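/*
 * Port is stopped; only a start event is honored.
 */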
2569 static void
2570 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2571 			 enum bfa_fcport_sm_event event)
2572 {
2573 	bfa_trc(fcport->bfa, event);
2574 
2575 	switch (event) {
2576 	case BFA_FCPORT_SM_START:
2577 		if (bfa_fcport_send_enable(fcport))
2578 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2579 		else
2580 			bfa_sm_set_state(fcport,
2581 					 bfa_fcport_sm_enabling_qwait);
2582 		break;
2583 
2584 	default:
2585 		/*
2586 		 * Ignore all other events.
2587 		 */
2588 		;
2589 	}
2590 }
2591 
2592 /*
2593  * Port is enabled. IOC is down/failed.
2594  */
2595 static void
2596 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2597 			 enum bfa_fcport_sm_event event)
2598 {
2599 	bfa_trc(fcport->bfa, event);
2600 
2601 	switch (event) {
2602 	case BFA_FCPORT_SM_START:
2603 		if (bfa_fcport_send_enable(fcport))
2604 			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2605 		else
2606 			bfa_sm_set_state(fcport,
2607 					 bfa_fcport_sm_enabling_qwait);
2608 		break;
2609 
2610 	default:
2611 		/*
2612 		 * Ignore all events.
2613 		 */
2614 		;
2615 	}
2616 }
2617 
2618 /*
2619  * Port is disabled. IOC is down/failed.
2620  */
2621 static void
2622 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2623 			 enum bfa_fcport_sm_event event)
2624 {
2625 	bfa_trc(fcport->bfa, event);
2626 
2627 	switch (event) {
2628 	case BFA_FCPORT_SM_START:
2629 		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2630 		break;
2631 
2632 	case BFA_FCPORT_SM_ENABLE:
2633 		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2634 		break;
2635 
2636 	default:
2637 		/*
2638 		 * Ignore all events.
2639 		 */
2640 		;
2641 	}
2642 }
2643 
2644 /*
2645  * Link state is down
2646  */
2647 static void
2648 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2649 		enum bfa_fcport_ln_sm_event event)
2650 {
2651 	bfa_trc(ln->fcport->bfa, event);
2652 
2653 	switch (event) {
2654 	case BFA_FCPORT_LN_SM_LINKUP:
2655 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2656 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2657 		break;
2658 
2659 	default:
2660 		bfa_sm_fault(ln->fcport->bfa, event);
2661 	}
2662 }
2663 
2664 /*
2665  * Link state is waiting for down notification
2666  */
2667 static void
2668 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2669 		enum bfa_fcport_ln_sm_event event)
2670 {
2671 	bfa_trc(ln->fcport->bfa, event);
2672 
2673 	switch (event) {
2674 	case BFA_FCPORT_LN_SM_LINKUP:
2675 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2676 		break;
2677 
2678 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2679 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2680 		break;
2681 
2682 	default:
2683 		bfa_sm_fault(ln->fcport->bfa, event);
2684 	}
2685 }
2686 
2687 /*
2688  * Link state is waiting for down notification and there is a pending up
2689  */
2690 static void
2691 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2692 		enum bfa_fcport_ln_sm_event event)
2693 {
2694 	bfa_trc(ln->fcport->bfa, event);
2695 
2696 	switch (event) {
2697 	case BFA_FCPORT_LN_SM_LINKDOWN:
2698 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2699 		break;
2700 
2701 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2702 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2703 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2704 		break;
2705 
2706 	default:
2707 		bfa_sm_fault(ln->fcport->bfa, event);
2708 	}
2709 }
2710 
2711 /*
2712  * Link state is up
2713  */
2714 static void
2715 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2716 		enum bfa_fcport_ln_sm_event event)
2717 {
2718 	bfa_trc(ln->fcport->bfa, event);
2719 
2720 	switch (event) {
2721 	case BFA_FCPORT_LN_SM_LINKDOWN:
2722 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2723 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2724 		break;
2725 
2726 	default:
2727 		bfa_sm_fault(ln->fcport->bfa, event);
2728 	}
2729 }
2730 
2731 /*
2732  * Link state is waiting for up notification
2733  */
2734 static void
2735 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2736 		enum bfa_fcport_ln_sm_event event)
2737 {
2738 	bfa_trc(ln->fcport->bfa, event);
2739 
2740 	switch (event) {
2741 	case BFA_FCPORT_LN_SM_LINKDOWN:
2742 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2743 		break;
2744 
2745 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2746 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2747 		break;
2748 
2749 	default:
2750 		bfa_sm_fault(ln->fcport->bfa, event);
2751 	}
2752 }
2753 
2754 /*
2755  * Link state is waiting for up notification and there is a pending down
2756  */
2757 static void
2758 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2759 		enum bfa_fcport_ln_sm_event event)
2760 {
2761 	bfa_trc(ln->fcport->bfa, event);
2762 
2763 	switch (event) {
2764 	case BFA_FCPORT_LN_SM_LINKUP:
2765 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2766 		break;
2767 
2768 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2769 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2770 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2771 		break;
2772 
2773 	default:
2774 		bfa_sm_fault(ln->fcport->bfa, event);
2775 	}
2776 }
2777 
2778 /*
2779  * Link state is waiting for up notification and there are pending down and up
2780  */
2781 static void
2782 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2783 			enum bfa_fcport_ln_sm_event event)
2784 {
2785 	bfa_trc(ln->fcport->bfa, event);
2786 
2787 	switch (event) {
2788 	case BFA_FCPORT_LN_SM_LINKDOWN:
2789 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2790 		break;
2791 
2792 	case BFA_FCPORT_LN_SM_NOTIFICATION:
2793 		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2794 		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2795 		break;
2796 
2797 	default:
2798 		bfa_sm_fault(ln->fcport->bfa, event);
2799 	}
2800 }
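/*
 * Queued completion callback for link state notifications to upper
 * layers.
 */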
2801 
2802 static void
2803 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2804 {
2805 	struct bfa_fcport_ln_s *ln = cbarg;
2806 
2807 	if (complete)
2808 		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2809 	else
2810 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2811 }
2812 
2813 /*
2814  * Send SCN notification to upper layers.
2815  * trunk - BFA_FALSE if the caller is fcport; such events are ignored in trunked mode
2816  */
2817 static void
2818 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2819 	bfa_boolean_t trunk)
2820 {
2821 	if (fcport->cfg.trunked && !trunk)
2822 		return;
2823 
2824 	switch (event) {
2825 	case BFA_PORT_LINKUP:
2826 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2827 		break;
2828 	case BFA_PORT_LINKDOWN:
2829 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2830 		break;
2831 	default:
2832 		WARN_ON(1);
2833 	}
2834 }
2835 
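/*
 * Notify upper layers of a link state change -- direct call for fcs,
 * queued callback for others.
 */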
2836 static void
2837 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2838 {
2839 	struct bfa_fcport_s *fcport = ln->fcport;
2840 
2841 	if (fcport->bfa->fcs) {
2842 		fcport->event_cbfn(fcport->event_cbarg, event);
2843 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2844 	} else {
2845 		ln->ln_event = event;
2846 		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2847 			__bfa_cb_fcport_event, ln);
2848 	}
2849 }
2850 
2851 #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2852 							BFA_CACHELINE_SZ))
2853 
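/*
 * Report DMA memory required for port statistics.
 */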
2854 static void
2855 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
2856 		   struct bfa_s *bfa)
2857 {
2858 	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
2859 
2860 	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
2861 }
2862 
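/*
 * Request queue space became available; resume the pending port
 * enable/disable request.
 */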
2863 static void
2864 bfa_fcport_qresume(void *cbarg)
2865 {
2866 	struct bfa_fcport_s *fcport = cbarg;
2867 
2868 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2869 }
2870 
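/*
 * Claim DMA memory for the port statistics buffer.
 */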
2871 static void
2872 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
2873 {
2874 	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
2875 
2876 	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2877 	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
2878 	fcport->stats = (union bfa_fcport_stats_u *)
2879 				bfa_mem_dma_virt(fcport_dma);
2880 }
2881 
2882 /*
2883  * Module attach: claim memory and initialize default port configuration.
2884  */
2885 static void
2886 bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
2887 		struct bfa_pcidev_s *pcidev)
2888 {
2889 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2890 	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2891 	struct bfa_fcport_ln_s *ln = &fcport->ln;
2892 	struct timeval tv;
2893 
2894 	fcport->bfa = bfa;
2895 	ln->fcport = fcport;
2896 
2897 	bfa_fcport_mem_claim(fcport);
2898 
2899 	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2900 	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2901 
2902 	/*
2903 	 * initialize time stamp for stats reset
2904 	 */
2905 	do_gettimeofday(&tv);
2906 	fcport->stats_reset_time = tv.tv_sec;
2907 
2908 	/*
2909 	 * initialize and set default configuration
2910 	 */
2911 	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2912 	port_cfg->speed = BFA_PORT_SPEED_AUTO;
2913 	port_cfg->trunked = BFA_FALSE;
2914 	port_cfg->maxfrsize = 0;
2915 
2916 	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
2917 
2918 	INIT_LIST_HEAD(&fcport->stats_pending_q);
2919 	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
2920 
2921 	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
2922 }
2923 
2924 static void
2925 bfa_fcport_detach(struct bfa_s *bfa)
2926 {
2927 }
2928 
2929 /*
2930  * Called when IOC is ready.
2931  */
2932 static void
2933 bfa_fcport_start(struct bfa_s *bfa)
2934 {
2935 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
2936 }
2937 
2938 /*
2939  * Called before IOC is stopped.
2940  */
2941 static void
2942 bfa_fcport_stop(struct bfa_s *bfa)
2943 {
2944 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
2945 	bfa_trunk_iocdisable(bfa);
2946 }
2947 
2948 /*
2949  * Called when IOC failure is detected.
2950  */
2951 static void
2952 bfa_fcport_iocdisable(struct bfa_s *bfa)
2953 {
2954 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2955 
2956 	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2957 	bfa_trunk_iocdisable(bfa);
2958 }
2959 
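/*
 * Update port attributes (speed, topology, QoS, trunk and FCoE vlan)
 * from the firmware link-up event.
 */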
2960 static void
2961 bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
2962 {
2963 	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2964 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2965 
2966 	fcport->speed = pevent->link_state.speed;
2967 	fcport->topology = pevent->link_state.topology;
2968 
2969 	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
2970 		fcport->myalpa = 0;
2971 
2972 	/* QoS Details */
2973 	fcport->qos_attr = pevent->link_state.qos_attr;
2974 	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
2975 
2976 	/*
2977 	 * update trunk state if applicable
2978 	 */
2979 	if (!fcport->cfg.trunked)
2980 		trunk->attr.state = BFA_TRUNK_DISABLED;
2981 
2982 	/* update FCoE specific */
2983 	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
2984 
2985 	bfa_trc(fcport->bfa, fcport->speed);
2986 	bfa_trc(fcport->bfa, fcport->topology);
2987 }
2988 
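/*
 * Reset link attributes (speed, topology, BB_SCN state).
 */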
2989 static void
2990 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2991 {
2992 	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2993 	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2994 	fcport->bbsc_op_state = BFA_FALSE;
2995 }
2996 
2997 /*
2998  * Send port enable message to firmware.
2999  */
3000 static bfa_boolean_t
3001 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3002 {
3003 	struct bfi_fcport_enable_req_s *m;
3004 
3005 	/*
3006 	 * Increment message tag before queue check, so that responses to old
3007 	 * requests are discarded.
3008 	 */
3009 	fcport->msgtag++;
3010 
3011 	/*
3012 	 * check for room in queue to send request now
3013 	 */
3014 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3015 	if (!m) {
3016 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3017 							&fcport->reqq_wait);
3018 		return BFA_FALSE;
3019 	}
3020 
3021 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3022 			bfa_fn_lpu(fcport->bfa));
3023 	m->nwwn = fcport->nwwn;
3024 	m->pwwn = fcport->pwwn;
3025 	m->port_cfg = fcport->cfg;
3026 	m->msgtag = fcport->msgtag;
3027 	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3028 	m->use_flash_cfg = fcport->use_flash_cfg;
3029 	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3030 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3031 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3032 
3033 	/*
3034 	 * queue I/O message to firmware
3035 	 */
3036 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3037 	return BFA_TRUE;
3038 }
3039 
3040 /*
3041  * Send port disable message to firmware.
3042  */
3043 static bfa_boolean_t
3044 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3045 {
3046 	struct bfi_fcport_req_s *m;
3047 
3048 	/*
3049 	 * Increment message tag before queue check, so that responses to old
3050 	 * requests are discarded.
3051 	 */
3052 	fcport->msgtag++;
3053 
3054 	/*
3055 	 * check for room in queue to send request now
3056 	 */
3057 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3058 	if (!m) {
3059 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3060 							&fcport->reqq_wait);
3061 		return BFA_FALSE;
3062 	}
3063 
3064 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3065 			bfa_fn_lpu(fcport->bfa));
3066 	m->msgtag = fcport->msgtag;
3067 
3068 	/*
3069 	 * queue I/O message to firmware
3070 	 */
3071 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3072 
3073 	return BFA_TRUE;
3074 }
3075 
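/*
 * Initialize port world-wide names from IOC attributes.
 */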
3076 static void
3077 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3078 {
3079 	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3080 	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3081 
3082 	bfa_trc(fcport->bfa, fcport->pwwn);
3083 	bfa_trc(fcport->bfa, fcport->nwwn);
3084 }
3085 
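/*
 * Send configured tx BB_credit and bb_scn to firmware; the update is
 * dropped if the request queue is full.
 */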
3086 static void
3087 bfa_fcport_send_txcredit(void *port_cbarg)
3088 {
3089 
3090 	struct bfa_fcport_s *fcport = port_cbarg;
3091 	struct bfi_fcport_set_svc_params_req_s *m;
3092 
3093 	/*
3094 	 * check for room in queue to send request now
3095 	 */
3096 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3097 	if (!m) {
3098 		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
3099 		return;
3100 	}
3101 
3102 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
3103 			bfa_fn_lpu(fcport->bfa));
3104 	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
3105 	m->bb_scn = fcport->cfg.bb_scn;
3106 
3107 	/*
3108 	 * queue I/O message to firmware
3109 	 */
3110 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3111 }
3112 
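/*
 * Byte-swap FC QoS statistics from firmware (big-endian) to host
 * order.
 */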
3113 static void
3114 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3115 	struct bfa_qos_stats_s *s)
3116 {
3117 	u32	*dip = (u32 *) d;
3118 	__be32	*sip = (__be32 *) s;
3119 	int		i;
3120 
3121 	/* Now swap the 32 bit fields */
3122 	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3123 		dip[i] = be32_to_cpu(sip[i]);
3124 }
3125 
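/*
 * Byte-swap FCoE statistics to host order; counters are 64-bit, so
 * the two 32-bit words of each counter are swapped pairwise on
 * little-endian hosts.
 */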
3126 static void
3127 bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3128 	struct bfa_fcoe_stats_s *s)
3129 {
3130 	u32	*dip = (u32 *) d;
3131 	__be32	*sip = (__be32 *) s;
3132 	int		i;
3133 
3134 	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3135 	     i = i + 2) {
3136 #ifdef __BIG_ENDIAN
3137 		dip[i] = be32_to_cpu(sip[i]);
3138 		dip[i + 1] = be32_to_cpu(sip[i + 1]);
3139 #else
3140 		dip[i] = be32_to_cpu(sip[i + 1]);
3141 		dip[i + 1] = be32_to_cpu(sip[i]);
3142 #endif
3143 	}
3144 }
3145 
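/*
 * Completion callback for a statistics fetch: byte-swap the DMA'ed
 * statistics and deliver them to all pending requesters.
 */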
3146 static void
3147 __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3148 {
3149 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
3150 	struct bfa_cb_pending_q_s *cb;
3151 	struct list_head *qe, *qen;
3152 	union bfa_fcport_stats_u *ret;
3153 
3154 	if (complete) {
3155 		struct timeval tv;
3156 		if (fcport->stats_status == BFA_STATUS_OK)
3157 			do_gettimeofday(&tv);
3158 
3159 		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3160 			bfa_q_deq(&fcport->stats_pending_q, &qe);
3161 			cb = (struct bfa_cb_pending_q_s *)qe;
3162 			if (fcport->stats_status == BFA_STATUS_OK) {
3163 				ret = (union bfa_fcport_stats_u *)cb->data;
3164 				/* Swap FC QoS or FCoE stats */
3165 				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
3166 					bfa_fcport_qos_stats_swap(&ret->fcqos,
3167 							&fcport->stats->fcqos);
3168 				else {
3169 					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3170 							&fcport->stats->fcoe);
3171 					ret->fcoe.secs_reset =
3172 					tv.tv_sec - fcport->stats_reset_time;
3173 				}
3174 			}
3175 			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3176 					fcport->stats_status);
3177 		}
3178 		fcport->stats_status = BFA_STATUS_OK;
3179 	} else {
3180 		INIT_LIST_HEAD(&fcport->stats_pending_q);
3181 		fcport->stats_status = BFA_STATUS_OK;
3182 	}
3183 }
3184 
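/*
 * Statistics fetch timed out; cancel any queue wait and fail all
 * pending requests.
 */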
3185 static void
3186 bfa_fcport_stats_get_timeout(void *cbarg)
3187 {
3188 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3189 
3190 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3191 
3192 	if (fcport->stats_qfull) {
3193 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3194 		fcport->stats_qfull = BFA_FALSE;
3195 	}
3196 
3197 	fcport->stats_status = BFA_STATUS_ETIMER;
3198 	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3199 }
3200 
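/*
 * Send a statistics-get request to firmware, waiting for request
 * queue space if necessary.
 */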
3201 static void
3202 bfa_fcport_send_stats_get(void *cbarg)
3203 {
3204 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3205 	struct bfi_fcport_req_s *msg;
3206 
3207 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3208 
3209 	if (!msg) {
3210 		fcport->stats_qfull = BFA_TRUE;
3211 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3212 				bfa_fcport_send_stats_get, fcport);
3213 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3214 				&fcport->stats_reqq_wait);
3215 		return;
3216 	}
3217 	fcport->stats_qfull = BFA_FALSE;
3218 
3219 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3220 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3221 			bfa_fn_lpu(fcport->bfa));
3222 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3223 }
3224 
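/*
 * Completion callback for a statistics clear: reset the stats time
 * stamp and notify all pending requesters.
 */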
3225 static void
3226 __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3227 {
3228 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3229 	struct bfa_cb_pending_q_s *cb;
3230 	struct list_head *qe, *qen;
3231 
3232 	if (complete) {
3233 		struct timeval tv;
3234 
3235 		/*
3236 		 * re-initialize time stamp for stats reset
3237 		 */
3238 		do_gettimeofday(&tv);
3239 		fcport->stats_reset_time = tv.tv_sec;
3240 		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3241 			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3242 			cb = (struct bfa_cb_pending_q_s *)qe;
3243 			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3244 						fcport->stats_status);
3245 		}
3246 		fcport->stats_status = BFA_STATUS_OK;
3247 	} else {
3248 		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3249 		fcport->stats_status = BFA_STATUS_OK;
3250 	}
3251 }
3252 
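/*
 * Statistics clear timed out; cancel any queue wait and fail all
 * pending requests.
 */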
3253 static void
3254 bfa_fcport_stats_clr_timeout(void *cbarg)
3255 {
3256 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3257 
3258 	bfa_trc(fcport->bfa, fcport->stats_qfull);
3259 
3260 	if (fcport->stats_qfull) {
3261 		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3262 		fcport->stats_qfull = BFA_FALSE;
3263 	}
3264 
3265 	fcport->stats_status = BFA_STATUS_ETIMER;
3266 	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3267 }
3268 
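/*
 * Send a statistics-clear request to firmware, waiting for request
 * queue space if necessary.
 */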
3269 static void
3270 bfa_fcport_send_stats_clear(void *cbarg)
3271 {
3272 	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3273 	struct bfi_fcport_req_s *msg;
3274 
3275 	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3276 
3277 	if (!msg) {
3278 		fcport->stats_qfull = BFA_TRUE;
3279 		bfa_reqq_winit(&fcport->stats_reqq_wait,
3280 				bfa_fcport_send_stats_clear, fcport);
3281 		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3282 						&fcport->stats_reqq_wait);
3283 		return;
3284 	}
3285 	fcport->stats_qfull = BFA_FALSE;
3286 
3287 	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3288 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3289 			bfa_fn_lpu(fcport->bfa));
3290 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
3291 }
3292 
3293 /*
3294  * Handle trunk SCN event from firmware.
3295  */
3296 static void
3297 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3298 {
3299 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3300 	struct bfi_fcport_trunk_link_s *tlink;
3301 	struct bfa_trunk_link_attr_s *lattr;
3302 	enum bfa_trunk_state state_prev;
3303 	int i;
3304 	int link_bm = 0;
3305 
3306 	bfa_trc(fcport->bfa, fcport->cfg.trunked);
3307 	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3308 		   scn->trunk_state != BFA_TRUNK_OFFLINE);
3309 
3310 	bfa_trc(fcport->bfa, trunk->attr.state);
3311 	bfa_trc(fcport->bfa, scn->trunk_state);
3312 	bfa_trc(fcport->bfa, scn->trunk_speed);
3313 
3314 	/*
3315 	 * Save off new state for trunk attribute query
3316 	 */
3317 	state_prev = trunk->attr.state;
3318 	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3319 		trunk->attr.state = scn->trunk_state;
3320 	trunk->attr.speed = scn->trunk_speed;
3321 	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3322 		lattr = &trunk->attr.link_attr[i];
3323 		tlink = &scn->tlink[i];
3324 
3325 		lattr->link_state = tlink->state;
3326 		lattr->trunk_wwn  = tlink->trunk_wwn;
3327 		lattr->fctl	  = tlink->fctl;
3328 		lattr->speed	  = tlink->speed;
3329 		lattr->deskew	  = be32_to_cpu(tlink->deskew);
3330 
3331 		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3332 			fcport->speed	 = tlink->speed;
3333 			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3334 			link_bm |= 1 << i;
3335 		}
3336 
3337 		bfa_trc(fcport->bfa, lattr->link_state);
3338 		bfa_trc(fcport->bfa, lattr->trunk_wwn);
3339 		bfa_trc(fcport->bfa, lattr->fctl);
3340 		bfa_trc(fcport->bfa, lattr->speed);
3341 		bfa_trc(fcport->bfa, lattr->deskew);
3342 	}
3343 
3344 	switch (link_bm) {
3345 	case 3:
3346 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3347 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3348 		break;
3349 	case 2:
3350 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3351 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3352 		break;
3353 	case 1:
3354 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3355 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3356 		break;
3357 	default:
3358 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3359 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3360 	}
3361 
3362 	/*
3363 	 * Notify upper layers if trunk state changed.
3364 	 */
3365 	if ((state_prev != trunk->attr.state) ||
3366 		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3367 		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3368 			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3369 	}
3370 }
3371 
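/*
 * On IOC disable, notify upper layers of link down (in trunked mode)
 * and reset trunk attributes.
 */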
3372 static void
3373 bfa_trunk_iocdisable(struct bfa_s *bfa)
3374 {
3375 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3376 	int i = 0;
3377 
3378 	/*
3379 	 * In trunked mode, notify upper layers that link is down
3380 	 */
3381 	if (fcport->cfg.trunked) {
3382 		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3383 			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3384 
3385 		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3386 		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3387 		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3388 			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3389 			fcport->trunk.attr.link_attr[i].fctl =
3390 						BFA_TRUNK_LINK_FCTL_NORMAL;
3391 			fcport->trunk.attr.link_attr[i].link_state =
3392 						BFA_TRUNK_LINK_STATE_DN_LINKDN;
3393 			fcport->trunk.attr.link_attr[i].speed =
3394 						BFA_PORT_SPEED_UNKNOWN;
3395 			fcport->trunk.attr.link_attr[i].deskew = 0;
3396 		}
3397 	}
3398 }
3399 
3400 /*
3401  * Called to initialize port attributes
3402  */
3403 void
3404 bfa_fcport_init(struct bfa_s *bfa)
3405 {
3406 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3407 
3408 	/*
3409 	 * Initialize port attributes from IOC hardware data.
3410 	 */
3411 	bfa_fcport_set_wwns(fcport);
3412 	if (fcport->cfg.maxfrsize == 0)
3413 		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3414 	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3415 	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3416 
3417 	if (bfa_fcport_is_pbcdisabled(bfa))
3418 		bfa->modules.port.pbc_disabled = BFA_TRUE;
3419 
3420 	WARN_ON(!fcport->cfg.maxfrsize);
3421 	WARN_ON(!fcport->cfg.rx_bbcredit);
3422 	WARN_ON(!fcport->speed_sup);
3423 }
3424 
3425 /*
3426  * Firmware message handler.
3427  */
3428 void
3429 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3430 {
3431 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3432 	union bfi_fcport_i2h_msg_u i2hmsg;
3433 
3434 	i2hmsg.msg = msg;
3435 	fcport->event_arg.i2hmsg = i2hmsg;
3436 
3437 	bfa_trc(bfa, msg->mhdr.msg_id);
3438 	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3439 
3440 	switch (msg->mhdr.msg_id) {
3441 	case BFI_FCPORT_I2H_ENABLE_RSP:
3442 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3443 
3444 			if (fcport->use_flash_cfg) {
3445 				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3446 				fcport->cfg.maxfrsize =
3447 					cpu_to_be16(fcport->cfg.maxfrsize);
3448 				fcport->cfg.path_tov =
3449 					cpu_to_be16(fcport->cfg.path_tov);
3450 				fcport->cfg.q_depth =
3451 					cpu_to_be16(fcport->cfg.q_depth);
3452 
3453 				if (fcport->cfg.trunked)
3454 					fcport->trunk.attr.state =
3455 						BFA_TRUNK_OFFLINE;
3456 				else
3457 					fcport->trunk.attr.state =
3458 						BFA_TRUNK_DISABLED;
3459 				fcport->use_flash_cfg = BFA_FALSE;
3460 			}
3461 
3462 			if (fcport->cfg.qos_enabled)
3463 				fcport->qos_attr.state = BFA_QOS_OFFLINE;
3464 			else
3465 				fcport->qos_attr.state = BFA_QOS_DISABLED;
3466 
3467 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3468 		}
3469 		break;
3470 
3471 	case BFI_FCPORT_I2H_DISABLE_RSP:
3472 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3473 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3474 		break;
3475 
3476 	case BFI_FCPORT_I2H_EVENT:
3477 		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3478 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3479 		else
3480 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3481 		break;
3482 
3483 	case BFI_FCPORT_I2H_TRUNK_SCN:
3484 		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3485 		break;
3486 
3487 	case BFI_FCPORT_I2H_STATS_GET_RSP:
3488 		/*
3489 		 * check for timer pop before processing the rsp
3490 		 */
3491 		if (list_empty(&fcport->stats_pending_q) ||
3492 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3493 			break;
3494 
3495 		bfa_timer_stop(&fcport->timer);
3496 		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3497 		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3498 		break;
3499 
3500 	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3501 		/*
3502 		 * check for timer pop before processing the rsp
3503 		 */
3504 		if (list_empty(&fcport->statsclr_pending_q) ||
3505 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3506 			break;
3507 
3508 		bfa_timer_stop(&fcport->timer);
3509 		fcport->stats_status = BFA_STATUS_OK;
3510 		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3511 		break;
3512 
3513 	case BFI_FCPORT_I2H_ENABLE_AEN:
3514 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3515 		break;
3516 
3517 	case BFI_FCPORT_I2H_DISABLE_AEN:
3518 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3519 		break;
3520 
3521 	default:
3522 		WARN_ON(1);
3523 		break;
3524 	}
3525 }
3526 
3527 /*
3528  * Registered callback for port events.
3529  */
3530 void
3531 bfa_fcport_event_register(struct bfa_s *bfa,
3532 				void (*cbfn) (void *cbarg,
3533 				enum bfa_port_linkstate event),
3534 				void *cbarg)
3535 {
3536 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3537 
3538 	fcport->event_cbfn = cbfn;
3539 	fcport->event_cbarg = cbarg;
3540 }
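
/*
 * Illustrative registration sketch (my_port_event and my_cbarg are
 * placeholder names, not part of the driver):
 *
 *	static void
 *	my_port_event(void *cbarg, enum bfa_port_linkstate event)
 *	{
 *		handle BFA_PORT_LINKUP / BFA_PORT_LINKDOWN here
 *	}
 *
 *	bfa_fcport_event_register(bfa, my_port_event, my_cbarg);
 */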
3541 
3542 bfa_status_t
3543 bfa_fcport_enable(struct bfa_s *bfa)
3544 {
3545 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3546 
3547 	if (bfa_fcport_is_pbcdisabled(bfa))
3548 		return BFA_STATUS_PBC;
3549 
3550 	if (bfa_ioc_is_disabled(&bfa->ioc))
3551 		return BFA_STATUS_IOC_DISABLED;
3552 
3553 	if (fcport->diag_busy)
3554 		return BFA_STATUS_DIAG_BUSY;
3555 
3556 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3557 	return BFA_STATUS_OK;
3558 }
3559 
3560 bfa_status_t
3561 bfa_fcport_disable(struct bfa_s *bfa)
3562 {
3563 	if (bfa_fcport_is_pbcdisabled(bfa))
3564 		return BFA_STATUS_PBC;
3565 
3566 	if (bfa_ioc_is_disabled(&bfa->ioc))
3567 		return BFA_STATUS_IOC_DISABLED;
3568 
3569 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3570 	return BFA_STATUS_OK;
3571 }
3572 
3573 /* If PBC is disabled on port, return error */
3574 bfa_status_t
3575 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3576 {
3577 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3578 	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3579 	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3580 
3581 	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3582 		bfa_trc(bfa, fcport->pwwn);
3583 		return BFA_STATUS_PBC;
3584 	}
3585 	return BFA_STATUS_OK;
3586 }
3587 
3588 /*
3589  * Configure port speed.
3590  */
3591 bfa_status_t
3592 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3593 {
3594 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3595 
3596 	bfa_trc(bfa, speed);
3597 
3598 	if (fcport->cfg.trunked == BFA_TRUE)
3599 		return BFA_STATUS_TRUNK_ENABLED;
3600 	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3601 		bfa_trc(bfa, fcport->speed_sup);
3602 		return BFA_STATUS_UNSUPP_SPEED;
3603 	}
3604 
3605 	/* For Mezz cards, the requested port speed must be validated */
3606 	if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
3607 		if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3608 			/* For CT2, 1G is not supported */
3609 			if ((speed == BFA_PORT_SPEED_1GBPS) &&
3610 			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3611 				return BFA_STATUS_UNSUPP_SPEED;
3612 
3613 			/* Already checked for Auto Speed and Max Speed supp */
3614 			if (!(speed == BFA_PORT_SPEED_1GBPS ||
3615 			      speed == BFA_PORT_SPEED_2GBPS ||
3616 			      speed == BFA_PORT_SPEED_4GBPS ||
3617 			      speed == BFA_PORT_SPEED_8GBPS ||
3618 			      speed == BFA_PORT_SPEED_16GBPS ||
3619 			      speed == BFA_PORT_SPEED_AUTO))
3620 				return BFA_STATUS_UNSUPP_SPEED;
3621 		} else {
3622 			if (speed != BFA_PORT_SPEED_10GBPS)
3623 				return BFA_STATUS_UNSUPP_SPEED;
3624 		}
3625 	}
3626 
3627 	fcport->cfg.speed = speed;
3628 
3629 	return BFA_STATUS_OK;
3630 }
3631 
3632 /*
3633  * Get current speed.
3634  */
3635 enum bfa_port_speed
3636 bfa_fcport_get_speed(struct bfa_s *bfa)
3637 {
3638 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3639 
3640 	return fcport->speed;
3641 }
3642 
3643 /*
3644  * Configure port topology.
3645  */
3646 bfa_status_t
3647 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3648 {
3649 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3650 
3651 	bfa_trc(bfa, topology);
3652 	bfa_trc(bfa, fcport->cfg.topology);
3653 
3654 	switch (topology) {
3655 	case BFA_PORT_TOPOLOGY_P2P:
3656 	case BFA_PORT_TOPOLOGY_LOOP:
3657 	case BFA_PORT_TOPOLOGY_AUTO:
3658 		break;
3659 
3660 	default:
3661 		return BFA_STATUS_EINVAL;
3662 	}
3663 
3664 	fcport->cfg.topology = topology;
3665 	return BFA_STATUS_OK;
3666 }
3667 
3668 /*
3669  * Get current topology.
3670  */
3671 enum bfa_port_topology
3672 bfa_fcport_get_topology(struct bfa_s *bfa)
3673 {
3674 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3675 
3676 	return fcport->topology;
3677 }
3678 
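/*
 * Configure a hard ALPA for the port.
 */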
3679 bfa_status_t
3680 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3681 {
3682 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3683 
3684 	bfa_trc(bfa, alpa);
3685 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3686 	bfa_trc(bfa, fcport->cfg.hardalpa);
3687 
3688 	fcport->cfg.cfg_hardalpa = BFA_TRUE;
3689 	fcport->cfg.hardalpa = alpa;
3690 
3691 	return BFA_STATUS_OK;
3692 }
3693 
3694 bfa_status_t
3695 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3696 {
3697 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3698 
3699 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3700 	bfa_trc(bfa, fcport->cfg.hardalpa);
3701 
3702 	fcport->cfg.cfg_hardalpa = BFA_FALSE;
3703 	return BFA_STATUS_OK;
3704 }
3705 
3706 bfa_boolean_t
3707 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3708 {
3709 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3710 
3711 	*alpa = fcport->cfg.hardalpa;
3712 	return fcport->cfg.cfg_hardalpa;
3713 }
3714 
3715 u8
3716 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3717 {
3718 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3719 
3720 	return fcport->myalpa;
3721 }
3722 
3723 bfa_status_t
3724 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3725 {
3726 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3727 
3728 	bfa_trc(bfa, maxfrsize);
3729 	bfa_trc(bfa, fcport->cfg.maxfrsize);
3730 
3731 	/* within range */
3732 	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3733 		return BFA_STATUS_INVLD_DFSZ;
3734 
3735 	/* power of 2, if not the max frame size of 2112 */
3736 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3737 		return BFA_STATUS_INVLD_DFSZ;
3738 
3739 	fcport->cfg.maxfrsize = maxfrsize;
3740 	return BFA_STATUS_OK;
3741 }
3742 
3743 u16
3744 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3745 {
3746 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3747 
3748 	return fcport->cfg.maxfrsize;
3749 }
3750 
3751 u8
3752 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3753 {
3754 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3755 
3756 	return fcport->cfg.rx_bbcredit;
3757 }
3758 
3759 void
3760 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
3761 {
3762 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3763 
3764 	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3765 	fcport->cfg.bb_scn = bb_scn;
3766 	if (bb_scn)
3767 		fcport->bbsc_op_state = BFA_TRUE;
3768 	bfa_fcport_send_txcredit(fcport);
3769 }
3770 
3771 /*
3772  * Get port attributes.
3773  */
3774 
3775 wwn_t
3776 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3777 {
3778 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3779 	if (node)
3780 		return fcport->nwwn;
3781 	else
3782 		return fcport->pwwn;
3783 }
3784 
3785 void
3786 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3787 {
3788 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3789 
3790 	memset(attr, 0, sizeof(struct bfa_port_attr_s));
3791 
3792 	attr->nwwn = fcport->nwwn;
3793 	attr->pwwn = fcport->pwwn;
3794 
3795 	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
3796 	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
3797 
3798 	memcpy(&attr->pport_cfg, &fcport->cfg,
3799 		sizeof(struct bfa_port_cfg_s));
3800 	/* speed attributes */
3801 	attr->pport_cfg.speed = fcport->cfg.speed;
3802 	attr->speed_supported = fcport->speed_sup;
3803 	attr->speed = fcport->speed;
3804 	attr->cos_supported = FC_CLASS_3;
3805 
3806 	/* topology attributes */
3807 	attr->pport_cfg.topology = fcport->cfg.topology;
3808 	attr->topology = fcport->topology;
3809 	attr->pport_cfg.trunked = fcport->cfg.trunked;
3810 
3811 	/* beacon attributes */
3812 	attr->beacon = fcport->beacon;
3813 	attr->link_e2e_beacon = fcport->link_e2e_beacon;
3814 
3815 	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
3816 	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
3817 	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
3818 	attr->bbsc_op_status = fcport->bbsc_op_state;
3819 
3820 	/* PBC Disabled State */
3821 	if (bfa_fcport_is_pbcdisabled(bfa))
3822 		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
3823 	else {
3824 		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3825 			attr->port_state = BFA_PORT_ST_IOCDIS;
3826 		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3827 			attr->port_state = BFA_PORT_ST_FWMISMATCH;
3828 		else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
3829 			attr->port_state = BFA_PORT_ST_ACQ_ADDR;
3830 	}
3831 
3832 	/* FCoE vlan */
3833 	attr->fcoe_vlan = fcport->fcoe_vlan;
3834 }
3835 
3836 #define BFA_FCPORT_STATS_TOV	1000
3837 
3838 /*
3839  * Fetch port statistics (FCQoS or FCoE).
3840  */
3841 bfa_status_t
3842 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3843 {
3844 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3845 
3846 	if (bfa_ioc_is_disabled(&bfa->ioc))
3847 		return BFA_STATUS_IOC_DISABLED;
3848 
3849 	if (!list_empty(&fcport->statsclr_pending_q))
3850 		return BFA_STATUS_DEVBUSY;
3851 
3852 	if (list_empty(&fcport->stats_pending_q)) {
3853 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3854 		bfa_fcport_send_stats_get(fcport);
3855 		bfa_timer_start(bfa, &fcport->timer,
3856 				bfa_fcport_stats_get_timeout,
3857 				fcport, BFA_FCPORT_STATS_TOV);
3858 	} else
3859 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3860 
3861 	return BFA_STATUS_OK;
3862 }
3863 
3864 /*
3865  * Reset port statistics (FCQoS or FCoE).
3866  */
3867 bfa_status_t
3868 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3869 {
3870 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3871 
3872 	if (!list_empty(&fcport->stats_pending_q))
3873 		return BFA_STATUS_DEVBUSY;
3874 
3875 	if (list_empty(&fcport->statsclr_pending_q)) {
3876 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3877 		bfa_fcport_send_stats_clear(fcport);
3878 		bfa_timer_start(bfa, &fcport->timer,
3879 				bfa_fcport_stats_clr_timeout,
3880 				fcport, BFA_FCPORT_STATS_TOV);
3881 	} else
3882 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3883 
3884 	return BFA_STATUS_OK;
3885 }
3886 
3887 /*
3888  * Return whether the port is in a disabled state.
3889  */
3890 bfa_boolean_t
3891 bfa_fcport_is_disabled(struct bfa_s *bfa)
3892 {
3893 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3894 
3895 	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3896 		BFA_PORT_ST_DISABLED;
3897 
3898 }
3899 
3900 bfa_boolean_t
3901 bfa_fcport_is_ratelim(struct bfa_s *bfa)
3902 {
3903 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3904 
3905 	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3906 
3907 }
3908 
3909 /*
3910  *	Enable/Disable FAA feature in port config
3911  */
3912 void
3913 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
3914 {
3915 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3916 
3917 	bfa_trc(bfa, state);
3918 	fcport->cfg.faa_state = state;
3919 }
3920 
3921 /*
3922  * Get default minimum ratelim speed
3923  */
3924 enum bfa_port_speed
3925 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3926 {
3927 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3928 
3929 	bfa_trc(bfa, fcport->cfg.trl_def_speed);
3930 	return fcport->cfg.trl_def_speed;
3931 
3932 }
3933 
3934 void
3935 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
3936 		  bfa_boolean_t link_e2e_beacon)
3937 {
3938 	struct bfa_s *bfa = dev;
3939 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3940 
3941 	bfa_trc(bfa, beacon);
3942 	bfa_trc(bfa, link_e2e_beacon);
3943 	bfa_trc(bfa, fcport->beacon);
3944 	bfa_trc(bfa, fcport->link_e2e_beacon);
3945 
3946 	fcport->beacon = beacon;
3947 	fcport->link_e2e_beacon = link_e2e_beacon;
3948 }
3949 
3950 bfa_boolean_t
3951 bfa_fcport_is_linkup(struct bfa_s *bfa)
3952 {
3953 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3954 
3955 	return	(!fcport->cfg.trunked &&
3956 		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3957 		(fcport->cfg.trunked &&
3958 		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3959 }
3960 
3961 bfa_boolean_t
3962 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3963 {
3964 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3965 
3966 	return fcport->cfg.qos_enabled;
3967 }
3968 
3969 bfa_boolean_t
3970 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3971 {
3972 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3973 
3974 	return fcport->cfg.trunked;
3975 }
3976 
3977 /*
3978  * Rport State machine functions
3979  */
3980 /*
3981  * Beginning state, only online event expected.
3982  */
3983 static void
3984 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
3985 {
3986 	bfa_trc(rp->bfa, rp->rport_tag);
3987 	bfa_trc(rp->bfa, event);
3988 
3989 	switch (event) {
3990 	case BFA_RPORT_SM_CREATE:
3991 		bfa_stats(rp, sm_un_cr);
3992 		bfa_sm_set_state(rp, bfa_rport_sm_created);
3993 		break;
3994 
3995 	default:
3996 		bfa_stats(rp, sm_un_unexp);
3997 		bfa_sm_fault(rp->bfa, event);
3998 	}
3999 }
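
/*
 * Editorial note: the bfa_sm_* helpers from bfa_cs.h make each state a
 * plain function pointer, so (roughly -- a sketch of how the macros are
 * assumed to expand) a state change is an assignment and an event is an
 * indirect call:
 *
 *	bfa_sm_set_state(rp, bfa_rport_sm_created);
 *		// => rp->sm = (bfa_sm_t) bfa_rport_sm_created;
 *	bfa_sm_send_event(rp, BFA_RPORT_SM_ONLINE);
 *		// => rp->sm(rp, BFA_RPORT_SM_ONLINE);
 */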
4000 
4001 static void
4002 bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4003 {
4004 	bfa_trc(rp->bfa, rp->rport_tag);
4005 	bfa_trc(rp->bfa, event);
4006 
4007 	switch (event) {
4008 	case BFA_RPORT_SM_ONLINE:
4009 		bfa_stats(rp, sm_cr_on);
4010 		if (bfa_rport_send_fwcreate(rp))
4011 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4012 		else
4013 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4014 		break;
4015 
4016 	case BFA_RPORT_SM_DELETE:
4017 		bfa_stats(rp, sm_cr_del);
4018 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4019 		bfa_rport_free(rp);
4020 		break;
4021 
4022 	case BFA_RPORT_SM_HWFAIL:
4023 		bfa_stats(rp, sm_cr_hwf);
4024 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4025 		break;
4026 
4027 	default:
4028 		bfa_stats(rp, sm_cr_unexp);
4029 		bfa_sm_fault(rp->bfa, event);
4030 	}
4031 }
4032 
4033 /*
4034  * Waiting for rport create response from firmware.
4035  */
4036 static void
4037 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4038 {
4039 	bfa_trc(rp->bfa, rp->rport_tag);
4040 	bfa_trc(rp->bfa, event);
4041 
4042 	switch (event) {
4043 	case BFA_RPORT_SM_FWRSP:
4044 		bfa_stats(rp, sm_fwc_rsp);
4045 		bfa_sm_set_state(rp, bfa_rport_sm_online);
4046 		bfa_rport_online_cb(rp);
4047 		break;
4048 
4049 	case BFA_RPORT_SM_DELETE:
4050 		bfa_stats(rp, sm_fwc_del);
4051 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4052 		break;
4053 
4054 	case BFA_RPORT_SM_OFFLINE:
4055 		bfa_stats(rp, sm_fwc_off);
4056 		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4057 		break;
4058 
4059 	case BFA_RPORT_SM_HWFAIL:
4060 		bfa_stats(rp, sm_fwc_hwf);
4061 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4062 		break;
4063 
4064 	default:
4065 		bfa_stats(rp, sm_fwc_unexp);
4066 		bfa_sm_fault(rp->bfa, event);
4067 	}
4068 }
4069 
4070 /*
4071  * Request queue is full, awaiting queue resume to send create request.
4072  */
4073 static void
4074 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4075 {
4076 	bfa_trc(rp->bfa, rp->rport_tag);
4077 	bfa_trc(rp->bfa, event);
4078 
4079 	switch (event) {
4080 	case BFA_RPORT_SM_QRESUME:
4081 		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4082 		bfa_rport_send_fwcreate(rp);
4083 		break;
4084 
4085 	case BFA_RPORT_SM_DELETE:
4086 		bfa_stats(rp, sm_fwc_del);
4087 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4088 		bfa_reqq_wcancel(&rp->reqq_wait);
4089 		bfa_rport_free(rp);
4090 		break;
4091 
4092 	case BFA_RPORT_SM_OFFLINE:
4093 		bfa_stats(rp, sm_fwc_off);
4094 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4095 		bfa_reqq_wcancel(&rp->reqq_wait);
4096 		bfa_rport_offline_cb(rp);
4097 		break;
4098 
4099 	case BFA_RPORT_SM_HWFAIL:
4100 		bfa_stats(rp, sm_fwc_hwf);
4101 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4102 		bfa_reqq_wcancel(&rp->reqq_wait);
4103 		break;
4104 
4105 	default:
4106 		bfa_stats(rp, sm_fwc_unexp);
4107 		bfa_sm_fault(rp->bfa, event);
4108 	}
4109 }
4110 
4111 /*
4112  * Online state - normal parking state.
4113  */
4114 static void
4115 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4116 {
4117 	struct bfi_rport_qos_scn_s *qos_scn;
4118 
4119 	bfa_trc(rp->bfa, rp->rport_tag);
4120 	bfa_trc(rp->bfa, event);
4121 
4122 	switch (event) {
4123 	case BFA_RPORT_SM_OFFLINE:
4124 		bfa_stats(rp, sm_on_off);
4125 		if (bfa_rport_send_fwdelete(rp))
4126 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4127 		else
4128 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4129 		break;
4130 
4131 	case BFA_RPORT_SM_DELETE:
4132 		bfa_stats(rp, sm_on_del);
4133 		if (bfa_rport_send_fwdelete(rp))
4134 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4135 		else
4136 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4137 		break;
4138 
4139 	case BFA_RPORT_SM_HWFAIL:
4140 		bfa_stats(rp, sm_on_hwf);
4141 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4142 		break;
4143 
4144 	case BFA_RPORT_SM_SET_SPEED:
4145 		bfa_rport_send_fwspeed(rp);
4146 		break;
4147 
4148 	case BFA_RPORT_SM_QOS_SCN:
4149 		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4150 		rp->qos_attr = qos_scn->new_qos_attr;
4151 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4152 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4153 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4154 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4155 
4156 		qos_scn->old_qos_attr.qos_flow_id  =
4157 			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4158 		qos_scn->new_qos_attr.qos_flow_id  =
4159 			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4160 
4161 		if (qos_scn->old_qos_attr.qos_flow_id !=
4162 			qos_scn->new_qos_attr.qos_flow_id)
4163 			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4164 						    qos_scn->old_qos_attr,
4165 						    qos_scn->new_qos_attr);
4166 		if (qos_scn->old_qos_attr.qos_priority !=
4167 			qos_scn->new_qos_attr.qos_priority)
4168 			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4169 						  qos_scn->old_qos_attr,
4170 						  qos_scn->new_qos_attr);
4171 		break;
4172 
4173 	default:
4174 		bfa_stats(rp, sm_on_unexp);
4175 		bfa_sm_fault(rp->bfa, event);
4176 	}
4177 }
4178 
4179 /*
4180  * Firmware rport is being deleted - awaiting f/w response.
4181  */
4182 static void
4183 bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4184 {
4185 	bfa_trc(rp->bfa, rp->rport_tag);
4186 	bfa_trc(rp->bfa, event);
4187 
4188 	switch (event) {
4189 	case BFA_RPORT_SM_FWRSP:
4190 		bfa_stats(rp, sm_fwd_rsp);
4191 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4192 		bfa_rport_offline_cb(rp);
4193 		break;
4194 
4195 	case BFA_RPORT_SM_DELETE:
4196 		bfa_stats(rp, sm_fwd_del);
4197 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4198 		break;
4199 
4200 	case BFA_RPORT_SM_HWFAIL:
4201 		bfa_stats(rp, sm_fwd_hwf);
4202 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4203 		bfa_rport_offline_cb(rp);
4204 		break;
4205 
4206 	default:
4207 		bfa_stats(rp, sm_fwd_unexp);
4208 		bfa_sm_fault(rp->bfa, event);
4209 	}
4210 }
4211 
4212 static void
4213 bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4214 {
4215 	bfa_trc(rp->bfa, rp->rport_tag);
4216 	bfa_trc(rp->bfa, event);
4217 
4218 	switch (event) {
4219 	case BFA_RPORT_SM_QRESUME:
4220 		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4221 		bfa_rport_send_fwdelete(rp);
4222 		break;
4223 
4224 	case BFA_RPORT_SM_DELETE:
4225 		bfa_stats(rp, sm_fwd_del);
4226 		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4227 		break;
4228 
4229 	case BFA_RPORT_SM_HWFAIL:
4230 		bfa_stats(rp, sm_fwd_hwf);
4231 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4232 		bfa_reqq_wcancel(&rp->reqq_wait);
4233 		bfa_rport_offline_cb(rp);
4234 		break;
4235 
4236 	default:
4237 		bfa_stats(rp, sm_fwd_unexp);
4238 		bfa_sm_fault(rp->bfa, event);
4239 	}
4240 }
4241 
4242 /*
4243  * Offline state.
4244  */
4245 static void
4246 bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4247 {
4248 	bfa_trc(rp->bfa, rp->rport_tag);
4249 	bfa_trc(rp->bfa, event);
4250 
4251 	switch (event) {
4252 	case BFA_RPORT_SM_DELETE:
4253 		bfa_stats(rp, sm_off_del);
4254 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4255 		bfa_rport_free(rp);
4256 		break;
4257 
4258 	case BFA_RPORT_SM_ONLINE:
4259 		bfa_stats(rp, sm_off_on);
4260 		if (bfa_rport_send_fwcreate(rp))
4261 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4262 		else
4263 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4264 		break;
4265 
4266 	case BFA_RPORT_SM_HWFAIL:
4267 		bfa_stats(rp, sm_off_hwf);
4268 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4269 		break;
4270 
4271 	default:
4272 		bfa_stats(rp, sm_off_unexp);
4273 		bfa_sm_fault(rp->bfa, event);
4274 	}
4275 }
4276 
4277 /*
4278  * Rport is being deleted, awaiting the firmware delete response.
4279  */
4280 static void
4281 bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4282 {
4283 	bfa_trc(rp->bfa, rp->rport_tag);
4284 	bfa_trc(rp->bfa, event);
4285 
4286 	switch (event) {
4287 	case BFA_RPORT_SM_FWRSP:
4288 		bfa_stats(rp, sm_del_fwrsp);
4289 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4290 		bfa_rport_free(rp);
4291 		break;
4292 
4293 	case BFA_RPORT_SM_HWFAIL:
4294 		bfa_stats(rp, sm_del_hwf);
4295 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4296 		bfa_rport_free(rp);
4297 		break;
4298 
4299 	default:
4300 		bfa_sm_fault(rp->bfa, event);
4301 	}
4302 }
4303 
4304 static void
4305 bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4306 {
4307 	bfa_trc(rp->bfa, rp->rport_tag);
4308 	bfa_trc(rp->bfa, event);
4309 
4310 	switch (event) {
4311 	case BFA_RPORT_SM_QRESUME:
4312 		bfa_stats(rp, sm_del_fwrsp);
4313 		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4314 		bfa_rport_send_fwdelete(rp);
4315 		break;
4316 
4317 	case BFA_RPORT_SM_HWFAIL:
4318 		bfa_stats(rp, sm_del_hwf);
4319 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4320 		bfa_reqq_wcancel(&rp->reqq_wait);
4321 		bfa_rport_free(rp);
4322 		break;
4323 
4324 	default:
4325 		bfa_sm_fault(rp->bfa, event);
4326 	}
4327 }
4328 
4329 /*
4330  * Waiting for rport create response from firmware. A delete is pending.
4331  */
4332 static void
4333 bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4334 				enum bfa_rport_event event)
4335 {
4336 	bfa_trc(rp->bfa, rp->rport_tag);
4337 	bfa_trc(rp->bfa, event);
4338 
4339 	switch (event) {
4340 	case BFA_RPORT_SM_FWRSP:
4341 		bfa_stats(rp, sm_delp_fwrsp);
4342 		if (bfa_rport_send_fwdelete(rp))
4343 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4344 		else
4345 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4346 		break;
4347 
4348 	case BFA_RPORT_SM_HWFAIL:
4349 		bfa_stats(rp, sm_delp_hwf);
4350 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4351 		bfa_rport_free(rp);
4352 		break;
4353 
4354 	default:
4355 		bfa_stats(rp, sm_delp_unexp);
4356 		bfa_sm_fault(rp->bfa, event);
4357 	}
4358 }
4359 
4360 /*
4361  * Waiting for rport create response from firmware. Rport offline is pending.
4362  */
4363 static void
4364 bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4365 				 enum bfa_rport_event event)
4366 {
4367 	bfa_trc(rp->bfa, rp->rport_tag);
4368 	bfa_trc(rp->bfa, event);
4369 
4370 	switch (event) {
4371 	case BFA_RPORT_SM_FWRSP:
4372 		bfa_stats(rp, sm_offp_fwrsp);
4373 		if (bfa_rport_send_fwdelete(rp))
4374 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4375 		else
4376 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4377 		break;
4378 
4379 	case BFA_RPORT_SM_DELETE:
4380 		bfa_stats(rp, sm_offp_del);
4381 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4382 		break;
4383 
4384 	case BFA_RPORT_SM_HWFAIL:
4385 		bfa_stats(rp, sm_offp_hwf);
4386 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4387 		break;
4388 
4389 	default:
4390 		bfa_stats(rp, sm_offp_unexp);
4391 		bfa_sm_fault(rp->bfa, event);
4392 	}
4393 }
4394 
4395 /*
4396  * IOC h/w failed.
4397  */
4398 static void
4399 bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4400 {
4401 	bfa_trc(rp->bfa, rp->rport_tag);
4402 	bfa_trc(rp->bfa, event);
4403 
4404 	switch (event) {
4405 	case BFA_RPORT_SM_OFFLINE:
4406 		bfa_stats(rp, sm_iocd_off);
4407 		bfa_rport_offline_cb(rp);
4408 		break;
4409 
4410 	case BFA_RPORT_SM_DELETE:
4411 		bfa_stats(rp, sm_iocd_del);
4412 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4413 		bfa_rport_free(rp);
4414 		break;
4415 
4416 	case BFA_RPORT_SM_ONLINE:
4417 		bfa_stats(rp, sm_iocd_on);
4418 		if (bfa_rport_send_fwcreate(rp))
4419 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4420 		else
4421 			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4422 		break;
4423 
4424 	case BFA_RPORT_SM_HWFAIL:
4425 		break;
4426 
4427 	default:
4428 		bfa_stats(rp, sm_iocd_unexp);
4429 		bfa_sm_fault(rp->bfa, event);
4430 	}
4431 }
4432 
4433 
4434 
4435 /*
4436  *  bfa_rport_private BFA rport private functions
4437  */
4438 
4439 static void
4440 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4441 {
4442 	struct bfa_rport_s *rp = cbarg;
4443 
4444 	if (complete)
4445 		bfa_cb_rport_online(rp->rport_drv);
4446 }
4447 
4448 static void
4449 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4450 {
4451 	struct bfa_rport_s *rp = cbarg;
4452 
4453 	if (complete)
4454 		bfa_cb_rport_offline(rp->rport_drv);
4455 }
4456 
4457 static void
4458 bfa_rport_qresume(void *cbarg)
4459 {
4460 	struct bfa_rport_s	*rp = cbarg;
4461 
4462 	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4463 }
4464 
4465 static void
4466 bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4467 		struct bfa_s *bfa)
4468 {
4469 	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4470 
4471 	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4472 		cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4473 
4474 	/* kva memory */
4475 	bfa_mem_kva_setup(minfo, rport_kva,
4476 		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
4477 }
4478 
4479 static void
4480 bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4481 		struct bfa_pcidev_s *pcidev)
4482 {
4483 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4484 	struct bfa_rport_s *rp;
4485 	u16 i;
4486 
4487 	INIT_LIST_HEAD(&mod->rp_free_q);
4488 	INIT_LIST_HEAD(&mod->rp_active_q);
4489 	INIT_LIST_HEAD(&mod->rp_unused_q);
4490 
4491 	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
4492 	mod->rps_list = rp;
4493 	mod->num_rports = cfg->fwcfg.num_rports;
4494 
4495 	WARN_ON(!mod->num_rports ||
4496 		   (mod->num_rports & (mod->num_rports - 1)));
4497 
4498 	for (i = 0; i < mod->num_rports; i++, rp++) {
4499 		memset(rp, 0, sizeof(struct bfa_rport_s));
4500 		rp->bfa = bfa;
4501 		rp->rport_tag = i;
4502 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4503 
4504 		/*
4505 		 *  rport tag 0 is reserved (unused); keep it off the free queue
4506 		 */
4507 		if (i)
4508 			list_add_tail(&rp->qe, &mod->rp_free_q);
4509 
4510 		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4511 	}
4512 
4513 	/*
4514 	 * consume memory
4515 	 */
4516 	bfa_mem_kva_curp(mod) = (u8 *) rp;
4517 }
4518 
4519 static void
4520 bfa_rport_detach(struct bfa_s *bfa)
4521 {
4522 }
4523 
4524 static void
4525 bfa_rport_start(struct bfa_s *bfa)
4526 {
4527 }
4528 
4529 static void
4530 bfa_rport_stop(struct bfa_s *bfa)
4531 {
4532 }
4533 
4534 static void
4535 bfa_rport_iocdisable(struct bfa_s *bfa)
4536 {
4537 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4538 	struct bfa_rport_s *rport;
4539 	struct list_head *qe, *qen;
4540 
4541 	/* Enqueue unused rport resources to free_q */
4542 	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4543 
4544 	list_for_each_safe(qe, qen, &mod->rp_active_q) {
4545 		rport = (struct bfa_rport_s *) qe;
4546 		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4547 	}
4548 }
4549 
4550 static struct bfa_rport_s *
4551 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4552 {
4553 	struct bfa_rport_s *rport;
4554 
4555 	bfa_q_deq(&mod->rp_free_q, &rport);
4556 	if (rport)
4557 		list_add_tail(&rport->qe, &mod->rp_active_q);
4558 
4559 	return rport;
4560 }
4561 
4562 static void
4563 bfa_rport_free(struct bfa_rport_s *rport)
4564 {
4565 	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4566 
4567 	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4568 	list_del(&rport->qe);
4569 	list_add_tail(&rport->qe, &mod->rp_free_q);
4570 }
4571 
4572 static bfa_boolean_t
4573 bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4574 {
4575 	struct bfi_rport_create_req_s *m;
4576 
4577 	/*
4578 	 * check for room in queue to send request now
4579 	 */
4580 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4581 	if (!m) {
4582 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4583 		return BFA_FALSE;
4584 	}
4585 
4586 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
4587 			bfa_fn_lpu(rp->bfa));
4588 	m->bfa_handle = rp->rport_tag;
4589 	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
4590 	m->pid = rp->rport_info.pid;
4591 	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
4592 	m->local_pid = rp->rport_info.local_pid;
4593 	m->fc_class = rp->rport_info.fc_class;
4594 	m->vf_en = rp->rport_info.vf_en;
4595 	m->vf_id = rp->rport_info.vf_id;
4596 	m->cisc = rp->rport_info.cisc;
4597 
4598 	/*
4599 	 * queue I/O message to firmware
4600 	 */
4601 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4602 	return BFA_TRUE;
4603 }
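
/*
 * Editorial note: all three bfa_rport_send_* helpers follow the same
 * request-queue protocol, sketched below. On a full CQ the parked wait
 * element later fires bfa_rport_qresume(), and the corresponding
 * *_qfull state retries the send:
 *
 *	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);	// peek a slot
 *	if (!m) {					// CQ is full
 *		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
 *		return BFA_FALSE;	// caller moves SM to a *_qfull state
 *	}
 *	// ... build the message in *m ...
 *	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh); // ring doorbell
 */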
4604 
4605 static bfa_boolean_t
4606 bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4607 {
4608 	struct bfi_rport_delete_req_s *m;
4609 
4610 	/*
4611 	 * check for room in queue to send request now
4612 	 */
4613 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4614 	if (!m) {
4615 		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4616 		return BFA_FALSE;
4617 	}
4618 
4619 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
4620 			bfa_fn_lpu(rp->bfa));
4621 	m->fw_handle = rp->fw_handle;
4622 
4623 	/*
4624 	 * queue I/O message to firmware
4625 	 */
4626 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4627 	return BFA_TRUE;
4628 }
4629 
4630 static bfa_boolean_t
4631 bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4632 {
4633 	struct bfa_rport_speed_req_s *m;
4634 
4635 	/*
4636 	 * check for room in queue to send request now
4637 	 */
4638 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4639 	if (!m) {
4640 		bfa_trc(rp->bfa, rp->rport_info.speed);
4641 		return BFA_FALSE;
4642 	}
4643 
4644 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
4645 			bfa_fn_lpu(rp->bfa));
4646 	m->fw_handle = rp->fw_handle;
4647 	m->speed = (u8)rp->rport_info.speed;
4648 
4649 	/*
4650 	 * queue I/O message to firmware
4651 	 */
4652 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
4653 	return BFA_TRUE;
4654 }
4655 
4656 
4657 
4658 /*
4659  *  bfa_rport_public
4660  */
4661 
4662 /*
4663  * Rport interrupt processing.
4664  */
4665 void
4666 bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4667 {
4668 	union bfi_rport_i2h_msg_u msg;
4669 	struct bfa_rport_s *rp;
4670 
4671 	bfa_trc(bfa, m->mhdr.msg_id);
4672 
4673 	msg.msg = m;
4674 
4675 	switch (m->mhdr.msg_id) {
4676 	case BFI_RPORT_I2H_CREATE_RSP:
4677 		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4678 		rp->fw_handle = msg.create_rsp->fw_handle;
4679 		rp->qos_attr = msg.create_rsp->qos_attr;
4680 		bfa_rport_set_lunmask(bfa, rp);
4681 		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
4682 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4683 		break;
4684 
4685 	case BFI_RPORT_I2H_DELETE_RSP:
4686 		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
4687 		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
4688 		bfa_rport_unset_lunmask(bfa, rp);
4689 		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4690 		break;
4691 
4692 	case BFI_RPORT_I2H_QOS_SCN:
4693 		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4694 		rp->event_arg.fw_msg = msg.qos_scn_evt;
4695 		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4696 		break;
4697 
4698 	default:
4699 		bfa_trc(bfa, m->mhdr.msg_id);
4700 		WARN_ON(1);
4701 	}
4702 }
4703 
4704 void
4705 bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4706 {
4707 	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
4708 	struct list_head	*qe;
4709 	int	i;
4710 
4711 	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4712 		bfa_q_deq_tail(&mod->rp_free_q, &qe);
4713 		list_add_tail(qe, &mod->rp_unused_q);
4714 	}
4715 }
4716 
4717 /*
4718  *  bfa_rport_api
4719  */
4720 
4721 struct bfa_rport_s *
4722 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4723 {
4724 	struct bfa_rport_s *rp;
4725 
4726 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4727 
4728 	if (rp == NULL)
4729 		return NULL;
4730 
4731 	rp->bfa = bfa;
4732 	rp->rport_drv = rport_drv;
4733 	memset(&rp->stats, 0, sizeof(rp->stats));
4734 
4735 	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4736 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4737 
4738 	return rp;
4739 }
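
/*
 * Typical rport lifecycle as driven by the caller (illustrative sequence
 * only; fcs_rport and rport_info are hypothetical, and error/qfull paths
 * are omitted). Offline and delete are shown as the raw state-machine
 * events this file handles:
 *
 *	rp = bfa_rport_create(bfa, fcs_rport);	     // uninit -> created
 *	bfa_rport_online(rp, &rport_info);	     // created -> fwcreate
 *	// ... firmware FWRSP moves the rport to online ...
 *	bfa_rport_speed(rp, BFA_PORT_SPEED_8GBPS);   // optional, online only
 *	bfa_sm_send_event(rp, BFA_RPORT_SM_OFFLINE); // online -> fwdelete
 *	bfa_sm_send_event(rp, BFA_RPORT_SM_DELETE);  // frees from offline
 */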
4740 
4741 void
4742 bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4743 {
4744 	WARN_ON(rport_info->max_frmsz == 0);
4745 
4746 	/*
4747 	 * Some JBODs do not set the PDU size correctly in their PLOGI
4748 	 * responses. Default to the minimum size.
4749 	 */
4750 	if (rport_info->max_frmsz == 0) {
4751 		bfa_trc(rport->bfa, rport->rport_tag);
4752 		rport_info->max_frmsz = FC_MIN_PDUSZ;
4753 	}
4754 
4755 	rport->rport_info = *rport_info;
4756 	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4757 }
4758 
4759 void
4760 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4761 {
4762 	WARN_ON(speed == 0);
4763 	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
4764 
4765 	rport->rport_info.speed = speed;
4766 	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4767 }
4768 
4769 /* Set Rport LUN Mask */
4770 void
4771 bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4772 {
4773 	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
4774 	wwn_t	lp_wwn, rp_wwn;
4775 	u8 lp_tag = (u8)rp->rport_info.lp_tag;
4776 
4777 	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4778 	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4779 
4780 	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4781 					rp->lun_mask = BFA_TRUE;
4782 	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
4783 }
4784 
4785 /* Unset Rport LUN mask */
4786 void
4787 bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4788 {
4789 	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
4790 	wwn_t	lp_wwn, rp_wwn;
4791 
4792 	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4793 	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4794 
4795 	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4796 				rp->lun_mask = BFA_FALSE;
4797 	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
4798 			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
4799 }
4800 
4801 /*
4802  * SGPG related functions
4803  */
4804 
4805 /*
4806  * Compute and return memory needed by FCP(im) module.
4807  */
4808 static void
4809 bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4810 		struct bfa_s *bfa)
4811 {
4812 	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
4813 	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
4814 	struct bfa_mem_dma_s *seg_ptr;
4815 	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
4816 	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
4817 
4818 	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4819 		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4820 	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
4821 		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
4822 
4823 	num_sgpg = cfg->drvcfg.num_sgpgs;
4824 
4825 	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4826 	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
4827 
4828 	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
4829 		if (num_sgpg >= per_seg_sgpg) {
4830 			num_sgpg -= per_seg_sgpg;
4831 			bfa_mem_dma_setup(minfo, seg_ptr,
4832 					per_seg_sgpg * sgpg_sz);
4833 		} else
4834 			bfa_mem_dma_setup(minfo, seg_ptr,
4835 					num_sgpg * sgpg_sz);
4836 	}
4837 
4838 	/* kva memory */
4839 	bfa_mem_kva_setup(minfo, sgpg_kva,
4840 		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
4841 }
4842 
4843 static void
4844 bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4845 		struct bfa_pcidev_s *pcidev)
4846 {
4847 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4848 	struct bfa_sgpg_s *hsgpg;
4849 	struct bfi_sgpg_s *sgpg;
4850 	u64 align_len;
4851 	struct bfa_mem_dma_s *seg_ptr;
4852 	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
4853 	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;
4854 
4855 	union {
4856 		u64 pa;
4857 		union bfi_addr_u addr;
4858 	} sgpg_pa, sgpg_pa_tmp;
4859 
4860 	INIT_LIST_HEAD(&mod->sgpg_q);
4861 	INIT_LIST_HEAD(&mod->sgpg_wait_q);
4862 
4863 	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
4864 
4865 	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
4866 
4867 	num_sgpg = cfg->drvcfg.num_sgpgs;
4868 	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4869 
4870 	/* dma/kva mem claim */
4871 	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
4872 
4873 	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
4874 
4875 		if (!bfa_mem_dma_virt(seg_ptr))
4876 			break;
4877 
4878 		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
4879 					     bfa_mem_dma_phys(seg_ptr);
4880 
4881 		sgpg = (struct bfi_sgpg_s *)
4882 			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
4883 		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
4884 		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
4885 
4886 		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
4887 
4888 		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
4889 			memset(hsgpg, 0, sizeof(*hsgpg));
4890 			memset(sgpg, 0, sizeof(*sgpg));
4891 
4892 			hsgpg->sgpg = sgpg;
4893 			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
4894 			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
4895 			list_add_tail(&hsgpg->qe, &mod->sgpg_q);
4896 
4897 			sgpg++;
4898 			hsgpg++;
4899 			sgpg_pa.pa += sgpg_sz;
4900 		}
4901 	}
4902 
4903 	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
4904 }
4905 
4906 static void
4907 bfa_sgpg_detach(struct bfa_s *bfa)
4908 {
4909 }
4910 
4911 static void
4912 bfa_sgpg_start(struct bfa_s *bfa)
4913 {
4914 }
4915 
4916 static void
4917 bfa_sgpg_stop(struct bfa_s *bfa)
4918 {
4919 }
4920 
4921 static void
4922 bfa_sgpg_iocdisable(struct bfa_s *bfa)
4923 {
4924 }
4925 
4926 bfa_status_t
4927 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4928 {
4929 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4930 	struct bfa_sgpg_s *hsgpg;
4931 	int i;
4932 
4933 	if (mod->free_sgpgs < nsgpgs)
4934 		return BFA_STATUS_ENOMEM;
4935 
4936 	for (i = 0; i < nsgpgs; i++) {
4937 		bfa_q_deq(&mod->sgpg_q, &hsgpg);
4938 		WARN_ON(!hsgpg);
4939 		list_add_tail(&hsgpg->qe, sgpg_q);
4940 	}
4941 
4942 	mod->free_sgpgs -= nsgpgs;
4943 	return BFA_STATUS_OK;
4944 }
4945 
4946 void
4947 bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
4948 {
4949 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4950 	struct bfa_sgpg_wqe_s *wqe;
4951 
4952 	mod->free_sgpgs += nsgpg;
4953 	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
4954 
4955 	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
4956 
4957 	if (list_empty(&mod->sgpg_wait_q))
4958 		return;
4959 
4960 	/*
4961 	 * satisfy as many waiting requests as possible
4962 	 */
4963 	do {
4964 		wqe = bfa_q_first(&mod->sgpg_wait_q);
4965 		if (mod->free_sgpgs < wqe->nsgpg)
4966 			nsgpg = mod->free_sgpgs;
4967 		else
4968 			nsgpg = wqe->nsgpg;
4969 		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
4970 		wqe->nsgpg -= nsgpg;
4971 		if (wqe->nsgpg == 0) {
4972 			list_del(&wqe->qe);
4973 			wqe->cbfn(wqe->cbarg);
4974 		}
4975 	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
4976 }
4977 
4978 void
4979 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
4980 {
4981 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4982 
4983 	WARN_ON(nsgpg <= 0);
4984 	WARN_ON(nsgpg <= mod->free_sgpgs);
4985 
4986 	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
4987 
4988 	/*
4989 	 * hand any remaining free SGPGs to this request first
4990 	 */
4991 	if (mod->free_sgpgs) {
4992 		/*
4993 		 * no one else is waiting for SGPG
4994 		 */
4995 		WARN_ON(!list_empty(&mod->sgpg_wait_q));
4996 		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
4997 		wqe->nsgpg -= mod->free_sgpgs;
4998 		mod->free_sgpgs = 0;
4999 	}
5000 
5001 	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
5002 }
5003 
5004 void
5005 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5006 {
5007 	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5008 
5009 	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5010 	list_del(&wqe->qe);
5011 
5012 	if (wqe->nsgpg_total != wqe->nsgpg)
5013 		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5014 				   wqe->nsgpg_total - wqe->nsgpg);
5015 }
5016 
5017 void
5018 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5019 		   void *cbarg)
5020 {
5021 	INIT_LIST_HEAD(&wqe->sgpg_q);
5022 	wqe->cbfn = cbfn;
5023 	wqe->cbarg = cbarg;
5024 }
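
/*
 * SGPG allocation pattern used by I/O submitters (sketch; io_resume, io,
 * sgpg_q and wqe are hypothetical names). The fast path allocates
 * synchronously; when pages run out the caller parks a wait element
 * initialized with bfa_sgpg_winit() and continues from its callback once
 * bfa_sgpg_mfree() replenishes the pool:
 *
 *	INIT_LIST_HEAD(&sgpg_q);
 *	if (bfa_sgpg_malloc(bfa, &sgpg_q, nsgpg) == BFA_STATUS_ENOMEM) {
 *		bfa_sgpg_winit(&wqe, io_resume, io);
 *		bfa_sgpg_wait(bfa, &wqe, nsgpg);  // partial pages may be
 *						  // handed over immediately
 *	}
 *	// ... build SG pages and issue the I/O ...
 *	bfa_sgpg_mfree(bfa, &sgpg_q, nsgpg);	  // return pages when done
 */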
5025 
5026 /*
5027  *  UF related functions
5028  */
5029 /*
5030  *****************************************************************************
5031  * Internal functions
5032  *****************************************************************************
5033  */
5034 static void
5035 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5036 {
5037 	struct bfa_uf_s   *uf = cbarg;
5038 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5039 
5040 	if (complete)
5041 		ufm->ufrecv(ufm->cbarg, uf);
5042 }
5043 
5044 static void
5045 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5046 {
5047 	struct bfi_uf_buf_post_s *uf_bp_msg;
5048 	u16 i;
5049 	u16 buf_len;
5050 
5051 	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5052 	uf_bp_msg = ufm->uf_buf_posts;
5053 
5054 	for (i = 0; i < ufm->num_ufs; i++, uf_bp_msg++) {
5056 		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5057 
5058 		uf_bp_msg->buf_tag = i;
5059 		buf_len = sizeof(struct bfa_uf_buf_s);
5060 		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5061 		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5062 			    bfa_fn_lpu(ufm->bfa));
5063 		bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5064 	}
5065 
5066 	/*
5067 	 * advance pointer beyond consumed memory
5068 	 */
5069 	bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5070 }
5071 
5072 static void
5073 claim_ufs(struct bfa_uf_mod_s *ufm)
5074 {
5075 	u16 i;
5076 	struct bfa_uf_s   *uf;
5077 
5078 	/*
5079 	 * Claim block of memory for UF list
5080 	 */
5081 	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
5082 
5083 	/*
5084 	 * Initialize UFs and queue them in the UF free queue
5085 	 */
5086 	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5087 		memset(uf, 0, sizeof(struct bfa_uf_s));
5088 		uf->bfa = ufm->bfa;
5089 		uf->uf_tag = i;
5090 		uf->pb_len = BFA_PER_UF_DMA_SZ;
5091 		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
5092 		uf->buf_pa = ufm_pbs_pa(ufm, i);
5093 		list_add_tail(&uf->qe, &ufm->uf_free_q);
5094 	}
5095 
5096 	/*
5097 	 * advance memory pointer
5098 	 */
5099 	bfa_mem_kva_curp(ufm) = (u8 *) uf;
5100 }
5101 
5102 static void
5103 uf_mem_claim(struct bfa_uf_mod_s *ufm)
5104 {
5105 	claim_ufs(ufm);
5106 	claim_uf_post_msgs(ufm);
5107 }
5108 
5109 static void
5110 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5111 		struct bfa_s *bfa)
5112 {
5113 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5114 	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5115 	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
5116 	struct bfa_mem_dma_s *seg_ptr;
5117 	u16	nsegs, idx, per_seg_uf = 0;
5118 
5119 	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5120 	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
5121 
5122 	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5123 		if (num_ufs >= per_seg_uf) {
5124 			num_ufs -= per_seg_uf;
5125 			bfa_mem_dma_setup(minfo, seg_ptr,
5126 				per_seg_uf * BFA_PER_UF_DMA_SZ);
5127 		} else
5128 			bfa_mem_dma_setup(minfo, seg_ptr,
5129 				num_ufs * BFA_PER_UF_DMA_SZ);
5130 	}
5131 
5132 	/* kva memory */
5133 	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5134 		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
5135 }
5136 
5137 static void
5138 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5139 		struct bfa_pcidev_s *pcidev)
5140 {
5141 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5142 
5143 	ufm->bfa = bfa;
5144 	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5145 	INIT_LIST_HEAD(&ufm->uf_free_q);
5146 	INIT_LIST_HEAD(&ufm->uf_posted_q);
5147 	INIT_LIST_HEAD(&ufm->uf_unused_q);
5148 
5149 	uf_mem_claim(ufm);
5150 }
5151 
5152 static void
5153 bfa_uf_detach(struct bfa_s *bfa)
5154 {
5155 }
5156 
5157 static struct bfa_uf_s *
5158 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5159 {
5160 	struct bfa_uf_s   *uf;
5161 
5162 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
5163 	return uf;
5164 }
5165 
5166 static void
5167 bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
5168 {
5169 	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
5170 }
5171 
5172 static bfa_status_t
5173 bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5174 {
5175 	struct bfi_uf_buf_post_s *uf_post_msg;
5176 
5177 	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5178 	if (!uf_post_msg)
5179 		return BFA_STATUS_FAILED;
5180 
5181 	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
5182 		      sizeof(struct bfi_uf_buf_post_s));
5183 	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
5184 
5185 	bfa_trc(ufm->bfa, uf->uf_tag);
5186 
5187 	list_add_tail(&uf->qe, &ufm->uf_posted_q);
5188 	return BFA_STATUS_OK;
5189 }
5190 
5191 static void
5192 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5193 {
5194 	struct bfa_uf_s   *uf;
5195 
5196 	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5197 		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5198 			break;
5199 	}
5200 }
5201 
5202 static void
5203 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5204 {
5205 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5206 	u16 uf_tag = m->buf_tag;
5207 	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5208 	struct bfa_uf_buf_s *uf_buf;
5209 	u8 *buf;
5210 	struct fchs_s *fchs;
5211 
5212 	uf_buf = (struct bfa_uf_buf_s *)
5213 			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5214 	buf = &uf_buf->d[0];
5215 
5216 	m->frm_len = be16_to_cpu(m->frm_len);
5217 	m->xfr_len = be16_to_cpu(m->xfr_len);
5218 
5219 	fchs = (struct fchs_s *)uf_buf;
5220 
5221 	list_del(&uf->qe);	/* dequeue from posted queue */
5222 
5223 	uf->data_ptr = buf;
5224 	uf->data_len = m->xfr_len;
5225 
5226 	WARN_ON(uf->data_len < sizeof(struct fchs_s));
5227 
5228 	if (uf->data_len == sizeof(struct fchs_s)) {
5229 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5230 			       uf->data_len, (struct fchs_s *)buf);
5231 	} else {
5232 		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5233 		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5234 				      BFA_PL_EID_RX, uf->data_len,
5235 				      (struct fchs_s *)buf, pld_w0);
5236 	}
5237 
5238 	if (bfa->fcs)
5239 		__bfa_cb_uf_recv(uf, BFA_TRUE);
5240 	else
5241 		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5242 }
5243 
5244 static void
5245 bfa_uf_stop(struct bfa_s *bfa)
5246 {
5247 }
5248 
5249 static void
5250 bfa_uf_iocdisable(struct bfa_s *bfa)
5251 {
5252 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5253 	struct bfa_uf_s *uf;
5254 	struct list_head *qe, *qen;
5255 
5256 	/* Enqueue unused uf resources to free_q */
5257 	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5258 
5259 	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5260 		uf = (struct bfa_uf_s *) qe;
5261 		list_del(&uf->qe);
5262 		bfa_uf_put(ufm, uf);
5263 	}
5264 }
5265 
5266 static void
5267 bfa_uf_start(struct bfa_s *bfa)
5268 {
5269 	bfa_uf_post_all(BFA_UF_MOD(bfa));
5270 }
5271 
5272 /*
5273  * Register handler for all unsolicited receive frames.
5274  *
5275  * @param[in]	bfa		BFA instance
5276  * @param[in]	ufrecv	receive handler function
5277  * @param[in]	cbarg	receive handler arg
5278  */
5279 void
5280 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5281 {
5282 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5283 
5284 	ufm->ufrecv = ufrecv;
5285 	ufm->cbarg = cbarg;
5286 }
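
/*
 * Usage sketch (drv_ufrecv and drv are hypothetical names): a single
 * receive handler is registered at init time; every delivered frame must
 * be returned with bfa_uf_free() so its buffer is re-posted to firmware:
 *
 *	static void
 *	drv_ufrecv(void *cbarg, struct bfa_uf_s *uf)
 *	{
 *		// ... parse uf->data_ptr for uf->data_len bytes ...
 *		bfa_uf_free(uf);
 *	}
 *
 *	bfa_uf_recv_register(bfa, drv_ufrecv, drv);
 */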
5287 
5288 /*
5289  *	Free an unsolicited frame back to BFA.
5290  *
5291  * @param[in]		uf		unsolicited frame to be freed
5292  *
5293  * @return None
5294  */
5295 void
5296 bfa_uf_free(struct bfa_uf_s *uf)
5297 {
5298 	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5299 	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5300 }
5301 
5302 
5303 
5304 /*
5305  *  uf_pub BFA uf module public functions
5306  */
5307 void
5308 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5309 {
5310 	bfa_trc(bfa, msg->mhdr.msg_id);
5311 
5312 	switch (msg->mhdr.msg_id) {
5313 	case BFI_UF_I2H_FRM_RCVD:
5314 		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5315 		break;
5316 
5317 	default:
5318 		bfa_trc(bfa, msg->mhdr.msg_id);
5319 		WARN_ON(1);
5320 	}
5321 }
5322 
5323 void
5324 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5325 {
5326 	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
5327 	struct list_head	*qe;
5328 	int	i;
5329 
5330 	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5331 		bfa_q_deq_tail(&mod->uf_free_q, &qe);
5332 		list_add_tail(qe, &mod->uf_unused_q);
5333 	}
5334 }
5335 
5336 /*
5337  *	BFA fcdiag module
5338  */
5339 #define BFA_DIAG_QTEST_TOV	1000    /* msec */
5340 
5341 /*
5342  *	Update the port diag busy status from the loopback lock state
5343  */
5344 static void
5345 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5346 {
5347 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5348 
5349 	if (fcdiag->lb.lock)
5350 		fcport->diag_busy = BFA_TRUE;
5351 	else
5352 		fcport->diag_busy = BFA_FALSE;
5353 }
5354 
5355 static void
5356 bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5357 		struct bfa_s *bfa)
5358 {
5359 }
5360 
5361 static void
5362 bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5363 		struct bfa_pcidev_s *pcidev)
5364 {
5365 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5366 	fcdiag->bfa             = bfa;
5367 	fcdiag->trcmod  = bfa->trcmod;
5368 	/* The common DIAG attach, bfa_diag_attach(), claims all memory */
5369 }
5370 
5371 static void
5372 bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5373 {
5374 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5375 	bfa_trc(fcdiag, fcdiag->lb.lock);
5376 	if (fcdiag->lb.lock) {
5377 		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5378 		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5379 		fcdiag->lb.lock = 0;
5380 		bfa_fcdiag_set_busy_status(fcdiag);
5381 	}
5382 }
5383 
5384 static void
5385 bfa_fcdiag_detach(struct bfa_s *bfa)
5386 {
5387 }
5388 
5389 static void
5390 bfa_fcdiag_start(struct bfa_s *bfa)
5391 {
5392 }
5393 
5394 static void
5395 bfa_fcdiag_stop(struct bfa_s *bfa)
5396 {
5397 }
5398 
5399 static void
5400 bfa_fcdiag_queuetest_timeout(void *cbarg)
5401 {
5402 	struct bfa_fcdiag_s       *fcdiag = cbarg;
5403 	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5404 
5405 	bfa_trc(fcdiag, fcdiag->qtest.all);
5406 	bfa_trc(fcdiag, fcdiag->qtest.count);
5407 
5408 	fcdiag->qtest.timer_active = 0;
5409 
5410 	res->status = BFA_STATUS_ETIMER;
5411 	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5412 	if (fcdiag->qtest.all)
5413 		res->queue  = fcdiag->qtest.all;
5414 
5415 	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5416 	fcdiag->qtest.status = BFA_STATUS_ETIMER;
5417 	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5418 	fcdiag->qtest.lock = 0;
5419 }
5420 
5421 static bfa_status_t
5422 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5423 {
5424 	u32	i;
5425 	struct bfi_diag_qtest_req_s *req;
5426 
5427 	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5428 	if (!req)
5429 		return BFA_STATUS_DEVBUSY;
5430 
5431 	/* build host command */
5432 	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5433 		bfa_fn_lpu(fcdiag->bfa));
5434 
5435 	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5436 		req->data[i] = QTEST_PAT_DEFAULT;
5437 
5438 	bfa_trc(fcdiag, fcdiag->qtest.queue);
5439 	/* ring door bell */
5440 	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5441 	return BFA_STATUS_OK;
5442 }
5443 
5444 static void
5445 bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
5446 			bfi_diag_qtest_rsp_t *rsp)
5447 {
5448 	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5449 	bfa_status_t status = BFA_STATUS_OK;
5450 	int i;
5451 
5452 	/* The timer should still be active; ignore a late response */
5453 	if (!fcdiag->qtest.timer_active) {
5454 		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
5455 		return;
5456 	}
5457 
5458 	/* update count */
5459 	fcdiag->qtest.count--;
5460 
5461 	/* Check result */
5462 	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
5463 		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
5464 			res->status = BFA_STATUS_DATACORRUPTED;
5465 			break;
5466 		}
5467 	}
5468 
5469 	if (res->status == BFA_STATUS_OK) {
5470 		if (fcdiag->qtest.count > 0) {
5471 			status = bfa_fcdiag_queuetest_send(fcdiag);
5472 			if (status == BFA_STATUS_OK)
5473 				return;
5474 			else
5475 				res->status = status;
5476 		} else if (fcdiag->qtest.all > 0 &&
5477 			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
5478 			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5479 			fcdiag->qtest.queue++;
5480 			status = bfa_fcdiag_queuetest_send(fcdiag);
5481 			if (status == BFA_STATUS_OK)
5482 				return;
5483 			else
5484 				res->status = status;
5485 		}
5486 	}
5487 
5488 	/* Stop the timer once all queues have completed */
5489 	if (fcdiag->qtest.timer_active) {
5490 		bfa_timer_stop(&fcdiag->qtest.timer);
5491 		fcdiag->qtest.timer_active = 0;
5492 	}
5493 	res->queue = fcdiag->qtest.queue;
5494 	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5495 	bfa_trc(fcdiag, res->count);
5496 	bfa_trc(fcdiag, res->status);
5497 	fcdiag->qtest.status = res->status;
5498 	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5499 	fcdiag->qtest.lock = 0;
5500 }
5501 
5502 static void
5503 bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
5504 			struct bfi_diag_lb_rsp_s *rsp)
5505 {
5506 	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
5507 
5508 	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
5509 	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
5510 	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
5511 	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
5512 	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
5513 	res->status     = rsp->res.status;
5514 	fcdiag->lb.status = rsp->res.status;
5515 	bfa_trc(fcdiag, fcdiag->lb.status);
5516 	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5517 	fcdiag->lb.lock = 0;
5518 	bfa_fcdiag_set_busy_status(fcdiag);
5519 }
5520 
5521 static bfa_status_t
5522 bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5523 			struct bfa_diag_loopback_s *loopback)
5524 {
5525 	struct bfi_diag_lb_req_s *lb_req;
5526 
5527 	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5528 	if (!lb_req)
5529 		return BFA_STATUS_DEVBUSY;
5530 
5531 	/* build host command */
5532 	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5533 		bfa_fn_lpu(fcdiag->bfa));
5534 
5535 	lb_req->lb_mode = loopback->lb_mode;
5536 	lb_req->speed = loopback->speed;
5537 	lb_req->loopcnt = loopback->loopcnt;
5538 	lb_req->pattern = loopback->pattern;
5539 
5540 	/* ring door bell */
5541 	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5542 
5543 	bfa_trc(fcdiag, loopback->lb_mode);
5544 	bfa_trc(fcdiag, loopback->speed);
5545 	bfa_trc(fcdiag, loopback->loopcnt);
5546 	bfa_trc(fcdiag, loopback->pattern);
5547 	return BFA_STATUS_OK;
5548 }
5549 
5550 /*
5551  *	cpe/rme intr handler
5552  */
5553 void
5554 bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5555 {
5556 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5557 
5558 	switch (msg->mhdr.msg_id) {
5559 	case BFI_DIAG_I2H_LOOPBACK:
5560 		bfa_fcdiag_loopback_comp(fcdiag,
5561 				(struct bfi_diag_lb_rsp_s *) msg);
5562 		break;
5563 	case BFI_DIAG_I2H_QTEST:
5564 		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5565 		break;
5566 	default:
5567 		bfa_trc(fcdiag, msg->mhdr.msg_id);
5568 		WARN_ON(1);
5569 	}
5570 }
5571 
5572 /*
5573  *	Loopback test
5574  *
5575  *   @param[in] *bfa            - bfa data struct
5576  *   @param[in] opmode          - port operation mode
5577  *   @param[in] speed           - port speed
5578  *   @param[in] lpcnt           - loop count
5579  *   @param[in] pat                     - pattern to build packet
5580  *   @param[in] *result         - ptr to bfa_diag_loopback_result_t data struct
5581  *   @param[in] cbfn            - callback function
5582  *   @param[in] cbarg           - callback function arg
5583  *
5584  *   @param[out]
5585  */
5586 bfa_status_t
5587 bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5588 		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5589 		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5590 		void *cbarg)
5591 {
5592 	struct bfa_diag_loopback_s loopback;
5593 	struct bfa_port_attr_s attr;
5594 	bfa_status_t status;
5595 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5596 
5597 	if (!bfa_iocfc_is_operational(bfa))
5598 		return BFA_STATUS_IOC_NON_OP;
5599 
5600 	/* if port is PBC disabled, return error */
5601 	if (bfa_fcport_is_pbcdisabled(bfa)) {
5602 		bfa_trc(fcdiag, BFA_STATUS_PBC);
5603 		return BFA_STATUS_PBC;
5604 	}
5605 
5606 	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5607 		bfa_trc(fcdiag, opmode);
5608 		return BFA_STATUS_PORT_NOT_DISABLED;
5609 	}
5610 
5611 	/*
5612 	 * Check if input speed is supported by the port mode
5613 	 */
5614 	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5615 		if (!(speed == BFA_PORT_SPEED_1GBPS ||
5616 		      speed == BFA_PORT_SPEED_2GBPS ||
5617 		      speed == BFA_PORT_SPEED_4GBPS ||
5618 		      speed == BFA_PORT_SPEED_8GBPS ||
5619 		      speed == BFA_PORT_SPEED_16GBPS ||
5620 		      speed == BFA_PORT_SPEED_AUTO)) {
5621 			bfa_trc(fcdiag, speed);
5622 			return BFA_STATUS_UNSUPP_SPEED;
5623 		}
5624 		bfa_fcport_get_attr(bfa, &attr);
5625 		bfa_trc(fcdiag, attr.speed_supported);
5626 		if (speed > attr.speed_supported)
5627 			return BFA_STATUS_UNSUPP_SPEED;
5628 	} else {
5629 		if (speed != BFA_PORT_SPEED_10GBPS) {
5630 			bfa_trc(fcdiag, speed);
5631 			return BFA_STATUS_UNSUPP_SPEED;
5632 		}
5633 	}
5634 
5635 	/* For Mezz cards, the port speed entered needs additional checks */
5636 	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5637 		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5638 			if ((speed == BFA_PORT_SPEED_1GBPS) &&
5639 			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
5640 				return BFA_STATUS_UNSUPP_SPEED;
5641 			if (!(speed == BFA_PORT_SPEED_1GBPS ||
5642 			      speed == BFA_PORT_SPEED_2GBPS ||
5643 			      speed == BFA_PORT_SPEED_4GBPS ||
5644 			      speed == BFA_PORT_SPEED_8GBPS ||
5645 			      speed == BFA_PORT_SPEED_16GBPS ||
5646 			      speed == BFA_PORT_SPEED_AUTO))
5647 				return BFA_STATUS_UNSUPP_SPEED;
5648 		} else {
5649 			if (speed != BFA_PORT_SPEED_10GBPS)
5650 				return BFA_STATUS_UNSUPP_SPEED;
5651 		}
5652 	}
5653 
5654 	/* check to see if there is another destructive diag cmd running */
5655 	if (fcdiag->lb.lock) {
5656 		bfa_trc(fcdiag, fcdiag->lb.lock);
5657 		return BFA_STATUS_DEVBUSY;
5658 	}
5659 
5660 	fcdiag->lb.lock = 1;
5661 	loopback.lb_mode = opmode;
5662 	loopback.speed = speed;
5663 	loopback.loopcnt = lpcnt;
5664 	loopback.pattern = pat;
5665 	fcdiag->lb.result = result;
5666 	fcdiag->lb.cbfn = cbfn;
5667 	fcdiag->lb.cbarg = cbarg;
5668 	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
5669 	bfa_fcdiag_set_busy_status(fcdiag);
5670 
5671 	/* Send msg to fw */
5672 	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
5673 	return status;
5674 }
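
/*
 * Usage sketch (diag_done, drv and res are hypothetical names; the
 * opmode is assumed to be one of the loopback values in enum
 * bfa_port_opmode). The port must already be disabled; completion is
 * reported through the BFI_DIAG_I2H_LOOPBACK response handled above:
 *
 *	static void diag_done(void *cbarg, bfa_status_t status) { ... }
 *
 *	struct bfa_diag_loopback_result_s res;
 *
 *	status = bfa_fcdiag_loopback(bfa, BFA_PORT_OPMODE_LB_INT,
 *			BFA_PORT_SPEED_AUTO, 8, 0xa5a5a5a5, &res,
 *			diag_done, drv);
 */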
5675 
5676 /*
5677  *	DIAG queue test command
5678  *
5679  *   @param[in] *bfa            - bfa data struct
5680  *   @param[in] force           - 1: don't do ioc op checking
5681  *   @param[in] queue           - queue no. to test
5682  *   @param[in] *result         - ptr to bfa_diag_qtest_result_t data struct
5683  *   @param[in] cbfn            - callback function
5684  *   @param[in] *cbarg          - callback function arg
5685  *
5686  *   @param[out]
5687  */
5688 bfa_status_t
5689 bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
5690 		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
5691 		void *cbarg)
5692 {
5693 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5694 	bfa_status_t status;
5695 	bfa_trc(fcdiag, force);
5696 	bfa_trc(fcdiag, queue);
5697 
5698 	if (!force && !bfa_iocfc_is_operational(bfa))
5699 		return BFA_STATUS_IOC_NON_OP;
5700 
5701 	/* check to see if there is another destructive diag cmd running */
5702 	if (fcdiag->qtest.lock) {
5703 		bfa_trc(fcdiag, fcdiag->qtest.lock);
5704 		return BFA_STATUS_DEVBUSY;
5705 	}
5706 
5707 	/* Initialization */
5708 	fcdiag->qtest.lock = 1;
5709 	fcdiag->qtest.cbfn = cbfn;
5710 	fcdiag->qtest.cbarg = cbarg;
5711 	fcdiag->qtest.result = result;
5712 	fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5713 
5714 	/* Init test results */
5715 	fcdiag->qtest.result->status = BFA_STATUS_OK;
5716 	fcdiag->qtest.result->count  = 0;
5717 
5718 	/* send */
5719 	if (queue < BFI_IOC_MAX_CQS) {
5720 		fcdiag->qtest.result->queue  = (u8)queue;
5721 		fcdiag->qtest.queue = (u8)queue;
5722 		fcdiag->qtest.all   = 0;
5723 	} else {
5724 		fcdiag->qtest.result->queue  = 0;
5725 		fcdiag->qtest.queue = 0;
5726 		fcdiag->qtest.all   = 1;
5727 	}
5728 	status = bfa_fcdiag_queuetest_send(fcdiag);
5729 
5730 	/* Start a timer */
5731 	if (status == BFA_STATUS_OK) {
5732 		bfa_timer_start(bfa, &fcdiag->qtest.timer,
5733 				bfa_fcdiag_queuetest_timeout, fcdiag,
5734 				BFA_DIAG_QTEST_TOV);
5735 		fcdiag->qtest.timer_active = 1;
5736 	}
5737 	return status;
5738 }
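
/*
 * Example: a queue number below BFI_IOC_MAX_CQS tests just that CQ; any
 * larger value sets qtest.all and walks every CQ in turn (illustrative
 * call; diag_done, drv and qres are hypothetical):
 *
 *	struct bfa_diag_qtest_result_s qres;
 *
 *	bfa_fcdiag_queuetest(bfa, 0, BFI_IOC_MAX_CQS, &qres, diag_done, drv);
 */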
5739 
5740 /*
5741  * Check if a DIAG loopback (PLB) is running
5742  *
5743  *   @param[in] *bfa    - bfa data struct
5744  *
5745  *   @param[out]
5746  */
5747 bfa_status_t
5748 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5749 {
5750 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5751 	return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
5752 }
5753