xref: /linux/drivers/scsi/bfa/bfa_svc.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
4  * Copyright (c) 2014- QLogic Corporation.
5  * All rights reserved
6  * www.qlogic.com
7  *
8  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
9  */
10 
11 #include "bfad_drv.h"
12 #include "bfad_im.h"
13 #include "bfa_plog.h"
14 #include "bfa_cs.h"
15 #include "bfa_modules.h"
16 
17 BFA_TRC_FILE(HAL, FCXP);
18 
19 /*
20  * LPS related definitions
21  */
22 #define BFA_LPS_MIN_LPORTS      (1)
23 #define BFA_LPS_MAX_LPORTS      (256)
24 
25 /*
26  * Maximum Vports supported per physical port or vf.
27  */
28 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
29 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
30 
31 
32 /*
33  * FC PORT related definitions
34  */
35 /*
36  * The port is considered disabled if corresponding physical port or IOC are
37  * disabled explicitly
38  */
39 #define BFA_PORT_IS_DISABLED(bfa) \
40 	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
41 	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
42 
/*
 * RPORT related definitions
 *
 * Offline/online completion helpers: when an FCS instance is attached
 * ((__rp)->bfa->fcs is set) the driver callback is invoked directly;
 * otherwise the completion is deferred through the hcb callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));      \
	}								\
} while (0)

#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				  __bfa_cb_rport_online, (__rp));      \
		}							\
} while (0)
63 
64 /*
65  * forward declarations FCXP related functions
66  */
67 static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
68 static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
69 				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
70 static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
71 				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
72 static void	bfa_fcxp_qresume(void *cbarg);
73 static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
74 				struct bfi_fcxp_send_req_s *send_req);
75 
76 /*
77  * forward declarations for LPS functions
78  */
79 static void bfa_lps_login_rsp(struct bfa_s *bfa,
80 				struct bfi_lps_login_rsp_s *rsp);
81 static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
82 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
83 				struct bfi_lps_logout_rsp_s *rsp);
84 static void bfa_lps_reqq_resume(void *lps_arg);
85 static void bfa_lps_free(struct bfa_lps_s *lps);
86 static void bfa_lps_send_login(struct bfa_lps_s *lps);
87 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
88 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
89 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
90 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
91 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
92 
93 /*
94  * forward declaration for LPS state machine
95  */
96 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
97 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
98 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
99 					event);
100 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
101 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
102 					enum bfa_lps_event event);
103 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
104 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
105 					event);
106 
107 /*
108  * forward declaration for FC Port functions
109  */
110 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
111 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
112 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
113 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
114 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
115 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
116 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
117 			enum bfa_port_linkstate event, bfa_boolean_t trunk);
118 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
119 				enum bfa_port_linkstate event);
120 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
121 static void bfa_fcport_stats_get_timeout(void *cbarg);
122 static void bfa_fcport_stats_clr_timeout(void *cbarg);
123 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
124 
125 /*
126  * forward declaration for FC PORT state machine
127  */
128 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
129 					enum bfa_fcport_sm_event event);
130 static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
131 					enum bfa_fcport_sm_event event);
132 static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
133 					enum bfa_fcport_sm_event event);
134 static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
135 					enum bfa_fcport_sm_event event);
136 static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
137 					enum bfa_fcport_sm_event event);
138 static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
139 					enum bfa_fcport_sm_event event);
140 static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
141 					enum bfa_fcport_sm_event event);
142 static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
143 					enum bfa_fcport_sm_event event);
144 static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
145 					enum bfa_fcport_sm_event event);
146 static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
147 					enum bfa_fcport_sm_event event);
148 static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
149 					enum bfa_fcport_sm_event event);
150 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
151 					enum bfa_fcport_sm_event event);
152 static void	bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
153 					enum bfa_fcport_sm_event event);
154 static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
155 					enum bfa_fcport_sm_event event);
156 static void	bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
157 					enum bfa_fcport_sm_event event);
158 
159 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
160 					enum bfa_fcport_ln_sm_event event);
161 static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
162 					enum bfa_fcport_ln_sm_event event);
163 static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
164 					enum bfa_fcport_ln_sm_event event);
165 static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
166 					enum bfa_fcport_ln_sm_event event);
167 static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
168 					enum bfa_fcport_ln_sm_event event);
169 static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
170 					enum bfa_fcport_ln_sm_event event);
171 static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
172 					enum bfa_fcport_ln_sm_event event);
173 
/*
 * Mapping entry from an FC port state-machine handler function to its
 * externally visible state encoding and display name.
 */
struct bfa_fcport_sm_table_s {
	bfa_fcport_sm_t sm;		/*  state machine function	*/
	enum bfa_port_states state;	/*  state machine encoding	*/
	char		*name;		/*  state name for display	*/
};
179 
180 static inline enum bfa_port_states
181 bfa_fcport_sm_to_state(struct bfa_fcport_sm_table_s *smt, bfa_fcport_sm_t sm)
182 {
183 	int i = 0;
184 
185 	while (smt[i].sm && smt[i].sm != sm)
186 		i++;
187 	return smt[i].state;
188 }
189 
/*
 * FC port state-machine handler -> BFA_PORT_ST_* encoding table,
 * consumed by bfa_fcport_sm_to_state().
 *
 * NOTE(review): no NULL-terminator entry is visible here; confirm that
 * bfa_fcport_sm_to_state() is only ever called with handlers that are
 * present in this table.
 */
static struct bfa_fcport_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
207 
208 
209 /*
210  * forward declaration for RPORT related functions
211  */
212 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
213 static void		bfa_rport_free(struct bfa_rport_s *rport);
214 static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
215 static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
216 static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
217 static void		__bfa_cb_rport_online(void *cbarg,
218 						bfa_boolean_t complete);
219 static void		__bfa_cb_rport_offline(void *cbarg,
220 						bfa_boolean_t complete);
221 
222 /*
223  * forward declaration for RPORT state machine
224  */
225 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
226 					enum bfa_rport_event event);
227 static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
228 					enum bfa_rport_event event);
229 static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
230 					enum bfa_rport_event event);
231 static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
232 					enum bfa_rport_event event);
233 static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
234 					enum bfa_rport_event event);
235 static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
236 					enum bfa_rport_event event);
237 static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
238 					enum bfa_rport_event event);
239 static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
240 					enum bfa_rport_event event);
241 static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
242 					enum bfa_rport_event event);
243 static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
244 					enum bfa_rport_event event);
245 static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
246 					enum bfa_rport_event event);
247 static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
248 					enum bfa_rport_event event);
249 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
250 					enum bfa_rport_event event);
251 
252 /*
253  * PLOG related definitions
254  */
255 static int
256 plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
257 {
258 	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
259 		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
260 		return 1;
261 
262 	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
263 		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
264 		return 1;
265 
266 	return 0;
267 }
268 
/*
 * Append one record to the circular port log.
 *
 * The record is copied verbatim into the slot at ->tail, timestamped,
 * and the tail advanced; when the ring becomes full the oldest record
 * is dropped by advancing ->head as well.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	/* logging can be disabled at runtime */
	if (plog->plog_enabled == 0)
		return;

	/* malformed records are dropped with a warning */
	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* stamp with wall-clock seconds */
	pl_recp->tv = ktime_get_real_seconds();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* ring full: overwrite (drop) the oldest record */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
295 
296 void
297 bfa_plog_init(struct bfa_plog_s *plog)
298 {
299 	memset((char *)plog, 0, sizeof(struct bfa_plog_s));
300 
301 	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
302 	plog->head = plog->tail = 0;
303 	plog->plog_enabled = 1;
304 }
305 
306 void
307 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
308 		enum bfa_plog_eid event,
309 		u16 misc, char *log_str)
310 {
311 	struct bfa_plog_rec_s  lp;
312 
313 	if (plog->plog_enabled) {
314 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
315 		lp.mid = mid;
316 		lp.eid = event;
317 		lp.log_type = BFA_PL_LOG_TYPE_STRING;
318 		lp.misc = misc;
319 		strscpy(lp.log_entry.string_log, log_str,
320 			BFA_PL_STRING_LOG_SZ);
321 		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
322 		bfa_plog_add(plog, &lp);
323 	}
324 }
325 
326 void
327 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
328 		enum bfa_plog_eid event,
329 		u16 misc, u32 *intarr, u32 num_ints)
330 {
331 	struct bfa_plog_rec_s  lp;
332 	u32 i;
333 
334 	if (num_ints > BFA_PL_INT_LOG_SZ)
335 		num_ints = BFA_PL_INT_LOG_SZ;
336 
337 	if (plog->plog_enabled) {
338 		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
339 		lp.mid = mid;
340 		lp.eid = event;
341 		lp.log_type = BFA_PL_LOG_TYPE_INT;
342 		lp.misc = misc;
343 
344 		for (i = 0; i < num_ints; i++)
345 			lp.log_entry.int_log[i] = intarr[i];
346 
347 		lp.log_num_ints = (u8) num_ints;
348 
349 		bfa_plog_add(plog, &lp);
350 	}
351 }
352 
/*
 * Log an FC frame header as an integer record.
 *
 * Words 0, 1 and 4 of the raw header are captured — presumably the
 * R_CTL/D_ID, CS_CTL/S_ID and OX_ID/RX_ID words of a standard FC
 * header; NOTE(review): confirm against the struct fchs_s layout.
 * The header is accessed through a u32 alias of the fchs_s pointer.
 */
void
bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
			enum bfa_plog_eid event,
			u16 misc, struct fchs_s *fchdr)
{
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];

		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
	}
}
369 
/*
 * Log an FC frame header plus the first payload word as an integer
 * record.  Same header words as bfa_plog_fchdr() (0, 1, 4), with
 * @pld_w0 appended as the fourth integer.
 */
void
bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
		      u32 pld_w0)
{
	u32	*tmp_int = (u32 *) fchdr;
	u32	ints[BFA_PL_INT_LOG_SZ];

	if (plog->plog_enabled) {
		ints[0] = tmp_int[0];
		ints[1] = tmp_int[1];
		ints[2] = tmp_int[4];
		ints[3] = pld_w0;

		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
	}
}
387 
388 
389 /*
390  *  fcxp_pvt BFA FCXP private functions
391  */
392 
/*
 * Carve the per-fcxp structure array out of the module's KVA block and
 * seed the free lists: the first half of the pool serves requests, the
 * second half serves responses.  Advances the module KVA cursor past
 * the claimed array.
 */
static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
{
	u16	i;
	struct bfa_fcxp_s *fcxp;

	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);

	INIT_LIST_HEAD(&mod->fcxp_req_free_q);
	INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
	INIT_LIST_HEAD(&mod->fcxp_active_q);
	INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
	INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);

	mod->fcxp_list = fcxp;

	for (i = 0; i < mod->num_fcxps; i++) {
		fcxp->fcxp_mod = mod;
		fcxp->fcxp_tag = i;

		/* first half -> request pool, second half -> response pool */
		if (i < (mod->num_fcxps / 2)) {
			list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
			fcxp->req_rsp = BFA_TRUE;
		} else {
			list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
			fcxp->req_rsp = BFA_FALSE;
		}

		/* arm the request-queue wait element for queue-full resume */
		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
		fcxp->reqq_waiting = BFA_FALSE;

		fcxp = fcxp + 1;
	}

	/* advance the module's KVA cursor past the fcxp array */
	bfa_mem_kva_curp(mod) = (void *)fcxp;
}
430 
/*
 * Register the FCXP module's memory claims: one req+rsp payload buffer
 * per fcxp (DMA, possibly split across several segments) plus the
 * bfa_fcxp_s array itself (KVA).
 */
void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	/* min-cfg: two small buffers; otherwise small req + large rsp */
	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	/* distribute the fcxp payload buffers across the DMA segments */
	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
468 
469 void
470 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
471 		struct bfa_pcidev_s *pcidev)
472 {
473 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
474 
475 	mod->bfa = bfa;
476 	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
477 
478 	/*
479 	 * Initialize FCXP request and response payload sizes.
480 	 */
481 	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
482 	if (!cfg->drvcfg.min_cfg)
483 		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
484 
485 	INIT_LIST_HEAD(&mod->req_wait_q);
486 	INIT_LIST_HEAD(&mod->rsp_wait_q);
487 
488 	claim_fcxps_mem(mod);
489 }
490 
/*
 * IOC failure handling: return unused fcxps to the free lists, then
 * fail every active fcxp with BFA_STATUS_IOC_FAILURE — synchronously
 * for internal users (no caller context), or via the completion queue
 * for external callers.
 */
void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head	      *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			/* internal user: complete and free immediately */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			/* external caller: defer completion to the cb queue */
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
515 
516 static struct bfa_fcxp_s *
517 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
518 {
519 	struct bfa_fcxp_s *fcxp;
520 
521 	if (req)
522 		bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
523 	else
524 		bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
525 
526 	if (fcxp)
527 		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
528 
529 	return fcxp;
530 }
531 
532 static void
533 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
534 	       struct bfa_s *bfa,
535 	       u8 *use_ibuf,
536 	       u32 *nr_sgles,
537 	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
538 	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
539 	       struct list_head *r_sgpg_q,
540 	       int n_sgles,
541 	       bfa_fcxp_get_sgaddr_t sga_cbfn,
542 	       bfa_fcxp_get_sglen_t sglen_cbfn)
543 {
544 
545 	WARN_ON(bfa == NULL);
546 
547 	bfa_trc(bfa, fcxp->fcxp_tag);
548 
549 	if (n_sgles == 0) {
550 		*use_ibuf = 1;
551 	} else {
552 		WARN_ON(*sga_cbfn == NULL);
553 		WARN_ON(*sglen_cbfn == NULL);
554 
555 		*use_ibuf = 0;
556 		*r_sga_cbfn = sga_cbfn;
557 		*r_sglen_cbfn = sglen_cbfn;
558 
559 		*nr_sgles = n_sgles;
560 
561 		/*
562 		 * alloc required sgpgs
563 		 */
564 		if (n_sgles > BFI_SGE_INLINE)
565 			WARN_ON(1);
566 	}
567 
568 }
569 
/*
 * Initialize a freshly allocated fcxp: record the caller context and
 * set up both the request and response SG configurations via
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* request side */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* response side */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
596 
/*
 * Return an fcxp to its pool.  If a waiter is queued for that pool the
 * fcxp is re-initialized with the waiter's parameters and handed
 * straight to it instead of being put back on the free list.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	/* pick a waiter from the matching (req vs rsp) wait queue */
	if (fcxp->req_rsp)
		bfa_q_deq(&mod->req_wait_q, &wqe);
	else
		bfa_q_deq(&mod->rsp_wait_q, &wqe);

	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		/* recycle the fcxp directly into the waiter's context */
		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	/* no waiter: move from the active queue back to the free list */
	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);

	if (fcxp->req_rsp)
		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
	else
		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}
628 
/*
 * No-op send completion installed when the caller supplies no callback;
 * the fcxp completion is simply discarded.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
636 
637 static void
638 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
639 {
640 	struct bfa_fcxp_s *fcxp = cbarg;
641 
642 	if (complete) {
643 		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
644 				fcxp->rsp_status, fcxp->rsp_len,
645 				fcxp->residue_len, &fcxp->rsp_fchs);
646 	} else {
647 		bfa_fcxp_free(fcxp);
648 	}
649 }
650 
/*
 * Firmware send-response handler: byte-swap the response fields, log
 * the received frame, then complete the fcxp — synchronously for
 * internal users (caller == NULL, fcxp freed here), or deferred via
 * the callback queue for external callers.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* stash the response and defer to the cb queue */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
700 
701 static void
702 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
703 		 struct fchs_s *fchs)
704 {
705 	/*
706 	 * TODO: TX ox_id
707 	 */
708 	if (reqlen > 0) {
709 		if (fcxp->use_ireqbuf) {
710 			u32	pld_w0 =
711 				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
712 
713 			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
714 					BFA_PL_EID_TX,
715 					reqlen + sizeof(struct fchs_s), fchs,
716 					pld_w0);
717 		} else {
718 			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
719 					BFA_PL_EID_TX,
720 					reqlen + sizeof(struct fchs_s),
721 					fchs);
722 		}
723 	} else {
724 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
725 			       reqlen + sizeof(struct fchs_s), fchs);
726 	}
727 }
728 
729 static void
730 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
731 		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
732 {
733 	if (fcxp_rsp->rsp_len > 0) {
734 		if (fcxp->use_irspbuf) {
735 			u32	pld_w0 =
736 				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
737 
738 			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
739 					      BFA_PL_EID_RX,
740 					      (u16) fcxp_rsp->rsp_len,
741 					      &fcxp_rsp->fchs, pld_w0);
742 		} else {
743 			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
744 				       BFA_PL_EID_RX,
745 				       (u16) fcxp_rsp->rsp_len,
746 				       &fcxp_rsp->fchs);
747 		}
748 	} else {
749 		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
750 			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
751 	}
752 }
753 
754 /*
755  * Handler to resume sending fcxp when space in available in cpe queue.
756  */
757 static void
758 bfa_fcxp_qresume(void *cbarg)
759 {
760 	struct bfa_fcxp_s		*fcxp = cbarg;
761 	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
762 	struct bfi_fcxp_send_req_s	*send_req;
763 
764 	fcxp->reqq_waiting = BFA_FALSE;
765 	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
766 	bfa_fcxp_queue(fcxp, send_req);
767 }
768 
769 /*
770  * Queue fcxp send request to foimrware.
771  */
772 static void
773 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
774 {
775 	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
776 	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
777 	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
778 	struct bfa_rport_s		*rport = reqi->bfa_rport;
779 
780 	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
781 		    bfa_fn_lpu(bfa));
782 
783 	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
784 	if (rport) {
785 		send_req->rport_fw_hndl = rport->fw_handle;
786 		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
787 		if (send_req->max_frmsz == 0)
788 			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
789 	} else {
790 		send_req->rport_fw_hndl = 0;
791 		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
792 	}
793 
794 	send_req->vf_id = cpu_to_be16(reqi->vf_id);
795 	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
796 	send_req->class = reqi->class;
797 	send_req->rsp_timeout = rspi->rsp_timeout;
798 	send_req->cts = reqi->cts;
799 	send_req->fchs = reqi->fchs;
800 
801 	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
802 	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
803 
804 	/*
805 	 * setup req sgles
806 	 */
807 	if (fcxp->use_ireqbuf == 1) {
808 		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
809 					BFA_FCXP_REQ_PLD_PA(fcxp));
810 	} else {
811 		if (fcxp->nreq_sgles > 0) {
812 			WARN_ON(fcxp->nreq_sgles != 1);
813 			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
814 				fcxp->req_sga_cbfn(fcxp->caller, 0));
815 		} else {
816 			WARN_ON(reqi->req_tot_len != 0);
817 			bfa_alen_set(&send_req->rsp_alen, 0, 0);
818 		}
819 	}
820 
821 	/*
822 	 * setup rsp sgles
823 	 */
824 	if (fcxp->use_irspbuf == 1) {
825 		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
826 
827 		bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
828 					BFA_FCXP_RSP_PLD_PA(fcxp));
829 	} else {
830 		if (fcxp->nrsp_sgles > 0) {
831 			WARN_ON(fcxp->nrsp_sgles != 1);
832 			bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
833 				fcxp->rsp_sga_cbfn(fcxp->caller, 0));
834 
835 		} else {
836 			WARN_ON(rspi->rsp_maxlen != 0);
837 			bfa_alen_set(&send_req->rsp_alen, 0, 0);
838 		}
839 	}
840 
841 	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
842 
843 	bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
844 
845 	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
846 	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
847 }
848 
849 /*
850  * Allocate an FCXP instance to send a response or to send a request
851  * that has a response. Request/response buffers are allocated by caller.
852  *
853  * @param[in]	bfa		BFA bfa instance
854  * @param[in]	nreq_sgles	Number of SG elements required for request
855  *				buffer. 0, if fcxp internal buffers are	used.
856  *				Use bfa_fcxp_get_reqbuf() to get the
857  *				internal req buffer.
858  * @param[in]	req_sgles	SG elements describing request buffer. Will be
859  *				copied in by BFA and hence can be freed on
860  *				return from this function.
861  * @param[in]	get_req_sga	function ptr to be called to get a request SG
862  *				Address (given the sge index).
863  * @param[in]	get_req_sglen	function ptr to be called to get a request SG
864  *				len (given the sge index).
865  * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
866  *				Address (given the sge index).
867  * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
868  *				len (given the sge index).
869  * @param[in]	req		Allocated FCXP is used to send req or rsp?
870  *				request - BFA_TRUE, response - BFA_FALSE
871  *
872  * @return FCXP instance. NULL on failure.
873  */
874 struct bfa_fcxp_s *
875 bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
876 		int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
877 		bfa_fcxp_get_sglen_t req_sglen_cbfn,
878 		bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
879 		bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
880 {
881 	struct bfa_fcxp_s *fcxp = NULL;
882 
883 	WARN_ON(bfa == NULL);
884 
885 	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
886 	if (fcxp == NULL)
887 		return NULL;
888 
889 	bfa_trc(bfa, fcxp->fcxp_tag);
890 
891 	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
892 			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
893 
894 	return fcxp;
895 }
896 
897 /*
898  * Get the internal request buffer pointer
899  *
900  * @param[in]	fcxp	BFA fcxp pointer
901  *
902  * @return		pointer to the internal request buffer
903  */
904 void *
905 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
906 {
907 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
908 	void	*reqbuf;
909 
910 	WARN_ON(fcxp->use_ireqbuf != 1);
911 	reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
912 				mod->req_pld_sz + mod->rsp_pld_sz);
913 	return reqbuf;
914 }
915 
/*
 * Get the internal response buffer pointer.  Only valid when the fcxp
 * was allocated with nrsp_sgles == 0 (internal buffer mode).
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		pointer to the internal response buffer
 */
void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	void	*fcxp_buf;

	WARN_ON(fcxp->use_irspbuf != 1);

	fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
				mod->req_pld_sz + mod->rsp_pld_sz);

	/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
	return ((u8 *) fcxp_buf) + mod->req_pld_sz;
}
937 
938 /*
939  * Free the BFA FCXP
940  *
941  * @param[in]	fcxp			BFA fcxp pointer
942  *
943  * @return		void
944  */
945 void
946 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
947 {
948 	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
949 
950 	WARN_ON(fcxp == NULL);
951 	bfa_trc(mod->bfa, fcxp->fcxp_tag);
952 	bfa_fcxp_put(fcxp);
953 }
954 
/*
 * Send a FCXP request
 *
 * @param[in]	fcxp	BFA fcxp pointer
 * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]	vf_id	virtual Fabric ID
 * @param[in]	lp_tag	lport tag
 * @param[in]	cts	use Continuous sequence
 * @param[in]	cos	fc Class of Service
 * @param[in]	reqlen	request length, does not include FCHS length
 * @param[in]	fchs	fc Header Pointer. The header content will be copied
 *			in by BFA.
 *
 * @param[in]	cbfn	call back function to be called on receiving
 *								the response
 * @param[in]	cbarg	arg for cbfn
 * @param[in]	rsp_maxlen
 *			maximum expected response length
 * @param[in]	rsp_timeout
 *			response timeout
 *
 * @return		void
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* substitute a discard callback when the caller passed none */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		/* bfa_fcxp_qresume() will queue the fcxp when space opens */
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
1017 
1018 void
1019 bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1020 	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1021 	       void *caller, int nreq_sgles,
1022 	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1023 	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
1024 	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1025 	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
1026 {
1027 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1028 
1029 	if (req)
1030 		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1031 	else
1032 		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
1033 
1034 	wqe->alloc_cbfn = alloc_cbfn;
1035 	wqe->alloc_cbarg = alloc_cbarg;
1036 	wqe->caller = caller;
1037 	wqe->bfa = bfa;
1038 	wqe->nreq_sgles = nreq_sgles;
1039 	wqe->nrsp_sgles = nrsp_sgles;
1040 	wqe->req_sga_cbfn = req_sga_cbfn;
1041 	wqe->req_sglen_cbfn = req_sglen_cbfn;
1042 	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1043 	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1044 
1045 	if (req)
1046 		list_add_tail(&wqe->qe, &mod->req_wait_q);
1047 	else
1048 		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
1049 }
1050 
1051 void
1052 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1053 {
1054 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1055 
1056 	WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1057 		!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1058 	list_del(&wqe->qe);
1059 }
1060 
/*
 * Discard an in-flight fcxp: either cancel it before it reaches the
 * firmware, or suppress its completion callback if it is already sent.
 */
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/*
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	/* Already with the firmware: just drop the caller's callback */
	fcxp->send_cbfn = bfa_fcxp_null_comp;
}
1077 
1078 void
1079 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1080 {
1081 	switch (msg->mhdr.msg_id) {
1082 	case BFI_FCXP_I2H_SEND_RSP:
1083 		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1084 		break;
1085 
1086 	default:
1087 		bfa_trc(bfa, msg->mhdr.msg_id);
1088 		WARN_ON(1);
1089 	}
1090 }
1091 
1092 u32
1093 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1094 {
1095 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1096 
1097 	return mod->rsp_pld_sz;
1098 }
1099 
1100 void
1101 bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1102 {
1103 	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
1104 	struct list_head	*qe;
1105 	int	i;
1106 
1107 	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1108 		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1109 			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1110 			list_add_tail(qe, &mod->fcxp_req_unused_q);
1111 		} else {
1112 			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1113 			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1114 		}
1115 	}
1116 }
1117 
1118 /*
1119  *  BFA LPS state machine functions
1120  */
1121 
1122 /*
1123  * Init state -- no login
1124  */
1125 static void
1126 bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1127 {
1128 	bfa_trc(lps->bfa, lps->bfa_tag);
1129 	bfa_trc(lps->bfa, event);
1130 
1131 	switch (event) {
1132 	case BFA_LPS_SM_LOGIN:
1133 		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1134 			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1135 			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1136 		} else {
1137 			bfa_sm_set_state(lps, bfa_lps_sm_login);
1138 			bfa_lps_send_login(lps);
1139 		}
1140 
1141 		if (lps->fdisc)
1142 			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1143 				BFA_PL_EID_LOGIN, 0, "FDISC Request");
1144 		else
1145 			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1146 				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1147 		break;
1148 
1149 	case BFA_LPS_SM_LOGOUT:
1150 		bfa_lps_logout_comp(lps);
1151 		break;
1152 
1153 	case BFA_LPS_SM_DELETE:
1154 		bfa_lps_free(lps);
1155 		break;
1156 
1157 	case BFA_LPS_SM_RX_CVL:
1158 	case BFA_LPS_SM_OFFLINE:
1159 		break;
1160 
1161 	case BFA_LPS_SM_FWRSP:
1162 		/*
1163 		 * Could happen when fabric detects loopback and discards
1164 		 * the lps request. Fw will eventually sent out the timeout
1165 		 * Just ignore
1166 		 */
1167 		break;
1168 	case BFA_LPS_SM_SET_N2N_PID:
1169 		/*
1170 		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
1171 		 * this event. Ignore this event.
1172 		 */
1173 		break;
1174 
1175 	default:
1176 		bfa_sm_fault(lps->bfa, event);
1177 	}
1178 }
1179 
1180 /*
1181  * login is in progress -- awaiting response from firmware
1182  */
1183 static void
1184 bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1185 {
1186 	bfa_trc(lps->bfa, lps->bfa_tag);
1187 	bfa_trc(lps->bfa, event);
1188 
1189 	switch (event) {
1190 	case BFA_LPS_SM_FWRSP:
1191 		if (lps->status == BFA_STATUS_OK) {
1192 			bfa_sm_set_state(lps, bfa_lps_sm_online);
1193 			if (lps->fdisc)
1194 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1195 					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1196 			else
1197 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1198 					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1199 			/* If N2N, send the assigned PID to FW */
1200 			bfa_trc(lps->bfa, lps->fport);
1201 			bfa_trc(lps->bfa, lps->lp_pid);
1202 
1203 			if (!lps->fport && lps->lp_pid)
1204 				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1205 		} else {
1206 			bfa_sm_set_state(lps, bfa_lps_sm_init);
1207 			if (lps->fdisc)
1208 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1209 					BFA_PL_EID_LOGIN, 0,
1210 					"FDISC Fail (RJT or timeout)");
1211 			else
1212 				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1213 					BFA_PL_EID_LOGIN, 0,
1214 					"FLOGI Fail (RJT or timeout)");
1215 		}
1216 		bfa_lps_login_comp(lps);
1217 		break;
1218 
1219 	case BFA_LPS_SM_OFFLINE:
1220 	case BFA_LPS_SM_DELETE:
1221 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1222 		break;
1223 
1224 	case BFA_LPS_SM_SET_N2N_PID:
1225 		bfa_trc(lps->bfa, lps->fport);
1226 		bfa_trc(lps->bfa, lps->lp_pid);
1227 		break;
1228 
1229 	default:
1230 		bfa_sm_fault(lps->bfa, event);
1231 	}
1232 }
1233 
1234 /*
1235  * login pending - awaiting space in request queue
1236  */
1237 static void
1238 bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1239 {
1240 	bfa_trc(lps->bfa, lps->bfa_tag);
1241 	bfa_trc(lps->bfa, event);
1242 
1243 	switch (event) {
1244 	case BFA_LPS_SM_RESUME:
1245 		bfa_sm_set_state(lps, bfa_lps_sm_login);
1246 		bfa_lps_send_login(lps);
1247 		break;
1248 
1249 	case BFA_LPS_SM_OFFLINE:
1250 	case BFA_LPS_SM_DELETE:
1251 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1252 		bfa_reqq_wcancel(&lps->wqe);
1253 		break;
1254 
1255 	case BFA_LPS_SM_RX_CVL:
1256 		/*
1257 		 * Login was not even sent out; so when getting out
1258 		 * of this state, it will appear like a login retry
1259 		 * after Clear virtual link
1260 		 */
1261 		break;
1262 
1263 	default:
1264 		bfa_sm_fault(lps->bfa, event);
1265 	}
1266 }
1267 
1268 /*
1269  * login complete
1270  */
1271 static void
1272 bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1273 {
1274 	bfa_trc(lps->bfa, lps->bfa_tag);
1275 	bfa_trc(lps->bfa, event);
1276 
1277 	switch (event) {
1278 	case BFA_LPS_SM_LOGOUT:
1279 		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1280 			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1281 			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1282 		} else {
1283 			bfa_sm_set_state(lps, bfa_lps_sm_logout);
1284 			bfa_lps_send_logout(lps);
1285 		}
1286 		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1287 			BFA_PL_EID_LOGO, 0, "Logout");
1288 		break;
1289 
1290 	case BFA_LPS_SM_RX_CVL:
1291 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1292 
1293 		/* Let the vport module know about this event */
1294 		bfa_lps_cvl_event(lps);
1295 		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1296 			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1297 		break;
1298 
1299 	case BFA_LPS_SM_SET_N2N_PID:
1300 		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1301 			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
1302 			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1303 		} else
1304 			bfa_lps_send_set_n2n_pid(lps);
1305 		break;
1306 
1307 	case BFA_LPS_SM_OFFLINE:
1308 	case BFA_LPS_SM_DELETE:
1309 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1310 		break;
1311 
1312 	default:
1313 		bfa_sm_fault(lps->bfa, event);
1314 	}
1315 }
1316 
1317 /*
1318  * login complete
1319  */
1320 static void
1321 bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1322 {
1323 	bfa_trc(lps->bfa, lps->bfa_tag);
1324 	bfa_trc(lps->bfa, event);
1325 
1326 	switch (event) {
1327 	case BFA_LPS_SM_RESUME:
1328 		bfa_sm_set_state(lps, bfa_lps_sm_online);
1329 		bfa_lps_send_set_n2n_pid(lps);
1330 		break;
1331 
1332 	case BFA_LPS_SM_LOGOUT:
1333 		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1334 		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1335 			BFA_PL_EID_LOGO, 0, "Logout");
1336 		break;
1337 
1338 	case BFA_LPS_SM_RX_CVL:
1339 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1340 		bfa_reqq_wcancel(&lps->wqe);
1341 
1342 		/* Let the vport module know about this event */
1343 		bfa_lps_cvl_event(lps);
1344 		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1345 			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1346 		break;
1347 
1348 	case BFA_LPS_SM_OFFLINE:
1349 	case BFA_LPS_SM_DELETE:
1350 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1351 		bfa_reqq_wcancel(&lps->wqe);
1352 		break;
1353 
1354 	default:
1355 		bfa_sm_fault(lps->bfa, event);
1356 	}
1357 }
1358 
1359 /*
1360  * logout in progress - awaiting firmware response
1361  */
1362 static void
1363 bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1364 {
1365 	bfa_trc(lps->bfa, lps->bfa_tag);
1366 	bfa_trc(lps->bfa, event);
1367 
1368 	switch (event) {
1369 	case BFA_LPS_SM_FWRSP:
1370 	case BFA_LPS_SM_OFFLINE:
1371 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1372 		bfa_lps_logout_comp(lps);
1373 		break;
1374 
1375 	case BFA_LPS_SM_DELETE:
1376 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1377 		break;
1378 
1379 	default:
1380 		bfa_sm_fault(lps->bfa, event);
1381 	}
1382 }
1383 
1384 /*
1385  * logout pending -- awaiting space in request queue
1386  */
1387 static void
1388 bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1389 {
1390 	bfa_trc(lps->bfa, lps->bfa_tag);
1391 	bfa_trc(lps->bfa, event);
1392 
1393 	switch (event) {
1394 	case BFA_LPS_SM_RESUME:
1395 		bfa_sm_set_state(lps, bfa_lps_sm_logout);
1396 		bfa_lps_send_logout(lps);
1397 		break;
1398 
1399 	case BFA_LPS_SM_OFFLINE:
1400 	case BFA_LPS_SM_DELETE:
1401 		bfa_sm_set_state(lps, bfa_lps_sm_init);
1402 		bfa_reqq_wcancel(&lps->wqe);
1403 		break;
1404 
1405 	default:
1406 		bfa_sm_fault(lps->bfa, event);
1407 	}
1408 }
1409 
1410 
1411 
1412 /*
1413  *  lps_pvt BFA LPS private functions
1414  */
1415 
1416 /*
1417  * return memory requirement
1418  */
1419 void
1420 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1421 		struct bfa_s *bfa)
1422 {
1423 	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1424 
1425 	if (cfg->drvcfg.min_cfg)
1426 		bfa_mem_kva_setup(minfo, lps_kva,
1427 			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1428 	else
1429 		bfa_mem_kva_setup(minfo, lps_kva,
1430 			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1431 }
1432 
1433 /*
1434  * bfa module attach at initialization time
1435  */
1436 void
1437 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1438 	struct bfa_pcidev_s *pcidev)
1439 {
1440 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1441 	struct bfa_lps_s	*lps;
1442 	int			i;
1443 
1444 	mod->num_lps = BFA_LPS_MAX_LPORTS;
1445 	if (cfg->drvcfg.min_cfg)
1446 		mod->num_lps = BFA_LPS_MIN_LPORTS;
1447 	else
1448 		mod->num_lps = BFA_LPS_MAX_LPORTS;
1449 	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1450 
1451 	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1452 
1453 	INIT_LIST_HEAD(&mod->lps_free_q);
1454 	INIT_LIST_HEAD(&mod->lps_active_q);
1455 	INIT_LIST_HEAD(&mod->lps_login_q);
1456 
1457 	for (i = 0; i < mod->num_lps; i++, lps++) {
1458 		lps->bfa	= bfa;
1459 		lps->bfa_tag	= (u8) i;
1460 		lps->reqq	= BFA_REQQ_LPS;
1461 		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1462 		list_add_tail(&lps->qe, &mod->lps_free_q);
1463 	}
1464 }
1465 
1466 /*
1467  * IOC in disabled state -- consider all lps offline
1468  */
1469 void
1470 bfa_lps_iocdisable(struct bfa_s *bfa)
1471 {
1472 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1473 	struct bfa_lps_s	*lps;
1474 	struct list_head		*qe, *qen;
1475 
1476 	list_for_each_safe(qe, qen, &mod->lps_active_q) {
1477 		lps = (struct bfa_lps_s *) qe;
1478 		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1479 	}
1480 	list_for_each_safe(qe, qen, &mod->lps_login_q) {
1481 		lps = (struct bfa_lps_s *) qe;
1482 		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1483 	}
1484 	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
1485 }
1486 
1487 /*
1488  * Firmware login response
1489  */
1490 static void
1491 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1492 {
1493 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1494 	struct bfa_lps_s	*lps;
1495 
1496 	WARN_ON(rsp->bfa_tag >= mod->num_lps);
1497 	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1498 
1499 	lps->status = rsp->status;
1500 	switch (rsp->status) {
1501 	case BFA_STATUS_OK:
1502 		lps->fw_tag	= rsp->fw_tag;
1503 		lps->fport	= rsp->f_port;
1504 		if (lps->fport)
1505 			lps->lp_pid = rsp->lp_pid;
1506 		lps->npiv_en	= rsp->npiv_en;
1507 		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
1508 		lps->pr_pwwn	= rsp->port_name;
1509 		lps->pr_nwwn	= rsp->node_name;
1510 		lps->auth_req	= rsp->auth_req;
1511 		lps->lp_mac	= rsp->lp_mac;
1512 		lps->brcd_switch = rsp->brcd_switch;
1513 		lps->fcf_mac	= rsp->fcf_mac;
1514 
1515 		break;
1516 
1517 	case BFA_STATUS_FABRIC_RJT:
1518 		lps->lsrjt_rsn = rsp->lsrjt_rsn;
1519 		lps->lsrjt_expl = rsp->lsrjt_expl;
1520 
1521 		break;
1522 
1523 	case BFA_STATUS_EPROTOCOL:
1524 		lps->ext_status = rsp->ext_status;
1525 
1526 		break;
1527 
1528 	case BFA_STATUS_VPORT_MAX:
1529 		if (rsp->ext_status)
1530 			bfa_lps_no_res(lps, rsp->ext_status);
1531 		break;
1532 
1533 	default:
1534 		/* Nothing to do with other status */
1535 		break;
1536 	}
1537 
1538 	list_del(&lps->qe);
1539 	list_add_tail(&lps->qe, &mod->lps_active_q);
1540 	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1541 }
1542 
1543 static void
1544 bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1545 {
1546 	struct bfa_s		*bfa = first_lps->bfa;
1547 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1548 	struct list_head	*qe, *qe_next;
1549 	struct bfa_lps_s	*lps;
1550 
1551 	bfa_trc(bfa, count);
1552 
1553 	qe = bfa_q_next(first_lps);
1554 
1555 	while (count && qe) {
1556 		qe_next = bfa_q_next(qe);
1557 		lps = (struct bfa_lps_s *)qe;
1558 		bfa_trc(bfa, lps->bfa_tag);
1559 		lps->status = first_lps->status;
1560 		list_del(&lps->qe);
1561 		list_add_tail(&lps->qe, &mod->lps_active_q);
1562 		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1563 		qe = qe_next;
1564 		count--;
1565 	}
1566 }
1567 
1568 /*
1569  * Firmware logout response
1570  */
1571 static void
1572 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1573 {
1574 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1575 	struct bfa_lps_s	*lps;
1576 
1577 	WARN_ON(rsp->bfa_tag >= mod->num_lps);
1578 	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1579 
1580 	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1581 }
1582 
1583 /*
1584  * Firmware received a Clear virtual link request (for FCoE)
1585  */
1586 static void
1587 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1588 {
1589 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1590 	struct bfa_lps_s	*lps;
1591 
1592 	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1593 
1594 	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1595 }
1596 
1597 /*
1598  * Space is available in request queue, resume queueing request to firmware.
1599  */
1600 static void
1601 bfa_lps_reqq_resume(void *lps_arg)
1602 {
1603 	struct bfa_lps_s	*lps = lps_arg;
1604 
1605 	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1606 }
1607 
1608 /*
1609  * lps is freed -- triggered by vport delete
1610  */
1611 static void
1612 bfa_lps_free(struct bfa_lps_s *lps)
1613 {
1614 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
1615 
1616 	lps->lp_pid = 0;
1617 	list_del(&lps->qe);
1618 	list_add_tail(&lps->qe, &mod->lps_free_q);
1619 }
1620 
1621 /*
1622  * send login request to firmware
1623  */
1624 static void
1625 bfa_lps_send_login(struct bfa_lps_s *lps)
1626 {
1627 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
1628 	struct bfi_lps_login_req_s	*m;
1629 
1630 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1631 	WARN_ON(!m);
1632 
1633 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1634 		bfa_fn_lpu(lps->bfa));
1635 
1636 	m->bfa_tag	= lps->bfa_tag;
1637 	m->alpa		= lps->alpa;
1638 	m->pdu_size	= cpu_to_be16(lps->pdusz);
1639 	m->pwwn		= lps->pwwn;
1640 	m->nwwn		= lps->nwwn;
1641 	m->fdisc	= lps->fdisc;
1642 	m->auth_en	= lps->auth_en;
1643 
1644 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1645 	list_del(&lps->qe);
1646 	list_add_tail(&lps->qe, &mod->lps_login_q);
1647 }
1648 
1649 /*
1650  * send logout request to firmware
1651  */
1652 static void
1653 bfa_lps_send_logout(struct bfa_lps_s *lps)
1654 {
1655 	struct bfi_lps_logout_req_s *m;
1656 
1657 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1658 	WARN_ON(!m);
1659 
1660 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1661 		bfa_fn_lpu(lps->bfa));
1662 
1663 	m->fw_tag = lps->fw_tag;
1664 	m->port_name = lps->pwwn;
1665 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1666 }
1667 
1668 /*
1669  * send n2n pid set request to firmware
1670  */
1671 static void
1672 bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1673 {
1674 	struct bfi_lps_n2n_pid_req_s *m;
1675 
1676 	m = bfa_reqq_next(lps->bfa, lps->reqq);
1677 	WARN_ON(!m);
1678 
1679 	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1680 		bfa_fn_lpu(lps->bfa));
1681 
1682 	m->fw_tag = lps->fw_tag;
1683 	m->lp_pid = lps->lp_pid;
1684 	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1685 }
1686 
1687 /*
1688  * Indirect login completion handler for non-fcs
1689  */
1690 static void
1691 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1692 {
1693 	struct bfa_lps_s *lps	= arg;
1694 
1695 	if (!complete)
1696 		return;
1697 
1698 	if (lps->fdisc)
1699 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1700 	else
1701 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1702 }
1703 
1704 /*
1705  * Login completion handler -- direct call for fcs, queue for others
1706  */
1707 static void
1708 bfa_lps_login_comp(struct bfa_lps_s *lps)
1709 {
1710 	if (!lps->bfa->fcs) {
1711 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1712 			lps);
1713 		return;
1714 	}
1715 
1716 	if (lps->fdisc)
1717 		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1718 	else
1719 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1720 }
1721 
1722 /*
1723  * Indirect logout completion handler for non-fcs
1724  */
1725 static void
1726 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1727 {
1728 	struct bfa_lps_s *lps	= arg;
1729 
1730 	if (!complete)
1731 		return;
1732 
1733 	if (lps->fdisc)
1734 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1735 	else
1736 		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
1737 }
1738 
1739 /*
1740  * Logout completion handler -- direct call for fcs, queue for others
1741  */
1742 static void
1743 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1744 {
1745 	if (!lps->bfa->fcs) {
1746 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1747 			lps);
1748 		return;
1749 	}
1750 	if (lps->fdisc)
1751 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1752 }
1753 
1754 /*
1755  * Clear virtual link completion handler for non-fcs
1756  */
1757 static void
1758 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1759 {
1760 	struct bfa_lps_s *lps	= arg;
1761 
1762 	if (!complete)
1763 		return;
1764 
1765 	/* Clear virtual link to base port will result in link down */
1766 	if (lps->fdisc)
1767 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1768 }
1769 
1770 /*
1771  * Received Clear virtual link event --direct call for fcs,
1772  * queue for others
1773  */
1774 static void
1775 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1776 {
1777 	if (!lps->bfa->fcs) {
1778 		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1779 			lps);
1780 		return;
1781 	}
1782 
1783 	/* Clear virtual link to base port will result in link down */
1784 	if (lps->fdisc)
1785 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1786 }
1787 
1788 
1789 
1790 /*
1791  *  lps_public BFA LPS public functions
1792  */
1793 
1794 u32
1795 bfa_lps_get_max_vport(struct bfa_s *bfa)
1796 {
1797 	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1798 		return BFA_LPS_MAX_VPORTS_SUPP_CT;
1799 	else
1800 		return BFA_LPS_MAX_VPORTS_SUPP_CB;
1801 }
1802 
1803 /*
1804  * Allocate a lport srvice tag.
1805  */
1806 struct bfa_lps_s  *
1807 bfa_lps_alloc(struct bfa_s *bfa)
1808 {
1809 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1810 	struct bfa_lps_s	*lps = NULL;
1811 
1812 	bfa_q_deq(&mod->lps_free_q, &lps);
1813 
1814 	if (lps == NULL)
1815 		return NULL;
1816 
1817 	list_add_tail(&lps->qe, &mod->lps_active_q);
1818 
1819 	bfa_sm_set_state(lps, bfa_lps_sm_init);
1820 	return lps;
1821 }
1822 
1823 /*
1824  * Free lport service tag. This can be called anytime after an alloc.
1825  * No need to wait for any pending login/logout completions.
1826  */
1827 void
1828 bfa_lps_delete(struct bfa_lps_s *lps)
1829 {
1830 	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1831 }
1832 
1833 /*
1834  * Initiate a lport login.
1835  */
1836 void
1837 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1838 	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1839 {
1840 	lps->uarg	= uarg;
1841 	lps->alpa	= alpa;
1842 	lps->pdusz	= pdusz;
1843 	lps->pwwn	= pwwn;
1844 	lps->nwwn	= nwwn;
1845 	lps->fdisc	= BFA_FALSE;
1846 	lps->auth_en	= auth_en;
1847 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1848 }
1849 
1850 /*
1851  * Initiate a lport fdisc login.
1852  */
1853 void
1854 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1855 	wwn_t nwwn)
1856 {
1857 	lps->uarg	= uarg;
1858 	lps->alpa	= 0;
1859 	lps->pdusz	= pdusz;
1860 	lps->pwwn	= pwwn;
1861 	lps->nwwn	= nwwn;
1862 	lps->fdisc	= BFA_TRUE;
1863 	lps->auth_en	= BFA_FALSE;
1864 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1865 }
1866 
1867 
1868 /*
1869  * Initiate a lport FDSIC logout.
1870  */
1871 void
1872 bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1873 {
1874 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1875 }
1876 
1877 u8
1878 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1879 {
1880 	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1881 
1882 	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1883 }
1884 
1885 /*
1886  * Return lport services tag given the pid
1887  */
1888 u8
1889 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1890 {
1891 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1892 	struct bfa_lps_s	*lps;
1893 	int			i;
1894 
1895 	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1896 		if (lps->lp_pid == pid)
1897 			return lps->bfa_tag;
1898 	}
1899 
1900 	/* Return base port tag anyway */
1901 	return 0;
1902 }
1903 
1904 
1905 /*
1906  * return port id assigned to the base lport
1907  */
1908 u32
1909 bfa_lps_get_base_pid(struct bfa_s *bfa)
1910 {
1911 	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1912 
1913 	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1914 }
1915 
1916 /*
1917  * Set PID in case of n2n (which is assigned during PLOGI)
1918  */
1919 void
1920 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
1921 {
1922 	bfa_trc(lps->bfa, lps->bfa_tag);
1923 	bfa_trc(lps->bfa, n2n_pid);
1924 
1925 	lps->lp_pid = n2n_pid;
1926 	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
1927 }
1928 
1929 /*
1930  * LPS firmware message class handler.
1931  */
1932 void
1933 bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1934 {
1935 	union bfi_lps_i2h_msg_u	msg;
1936 
1937 	bfa_trc(bfa, m->mhdr.msg_id);
1938 	msg.msg = m;
1939 
1940 	switch (m->mhdr.msg_id) {
1941 	case BFI_LPS_I2H_LOGIN_RSP:
1942 		bfa_lps_login_rsp(bfa, msg.login_rsp);
1943 		break;
1944 
1945 	case BFI_LPS_I2H_LOGOUT_RSP:
1946 		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
1947 		break;
1948 
1949 	case BFI_LPS_I2H_CVL_EVENT:
1950 		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
1951 		break;
1952 
1953 	default:
1954 		bfa_trc(bfa, m->mhdr.msg_id);
1955 		WARN_ON(1);
1956 	}
1957 }
1958 
/*
 * Post a port-category asynchronous event notification (AEN) to the
 * IM driver.  Silently drops the event when no AEN entry is available.
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;

	/* aen_entry is filled in by the macro; may come back NULL */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
1976 
1977 /*
1978  * FC PORT state machine functions
1979  */
/*
 * Uninitialized state: waiting for the START event that follows IOC
 * configuration, or for a persistent enable/disable configuration.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		/* Send enable now, or wait for request-queue space */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2027 
/*
 * Enable requested but awaiting space in the request queue to send the
 * enable message to the firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2091 
/*
 * Enable sent to firmware -- awaiting the firmware response or the
 * first link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enabled but link not yet up */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send disable now, or wait for request-queue space */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2153 
/*
 * Port is enabled; physical link is down. Wait for a link up event
 * from firmware.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		/* Latch speed/topology/QoS from the firmware event first */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
			/* FCoE mode: record FIP FCF discovery outcome */

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		/* Notify upper layers, then log/post AEN for the base port */
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue-full fallback: wait in disabling_qwait for reqq space */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2245 
/*
 * Port is enabled and link is up.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue-full fallback: wait in disabling_qwait for reqq space */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		/* Disabling an online port implies a link down transition */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Expected offline vs. unexpected loss of connectivity */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2344 
/*
 * A disable request is pending; waiting for request-queue space to
 * send the disable message to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available now -- send the deferred disable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Abandon the queued wait before stopping */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable arrived while disable is pending: toggle */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2395 
/*
 * Disable followed by enable is pending; waiting for request-queue
 * space to send both messages back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the deferred disable, then the enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable is already the pending terminal action */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable; fall back to plain disable wait */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2442 
/*
 * Disable request has been sent to firmware; waiting for the
 * firmware response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable while disable is in flight */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2497 
/*
 * Port is disabled. Firmware has acknowledged the disable.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Queue-full fallback: wait in enabling_qwait for reqq space */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
		/* Enter diagnostic (D-Port) mode */
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;

	case BFA_FCPORT_SM_DDPORTENABLE:
		/* Enter dynamic diagnostic (DD-Port) mode */
		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2554 
/*
 * Port is stopped. Only a START event re-activates it.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* Queue-full fallback: wait in enabling_qwait for reqq space */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
2577 
2578 /*
2579  * Port is enabled. IOC is down/failed.
2580  */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC recovered; resume by re-sending port enable */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2603 
2604 /*
2605  * Port is disabled. IOC is down/failed.
2606  */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC back; port stays administratively disabled */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Remember the enable request; acted on when IOC restarts */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2629 
/*
 * Port is in diagnostic (D-Port) mode.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTDISABLE:
		/* Leave diagnostic mode; port returns disabled */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2661 
/*
 * Port is in dynamic diagnostic (DD-Port) mode.
 */
static void
bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_DDPORTDISABLE:
		/* Leave diagnostic mode; port returns disabled */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DPORTDISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is ddport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2695 
/*
 * Port detected an FAA misconfiguration; held down until disabled,
 * stopped, or the IOC fails.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
			    enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port as there is FAA misconfig
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Queue-full fallback: wait in disabling_qwait for reqq space */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* Report link down to upper layers before marking IOC down */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2738 
2739 /*
2740  * Link state is down
2741  */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Queue the linkup callback; wait for its completion notice */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2758 
2759 /*
2760  * Link state is waiting for down notification
2761  */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Remember the pending up until the down callback completes */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback delivered; settle in the down state */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2781 
2782 /*
2783  * Link state is waiting for down notification and there is a pending up
2784  */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Pending up cancelled by a new down; back to plain dn_nf */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down delivered; now deliver the deferred linkup */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2805 
2806 /*
2807  * Link state is up
2808  */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Queue the linkdown callback; wait for its completion notice */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2825 
2826 /*
2827  * Link state is waiting for up notification
2828  */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Remember the pending down until the up callback completes */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback delivered; settle in the up state */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2848 
2849 /*
2850  * Link state is waiting for up notification and there is a pending down
2851  */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* A further up arrives on top of the pending down */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up delivered; now deliver the deferred linkdown */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2872 
2873 /*
2874  * Link state is waiting for up notification and there are pending down and up
2875  */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Pending up cancelled by a new down */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up delivered; deliver the deferred down, up still pending */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2896 
/*
 * Deferred-callback trampoline for link notifications queued via
 * bfa_fcport_queue_cb(). On completion invoke the registered event
 * callback with the saved event; on cancel tell the ln state machine
 * the notification cycle is over.
 */
static void
__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_ln_s *ln = cbarg;

	if (complete)
		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
	else
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
}
2907 
2908 /*
2909  * Send SCN notification to upper layers.
2910  * trunk - false if caller is fcport to ignore fcport event in trunked mode
2911  */
2912 static void
2913 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2914 	bfa_boolean_t trunk)
2915 {
2916 	if (fcport->cfg.trunked && !trunk)
2917 		return;
2918 
2919 	switch (event) {
2920 	case BFA_PORT_LINKUP:
2921 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2922 		break;
2923 	case BFA_PORT_LINKDOWN:
2924 		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2925 		break;
2926 	default:
2927 		WARN_ON(1);
2928 	}
2929 }
2930 
/*
 * Deliver a link state event to the upper layer. With FCS attached the
 * callback runs inline and the notification completes immediately;
 * otherwise the event is saved and delivered via the deferred callback
 * queue (__bfa_cb_fcport_event).
 */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
{
	struct bfa_fcport_s *fcport = ln->fcport;

	if (fcport->bfa->fcs) {
		fcport->event_cbfn(fcport->event_cbarg, event);
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
	} else {
		ln->ln_event = event;
		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
			__bfa_cb_fcport_event, ln);
	}
}
2945 
/* DMA buffer size for firmware port stats, rounded to cacheline */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
							BFA_CACHELINE_SZ))

/*
 * Register the fcport DMA memory requirement (stats buffer) with the
 * IOC memory allocator.
 */
void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		   struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);

	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}
2957 
/*
 * Request queue space became available; resume a pending enable/disable
 * by kicking the port state machine.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
2965 
/*
 * Claim the DMA-able stats buffer set up in bfa_fcport_meminfo() and
 * record both its kernel virtual and physical addresses.
 */
static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
{
	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;

	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
	fcport->stats = (union bfa_fcport_stats_u *)
				bfa_mem_dma_virt(fcport_dma);
}
2976 
2977 /*
2978  * Memory initialization.
2979  */
void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	/* Claim stats DMA memory before the state machines start */
	bfa_fcport_mem_claim(fcport);

	/* Port starts uninitialized; link-notify machine starts down */
	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	fcport->stats_reset_time = ktime_get_seconds();
	fcport->stats_dma_ready = BFA_FALSE;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;

	fcport->fec_state = BFA_FEC_OFFLINE;

	/* Lists of waiters for stats get / stats clear completions */
	INIT_LIST_HEAD(&fcport->stats_pending_q);
	INIT_LIST_HEAD(&fcport->statsclr_pending_q);

	/* Wait-queue element used when the request queue is full */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
3022 
/*
 * Start the FC port; drives the state machine out of its initial state.
 */
void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
3028 
3029 /*
3030  * Called when IOC failure is detected.
3031  */
void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* Fail the port state machine, then tear down trunking state */
	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
3040 
3041 /*
3042  * Update loop info in fcport for SCN online
3043  */
3044 static void
3045 bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3046 			struct bfa_fcport_loop_info_s *loop_info)
3047 {
3048 	fcport->myalpa = loop_info->myalpa;
3049 	fcport->alpabm_valid =
3050 			loop_info->alpabm_val;
3051 	memcpy(fcport->alpabm.alpa_bm,
3052 			loop_info->alpabm.alpa_bm,
3053 			sizeof(struct fc_alpabm_s));
3054 }
3055 
/*
 * Cache link attributes from the firmware link-state event. For loop
 * topology only the loop info is taken; otherwise QoS, BB credit
 * recovery, FEC, trunk and FCoE VLAN attributes are updated.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* Loop topology uses a different member of the attr union */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		bfa_fcport_update_loop_info(fcport,
				&pevent->link_state.attr.loop_info);
		return;
	}

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

	if (fcport->cfg.bb_cr_enabled)
		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

	fcport->fec_state = pevent->link_state.fec_state;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3093 
/*
 * Invalidate cached link attributes when the link goes down or the
 * port is disabled/failed.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
	fcport->fec_state = BFA_FEC_OFFLINE;
}
3101 
3102 /*
3103  * Send port enable message to firmware.
3104  */
/*
 * Send port enable message to firmware.
 * Returns BFA_TRUE if the message was queued, BFA_FALSE if the request
 * queue was full (a qresume wait is registered in that case).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize goes to firmware in big-endian byte order */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	/* Tell firmware where to DMA the port stats */
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
3144 
3145 /*
3146  * Send port disable message to firmware.
3147  */
/*
 * Send port disable message to firmware.
 * Returns BFA_TRUE if the message was queued, BFA_FALSE if the request
 * queue was full (a qresume wait is registered in that case).
 */
static	bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);

	return BFA_TRUE;
}
3180 
/*
 * Copy the port/node WWNs from the IOC attributes into the fcport.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
3190 
3191 static void
3192 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3193 	struct bfa_qos_stats_s *s)
3194 {
3195 	u32	*dip = (u32 *) d;
3196 	__be32	*sip = (__be32 *) s;
3197 	int		i;
3198 
3199 	/* Now swap the 32 bit fields */
3200 	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3201 		dip[i] = be32_to_cpu(sip[i]);
3202 }
3203 
/*
 * Convert firmware FCoE stats to host order. The counters are 64-bit
 * values stored as pairs of big-endian 32-bit words, so each word is
 * byte-swapped and, on little-endian hosts, the two words of each pair
 * are also exchanged.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int		i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* also swap word order within each 64-bit pair */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3223 
/*
 * Completion/cancel handler for a stats-get request. On completion,
 * copy byte-swapped stats to every waiter on stats_pending_q and run
 * their callbacks; on cancel just drop the pending list.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;

	if (complete) {
		time64_t time = ktime_get_seconds();

		/* Drain all waiters queued while the request was in flight */
		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
							&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
							&fcport->stats->fcoe);
					/* seconds since last stats reset */
					ret->fcoe.secs_reset =
						time - fcport->stats_reset_time;
				}
			}
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
					fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		/* Cancelled: forget waiters and clear any error status */
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3260 
/*
 * Timer handler: firmware did not answer the stats-get request in time.
 * Cancel any queued-wait and complete the waiters with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
}
3276 
/*
 * Send a stats-get request to firmware. If the request queue is full,
 * register a wait that re-invokes this function when space opens up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3300 
/*
 * Completion callback for a stats-clear request: drain statsclr_pending_q
 * and complete each waiter with the current stats_status. When invoked
 * with complete == BFA_FALSE (cleanup path) the queue is just
 * re-initialized.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;

	if (complete) {
		/*
		 * re-initialize time stamp for stats reset
		 */
		fcport->stats_reset_time = ktime_get_seconds();
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
						fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3325 
/*
 * Stats-clear request timed out: cancel any pending request-queue wait
 * and complete the queued callbacks with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	/* request never made it to firmware -- drop the reqq wait entry */
	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}
3341 
/*
 * Post a stats-clear request to firmware. If no request-queue element is
 * available, queue a wait entry so this function is re-invoked on queue
 * resume (it is its own resume callback).
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* queue full -- retry when space frees up */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3365 
3366 /*
3367  * Handle trunk SCN event from firmware.
3368  */
3369 static void
3370 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3371 {
3372 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3373 	struct bfi_fcport_trunk_link_s *tlink;
3374 	struct bfa_trunk_link_attr_s *lattr;
3375 	enum bfa_trunk_state state_prev;
3376 	int i;
3377 	int link_bm = 0;
3378 
3379 	bfa_trc(fcport->bfa, fcport->cfg.trunked);
3380 	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3381 		   scn->trunk_state != BFA_TRUNK_OFFLINE);
3382 
3383 	bfa_trc(fcport->bfa, trunk->attr.state);
3384 	bfa_trc(fcport->bfa, scn->trunk_state);
3385 	bfa_trc(fcport->bfa, scn->trunk_speed);
3386 
3387 	/*
3388 	 * Save off new state for trunk attribute query
3389 	 */
3390 	state_prev = trunk->attr.state;
3391 	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3392 		trunk->attr.state = scn->trunk_state;
3393 	trunk->attr.speed = scn->trunk_speed;
3394 	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3395 		lattr = &trunk->attr.link_attr[i];
3396 		tlink = &scn->tlink[i];
3397 
3398 		lattr->link_state = tlink->state;
3399 		lattr->trunk_wwn  = tlink->trunk_wwn;
3400 		lattr->fctl	  = tlink->fctl;
3401 		lattr->speed	  = tlink->speed;
3402 		lattr->deskew	  = be32_to_cpu(tlink->deskew);
3403 
3404 		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3405 			fcport->speed	 = tlink->speed;
3406 			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3407 			link_bm |= 1 << i;
3408 		}
3409 
3410 		bfa_trc(fcport->bfa, lattr->link_state);
3411 		bfa_trc(fcport->bfa, lattr->trunk_wwn);
3412 		bfa_trc(fcport->bfa, lattr->fctl);
3413 		bfa_trc(fcport->bfa, lattr->speed);
3414 		bfa_trc(fcport->bfa, lattr->deskew);
3415 	}
3416 
3417 	switch (link_bm) {
3418 	case 3:
3419 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3420 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3421 		break;
3422 	case 2:
3423 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3424 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3425 		break;
3426 	case 1:
3427 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3428 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3429 		break;
3430 	default:
3431 		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3432 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3433 	}
3434 
3435 	/*
3436 	 * Notify upper layers if trunk state changed.
3437 	 */
3438 	if ((state_prev != trunk->attr.state) ||
3439 		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3440 		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3441 			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3442 	}
3443 }
3444 
3445 static void
3446 bfa_trunk_iocdisable(struct bfa_s *bfa)
3447 {
3448 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3449 	int i = 0;
3450 
3451 	/*
3452 	 * In trunked mode, notify upper layers that link is down
3453 	 */
3454 	if (fcport->cfg.trunked) {
3455 		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3456 			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3457 
3458 		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3459 		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3460 		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3461 			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3462 			fcport->trunk.attr.link_attr[i].fctl =
3463 						BFA_TRUNK_LINK_FCTL_NORMAL;
3464 			fcport->trunk.attr.link_attr[i].link_state =
3465 						BFA_TRUNK_LINK_STATE_DN_LINKDN;
3466 			fcport->trunk.attr.link_attr[i].speed =
3467 						BFA_PORT_SPEED_UNKNOWN;
3468 			fcport->trunk.attr.link_attr[i].deskew = 0;
3469 		}
3470 	}
3471 }
3472 
3473 /*
3474  * Called to initialize port attributes
3475  */
3476 void
3477 bfa_fcport_init(struct bfa_s *bfa)
3478 {
3479 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3480 
3481 	/*
3482 	 * Initialize port attributes from IOC hardware data.
3483 	 */
3484 	bfa_fcport_set_wwns(fcport);
3485 	if (fcport->cfg.maxfrsize == 0)
3486 		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3487 	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3488 	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3489 
3490 	if (bfa_fcport_is_pbcdisabled(bfa))
3491 		bfa->modules.port.pbc_disabled = BFA_TRUE;
3492 
3493 	WARN_ON(!fcport->cfg.maxfrsize);
3494 	WARN_ON(!fcport->cfg.rx_bbcredit);
3495 	WARN_ON(!fcport->speed_sup);
3496 }
3497 
3498 /*
3499  * Firmware message handler.
3500  */
3501 void
3502 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3503 {
3504 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3505 	union bfi_fcport_i2h_msg_u i2hmsg;
3506 
3507 	i2hmsg.msg = msg;
3508 	fcport->event_arg.i2hmsg = i2hmsg;
3509 
3510 	bfa_trc(bfa, msg->mhdr.msg_id);
3511 	bfa_trc(bfa, bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm));
3512 
3513 	switch (msg->mhdr.msg_id) {
3514 	case BFI_FCPORT_I2H_ENABLE_RSP:
3515 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3516 
3517 			fcport->stats_dma_ready = BFA_TRUE;
3518 			if (fcport->use_flash_cfg) {
3519 				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3520 				fcport->cfg.maxfrsize =
3521 					cpu_to_be16(fcport->cfg.maxfrsize);
3522 				fcport->cfg.path_tov =
3523 					cpu_to_be16(fcport->cfg.path_tov);
3524 				fcport->cfg.q_depth =
3525 					cpu_to_be16(fcport->cfg.q_depth);
3526 
3527 				if (fcport->cfg.trunked)
3528 					fcport->trunk.attr.state =
3529 						BFA_TRUNK_OFFLINE;
3530 				else
3531 					fcport->trunk.attr.state =
3532 						BFA_TRUNK_DISABLED;
3533 				fcport->qos_attr.qos_bw =
3534 					i2hmsg.penable_rsp->port_cfg.qos_bw;
3535 				fcport->use_flash_cfg = BFA_FALSE;
3536 			}
3537 
3538 			if (fcport->cfg.qos_enabled)
3539 				fcport->qos_attr.state = BFA_QOS_OFFLINE;
3540 			else
3541 				fcport->qos_attr.state = BFA_QOS_DISABLED;
3542 
3543 			fcport->qos_attr.qos_bw_op =
3544 					i2hmsg.penable_rsp->port_cfg.qos_bw;
3545 
3546 			if (fcport->cfg.bb_cr_enabled)
3547 				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3548 			else
3549 				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3550 
3551 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3552 		}
3553 		break;
3554 
3555 	case BFI_FCPORT_I2H_DISABLE_RSP:
3556 		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3557 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3558 		break;
3559 
3560 	case BFI_FCPORT_I2H_EVENT:
3561 		if (fcport->cfg.bb_cr_enabled)
3562 			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3563 		else
3564 			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3565 
3566 		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3567 			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3568 		else {
3569 			if (i2hmsg.event->link_state.linkstate_rsn ==
3570 			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3571 				bfa_sm_send_event(fcport,
3572 						  BFA_FCPORT_SM_FAA_MISCONFIG);
3573 			else
3574 				bfa_sm_send_event(fcport,
3575 						  BFA_FCPORT_SM_LINKDOWN);
3576 		}
3577 		fcport->qos_attr.qos_bw_op =
3578 				i2hmsg.event->link_state.qos_attr.qos_bw_op;
3579 		break;
3580 
3581 	case BFI_FCPORT_I2H_TRUNK_SCN:
3582 		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3583 		break;
3584 
3585 	case BFI_FCPORT_I2H_STATS_GET_RSP:
3586 		/*
3587 		 * check for timer pop before processing the rsp
3588 		 */
3589 		if (list_empty(&fcport->stats_pending_q) ||
3590 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3591 			break;
3592 
3593 		bfa_timer_stop(&fcport->timer);
3594 		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3595 		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3596 		break;
3597 
3598 	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3599 		/*
3600 		 * check for timer pop before processing the rsp
3601 		 */
3602 		if (list_empty(&fcport->statsclr_pending_q) ||
3603 		    (fcport->stats_status == BFA_STATUS_ETIMER))
3604 			break;
3605 
3606 		bfa_timer_stop(&fcport->timer);
3607 		fcport->stats_status = BFA_STATUS_OK;
3608 		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3609 		break;
3610 
3611 	case BFI_FCPORT_I2H_ENABLE_AEN:
3612 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3613 		break;
3614 
3615 	case BFI_FCPORT_I2H_DISABLE_AEN:
3616 		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3617 		break;
3618 
3619 	default:
3620 		WARN_ON(1);
3621 	break;
3622 	}
3623 }
3624 
3625 /*
3626  * Registered callback for port events.
3627  */
3628 void
3629 bfa_fcport_event_register(struct bfa_s *bfa,
3630 				void (*cbfn) (void *cbarg,
3631 				enum bfa_port_linkstate event),
3632 				void *cbarg)
3633 {
3634 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3635 
3636 	fcport->event_cbfn = cbfn;
3637 	fcport->event_cbarg = cbarg;
3638 }
3639 
3640 bfa_status_t
3641 bfa_fcport_enable(struct bfa_s *bfa)
3642 {
3643 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3644 
3645 	if (bfa_fcport_is_pbcdisabled(bfa))
3646 		return BFA_STATUS_PBC;
3647 
3648 	if (bfa_ioc_is_disabled(&bfa->ioc))
3649 		return BFA_STATUS_IOC_DISABLED;
3650 
3651 	if (fcport->diag_busy)
3652 		return BFA_STATUS_DIAG_BUSY;
3653 
3654 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3655 	return BFA_STATUS_OK;
3656 }
3657 
3658 bfa_status_t
3659 bfa_fcport_disable(struct bfa_s *bfa)
3660 {
3661 	if (bfa_fcport_is_pbcdisabled(bfa))
3662 		return BFA_STATUS_PBC;
3663 
3664 	if (bfa_ioc_is_disabled(&bfa->ioc))
3665 		return BFA_STATUS_IOC_DISABLED;
3666 
3667 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3668 	return BFA_STATUS_OK;
3669 }
3670 
3671 /* If PBC is disabled on port, return error */
3672 bfa_status_t
3673 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3674 {
3675 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3676 	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3677 	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3678 
3679 	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3680 		bfa_trc(bfa, fcport->pwwn);
3681 		return BFA_STATUS_PBC;
3682 	}
3683 	return BFA_STATUS_OK;
3684 }
3685 
3686 /*
3687  * Configure port speed.
3688  */
3689 bfa_status_t
3690 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3691 {
3692 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3693 
3694 	bfa_trc(bfa, speed);
3695 
3696 	if (fcport->cfg.trunked == BFA_TRUE)
3697 		return BFA_STATUS_TRUNK_ENABLED;
3698 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3699 			(speed == BFA_PORT_SPEED_16GBPS))
3700 		return BFA_STATUS_UNSUPP_SPEED;
3701 	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3702 		bfa_trc(bfa, fcport->speed_sup);
3703 		return BFA_STATUS_UNSUPP_SPEED;
3704 	}
3705 
3706 	/* Port speed entered needs to be checked */
3707 	if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3708 		/* For CT2, 1G is not supported */
3709 		if ((speed == BFA_PORT_SPEED_1GBPS) &&
3710 		    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3711 			return BFA_STATUS_UNSUPP_SPEED;
3712 
3713 		/* Already checked for Auto Speed and Max Speed supp */
3714 		if (!(speed == BFA_PORT_SPEED_1GBPS ||
3715 		      speed == BFA_PORT_SPEED_2GBPS ||
3716 		      speed == BFA_PORT_SPEED_4GBPS ||
3717 		      speed == BFA_PORT_SPEED_8GBPS ||
3718 		      speed == BFA_PORT_SPEED_16GBPS ||
3719 		      speed == BFA_PORT_SPEED_AUTO))
3720 			return BFA_STATUS_UNSUPP_SPEED;
3721 	} else {
3722 		if (speed != BFA_PORT_SPEED_10GBPS)
3723 			return BFA_STATUS_UNSUPP_SPEED;
3724 	}
3725 
3726 	fcport->cfg.speed = speed;
3727 
3728 	return BFA_STATUS_OK;
3729 }
3730 
3731 /*
3732  * Get current speed.
3733  */
3734 enum bfa_port_speed
3735 bfa_fcport_get_speed(struct bfa_s *bfa)
3736 {
3737 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3738 
3739 	return fcport->speed;
3740 }
3741 
3742 /*
3743  * Configure port topology.
3744  */
3745 bfa_status_t
3746 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3747 {
3748 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3749 
3750 	bfa_trc(bfa, topology);
3751 	bfa_trc(bfa, fcport->cfg.topology);
3752 
3753 	switch (topology) {
3754 	case BFA_PORT_TOPOLOGY_P2P:
3755 		break;
3756 
3757 	case BFA_PORT_TOPOLOGY_LOOP:
3758 		if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3759 			(fcport->qos_attr.state != BFA_QOS_DISABLED))
3760 			return BFA_STATUS_ERROR_QOS_ENABLED;
3761 		if (fcport->cfg.ratelimit != BFA_FALSE)
3762 			return BFA_STATUS_ERROR_TRL_ENABLED;
3763 		if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3764 			(fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3765 			return BFA_STATUS_ERROR_TRUNK_ENABLED;
3766 		if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3767 			(fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3768 			return BFA_STATUS_UNSUPP_SPEED;
3769 		if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3770 			return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3771 		if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3772 			return BFA_STATUS_DPORT_ERR;
3773 		if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3774 			return BFA_STATUS_DPORT_ERR;
3775 		break;
3776 
3777 	case BFA_PORT_TOPOLOGY_AUTO:
3778 		break;
3779 
3780 	default:
3781 		return BFA_STATUS_EINVAL;
3782 	}
3783 
3784 	fcport->cfg.topology = topology;
3785 	return BFA_STATUS_OK;
3786 }
3787 
3788 /*
3789  * Get current topology.
3790  */
3791 enum bfa_port_topology
3792 bfa_fcport_get_topology(struct bfa_s *bfa)
3793 {
3794 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3795 
3796 	return fcport->topology;
3797 }
3798 
3799 /*
3800  * Get config topology.
3801  */
3802 enum bfa_port_topology
3803 bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3804 {
3805 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3806 
3807 	return fcport->cfg.topology;
3808 }
3809 
3810 bfa_status_t
3811 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3812 {
3813 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3814 
3815 	bfa_trc(bfa, alpa);
3816 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3817 	bfa_trc(bfa, fcport->cfg.hardalpa);
3818 
3819 	fcport->cfg.cfg_hardalpa = BFA_TRUE;
3820 	fcport->cfg.hardalpa = alpa;
3821 
3822 	return BFA_STATUS_OK;
3823 }
3824 
3825 bfa_status_t
3826 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3827 {
3828 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3829 
3830 	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3831 	bfa_trc(bfa, fcport->cfg.hardalpa);
3832 
3833 	fcport->cfg.cfg_hardalpa = BFA_FALSE;
3834 	return BFA_STATUS_OK;
3835 }
3836 
3837 u8
3838 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3839 {
3840 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3841 
3842 	return fcport->myalpa;
3843 }
3844 
3845 bfa_status_t
3846 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3847 {
3848 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3849 
3850 	bfa_trc(bfa, maxfrsize);
3851 	bfa_trc(bfa, fcport->cfg.maxfrsize);
3852 
3853 	/* with in range */
3854 	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3855 		return BFA_STATUS_INVLD_DFSZ;
3856 
3857 	/* power of 2, if not the max frame size of 2112 */
3858 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3859 		return BFA_STATUS_INVLD_DFSZ;
3860 
3861 	fcport->cfg.maxfrsize = maxfrsize;
3862 	return BFA_STATUS_OK;
3863 }
3864 
3865 u16
3866 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3867 {
3868 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3869 
3870 	return fcport->cfg.maxfrsize;
3871 }
3872 
3873 u8
3874 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3875 {
3876 	if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3877 		return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3878 
3879 	else
3880 		return 0;
3881 }
3882 
3883 void
3884 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3885 {
3886 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3887 
3888 	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3889 }
3890 
3891 /*
3892  * Get port attributes.
3893  */
3894 void
3895 bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3896 {
3897 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3898 
3899 	memset(attr, 0, sizeof(struct bfa_port_attr_s));
3900 
3901 	attr->nwwn = fcport->nwwn;
3902 	attr->pwwn = fcport->pwwn;
3903 
3904 	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
3905 	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;
3906 
3907 	memcpy(&attr->pport_cfg, &fcport->cfg,
3908 		sizeof(struct bfa_port_cfg_s));
3909 	/* speed attributes */
3910 	attr->pport_cfg.speed = fcport->cfg.speed;
3911 	attr->speed_supported = fcport->speed_sup;
3912 	attr->speed = fcport->speed;
3913 	attr->cos_supported = FC_CLASS_3;
3914 
3915 	/* topology attributes */
3916 	attr->pport_cfg.topology = fcport->cfg.topology;
3917 	attr->topology = fcport->topology;
3918 	attr->pport_cfg.trunked = fcport->cfg.trunked;
3919 
3920 	/* beacon attributes */
3921 	attr->beacon = fcport->beacon;
3922 	attr->link_e2e_beacon = fcport->link_e2e_beacon;
3923 
3924 	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
3925 	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
3926 	attr->port_state = bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm);
3927 
3928 	attr->fec_state = fcport->fec_state;
3929 
3930 	/* PBC Disabled State */
3931 	if (bfa_fcport_is_pbcdisabled(bfa))
3932 		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
3933 	else {
3934 		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3935 			attr->port_state = BFA_PORT_ST_IOCDIS;
3936 		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3937 			attr->port_state = BFA_PORT_ST_FWMISMATCH;
3938 	}
3939 
3940 	/* FCoE vlan */
3941 	attr->fcoe_vlan = fcport->fcoe_vlan;
3942 }
3943 
3944 #define BFA_FCPORT_STATS_TOV	1000
3945 
3946 /*
3947  * Fetch port statistics (FCQoS or FCoE).
3948  */
3949 bfa_status_t
3950 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3951 {
3952 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3953 
3954 	if (!bfa_iocfc_is_operational(bfa) ||
3955 	    !fcport->stats_dma_ready)
3956 		return BFA_STATUS_IOC_NON_OP;
3957 
3958 	if (!list_empty(&fcport->statsclr_pending_q))
3959 		return BFA_STATUS_DEVBUSY;
3960 
3961 	if (list_empty(&fcport->stats_pending_q)) {
3962 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3963 		bfa_fcport_send_stats_get(fcport);
3964 		bfa_timer_start(bfa, &fcport->timer,
3965 				bfa_fcport_stats_get_timeout,
3966 				fcport, BFA_FCPORT_STATS_TOV);
3967 	} else
3968 		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3969 
3970 	return BFA_STATUS_OK;
3971 }
3972 
3973 /*
3974  * Reset port statistics (FCQoS or FCoE).
3975  */
3976 bfa_status_t
3977 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
3978 {
3979 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3980 
3981 	if (!bfa_iocfc_is_operational(bfa) ||
3982 	    !fcport->stats_dma_ready)
3983 		return BFA_STATUS_IOC_NON_OP;
3984 
3985 	if (!list_empty(&fcport->stats_pending_q))
3986 		return BFA_STATUS_DEVBUSY;
3987 
3988 	if (list_empty(&fcport->statsclr_pending_q)) {
3989 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3990 		bfa_fcport_send_stats_clear(fcport);
3991 		bfa_timer_start(bfa, &fcport->timer,
3992 				bfa_fcport_stats_clr_timeout,
3993 				fcport, BFA_FCPORT_STATS_TOV);
3994 	} else
3995 		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3996 
3997 	return BFA_STATUS_OK;
3998 }
3999 
4000 /*
4001  * Fetch port attributes.
4002  */
4003 bfa_boolean_t
4004 bfa_fcport_is_disabled(struct bfa_s *bfa)
4005 {
4006 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4007 
4008 	return bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
4009 		BFA_PORT_ST_DISABLED;
4010 
4011 }
4012 
4013 bfa_boolean_t
4014 bfa_fcport_is_dport(struct bfa_s *bfa)
4015 {
4016 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4017 
4018 	return (bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
4019 		BFA_PORT_ST_DPORT);
4020 }
4021 
4022 bfa_boolean_t
4023 bfa_fcport_is_ddport(struct bfa_s *bfa)
4024 {
4025 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4026 
4027 	return (bfa_fcport_sm_to_state(hal_port_sm_table, fcport->sm) ==
4028 		BFA_PORT_ST_DDPORT);
4029 }
4030 
4031 bfa_status_t
4032 bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4033 {
4034 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4035 	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4036 
4037 	bfa_trc(bfa, ioc_type);
4038 
4039 	if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4040 		return BFA_STATUS_QOS_BW_INVALID;
4041 
4042 	if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4043 		return BFA_STATUS_QOS_BW_INVALID;
4044 
4045 	if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4046 	    (qos_bw->low > qos_bw->high))
4047 		return BFA_STATUS_QOS_BW_INVALID;
4048 
4049 	if ((ioc_type == BFA_IOC_TYPE_FC) &&
4050 	    (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4051 		fcport->cfg.qos_bw = *qos_bw;
4052 
4053 	return BFA_STATUS_OK;
4054 }
4055 
4056 bfa_boolean_t
4057 bfa_fcport_is_ratelim(struct bfa_s *bfa)
4058 {
4059 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4060 
4061 	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4062 
4063 }
4064 
4065 /*
4066  * Get default minimum ratelim speed
4067  */
4068 enum bfa_port_speed
4069 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4070 {
4071 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4072 
4073 	bfa_trc(bfa, fcport->cfg.trl_def_speed);
4074 	return fcport->cfg.trl_def_speed;
4075 
4076 }
4077 
4078 void
4079 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4080 		  bfa_boolean_t link_e2e_beacon)
4081 {
4082 	struct bfa_s *bfa = dev;
4083 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4084 
4085 	bfa_trc(bfa, beacon);
4086 	bfa_trc(bfa, link_e2e_beacon);
4087 	bfa_trc(bfa, fcport->beacon);
4088 	bfa_trc(bfa, fcport->link_e2e_beacon);
4089 
4090 	fcport->beacon = beacon;
4091 	fcport->link_e2e_beacon = link_e2e_beacon;
4092 }
4093 
4094 bfa_boolean_t
4095 bfa_fcport_is_linkup(struct bfa_s *bfa)
4096 {
4097 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4098 
4099 	return	(!fcport->cfg.trunked &&
4100 		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4101 		(fcport->cfg.trunked &&
4102 		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4103 }
4104 
4105 bfa_boolean_t
4106 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4107 {
4108 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4109 
4110 	return fcport->cfg.qos_enabled;
4111 }
4112 
4113 bfa_boolean_t
4114 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4115 {
4116 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4117 
4118 	return fcport->cfg.trunked;
4119 }
4120 
/*
 * Enable or disable BB credit recovery (BBCR) and set bb_scn.
 *
 * FC IOCs only, and (for non-Chinook cards) not on mezzanine adapters.
 * Enabling is refused when loop topology, QoS or trunking is active, or
 * when the configured/supported speed is too low. A bb_scn of 0 or above
 * BFA_BB_SCN_MAX falls back to BFA_BB_SCN_DEF.
 */
bfa_status_t
bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, on_off);

	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
		return BFA_STATUS_BBCR_FC_ONLY;

	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;

	if (on_off) {
		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
			return BFA_STATUS_TOPOLOGY_LOOP;

		if (fcport->cfg.qos_enabled)
			return BFA_STATUS_ERROR_QOS_ENABLED;

		if (fcport->cfg.trunked)
			return BFA_STATUS_TRUNK_ENABLED;

		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;

		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;

		/* already enabled: only a bb_scn change is reportable */
		if (fcport->cfg.bb_cr_enabled) {
			if (bb_scn != fcport->cfg.bb_scn)
				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
			else
				return BFA_STATUS_NO_CHANGE;
		}

		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
			bb_scn = BFA_BB_SCN_DEF;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = bb_scn;
	} else {
		if (!fcport->cfg.bb_cr_enabled)
			return BFA_STATUS_NO_CHANGE;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = 0;
	}

	return BFA_STATUS_OK;
}
4174 
4175 bfa_status_t
4176 bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
4177 		struct bfa_bbcr_attr_s *bbcr_attr)
4178 {
4179 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4180 
4181 	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4182 		return BFA_STATUS_BBCR_FC_ONLY;
4183 
4184 	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4185 		return BFA_STATUS_TOPOLOGY_LOOP;
4186 
4187 	*bbcr_attr = fcport->bbcr_attr;
4188 
4189 	return BFA_STATUS_OK;
4190 }
4191 
4192 void
4193 bfa_fcport_dportenable(struct bfa_s *bfa)
4194 {
4195 	/*
4196 	 * Assume caller check for port is in disable state
4197 	 */
4198 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4199 	bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4200 }
4201 
4202 void
4203 bfa_fcport_dportdisable(struct bfa_s *bfa)
4204 {
4205 	/*
4206 	 * Assume caller check for port is in disable state
4207 	 */
4208 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4209 	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4210 }
4211 
4212 static void
4213 bfa_fcport_ddportenable(struct bfa_s *bfa)
4214 {
4215 	/*
4216 	 * Assume caller check for port is in disable state
4217 	 */
4218 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
4219 }
4220 
4221 static void
4222 bfa_fcport_ddportdisable(struct bfa_s *bfa)
4223 {
4224 	/*
4225 	 * Assume caller check for port is in disable state
4226 	 */
4227 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
4228 }
4229 
4230 /*
4231  * Rport State machine functions
4232  */
4233 /*
4234  * Beginning state, only online event expected.
4235  */
4236 static void
4237 bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4238 {
4239 	bfa_trc(rp->bfa, rp->rport_tag);
4240 	bfa_trc(rp->bfa, event);
4241 
4242 	switch (event) {
4243 	case BFA_RPORT_SM_CREATE:
4244 		bfa_stats(rp, sm_un_cr);
4245 		bfa_sm_set_state(rp, bfa_rport_sm_created);
4246 		break;
4247 
4248 	default:
4249 		bfa_stats(rp, sm_un_unexp);
4250 		bfa_sm_fault(rp->bfa, event);
4251 	}
4252 }
4253 
/*
 * Rport has been created but not yet brought online in firmware.
 * ONLINE issues the firmware create (falling back to the qfull state when
 * the request queue is full), DELETE frees the rport, HWFAIL parks it in
 * iocdisable.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4285 
4286 /*
4287  * Waiting for rport create response from firmware.
4288  */
4289 static void
4290 bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4291 {
4292 	bfa_trc(rp->bfa, rp->rport_tag);
4293 	bfa_trc(rp->bfa, event);
4294 
4295 	switch (event) {
4296 	case BFA_RPORT_SM_FWRSP:
4297 		bfa_stats(rp, sm_fwc_rsp);
4298 		bfa_sm_set_state(rp, bfa_rport_sm_online);
4299 		bfa_rport_online_cb(rp);
4300 		break;
4301 
4302 	case BFA_RPORT_SM_DELETE:
4303 		bfa_stats(rp, sm_fwc_del);
4304 		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4305 		break;
4306 
4307 	case BFA_RPORT_SM_OFFLINE:
4308 		bfa_stats(rp, sm_fwc_off);
4309 		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4310 		break;
4311 
4312 	case BFA_RPORT_SM_HWFAIL:
4313 		bfa_stats(rp, sm_fwc_hwf);
4314 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4315 		break;
4316 
4317 	default:
4318 		bfa_stats(rp, sm_fwc_unexp);
4319 		bfa_sm_fault(rp->bfa, event);
4320 	}
4321 }
4322 
4323 /*
4324  * Request queue is full, awaiting queue resume to send create request.
4325  */
4326 static void
4327 bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4328 {
4329 	bfa_trc(rp->bfa, rp->rport_tag);
4330 	bfa_trc(rp->bfa, event);
4331 
4332 	switch (event) {
4333 	case BFA_RPORT_SM_QRESUME:
4334 		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4335 		bfa_rport_send_fwcreate(rp);
4336 		break;
4337 
4338 	case BFA_RPORT_SM_DELETE:
4339 		bfa_stats(rp, sm_fwc_del);
4340 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4341 		bfa_reqq_wcancel(&rp->reqq_wait);
4342 		bfa_rport_free(rp);
4343 		break;
4344 
4345 	case BFA_RPORT_SM_OFFLINE:
4346 		bfa_stats(rp, sm_fwc_off);
4347 		bfa_sm_set_state(rp, bfa_rport_sm_offline);
4348 		bfa_reqq_wcancel(&rp->reqq_wait);
4349 		bfa_rport_offline_cb(rp);
4350 		break;
4351 
4352 	case BFA_RPORT_SM_HWFAIL:
4353 		bfa_stats(rp, sm_fwc_hwf);
4354 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4355 		bfa_reqq_wcancel(&rp->reqq_wait);
4356 		break;
4357 
4358 	default:
4359 		bfa_stats(rp, sm_fwc_unexp);
4360 		bfa_sm_fault(rp->bfa, event);
4361 	}
4362 }
4363 
4364 /*
4365  * Online state - normal parking state.
4366  */
4367 static void
4368 bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4369 {
4370 	struct bfi_rport_qos_scn_s *qos_scn;
4371 
4372 	bfa_trc(rp->bfa, rp->rport_tag);
4373 	bfa_trc(rp->bfa, event);
4374 
4375 	switch (event) {
4376 	case BFA_RPORT_SM_OFFLINE:
4377 		bfa_stats(rp, sm_on_off);
4378 		if (bfa_rport_send_fwdelete(rp))
4379 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4380 		else
4381 			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4382 		break;
4383 
4384 	case BFA_RPORT_SM_DELETE:
4385 		bfa_stats(rp, sm_on_del);
4386 		if (bfa_rport_send_fwdelete(rp))
4387 			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4388 		else
4389 			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4390 		break;
4391 
4392 	case BFA_RPORT_SM_HWFAIL:
4393 		bfa_stats(rp, sm_on_hwf);
4394 		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4395 		break;
4396 
4397 	case BFA_RPORT_SM_SET_SPEED:
4398 		bfa_rport_send_fwspeed(rp);
4399 		break;
4400 
4401 	case BFA_RPORT_SM_QOS_SCN:
4402 		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4403 		rp->qos_attr = qos_scn->new_qos_attr;
4404 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4405 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4406 		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4407 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4408 
4409 		qos_scn->old_qos_attr.qos_flow_id  =
4410 			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
4411 		qos_scn->new_qos_attr.qos_flow_id  =
4412 			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
4413 
4414 		if (qos_scn->old_qos_attr.qos_flow_id !=
4415 			qos_scn->new_qos_attr.qos_flow_id)
4416 			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4417 						    qos_scn->old_qos_attr,
4418 						    qos_scn->new_qos_attr);
4419 		if (qos_scn->old_qos_attr.qos_priority !=
4420 			qos_scn->new_qos_attr.qos_priority)
4421 			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4422 						  qos_scn->old_qos_attr,
4423 						  qos_scn->new_qos_attr);
4424 		break;
4425 
4426 	default:
4427 		bfa_stats(rp, sm_on_unexp);
4428 		bfa_sm_fault(rp->bfa, event);
4429 	}
4430 }
4431 
4432 /*
4433  * Firmware rport is being deleted - awaiting f/w response.
4434  */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Firmware acked the delete; the rport is now offline. */
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade offline to delete; keep waiting for f/w response. */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died - no response will come; report offline now. */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4464 
/*
 * Request queue was full when the f/w delete was attempted; awaiting
 * queue resume to send the delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available again - send the deferred delete. */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; delete was never sent - cancel the queue wait. */
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4494 
4495 /*
4496  * Offline state.
4497  */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		/* No f/w state exists; free the rport immediately. */
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* Recreate the f/w rport; wait in _qfull if queue is full. */
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* Already offline; just re-issue the offline callback. */
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4533 
4534 /*
4535  * Rport is deleted, waiting for firmware response to delete.
4536  */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Firmware acked the delete; release the rport. */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; no response will come - release anyway. */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4560 
/*
 * Rport is deleted but the f/w delete request is stuck behind a full
 * request queue; awaiting queue resume.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* Queue space available again - send the deferred delete. */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; cancel the queue wait and release the rport. */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4585 
4586 /*
4587  * Waiting for rport create response from firmware. A delete is pending.
4588  */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create completed; immediately issue the pending delete. */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; no f/w cleanup needed - release the rport. */
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4616 
4617 /*
4618  * Waiting for rport create response from firmware. Rport offline is pending.
4619  */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* Create completed; immediately delete the f/w rport. */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* Upgrade the pending offline to a pending delete. */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC died; complete the pending offline right away. */
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4652 
4653 /*
4654  * IOC h/w failed.
4655  */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* No f/w interaction possible; just run the callback. */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered; re-create the rport in firmware. */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* Already in IOC-disabled state; nothing more to do. */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4690 
4691 
4692 
4693 /*
4694  *  bfa_rport_private BFA rport private functions
4695  */
4696 
4697 static void
4698 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4699 {
4700 	struct bfa_rport_s *rp = cbarg;
4701 
4702 	if (complete)
4703 		bfa_cb_rport_online(rp->rport_drv);
4704 }
4705 
4706 static void
4707 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4708 {
4709 	struct bfa_rport_s *rp = cbarg;
4710 
4711 	if (complete)
4712 		bfa_cb_rport_offline(rp->rport_drv);
4713 }
4714 
4715 static void
4716 bfa_rport_qresume(void *cbarg)
4717 {
4718 	struct bfa_rport_s	*rp = cbarg;
4719 
4720 	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4721 }
4722 
/*
 * Compute the KVA memory needed by the rport module and register it
 * with the meminfo structure; also enforces the minimum rport count.
 */
void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	/* Clamp the configured rport count to the supported minimum. */
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4736 
/*
 * Module attach: carve the rport array out of the pre-claimed KVA block
 * and initialize every rport into the uninit state on the free queue.
 */
void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be non-zero and a power of two. */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is kept off the free queue, so it is never
		 * handed out by bfa_rport_alloc().
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4776 
/*
 * IOC disable handler: push every active rport through the HWFAIL
 * event so each state machine cleans itself up.
 */
void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	/* Safe iteration: HWFAIL handling may move entries off this list. */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
4792 
/*
 * Take an rport off the free queue and move it to the active queue.
 * Returns NULL when the free queue is empty.
 */
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	/* bfa_q_deq() leaves rport NULL when the queue is empty. */
	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}
4804 
/*
 * Return an rport to the free queue. The rport must currently be on
 * the active queue (enforced by the WARN_ON).
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
4814 
/*
 * Build and queue a rport-create request to firmware.
 *
 * Returns BFA_TRUE when the message was queued; BFA_FALSE when the
 * request queue is full, in which case the rport is parked on the
 * queue-wait list and retried from the QRESUME event.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz is the only multi-byte field swapped to wire order. */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4847 
/*
 * Build and queue a rport-delete request to firmware.
 *
 * Returns BFA_TRUE when queued; BFA_FALSE when the request queue is
 * full (rport waits on the queue and retries via QRESUME).
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	/* fw_handle was returned by firmware in the create response. */
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4872 
/*
 * Send a set-speed request to firmware.
 *
 * Unlike create/delete, a full request queue is NOT retried here: the
 * failure is only traced and BFA_FALSE returned (best effort).
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4898 
4899 
4900 
4901 /*
4902  *  bfa_rport_public
4903  */
4904 
4905 /*
4906  * Rport interrupt processing.
4907  */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/* Capture the f/w handle and QoS attrs from the response. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* Hand the raw f/w message to the state machine via event_arg. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
				&msg.lip_scn->loop_info);
		bfa_cb_rport_scn_online(bfa);
		break;

	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
		bfa_cb_rport_scn_offline(bfa);
		break;

	case BFI_RPORT_I2H_NO_DEV:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
		bfa_cb_rport_scn_no_dev(rp->rport_drv);
		break;

	default:
		/* Unknown message id from firmware - trace and warn. */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
4961 
/*
 * Resource reconfiguration: firmware supports fewer rports than were
 * provisioned, so park the surplus entries on the unused queue.
 */
void
bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
{
	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
		bfa_q_deq_tail(&mod->rp_free_q, &qe);
		list_add_tail(qe, &mod->rp_unused_q);
	}
}
4974 
4975 /*
4976  *  bfa_rport_api
4977  */
4978 
4979 struct bfa_rport_s *
4980 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4981 {
4982 	struct bfa_rport_s *rp;
4983 
4984 	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4985 
4986 	if (rp == NULL)
4987 		return NULL;
4988 
4989 	rp->bfa = bfa;
4990 	rp->rport_drv = rport_drv;
4991 	memset(&rp->stats, 0, sizeof(rp->stats));
4992 
4993 	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4994 	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4995 
4996 	return rp;
4997 }
4998 
/*
 * Bring an rport online with the supplied login parameters and kick
 * the ONLINE event into the state machine.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/* Flag the anomaly; the code below still recovers from it. */
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	/* Struct copy; the rport keeps its own snapshot of the info. */
	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
5016 
5017 void
5018 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5019 {
5020 	WARN_ON(speed == 0);
5021 	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5022 
5023 	if (rport) {
5024 		rport->rport_info.speed = speed;
5025 		bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5026 	}
5027 }
5028 
5029 /* Set Rport LUN Mask */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* Mark LUN masking active on both the logical port and the rport. */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
5044 
5045 /* Unset Rport LUN mask */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* Clear LUN masking and invalidate the tags in the fcpim module. */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
5060 
5061 /*
5062  * SGPG related functions
5063  */
5064 
5065 /*
5066  * Compute and return memory needed by FCP(im) module.
5067  */
void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	/* Clamp the configured SG page count into [BFA_SGPG_MIN, BFA_SGPG_MAX]. */
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* Spread the DMA requirement over segments; last one takes the rest. */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
5102 
/*
 * Module attach: pair each host-side bfa_sgpg_s (KVA) with an aligned
 * firmware-visible bfi_sgpg_s (DMA) and queue them all on sgpg_q.
 */
void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* Lets the same bytes be viewed as a raw u64 or a bfi address. */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* Round the segment start up to SG-page alignment. */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			/* Store the physical address in f/w byte order. */
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	/* Advance the KVA cursor past the consumed bfa_sgpg_s array. */
	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
5165 
/*
 * Allocate nsgpgs SG pages onto the caller's sgpg_q.
 *
 * All-or-nothing: returns BFA_STATUS_ENOMEM without allocating anything
 * when fewer than nsgpgs pages are free.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int i;

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		WARN_ON(!hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}
5185 
/*
 * Return nsgpg SG pages to the free pool, then hand out pages to any
 * waiters queued on sgpg_wait_q, in order, until the pool is empty.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* Give a partial allocation if the pool cannot cover it all. */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		/* Only fully satisfied waiters are dequeued and called back. */
		if (wqe->nsgpg == 0) {
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5217 
/*
 * Queue a wait request for nsgpg SG pages. Any pages currently free
 * are handed over immediately; the remainder is delivered from
 * bfa_sgpg_mfree() as pages are returned.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* Caller must only wait when the pool cannot satisfy the request. */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5243 
/*
 * Cancel a pending SG-page wait request, returning any pages that were
 * already handed to the waiter back to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	/* nsgpg_total != nsgpg means some pages were partially delivered. */
	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				   wqe->nsgpg_total - wqe->nsgpg);
}
5256 
5257 void
5258 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5259 		   void *cbarg)
5260 {
5261 	INIT_LIST_HEAD(&wqe->sgpg_q);
5262 	wqe->cbfn = cbfn;
5263 	wqe->cbarg = cbarg;
5264 }
5265 
5266 /*
5267  *  UF related functions
5268  */
5269 /*
5270  *****************************************************************************
5271  * Internal functions
5272  *****************************************************************************
5273  */
5274 static void
5275 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5276 {
5277 	struct bfa_uf_s   *uf = cbarg;
5278 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5279 
5280 	if (complete)
5281 		ufm->ufrecv(ufm->cbarg, uf);
5282 }
5283 
5284 static void
5285 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5286 {
5287 	struct bfi_uf_buf_post_s *uf_bp_msg;
5288 	u16 i;
5289 	u16 buf_len;
5290 
5291 	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5292 	uf_bp_msg = ufm->uf_buf_posts;
5293 
5294 	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5295 	     i++, uf_bp_msg++) {
5296 		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5297 
5298 		uf_bp_msg->buf_tag = i;
5299 		buf_len = sizeof(struct bfa_uf_buf_s);
5300 		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5301 		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5302 			    bfa_fn_lpu(ufm->bfa));
5303 		bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5304 	}
5305 
5306 	/*
5307 	 * advance pointer beyond consumed memory
5308 	 */
5309 	bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5310 }
5311 
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		/* Each UF owns one fixed-size DMA buffer, located by tag. */
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
5341 
/*
 * Claim KVA for the UF module. Order matters: claim_ufs() advances the
 * KVA cursor that claim_uf_post_msgs() then continues from.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5348 
/*
 * Compute the DMA and KVA memory needed by the UF module and register
 * it with the meminfo structure.
 */
void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* Spread the DMA requirement over segments; last one takes the rest. */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
5376 
/*
 * Module attach: initialize the UF queues and claim the KVA memory
 * reserved by bfa_uf_meminfo().
 */
void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);
	INIT_LIST_HEAD(&ufm->uf_unused_q);

	uf_mem_claim(ufm);
}
5391 
5392 static struct bfa_uf_s *
5393 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5394 {
5395 	struct bfa_uf_s   *uf;
5396 
5397 	bfa_q_deq(&uf_mod->uf_free_q, &uf);
5398 	return uf;
5399 }
5400 
/* Return a UF to the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5406 
/*
 * Post one UF receive buffer to firmware by copying its pre-built
 * buffer-post message into the request queue.
 *
 * Returns BFA_STATUS_FAILED (without queuing the UF) when the request
 * queue is full.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	/* The message was pre-built per tag in claim_uf_post_msgs(). */
	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5425 
5426 static void
5427 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5428 {
5429 	struct bfa_uf_s   *uf;
5430 
5431 	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5432 		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5433 			break;
5434 	}
5435 }
5436 
/*
 * Handle a frame-received message: locate the UF by buffer tag, fix up
 * byte order, log the frame, and deliver it to the receive handler
 * (directly in FCS context, deferred otherwise).
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	struct bfa_uf_buf_s *uf_buf;
	uint8_t *buf;

	uf_buf = (struct bfa_uf_buf_s *)
			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
	buf = &uf_buf->d[0];

	/* Lengths arrive big-endian from firmware. */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	/* Every frame carries at least an FC header. */
	WARN_ON(uf->data_len < sizeof(struct fchs_s));

	if (uf->data_len == sizeof(struct fchs_s)) {
		/* Header-only frame: log without payload. */
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		/* Log the header plus the first payload word. */
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
5475 
/* Module start: post all free UF receive buffers to firmware. */
void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5481 
5482 /*
5483  * Register handler for all unsolicted receive frames.
5484  *
5485  * @param[in]	bfa		BFA instance
5486  * @param[in]	ufrecv	receive handler function
5487  * @param[in]	cbarg	receive handler arg
5488  */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	/* Single handler per module; a second call replaces the first. */
	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
5497 
5498 /*
5499  *	Free an unsolicited frame back to BFA.
5500  *
5501  * @param[in]		uf		unsolicited frame to be freed
5502  *
5503  * @return None
5504  */
5505 void
5506 bfa_uf_free(struct bfa_uf_s *uf)
5507 {
5508 	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5509 	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5510 }
5511 
5512 
5513 
5514 /*
5515  *  uf_pub BFA uf module public functions
5516  */
/*
 * UF module interrupt handler: dispatch firmware-to-host messages.
 * Only frame-received events are expected.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* Unknown message id from firmware - trace and warn. */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
5532 
5533 void
5534 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5535 {
5536 	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
5537 	struct list_head	*qe;
5538 	int	i;
5539 
5540 	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5541 		bfa_q_deq_tail(&mod->uf_free_q, &qe);
5542 		list_add_tail(qe, &mod->uf_unused_q);
5543 	}
5544 }
5545 
5546 /*
5547  *	Dport forward declaration
5548  */
5549 
/*
 * Externally visible D-port (diagnostic port) test states, tracked in
 * bfa_dport_s::test_state by the D-port state machine below.
 */
enum bfa_dport_test_state_e {
	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
	BFA_DPORT_ST_COMP	= 2,	/*!< test complete successfully */
	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not start dport is enabled */
};
5557 
5558 static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5559 				  enum bfa_dport_sm_event event);
5560 static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5561 				  enum bfa_dport_sm_event event);
5562 static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5563 				  enum bfa_dport_sm_event event);
5564 static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5565 				 enum bfa_dport_sm_event event);
5566 static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5567 				 enum bfa_dport_sm_event event);
5568 static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5569 				   enum bfa_dport_sm_event event);
5570 static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
5571 					enum bfa_dport_sm_event event);
5572 static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
5573 				  enum bfa_dport_sm_event event);
5574 static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
5575 				   enum bfa_dport_sm_event event);
5576 static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
5577 				   enum bfa_dport_sm_event event);
5578 static void bfa_dport_qresume(void *cbarg);
5579 static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5580 				struct bfi_diag_dport_rsp_s *msg);
5581 static void bfa_dport_scn(struct bfa_dport_s *dport,
5582 				struct bfi_diag_dport_scn_s *msg);
5583 
5584 /*
5585  *	BFA fcdiag module
5586  */
5587 #define BFA_DIAG_QTEST_TOV	1000    /* msec */
5588 
5589 /*
5590  *	Set port status to busy
5591  */
5592 static void
5593 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5594 {
5595 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5596 
5597 	if (fcdiag->lb.lock)
5598 		fcport->diag_busy = BFA_TRUE;
5599 	else
5600 		fcport->diag_busy = BFA_FALSE;
5601 }
5602 
5603 void
5604 bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5605 		struct bfa_pcidev_s *pcidev)
5606 {
5607 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5608 	struct bfa_dport_s  *dport = &fcdiag->dport;
5609 
5610 	fcdiag->bfa             = bfa;
5611 	fcdiag->trcmod  = bfa->trcmod;
5612 	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
5613 	dport->bfa = bfa;
5614 	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
5615 	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
5616 	dport->cbfn = NULL;
5617 	dport->cbarg = NULL;
5618 	dport->test_state = BFA_DPORT_ST_DISABLED;
5619 	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
5620 }
5621 
5622 void
5623 bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5624 {
5625 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5626 	struct bfa_dport_s *dport = &fcdiag->dport;
5627 
5628 	bfa_trc(fcdiag, fcdiag->lb.lock);
5629 	if (fcdiag->lb.lock) {
5630 		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5631 		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5632 		fcdiag->lb.lock = 0;
5633 		bfa_fcdiag_set_busy_status(fcdiag);
5634 	}
5635 
5636 	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
5637 }
5638 
5639 static void
5640 bfa_fcdiag_queuetest_timeout(void *cbarg)
5641 {
5642 	struct bfa_fcdiag_s       *fcdiag = cbarg;
5643 	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5644 
5645 	bfa_trc(fcdiag, fcdiag->qtest.all);
5646 	bfa_trc(fcdiag, fcdiag->qtest.count);
5647 
5648 	fcdiag->qtest.timer_active = 0;
5649 
5650 	res->status = BFA_STATUS_ETIMER;
5651 	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5652 	if (fcdiag->qtest.all)
5653 		res->queue  = fcdiag->qtest.all;
5654 
5655 	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5656 	fcdiag->qtest.status = BFA_STATUS_ETIMER;
5657 	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5658 	fcdiag->qtest.lock = 0;
5659 }
5660 
5661 static bfa_status_t
5662 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5663 {
5664 	u32	i;
5665 	struct bfi_diag_qtest_req_s *req;
5666 
5667 	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5668 	if (!req)
5669 		return BFA_STATUS_DEVBUSY;
5670 
5671 	/* build host command */
5672 	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5673 		bfa_fn_lpu(fcdiag->bfa));
5674 
5675 	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5676 		req->data[i] = QTEST_PAT_DEFAULT;
5677 
5678 	bfa_trc(fcdiag, fcdiag->qtest.queue);
5679 	/* ring door bell */
5680 	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5681 	return BFA_STATUS_OK;
5682 }
5683 
/*
 * Completion handler for one queue-test response from firmware.
 *
 * Verifies the echoed payload, then either resends on the same queue
 * (more iterations left), advances to the next queue (when testing all
 * queues), or finishes: stops the timer, fills in the result and invokes
 * the caller's completion callback, releasing the qtest lock.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active   */
	if (!fcdiag->qtest.timer_active) {
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/*
	 * Check result: firmware is expected to echo the complement of the
	 * pattern sent in bfa_fcdiag_queuetest_send().
	 * NOTE(review): rsp->data words are compared raw, with no byte-order
	 * conversion — assumes the pattern round-trips in CPU order; confirm.
	 */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* More iterations on this queue; a successful send
			 * returns early and waits for the next response. */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* Testing all queues: move on to the next one. */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5741 
5742 static void
5743 bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
5744 			struct bfi_diag_lb_rsp_s *rsp)
5745 {
5746 	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
5747 
5748 	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
5749 	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
5750 	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
5751 	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
5752 	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
5753 	res->status     = rsp->res.status;
5754 	fcdiag->lb.status = rsp->res.status;
5755 	bfa_trc(fcdiag, fcdiag->lb.status);
5756 	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5757 	fcdiag->lb.lock = 0;
5758 	bfa_fcdiag_set_busy_status(fcdiag);
5759 }
5760 
5761 static bfa_status_t
5762 bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5763 			struct bfa_diag_loopback_s *loopback)
5764 {
5765 	struct bfi_diag_lb_req_s *lb_req;
5766 
5767 	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5768 	if (!lb_req)
5769 		return BFA_STATUS_DEVBUSY;
5770 
5771 	/* build host command */
5772 	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5773 		bfa_fn_lpu(fcdiag->bfa));
5774 
5775 	lb_req->lb_mode = loopback->lb_mode;
5776 	lb_req->speed = loopback->speed;
5777 	lb_req->loopcnt = loopback->loopcnt;
5778 	lb_req->pattern = loopback->pattern;
5779 
5780 	/* ring door bell */
5781 	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5782 
5783 	bfa_trc(fcdiag, loopback->lb_mode);
5784 	bfa_trc(fcdiag, loopback->speed);
5785 	bfa_trc(fcdiag, loopback->loopcnt);
5786 	bfa_trc(fcdiag, loopback->pattern);
5787 	return BFA_STATUS_OK;
5788 }
5789 
5790 /*
5791  *	cpe/rme intr handler
5792  */
5793 void
5794 bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5795 {
5796 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5797 
5798 	switch (msg->mhdr.msg_id) {
5799 	case BFI_DIAG_I2H_LOOPBACK:
5800 		bfa_fcdiag_loopback_comp(fcdiag,
5801 				(struct bfi_diag_lb_rsp_s *) msg);
5802 		break;
5803 	case BFI_DIAG_I2H_QTEST:
5804 		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5805 		break;
5806 	case BFI_DIAG_I2H_DPORT:
5807 		bfa_dport_req_comp(&fcdiag->dport,
5808 				(struct bfi_diag_dport_rsp_s *)msg);
5809 		break;
5810 	case BFI_DIAG_I2H_DPORT_SCN:
5811 		bfa_dport_scn(&fcdiag->dport,
5812 				(struct bfi_diag_dport_scn_s *)msg);
5813 		break;
5814 	default:
5815 		bfa_trc(fcdiag, msg->mhdr.msg_id);
5816 		WARN_ON(1);
5817 	}
5818 }
5819 
5820 /*
5821  *	Loopback test
5822  *
5823  *   @param[in] *bfa            - bfa data struct
5824  *   @param[in] opmode          - port operation mode
5825  *   @param[in] speed           - port speed
5826  *   @param[in] lpcnt           - loop count
5827  *   @param[in] pat                     - pattern to build packet
5828  *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
5829  *   @param[in] cbfn            - callback function
5830  *   @param[in] cbarg           - callback functioin arg
5831  *
5832  *   @param[out]
5833  */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct  bfa_diag_loopback_s loopback;
	struct bfa_port_attr_s attr;
	bfa_status_t status;
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(fcdiag, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/* Loopback requires the port to be explicitly disabled first. */
	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
		bfa_trc(fcdiag, opmode);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if input speed is supported by the port mode
	 */
	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
		      speed == BFA_PORT_SPEED_2GBPS ||
		      speed == BFA_PORT_SPEED_4GBPS ||
		      speed == BFA_PORT_SPEED_8GBPS ||
		      speed == BFA_PORT_SPEED_16GBPS ||
		      speed == BFA_PORT_SPEED_AUTO)) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
		/*
		 * NOTE(review): this ordinal comparison relies on the
		 * bfa_port_speed enum ordering, and on BFA_PORT_SPEED_AUTO
		 * never comparing greater than supported speeds — confirm.
		 */
		bfa_fcport_get_attr(bfa, &attr);
		bfa_trc(fcdiag, attr.speed_supported);
		if (speed > attr.speed_supported)
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		/* CNA (non-FC) mode supports 10G only. */
		if (speed != BFA_PORT_SPEED_10GBPS) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/*
	 * For CT2, 1G is not supported
	 */
	if ((speed == BFA_PORT_SPEED_1GBPS) &&
	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
		bfa_trc(fcdiag, speed);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}
	/* check to see if fcport is dport */
	if (bfa_fcport_is_dport(bfa)) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DPORT_ENABLED;
	}
	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->lb.lock) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* All checks passed: take the lock and record the request. */
	fcdiag->lb.lock = 1;
	loopback.lb_mode = opmode;
	loopback.speed = speed;
	loopback.loopcnt = lpcnt;
	loopback.pattern = pat;
	fcdiag->lb.result = result;
	fcdiag->lb.cbfn = cbfn;
	fcdiag->lb.cbarg = cbarg;
	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
	bfa_fcdiag_set_busy_status(fcdiag);

	/* Send msg to fw */
	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
	return status;
}
5933 
5934 /*
5935  *	DIAG queue test command
5936  *
5937  *   @param[in] *bfa            - bfa data struct
5938  *   @param[in] force           - 1: don't do ioc op checking
5939  *   @param[in] queue           - queue no. to test
5940  *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
5941  *   @param[in] cbfn            - callback function
5942  *   @param[in] *cbarg          - callback functioin arg
5943  *
5944  *   @param[out]
5945  */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;
	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count  = 0;

	/*
	 * send: an in-range queue number tests that queue only; anything
	 * out of range means "test all queues", starting from queue 0
	 * (bfa_fcdiag_queuetest_comp() advances through the rest).
	 */
	if (queue < BFI_IOC_MAX_CQS) {
		fcdiag->qtest.result->queue  = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all   = 0;
	} else {
		fcdiag->qtest.result->queue  = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all   = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}
5997 
5998 /*
5999  * DIAG PLB is running
6000  *
6001  *   @param[in] *bfa    - bfa data struct
6002  *
6003  *   @param[out]
6004  */
6005 bfa_status_t
6006 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6007 {
6008 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6009 	return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
6010 }
6011 
6012 /*
6013  *	D-port
6014  */
/*
 * Initialize dport->result at the start of a test run: stamp the start
 * time, mark the test in progress, and snapshot the test mode, the
 * remote port WWNs and the configured loop count.
 */
#define bfa_dport_result_start(__dport, __mode) do {				\
		(__dport)->result.start_time = ktime_get_real_seconds();	\
		(__dport)->result.status = DPORT_TEST_ST_INPRG;			\
		(__dport)->result.mode = (__mode);				\
		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;			\
		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;			\
		(__dport)->result.lpcnt = (__dport)->lpcnt;			\
} while (0)
6023 
6024 static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6025 					enum bfi_dport_req req);
6026 static void
6027 bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6028 {
6029 	if (dport->cbfn != NULL) {
6030 		dport->cbfn(dport->cbarg, bfa_status);
6031 		dport->cbfn = NULL;
6032 		dport->cbarg = NULL;
6033 	}
6034 }
6035 
/*
 * D-port state machine: disabled state.
 *
 * Entered at attach time and whenever the test is torn down.  An ENABLE
 * request, or a dynamic-enable SCN from firmware, moves towards enabled.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		/* No request-queue space means wait for a queue resume. */
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
			/* Firmware dynamically enabled the D-port. */
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6074 
6075 static void
6076 bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
6077 			    enum bfa_dport_sm_event event)
6078 {
6079 	bfa_trc(dport->bfa, event);
6080 
6081 	switch (event) {
6082 	case BFA_DPORT_SM_QRESUME:
6083 		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6084 		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
6085 		break;
6086 
6087 	case BFA_DPORT_SM_HWFAIL:
6088 		bfa_reqq_wcancel(&dport->reqq_wait);
6089 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6090 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6091 		break;
6092 
6093 	default:
6094 		bfa_sm_fault(dport->bfa, event);
6095 	}
6096 }
6097 
/*
 * D-port state machine: enable request is outstanding with firmware.
 */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			/* Stamp start time/params for the auto-mode test. */
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
		}
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_REQFAIL:
		/* Firmware rejected the enable: revert to disabled. */
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_fcport_dportdisable(dport->bfa);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6131 
/*
 * D-port state machine: D-port is enabled.
 *
 * Handles start/disable requests from the host and test-progress or
 * dynamic-disable notifications (SCNs) from firmware.
 */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_START:
		if (bfa_dport_send_req(dport, BFI_DPORT_START))
			bfa_sm_set_state(dport, bfa_dport_sm_starting);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_SCN:
		/* Firmware notification: track test progress. */
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_TESTCOMP:
			dport->test_state = BFA_DPORT_ST_COMP;
			break;

		case BFI_DPORT_SCN_TESTSTART:
			dport->test_state = BFA_DPORT_ST_INP;
			break;

		case BFI_DPORT_SCN_TESTSKIP:
		case BFI_DPORT_SCN_SUBTESTSTART:
			/* no state change */
			break;

		case BFI_DPORT_SCN_SFP_REMOVED:
			dport->test_state = BFA_DPORT_ST_NO_SFP;
			break;

		case BFI_DPORT_SCN_DDPORT_DISABLE:
			/* Firmware asks for a dynamic disable. */
			bfa_fcport_ddportdisable(dport->bfa);

			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling);
			else
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling_qwait);
			break;

		case BFI_DPORT_SCN_FCPORT_DISABLE:
			bfa_fcport_ddportdisable(dport->bfa);

			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;
	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6203 
6204 static void
6205 bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
6206 			     enum bfa_dport_sm_event event)
6207 {
6208 	bfa_trc(dport->bfa, event);
6209 
6210 	switch (event) {
6211 	case BFA_DPORT_SM_QRESUME:
6212 		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6213 		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
6214 		break;
6215 
6216 	case BFA_DPORT_SM_HWFAIL:
6217 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6218 		bfa_reqq_wcancel(&dport->reqq_wait);
6219 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6220 		break;
6221 
6222 	case BFA_DPORT_SM_SCN:
6223 		/* ignore */
6224 		break;
6225 
6226 	default:
6227 		bfa_sm_fault(dport->bfa, event);
6228 	}
6229 }
6230 
6231 static void
6232 bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6233 {
6234 	bfa_trc(dport->bfa, event);
6235 
6236 	switch (event) {
6237 	case BFA_DPORT_SM_FWRSP:
6238 		dport->test_state = BFA_DPORT_ST_DISABLED;
6239 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6240 		break;
6241 
6242 	case BFA_DPORT_SM_HWFAIL:
6243 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6244 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6245 		break;
6246 
6247 	case BFA_DPORT_SM_SCN:
6248 		/* no state change */
6249 		break;
6250 
6251 	default:
6252 		bfa_sm_fault(dport->bfa, event);
6253 	}
6254 }
6255 
6256 static void
6257 bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
6258 			    enum bfa_dport_sm_event event)
6259 {
6260 	bfa_trc(dport->bfa, event);
6261 
6262 	switch (event) {
6263 	case BFA_DPORT_SM_QRESUME:
6264 		bfa_sm_set_state(dport, bfa_dport_sm_starting);
6265 		bfa_dport_send_req(dport, BFI_DPORT_START);
6266 		break;
6267 
6268 	case BFA_DPORT_SM_HWFAIL:
6269 		bfa_reqq_wcancel(&dport->reqq_wait);
6270 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6271 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6272 		break;
6273 
6274 	default:
6275 		bfa_sm_fault(dport->bfa, event);
6276 	}
6277 }
6278 
/*
 * D-port state machine: manual start request is outstanding.
 *
 * On FWRSP the result is (re)initialized and control deliberately falls
 * through to the REQFAIL handling, since both outcomes return the state
 * machine to enabled.
 */
static void
bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			/* Stamp start time/params for the manual-mode test. */
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
		}
		fallthrough;

	case BFA_DPORT_SM_REQFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6309 
/*
 * D-port state machine: dynamic-disable request is outstanding.
 *
 * Completion arrives as a DDPORT_DISABLED SCN (not as FWRSP), at which
 * point the fcport is re-enabled for normal operation.
 */
static void
bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
			       enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_DDPORT_DISABLED:
			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			bfa_fcport_enable(dport->bfa);
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);

		}
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6341 
6342 static void
6343 bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
6344 			    enum bfa_dport_sm_event event)
6345 {
6346 	bfa_trc(dport->bfa, event);
6347 
6348 	switch (event) {
6349 	case BFA_DPORT_SM_QRESUME:
6350 		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
6351 		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
6352 		break;
6353 
6354 	case BFA_DPORT_SM_HWFAIL:
6355 		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6356 		bfa_reqq_wcancel(&dport->reqq_wait);
6357 		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6358 		break;
6359 
6360 	case BFA_DPORT_SM_SCN:
6361 		/* ignore */
6362 		break;
6363 
6364 	default:
6365 		bfa_sm_fault(dport->bfa, event);
6366 	}
6367 }
6368 
6369 static bfa_boolean_t
6370 bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6371 {
6372 	struct bfi_diag_dport_req_s *m;
6373 
6374 	/*
6375 	 * check for room in queue to send request now
6376 	 */
6377 	m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6378 	if (!m) {
6379 		bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6380 		return BFA_FALSE;
6381 	}
6382 
6383 	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6384 		    bfa_fn_lpu(dport->bfa));
6385 	m->req  = req;
6386 	if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
6387 		m->lpcnt = cpu_to_be32(dport->lpcnt);
6388 		m->payload = cpu_to_be32(dport->payload);
6389 	}
6390 
6391 	/*
6392 	 * queue I/O message to firmware
6393 	 */
6394 	bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6395 
6396 	return BFA_TRUE;
6397 }
6398 
6399 static void
6400 bfa_dport_qresume(void *cbarg)
6401 {
6402 	struct bfa_dport_s *dport = cbarg;
6403 
6404 	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6405 }
6406 
6407 static void
6408 bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
6409 {
6410 	msg->status = cpu_to_be32(msg->status);
6411 	dport->i2hmsg.rsp.status = msg->status;
6412 	dport->rp_pwwn = msg->pwwn;
6413 	dport->rp_nwwn = msg->nwwn;
6414 
6415 	if ((msg->status == BFA_STATUS_OK) ||
6416 	    (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
6417 		bfa_trc(dport->bfa, msg->status);
6418 		bfa_trc(dport->bfa, dport->rp_pwwn);
6419 		bfa_trc(dport->bfa, dport->rp_nwwn);
6420 		bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6421 
6422 	} else {
6423 		bfa_trc(dport->bfa, msg->status);
6424 		bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
6425 	}
6426 	bfa_cb_fcdiag_dport(dport, msg->status);
6427 }
6428 
6429 static bfa_boolean_t
6430 bfa_dport_is_sending_req(struct bfa_dport_s *dport)
6431 {
6432 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)	||
6433 	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6434 	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)	||
6435 	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
6436 	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting)	||
6437 	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
6438 		return BFA_TRUE;
6439 	} else {
6440 		return BFA_FALSE;
6441 	}
6442 }
6443 
6444 static void
6445 bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
6446 {
6447 	int i;
6448 	uint8_t subtesttype;
6449 
6450 	bfa_trc(dport->bfa, msg->state);
6451 	dport->i2hmsg.scn.state = msg->state;
6452 
6453 	switch (dport->i2hmsg.scn.state) {
6454 	case BFI_DPORT_SCN_TESTCOMP:
6455 		dport->result.end_time = ktime_get_real_seconds();
6456 		bfa_trc(dport->bfa, dport->result.end_time);
6457 
6458 		dport->result.status = msg->info.testcomp.status;
6459 		bfa_trc(dport->bfa, dport->result.status);
6460 
6461 		dport->result.roundtrip_latency =
6462 			cpu_to_be32(msg->info.testcomp.latency);
6463 		dport->result.est_cable_distance =
6464 			cpu_to_be32(msg->info.testcomp.distance);
6465 		dport->result.buffer_required =
6466 			be16_to_cpu(msg->info.testcomp.numbuffer);
6467 
6468 		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
6469 		dport->result.speed = msg->info.testcomp.speed;
6470 
6471 		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
6472 		bfa_trc(dport->bfa, dport->result.est_cable_distance);
6473 		bfa_trc(dport->bfa, dport->result.buffer_required);
6474 		bfa_trc(dport->bfa, dport->result.frmsz);
6475 		bfa_trc(dport->bfa, dport->result.speed);
6476 
6477 		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
6478 			dport->result.subtest[i].status =
6479 				msg->info.testcomp.subtest_status[i];
6480 			bfa_trc(dport->bfa, dport->result.subtest[i].status);
6481 		}
6482 		break;
6483 
6484 	case BFI_DPORT_SCN_TESTSKIP:
6485 	case BFI_DPORT_SCN_DDPORT_ENABLE:
6486 		memset(&dport->result, 0,
6487 				sizeof(struct bfa_diag_dport_result_s));
6488 		break;
6489 
6490 	case BFI_DPORT_SCN_TESTSTART:
6491 		memset(&dport->result, 0,
6492 				sizeof(struct bfa_diag_dport_result_s));
6493 		dport->rp_pwwn = msg->info.teststart.pwwn;
6494 		dport->rp_nwwn = msg->info.teststart.nwwn;
6495 		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
6496 		bfa_dport_result_start(dport, msg->info.teststart.mode);
6497 		break;
6498 
6499 	case BFI_DPORT_SCN_SUBTESTSTART:
6500 		subtesttype = msg->info.teststart.type;
6501 		dport->result.subtest[subtesttype].start_time =
6502 			ktime_get_real_seconds();
6503 		dport->result.subtest[subtesttype].status =
6504 			DPORT_TEST_ST_INPRG;
6505 
6506 		bfa_trc(dport->bfa, subtesttype);
6507 		bfa_trc(dport->bfa,
6508 			dport->result.subtest[subtesttype].start_time);
6509 		break;
6510 
6511 	case BFI_DPORT_SCN_SFP_REMOVED:
6512 	case BFI_DPORT_SCN_DDPORT_DISABLED:
6513 	case BFI_DPORT_SCN_DDPORT_DISABLE:
6514 	case BFI_DPORT_SCN_FCPORT_DISABLE:
6515 		dport->result.status = DPORT_TEST_ST_IDLE;
6516 		break;
6517 
6518 	default:
6519 		bfa_sm_fault(dport->bfa, msg->state);
6520 	}
6521 
6522 	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
6523 }
6524 
6525 /*
6526  * Dport enable
6527  *
6528  * @param[in] *bfa            - bfa data struct
6529  */
6530 bfa_status_t
6531 bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6532 				bfa_cb_diag_t cbfn, void *cbarg)
6533 {
6534 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6535 	struct bfa_dport_s  *dport = &fcdiag->dport;
6536 
6537 	/*
6538 	 * Dport is not support in MEZZ card
6539 	 */
6540 	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6541 		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6542 		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6543 	}
6544 
6545 	/*
6546 	 * Dport is supported in CT2 or above
6547 	 */
6548 	if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
6549 		bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
6550 		return BFA_STATUS_FEATURE_NOT_SUPPORTED;
6551 	}
6552 
6553 	/*
6554 	 * Check to see if IOC is down
6555 	*/
6556 	if (!bfa_iocfc_is_operational(bfa))
6557 		return BFA_STATUS_IOC_NON_OP;
6558 
6559 	/* if port is PBC disabled, return error */
6560 	if (bfa_fcport_is_pbcdisabled(bfa)) {
6561 		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6562 		return BFA_STATUS_PBC;
6563 	}
6564 
6565 	/*
6566 	 * Check if port mode is FC port
6567 	 */
6568 	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6569 		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6570 		return BFA_STATUS_CMD_NOTSUPP_CNA;
6571 	}
6572 
6573 	/*
6574 	 * Check if port is in LOOP mode
6575 	 */
6576 	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6577 	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6578 		bfa_trc(dport->bfa, 0);
6579 		return BFA_STATUS_TOPOLOGY_LOOP;
6580 	}
6581 
6582 	/*
6583 	 * Check if port is TRUNK mode
6584 	 */
6585 	if (bfa_fcport_is_trunk_enabled(bfa)) {
6586 		bfa_trc(dport->bfa, 0);
6587 		return BFA_STATUS_ERROR_TRUNK_ENABLED;
6588 	}
6589 
6590 	/*
6591 	 * Check if diag loopback is running
6592 	 */
6593 	if (bfa_fcdiag_lb_is_running(bfa)) {
6594 		bfa_trc(dport->bfa, 0);
6595 		return BFA_STATUS_DIAG_BUSY;
6596 	}
6597 
6598 	/*
6599 	 * Check to see if port is disable or in dport state
6600 	 */
6601 	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6602 	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6603 		bfa_trc(dport->bfa, 0);
6604 		return BFA_STATUS_PORT_NOT_DISABLED;
6605 	}
6606 
6607 	/*
6608 	 * Check if dport is in dynamic mode
6609 	 */
6610 	if (dport->dynamic)
6611 		return BFA_STATUS_DDPORT_ERR;
6612 
6613 	/*
6614 	 * Check if dport is busy
6615 	 */
6616 	if (bfa_dport_is_sending_req(dport))
6617 		return BFA_STATUS_DEVBUSY;
6618 
6619 	/*
6620 	 * Check if dport is already enabled
6621 	 */
6622 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6623 		bfa_trc(dport->bfa, 0);
6624 		return BFA_STATUS_DPORT_ENABLED;
6625 	}
6626 
6627 	bfa_trc(dport->bfa, lpcnt);
6628 	bfa_trc(dport->bfa, pat);
6629 	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6630 	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6631 	dport->cbfn = cbfn;
6632 	dport->cbarg = cbarg;
6633 
6634 	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6635 	return BFA_STATUS_OK;
6636 }
6637 
6638 /*
6639  *	Dport disable
6640  *
6641  *	@param[in] *bfa            - bfa data struct
6642  */
6643 bfa_status_t
6644 bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6645 {
6646 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6647 	struct bfa_dport_s *dport = &fcdiag->dport;
6648 
6649 	if (bfa_ioc_is_disabled(&bfa->ioc))
6650 		return BFA_STATUS_IOC_DISABLED;
6651 
6652 	/* if port is PBC disabled, return error */
6653 	if (bfa_fcport_is_pbcdisabled(bfa)) {
6654 		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6655 		return BFA_STATUS_PBC;
6656 	}
6657 
6658 	/*
6659 	 * Check if dport is in dynamic mode
6660 	 */
6661 	if (dport->dynamic) {
6662 		return BFA_STATUS_DDPORT_ERR;
6663 	}
6664 
6665 	/*
6666 	 * Check to see if port is disable or in dport state
6667 	 */
6668 	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6669 	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6670 		bfa_trc(dport->bfa, 0);
6671 		return BFA_STATUS_PORT_NOT_DISABLED;
6672 	}
6673 
6674 	/*
6675 	 * Check if dport is busy
6676 	 */
6677 	if (bfa_dport_is_sending_req(dport))
6678 		return BFA_STATUS_DEVBUSY;
6679 
6680 	/*
6681 	 * Check if dport is already disabled
6682 	 */
6683 	if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6684 		bfa_trc(dport->bfa, 0);
6685 		return BFA_STATUS_DPORT_DISABLED;
6686 	}
6687 
6688 	dport->cbfn = cbfn;
6689 	dport->cbarg = cbarg;
6690 
6691 	bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6692 	return BFA_STATUS_OK;
6693 }
6694 
6695 /*
6696  * Dport start -- restart dport test
6697  *
6698  *   @param[in] *bfa		- bfa data struct
6699  */
6700 bfa_status_t
6701 bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6702 			bfa_cb_diag_t cbfn, void *cbarg)
6703 {
6704 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6705 	struct bfa_dport_s *dport = &fcdiag->dport;
6706 
6707 	/*
6708 	 * Check to see if IOC is down
6709 	 */
6710 	if (!bfa_iocfc_is_operational(bfa))
6711 		return BFA_STATUS_IOC_NON_OP;
6712 
6713 	/*
6714 	 * Check if dport is in dynamic mode
6715 	 */
6716 	if (dport->dynamic)
6717 		return BFA_STATUS_DDPORT_ERR;
6718 
6719 	/*
6720 	 * Check if dport is busy
6721 	 */
6722 	if (bfa_dport_is_sending_req(dport))
6723 		return BFA_STATUS_DEVBUSY;
6724 
6725 	/*
6726 	 * Check if dport is in enabled state.
6727 	 * Test can only be restart when previous test has completed
6728 	 */
6729 	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6730 		bfa_trc(dport->bfa, 0);
6731 		return BFA_STATUS_DPORT_DISABLED;
6732 
6733 	} else {
6734 		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6735 			return BFA_STATUS_DPORT_INV_SFP;
6736 
6737 		if (dport->test_state == BFA_DPORT_ST_INP)
6738 			return BFA_STATUS_DEVBUSY;
6739 
6740 		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
6741 	}
6742 
6743 	bfa_trc(dport->bfa, lpcnt);
6744 	bfa_trc(dport->bfa, pat);
6745 
6746 	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6747 	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6748 
6749 	dport->cbfn = cbfn;
6750 	dport->cbarg = cbarg;
6751 
6752 	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
6753 	return BFA_STATUS_OK;
6754 }
6755 
6756 /*
6757  * Dport show -- return dport test result
6758  *
6759  *   @param[in] *bfa		- bfa data struct
6760  */
6761 bfa_status_t
6762 bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
6763 {
6764 	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6765 	struct bfa_dport_s *dport = &fcdiag->dport;
6766 
6767 	/*
6768 	 * Check to see if IOC is down
6769 	 */
6770 	if (!bfa_iocfc_is_operational(bfa))
6771 		return BFA_STATUS_IOC_NON_OP;
6772 
6773 	/*
6774 	 * Check if dport is busy
6775 	 */
6776 	if (bfa_dport_is_sending_req(dport))
6777 		return BFA_STATUS_DEVBUSY;
6778 
6779 	/*
6780 	 * Check if dport is in enabled state.
6781 	 */
6782 	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6783 		bfa_trc(dport->bfa, 0);
6784 		return BFA_STATUS_DPORT_DISABLED;
6785 
6786 	}
6787 
6788 	/*
6789 	 * Check if there is SFP
6790 	 */
6791 	if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6792 		return BFA_STATUS_DPORT_INV_SFP;
6793 
6794 	memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
6795 
6796 	return BFA_STATUS_OK;
6797 }
6798