xref: /linux/drivers/scsi/bfa/bfad_bsg.c (revision 497e6b37b0099dc415578488287fd84fb74433eb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
4  * Copyright (c) 2014- QLogic Corporation.
5  * All rights reserved
6  * www.qlogic.com
7  *
8  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
9  */
10 
11 #include <linux/uaccess.h>
12 #include "bfad_drv.h"
13 #include "bfad_im.h"
14 #include "bfad_bsg.h"
15 
16 BFA_TRC_FILE(LDRV, BSG);
17 
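/*
 * Handler convention: each IOCMD handler below reports the adapter result
 * in iocmd->status and normally returns 0; a negative errno (for example
 * -EBUSY or -EINVAL) is returned only when the request cannot be
 * attempted at all.
 */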
18 static int
19 bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
20 {
21 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
22 	unsigned long	flags;
23 
24 	spin_lock_irqsave(&bfad->bfad_lock, flags);
25 	/* If IOC is not in the disabled state, return */
26 	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
27 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
28 		iocmd->status = BFA_STATUS_OK;
29 		return 0;
30 	}
31 
32 	init_completion(&bfad->enable_comp);
33 	bfa_iocfc_enable(&bfad->bfa);
34 	iocmd->status = BFA_STATUS_OK;
35 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36 	wait_for_completion(&bfad->enable_comp);
37 
38 	return 0;
39 }
40 
41 static int
42 bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
43 {
44 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
45 	unsigned long	flags;
46 
47 	spin_lock_irqsave(&bfad->bfad_lock, flags);
48 	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
49 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
50 		iocmd->status = BFA_STATUS_OK;
51 		return 0;
52 	}
53 
54 	if (bfad->disable_active) {
55 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
56 		return -EBUSY;
57 	}
58 
59 	bfad->disable_active = BFA_TRUE;
60 	init_completion(&bfad->disable_comp);
61 	bfa_iocfc_disable(&bfad->bfa);
62 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
63 
64 	wait_for_completion(&bfad->disable_comp);
65 	bfad->disable_active = BFA_FALSE;
66 	iocmd->status = BFA_STATUS_OK;
67 
68 	return 0;
69 }
70 
71 static int
72 bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
73 {
74 	int	i;
75 	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
76 	struct bfad_im_port_s	*im_port;
77 	struct bfa_port_attr_s	pattr;
78 	unsigned long	flags;
79 
80 	spin_lock_irqsave(&bfad->bfad_lock, flags);
81 	bfa_fcport_get_attr(&bfad->bfa, &pattr);
82 	iocmd->nwwn = pattr.nwwn;
83 	iocmd->pwwn = pattr.pwwn;
84 	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
85 	iocmd->mac = bfa_get_mac(&bfad->bfa);
86 	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
87 	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
88 	iocmd->factorynwwn = pattr.factorynwwn;
89 	iocmd->factorypwwn = pattr.factorypwwn;
90 	iocmd->bfad_num = bfad->inst_no;
91 	im_port = bfad->pport.im_port;
92 	iocmd->host = im_port->shost->host_no;
93 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
94 
95 	strcpy(iocmd->name, bfad->adapter_name);
96 	strcpy(iocmd->port_name, bfad->port_name);
97 	strcpy(iocmd->hwpath, bfad->pci_name);
98 
99 	/* set adapter hw path: truncate the PCI name at the second ':' */
100 	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
101 	for (i = 0; i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; i++)
102 		;
103 	for (; ++i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; )
104 		;
105 	iocmd->adapter_hwpath[i] = '\0';
106 	iocmd->status = BFA_STATUS_OK;
107 	return 0;
108 }
109 
110 static int
111 bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
112 {
113 	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
114 	unsigned long	flags;
115 
116 	spin_lock_irqsave(&bfad->bfad_lock, flags);
117 	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
118 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
119 
120 	/* fill in driver attr info */
121 	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
122 	strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
123 		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
124 	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
125 		iocmd->ioc_attr.adapter_attr.fw_ver);
126 	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
127 		iocmd->ioc_attr.adapter_attr.optrom_ver);
128 
129 	/* copy chip rev info first; otherwise it will be overwritten */
130 	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
131 		sizeof(bfad->pci_attr.chip_rev));
132 	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
133 		sizeof(struct bfa_ioc_pci_attr_s));
134 
135 	iocmd->status = BFA_STATUS_OK;
136 	return 0;
137 }
138 
139 static int
140 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
141 {
142 	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
143 
144 	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
145 	iocmd->status = BFA_STATUS_OK;
146 	return 0;
147 }
148 
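/*
 * Commands with a variable-size response carry the data right after the
 * fixed command header; bfad_chk_iocmd_sz() checks that the bsg payload
 * can hold the header plus that data before the data area (iocmd_bufptr)
 * is written.
 */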
149 static int
150 bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
151 			unsigned int payload_len)
152 {
153 	struct bfa_bsg_ioc_fwstats_s *iocmd =
154 			(struct bfa_bsg_ioc_fwstats_s *)cmd;
155 	void	*iocmd_bufptr;
156 	unsigned long	flags;
157 
158 	if (bfad_chk_iocmd_sz(payload_len,
159 			sizeof(struct bfa_bsg_ioc_fwstats_s),
160 			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
161 		iocmd->status = BFA_STATUS_VERSION_FAIL;
162 		goto out;
163 	}
164 
165 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
166 	spin_lock_irqsave(&bfad->bfad_lock, flags);
167 	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
168 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
169 
170 	if (iocmd->status != BFA_STATUS_OK) {
171 		bfa_trc(bfad, iocmd->status);
172 		goto out;
173 	}
174 out:
175 	bfa_trc(bfad, 0x6666);
176 	return 0;
177 }
178 
179 static int
180 bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
181 {
182 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
183 	unsigned long	flags;
184 
185 	if (v_cmd == IOCMD_IOC_RESET_STATS) {
186 		bfa_ioc_clear_stats(&bfad->bfa);
187 		iocmd->status = BFA_STATUS_OK;
188 	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
189 		spin_lock_irqsave(&bfad->bfad_lock, flags);
190 		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
191 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
192 	}
193 
194 	return 0;
195 }
196 
197 static int
198 bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
199 {
200 	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
201 
202 	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
203 		strcpy(bfad->adapter_name, iocmd->name);
204 	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
205 		strcpy(bfad->port_name, iocmd->name);
206 
207 	iocmd->status = BFA_STATUS_OK;
208 	return 0;
209 }
210 
211 static int
212 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
213 {
214 	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
215 
216 	iocmd->status = BFA_STATUS_OK;
217 	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
218 
219 	return 0;
220 }
221 
222 static int
223 bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
224 {
225 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
226 	unsigned long flags;
227 
228 	spin_lock_irqsave(&bfad->bfad_lock, flags);
229 	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
230 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
231 	return 0;
232 }
233 
234 static int
235 bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
236 {
237 	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
238 	unsigned long	flags;
239 
240 	spin_lock_irqsave(&bfad->bfad_lock, flags);
241 	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
242 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
243 
244 	return 0;
245 }
246 
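/*
 * Completion pattern used by the asynchronous handlers: the request is
 * issued under bfad_lock with bfad_hcb_comp() as the callback, the lock
 * is dropped, and the handler sleeps on fcomp.comp until the firmware
 * responds; the final status is then taken from fcomp.status.
 */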
247 static int
248 bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
249 {
250 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
251 	struct bfad_hal_comp fcomp;
252 	unsigned long flags;
253 
254 	init_completion(&fcomp.comp);
255 	spin_lock_irqsave(&bfad->bfad_lock, flags);
256 	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
257 					bfad_hcb_comp, &fcomp);
258 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
259 	if (iocmd->status != BFA_STATUS_OK) {
260 		bfa_trc(bfad, iocmd->status);
261 		return 0;
262 	}
263 	wait_for_completion(&fcomp.comp);
264 	iocmd->status = fcomp.status;
265 	return 0;
266 }
267 
268 static int
269 bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
270 {
271 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
272 	struct bfad_hal_comp fcomp;
273 	unsigned long flags;
274 
275 	init_completion(&fcomp.comp);
276 	spin_lock_irqsave(&bfad->bfad_lock, flags);
277 	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
278 				bfad_hcb_comp, &fcomp);
279 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
280 
281 	if (iocmd->status != BFA_STATUS_OK) {
282 		bfa_trc(bfad, iocmd->status);
283 		return 0;
284 	}
285 	wait_for_completion(&fcomp.comp);
286 	iocmd->status = fcomp.status;
287 	return 0;
288 }
289 
290 static int
291 bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
292 {
293 	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
294 	struct bfa_lport_attr_s	port_attr;
295 	unsigned long	flags;
296 
297 	spin_lock_irqsave(&bfad->bfad_lock, flags);
298 	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
299 	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
300 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
301 
302 	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
303 		iocmd->attr.pid = port_attr.pid;
304 	else
305 		iocmd->attr.pid = 0;
306 
307 	iocmd->attr.port_type = port_attr.port_type;
308 	iocmd->attr.loopback = port_attr.loopback;
309 	iocmd->attr.authfail = port_attr.authfail;
310 	strlcpy(iocmd->attr.port_symname.symname,
311 		port_attr.port_cfg.sym_name.symname,
312 		sizeof(iocmd->attr.port_symname.symname));
313 
314 	iocmd->status = BFA_STATUS_OK;
315 	return 0;
316 }
317 
318 static int
319 bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
320 			unsigned int payload_len)
321 {
322 	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
323 	struct bfad_hal_comp fcomp;
324 	void	*iocmd_bufptr;
325 	unsigned long	flags;
326 
327 	if (bfad_chk_iocmd_sz(payload_len,
328 			sizeof(struct bfa_bsg_port_stats_s),
329 			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
330 		iocmd->status = BFA_STATUS_VERSION_FAIL;
331 		return 0;
332 	}
333 
334 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
335 
336 	init_completion(&fcomp.comp);
337 	spin_lock_irqsave(&bfad->bfad_lock, flags);
338 	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
339 				iocmd_bufptr, bfad_hcb_comp, &fcomp);
340 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
341 	if (iocmd->status != BFA_STATUS_OK) {
342 		bfa_trc(bfad, iocmd->status);
343 		goto out;
344 	}
345 
346 	wait_for_completion(&fcomp.comp);
347 	iocmd->status = fcomp.status;
348 out:
349 	return 0;
350 }
351 
352 static int
353 bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
354 {
355 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
356 	struct bfad_hal_comp fcomp;
357 	unsigned long	flags;
358 
359 	init_completion(&fcomp.comp);
360 	spin_lock_irqsave(&bfad->bfad_lock, flags);
361 	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
362 					bfad_hcb_comp, &fcomp);
363 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
364 	if (iocmd->status != BFA_STATUS_OK) {
365 		bfa_trc(bfad, iocmd->status);
366 		return 0;
367 	}
368 	wait_for_completion(&fcomp.comp);
369 	iocmd->status = fcomp.status;
370 	return 0;
371 }
372 
373 static int
374 bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
375 {
376 	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
377 	unsigned long	flags;
378 
379 	spin_lock_irqsave(&bfad->bfad_lock, flags);
380 	if (v_cmd == IOCMD_PORT_CFG_TOPO)
381 		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
382 	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
383 		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
384 	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
385 		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
386 	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
387 		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
388 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
389 
390 	return 0;
391 }
392 
393 static int
394 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
395 {
396 	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
397 				(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
398 	unsigned long	flags;
399 
400 	spin_lock_irqsave(&bfad->bfad_lock, flags);
401 	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
402 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
403 
404 	return 0;
405 }
406 
407 static int
408 bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
409 {
410 	struct bfa_bsg_bbcr_enable_s *iocmd =
411 			(struct bfa_bsg_bbcr_enable_s *)pcmd;
412 	unsigned long flags;
413 	int rc;
414 
415 	spin_lock_irqsave(&bfad->bfad_lock, flags);
416 	if (cmd == IOCMD_PORT_BBCR_ENABLE)
417 		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
418 	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
419 		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
420 	else {
421 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
422 		return -EINVAL;
423 	}
424 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
425 
426 	iocmd->status = rc;
427 	return 0;
428 }
429 
430 static int
431 bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
432 {
433 	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
434 	unsigned long flags;
435 
436 	spin_lock_irqsave(&bfad->bfad_lock, flags);
437 	iocmd->status =
438 		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
439 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
440 
441 	return 0;
442 }
443 
444 
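/*
 * The lport/rport handlers look up the FCS port by (vf_id, pwwn) under
 * bfad_lock and fail with BFA_STATUS_UNKNOWN_LWWN (or BFA_STATUS_UNKNOWN_RWWN
 * for a missing remote port) when the lookup misses.
 */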
445 static int
446 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
447 {
448 	struct bfa_fcs_lport_s	*fcs_port;
449 	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
450 	unsigned long	flags;
451 
452 	spin_lock_irqsave(&bfad->bfad_lock, flags);
453 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
454 				iocmd->vf_id, iocmd->pwwn);
455 	if (fcs_port == NULL) {
456 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
457 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
458 		goto out;
459 	}
460 
461 	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
462 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
463 	iocmd->status = BFA_STATUS_OK;
464 out:
465 	return 0;
466 }
467 
468 static int
469 bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
470 {
471 	struct bfa_fcs_lport_s *fcs_port;
472 	struct bfa_bsg_lport_stats_s *iocmd =
473 			(struct bfa_bsg_lport_stats_s *)cmd;
474 	unsigned long	flags;
475 
476 	spin_lock_irqsave(&bfad->bfad_lock, flags);
477 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
478 				iocmd->vf_id, iocmd->pwwn);
479 	if (fcs_port == NULL) {
480 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
481 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
482 		goto out;
483 	}
484 
485 	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
486 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
487 	iocmd->status = BFA_STATUS_OK;
488 out:
489 	return 0;
490 }
491 
492 static int
493 bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
494 {
495 	struct bfa_fcs_lport_s *fcs_port;
496 	struct bfa_bsg_reset_stats_s *iocmd =
497 			(struct bfa_bsg_reset_stats_s *)cmd;
498 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
499 	struct list_head *qe, *qen;
500 	struct bfa_itnim_s *itnim;
501 	unsigned long	flags;
502 
503 	spin_lock_irqsave(&bfad->bfad_lock, flags);
504 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
505 				iocmd->vf_id, iocmd->vpwwn);
506 	if (fcs_port == NULL) {
507 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
508 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
509 		goto out;
510 	}
511 
512 	bfa_fcs_lport_clear_stats(fcs_port);
513 	/* clear IO stats from all active itnims */
514 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
515 		itnim = (struct bfa_itnim_s *) qe;
516 		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
517 			continue;
518 		bfa_itnim_clear_stats(itnim);
519 	}
520 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
521 	iocmd->status = BFA_STATUS_OK;
522 out:
523 	return 0;
524 }
525 
526 static int
527 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
528 {
529 	struct bfa_fcs_lport_s *fcs_port;
530 	struct bfa_bsg_lport_iostats_s *iocmd =
531 			(struct bfa_bsg_lport_iostats_s *)cmd;
532 	unsigned long	flags;
533 
534 	spin_lock_irqsave(&bfad->bfad_lock, flags);
535 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
536 				iocmd->vf_id, iocmd->pwwn);
537 	if (fcs_port == NULL) {
538 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
539 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
540 		goto out;
541 	}
542 
543 	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
544 			fcs_port->lp_tag);
545 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
546 	iocmd->status = BFA_STATUS_OK;
547 out:
548 	return 0;
549 }
550 
551 static int
552 bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
553 			unsigned int payload_len)
554 {
555 	struct bfa_bsg_lport_get_rports_s *iocmd =
556 			(struct bfa_bsg_lport_get_rports_s *)cmd;
557 	struct bfa_fcs_lport_s *fcs_port;
558 	unsigned long	flags;
559 	void	*iocmd_bufptr;
560 
561 	if (iocmd->nrports == 0)
562 		return -EINVAL;
563 
564 	if (bfad_chk_iocmd_sz(payload_len,
565 			sizeof(struct bfa_bsg_lport_get_rports_s),
566 			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
567 			!= BFA_STATUS_OK) {
568 		iocmd->status = BFA_STATUS_VERSION_FAIL;
569 		return 0;
570 	}
571 
572 	iocmd_bufptr = (char *)iocmd +
573 			sizeof(struct bfa_bsg_lport_get_rports_s);
574 	spin_lock_irqsave(&bfad->bfad_lock, flags);
575 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
576 				iocmd->vf_id, iocmd->pwwn);
577 	if (fcs_port == NULL) {
578 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
579 		bfa_trc(bfad, 0);
580 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
581 		goto out;
582 	}
583 
584 	bfa_fcs_lport_get_rport_quals(fcs_port,
585 			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
586 			&iocmd->nrports);
587 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
588 	iocmd->status = BFA_STATUS_OK;
589 out:
590 	return 0;
591 }
592 
593 static int
594 bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
595 {
596 	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
597 	struct bfa_fcs_lport_s *fcs_port;
598 	struct bfa_fcs_rport_s *fcs_rport;
599 	unsigned long	flags;
600 
601 	spin_lock_irqsave(&bfad->bfad_lock, flags);
602 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
603 				iocmd->vf_id, iocmd->pwwn);
604 	if (fcs_port == NULL) {
605 		bfa_trc(bfad, 0);
606 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
607 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
608 		goto out;
609 	}
610 
611 	if (iocmd->pid)
612 		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
613 						iocmd->rpwwn, iocmd->pid);
614 	else
615 		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
616 	if (fcs_rport == NULL) {
617 		bfa_trc(bfad, 0);
618 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
619 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
620 		goto out;
621 	}
622 
623 	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
624 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
625 	iocmd->status = BFA_STATUS_OK;
626 out:
627 	return 0;
628 }
629 
630 static int
631 bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
632 {
633 	struct bfa_bsg_rport_scsi_addr_s *iocmd =
634 			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
635 	struct bfa_fcs_lport_s	*fcs_port;
636 	struct bfa_fcs_itnim_s	*fcs_itnim;
637 	struct bfad_itnim_s	*drv_itnim;
638 	unsigned long	flags;
639 
640 	spin_lock_irqsave(&bfad->bfad_lock, flags);
641 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
642 				iocmd->vf_id, iocmd->pwwn);
643 	if (fcs_port == NULL) {
644 		bfa_trc(bfad, 0);
645 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
646 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
647 		goto out;
648 	}
649 
650 	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
651 	if (fcs_itnim == NULL) {
652 		bfa_trc(bfad, 0);
653 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
654 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
655 		goto out;
656 	}
657 
658 	drv_itnim = fcs_itnim->itnim_drv;
659 
660 	if (drv_itnim && drv_itnim->im_port)
661 		iocmd->host = drv_itnim->im_port->shost->host_no;
662 	else {
663 		bfa_trc(bfad, 0);
664 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
665 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
666 		goto out;
667 	}
668 
669 	iocmd->target = drv_itnim->scsi_tgt_id;
670 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
671 
672 	iocmd->bus = 0;
673 	iocmd->lun = 0;
674 	iocmd->status = BFA_STATUS_OK;
675 out:
676 	return 0;
677 }
678 
679 static int
680 bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
681 {
682 	struct bfa_bsg_rport_stats_s *iocmd =
683 			(struct bfa_bsg_rport_stats_s *)cmd;
684 	struct bfa_fcs_lport_s *fcs_port;
685 	struct bfa_fcs_rport_s *fcs_rport;
686 	unsigned long	flags;
687 
688 	spin_lock_irqsave(&bfad->bfad_lock, flags);
689 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
690 				iocmd->vf_id, iocmd->pwwn);
691 	if (fcs_port == NULL) {
692 		bfa_trc(bfad, 0);
693 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
694 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
695 		goto out;
696 	}
697 
698 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
699 	if (fcs_rport == NULL) {
700 		bfa_trc(bfad, 0);
701 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
702 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
703 		goto out;
704 	}
705 
706 	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
707 		sizeof(struct bfa_rport_stats_s));
708 	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
709 		memcpy((void *)&iocmd->stats.hal_stats,
710 		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
711 			sizeof(struct bfa_rport_hal_stats_s));
712 	}
713 
714 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
715 	iocmd->status = BFA_STATUS_OK;
716 out:
717 	return 0;
718 }
719 
720 static int
721 bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
722 {
723 	struct bfa_bsg_rport_reset_stats_s *iocmd =
724 				(struct bfa_bsg_rport_reset_stats_s *)cmd;
725 	struct bfa_fcs_lport_s *fcs_port;
726 	struct bfa_fcs_rport_s *fcs_rport;
727 	struct bfa_rport_s *rport;
728 	unsigned long	flags;
729 
730 	spin_lock_irqsave(&bfad->bfad_lock, flags);
731 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
732 				iocmd->vf_id, iocmd->pwwn);
733 	if (fcs_port == NULL) {
734 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
735 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
736 		goto out;
737 	}
738 
739 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
740 	if (fcs_rport == NULL) {
741 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
742 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
743 		goto out;
744 	}
745 
746 	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
747 	rport = bfa_fcs_rport_get_halrport(fcs_rport);
748 	if (rport)
749 		memset(&rport->stats, 0, sizeof(rport->stats));
750 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
751 	iocmd->status = BFA_STATUS_OK;
752 out:
753 	return 0;
754 }
755 
756 static int
757 bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
758 {
759 	struct bfa_bsg_rport_set_speed_s *iocmd =
760 				(struct bfa_bsg_rport_set_speed_s *)cmd;
761 	struct bfa_fcs_lport_s *fcs_port;
762 	struct bfa_fcs_rport_s *fcs_rport;
763 	unsigned long	flags;
764 
765 	spin_lock_irqsave(&bfad->bfad_lock, flags);
766 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
767 				iocmd->vf_id, iocmd->pwwn);
768 	if (fcs_port == NULL) {
769 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
770 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
771 		goto out;
772 	}
773 
774 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
775 	if (fcs_rport == NULL) {
776 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
777 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
778 		goto out;
779 	}
780 
781 	fcs_rport->rpf.assigned_speed  = iocmd->speed;
782 	/* Set this speed in f/w only if the RPSC speed is not available */
783 	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
784 		if (fcs_rport->bfa_rport)
785 			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
786 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
787 	iocmd->status = BFA_STATUS_OK;
788 out:
789 	return 0;
790 }
791 
792 static int
793 bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
794 {
795 	struct bfa_fcs_vport_s *fcs_vport;
796 	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
797 	unsigned long	flags;
798 
799 	spin_lock_irqsave(&bfad->bfad_lock, flags);
800 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
801 				iocmd->vf_id, iocmd->vpwwn);
802 	if (fcs_vport == NULL) {
803 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
804 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
805 		goto out;
806 	}
807 
808 	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
809 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
810 	iocmd->status = BFA_STATUS_OK;
811 out:
812 	return 0;
813 }
814 
815 static int
816 bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
817 {
818 	struct bfa_fcs_vport_s *fcs_vport;
819 	struct bfa_bsg_vport_stats_s *iocmd =
820 				(struct bfa_bsg_vport_stats_s *)cmd;
821 	unsigned long	flags;
822 
823 	spin_lock_irqsave(&bfad->bfad_lock, flags);
824 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
825 				iocmd->vf_id, iocmd->vpwwn);
826 	if (fcs_vport == NULL) {
827 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
828 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
829 		goto out;
830 	}
831 
832 	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
833 		sizeof(struct bfa_vport_stats_s));
834 	memcpy((void *)&iocmd->vport_stats.port_stats,
835 	       (void *)&fcs_vport->lport.stats,
836 		sizeof(struct bfa_lport_stats_s));
837 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
838 	iocmd->status = BFA_STATUS_OK;
839 out:
840 	return 0;
841 }
842 
843 static int
844 bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
845 {
846 	struct bfa_fcs_vport_s *fcs_vport;
847 	struct bfa_bsg_reset_stats_s *iocmd =
848 				(struct bfa_bsg_reset_stats_s *)cmd;
849 	unsigned long	flags;
850 
851 	spin_lock_irqsave(&bfad->bfad_lock, flags);
852 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
853 				iocmd->vf_id, iocmd->vpwwn);
854 	if (fcs_vport == NULL) {
855 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
856 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
857 		goto out;
858 	}
859 
860 	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
861 	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
862 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
863 	iocmd->status = BFA_STATUS_OK;
864 out:
865 	return 0;
866 }
867 
868 static int
869 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
870 			unsigned int payload_len)
871 {
872 	struct bfa_bsg_fabric_get_lports_s *iocmd =
873 			(struct bfa_bsg_fabric_get_lports_s *)cmd;
874 	bfa_fcs_vf_t	*fcs_vf;
875 	uint32_t	nports = iocmd->nports;
876 	unsigned long	flags;
877 	void	*iocmd_bufptr;
878 
879 	if (nports == 0) {
880 		iocmd->status = BFA_STATUS_EINVAL;
881 		goto out;
882 	}
883 
884 	if (bfad_chk_iocmd_sz(payload_len,
885 		sizeof(struct bfa_bsg_fabric_get_lports_s),
886 		sizeof(wwn_t) * iocmd->nports) != BFA_STATUS_OK) {
887 		iocmd->status = BFA_STATUS_VERSION_FAIL;
888 		goto out;
889 	}
890 
891 	iocmd_bufptr = (char *)iocmd +
892 			sizeof(struct bfa_bsg_fabric_get_lports_s);
893 
894 	spin_lock_irqsave(&bfad->bfad_lock, flags);
895 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
896 	if (fcs_vf == NULL) {
897 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
898 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
899 		goto out;
900 	}
901 	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
902 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
903 
904 	iocmd->nports = nports;
905 	iocmd->status = BFA_STATUS_OK;
906 out:
907 	return 0;
908 }
909 
910 static int
911 bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
912 {
913 	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
914 	unsigned long	flags;
915 
916 	spin_lock_irqsave(&bfad->bfad_lock, flags);
917 	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
918 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
919 
920 	return 0;
921 }
922 
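/*
 * Rate limit enable/disable and the default rate limit speed are rejected
 * with BFA_STATUS_TOPOLOGY_LOOP when the port is configured for and running
 * in loop topology; enabling rate limiting without a configured default
 * speed falls back to 1 Gbps.
 */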
923 static int
924 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
925 {
926 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
927 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
928 	unsigned long	flags;
929 
930 	spin_lock_irqsave(&bfad->bfad_lock, flags);
931 
932 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
933 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
934 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
935 	else {
936 		if (cmd == IOCMD_RATELIM_ENABLE)
937 			fcport->cfg.ratelimit = BFA_TRUE;
938 		else if (cmd == IOCMD_RATELIM_DISABLE)
939 			fcport->cfg.ratelimit = BFA_FALSE;
940 
941 		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
942 			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
943 
944 		iocmd->status = BFA_STATUS_OK;
945 	}
946 
947 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
948 
949 	return 0;
950 }
951 
952 static int
953 bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
954 {
955 	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
956 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
957 	unsigned long	flags;
958 
959 	spin_lock_irqsave(&bfad->bfad_lock, flags);
960 
961 	/* Auto and speeds greater than the supported speed are invalid */
962 	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
963 	    (iocmd->speed > fcport->speed_sup)) {
964 		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
965 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
966 		return 0;
967 	}
968 
969 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
970 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
971 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
972 	else {
973 		fcport->cfg.trl_def_speed = iocmd->speed;
974 		iocmd->status = BFA_STATUS_OK;
975 	}
976 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
977 
978 	return 0;
979 }
980 
981 static int
982 bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
983 {
984 	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
985 	unsigned long	flags;
986 
987 	spin_lock_irqsave(&bfad->bfad_lock, flags);
988 	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
989 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
990 	iocmd->status = BFA_STATUS_OK;
991 	return 0;
992 }
993 
994 static int
995 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
996 {
997 	struct bfa_bsg_fcpim_modstats_s *iocmd =
998 			(struct bfa_bsg_fcpim_modstats_s *)cmd;
999 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1000 	struct list_head *qe, *qen;
1001 	struct bfa_itnim_s *itnim;
1002 	unsigned long	flags;
1003 
1004 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1005 	/* accumulate IO stats from itnim */
1006 	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
1007 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1008 		itnim = (struct bfa_itnim_s *) qe;
1009 		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
1010 	}
1011 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1012 	iocmd->status = BFA_STATUS_OK;
1013 	return 0;
1014 }
1015 
1016 static int
1017 bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
1018 {
1019 	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
1020 				(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
1021 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1022 	struct list_head *qe, *qen;
1023 	struct bfa_itnim_s *itnim;
1024 	unsigned long	flags;
1025 
1026 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1027 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1028 		itnim = (struct bfa_itnim_s *) qe;
1029 		bfa_itnim_clear_stats(itnim);
1030 	}
1031 	memset(&fcpim->del_itn_stats, 0,
1032 		sizeof(struct bfa_fcpim_del_itn_stats_s));
1033 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1034 	iocmd->status = BFA_STATUS_OK;
1035 	return 0;
1036 }
1037 
1038 static int
1039 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
1040 {
1041 	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
1042 			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
1043 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1044 	unsigned long	flags;
1045 
1046 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1047 	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
1048 		sizeof(struct bfa_fcpim_del_itn_stats_s));
1049 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1050 
1051 	iocmd->status = BFA_STATUS_OK;
1052 	return 0;
1053 }
1054 
1055 static int
1056 bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
1057 {
1058 	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
1059 	struct bfa_fcs_lport_s	*fcs_port;
1060 	unsigned long	flags;
1061 
1062 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1063 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1064 				iocmd->vf_id, iocmd->lpwwn);
1065 	if (!fcs_port)
1066 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1067 	else
1068 		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
1069 					iocmd->rpwwn, &iocmd->attr);
1070 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1071 	return 0;
1072 }
1073 
1074 static int
1075 bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
1076 {
1077 	struct bfa_bsg_itnim_iostats_s *iocmd =
1078 			(struct bfa_bsg_itnim_iostats_s *)cmd;
1079 	struct bfa_fcs_lport_s *fcs_port;
1080 	struct bfa_fcs_itnim_s *itnim;
1081 	unsigned long	flags;
1082 
1083 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1084 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1085 				iocmd->vf_id, iocmd->lpwwn);
1086 	if (!fcs_port) {
1087 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1088 		bfa_trc(bfad, 0);
1089 	} else {
1090 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1091 		if (itnim == NULL)
1092 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1093 		else {
1094 			iocmd->status = BFA_STATUS_OK;
1095 			if (bfa_fcs_itnim_get_halitn(itnim))
1096 				memcpy((void *)&iocmd->iostats, (void *)
1097 				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
1098 				       sizeof(struct bfa_itnim_iostats_s));
1099 		}
1100 	}
1101 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1102 	return 0;
1103 }
1104 
1105 static int
1106 bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1107 {
1108 	struct bfa_bsg_rport_reset_stats_s *iocmd =
1109 			(struct bfa_bsg_rport_reset_stats_s *)cmd;
1110 	struct bfa_fcs_lport_s	*fcs_port;
1111 	struct bfa_fcs_itnim_s	*itnim;
1112 	unsigned long	flags;
1113 
1114 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1115 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1116 				iocmd->vf_id, iocmd->pwwn);
1117 	if (!fcs_port)
1118 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1119 	else {
1120 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1121 		if (itnim == NULL)
1122 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1123 		else {
1124 			iocmd->status = BFA_STATUS_OK;
1125 			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1126 			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
1127 		}
1128 	}
1129 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1130 
1131 	return 0;
1132 }
1133 
1134 static int
1135 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
1136 {
1137 	struct bfa_bsg_itnim_itnstats_s *iocmd =
1138 			(struct bfa_bsg_itnim_itnstats_s *)cmd;
1139 	struct bfa_fcs_lport_s *fcs_port;
1140 	struct bfa_fcs_itnim_s *itnim;
1141 	unsigned long	flags;
1142 
1143 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1144 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1145 				iocmd->vf_id, iocmd->lpwwn);
1146 	if (!fcs_port) {
1147 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1148 		bfa_trc(bfad, 0);
1149 	} else {
1150 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1151 		if (itnim == NULL)
1152 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1153 		else {
1154 			iocmd->status = BFA_STATUS_OK;
1155 			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
1156 					&iocmd->itnstats);
1157 		}
1158 	}
1159 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1160 	return 0;
1161 }
1162 
1163 static int
1164 bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
1165 {
1166 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1167 	unsigned long flags;
1168 
1169 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1170 	iocmd->status = bfa_fcport_enable(&bfad->bfa);
1171 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1172 
1173 	return 0;
1174 }
1175 
1176 static int
1177 bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
1178 {
1179 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1180 	unsigned long flags;
1181 
1182 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1183 	iocmd->status = bfa_fcport_disable(&bfad->bfa);
1184 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1185 
1186 	return 0;
1187 }
1188 
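/*
 * The ablk handlers below query and reconfigure PCI functions and the
 * adapter/port operating mode through firmware, using the same
 * issue-then-wait completion pattern.
 */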
1189 static int
1190 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
1191 {
1192 	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
1193 	struct bfad_hal_comp fcomp;
1194 	unsigned long flags;
1195 
1196 	init_completion(&fcomp.comp);
1197 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1198 	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
1199 				&iocmd->pcifn_cfg,
1200 				bfad_hcb_comp, &fcomp);
1201 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1202 	if (iocmd->status != BFA_STATUS_OK)
1203 		goto out;
1204 
1205 	wait_for_completion(&fcomp.comp);
1206 	iocmd->status = fcomp.status;
1207 out:
1208 	return 0;
1209 }
1210 
1211 static int
1212 bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1213 {
1214 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1215 	struct bfad_hal_comp fcomp;
1216 	unsigned long flags;
1217 
1218 	init_completion(&fcomp.comp);
1219 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1220 	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1221 				&iocmd->pcifn_id, iocmd->port,
1222 				iocmd->pcifn_class, iocmd->bw_min,
1223 				iocmd->bw_max, bfad_hcb_comp, &fcomp);
1224 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1225 	if (iocmd->status != BFA_STATUS_OK)
1226 		goto out;
1227 
1228 	wait_for_completion(&fcomp.comp);
1229 	iocmd->status = fcomp.status;
1230 out:
1231 	return 0;
1232 }
1233 
1234 static int
1235 bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
1236 {
1237 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1238 	struct bfad_hal_comp fcomp;
1239 	unsigned long flags;
1240 
1241 	init_completion(&fcomp.comp);
1242 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1243 	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
1244 				iocmd->pcifn_id,
1245 				bfad_hcb_comp, &fcomp);
1246 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1247 	if (iocmd->status != BFA_STATUS_OK)
1248 		goto out;
1249 
1250 	wait_for_completion(&fcomp.comp);
1251 	iocmd->status = fcomp.status;
1252 out:
1253 	return 0;
1254 }
1255 
1256 static int
1257 bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1258 {
1259 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1260 	struct bfad_hal_comp fcomp;
1261 	unsigned long flags;
1262 
1263 	init_completion(&fcomp.comp);
1264 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1265 	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1266 				iocmd->pcifn_id, iocmd->bw_min,
1267 				iocmd->bw_max, bfad_hcb_comp, &fcomp);
1268 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1269 	bfa_trc(bfad, iocmd->status);
1270 	if (iocmd->status != BFA_STATUS_OK)
1271 		goto out;
1272 
1273 	wait_for_completion(&fcomp.comp);
1274 	iocmd->status = fcomp.status;
1275 	bfa_trc(bfad, iocmd->status);
1276 out:
1277 	return 0;
1278 }
1279 
1280 static int
1281 bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
1282 {
1283 	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
1284 			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
1285 	struct bfad_hal_comp fcomp;
1286 	unsigned long flags = 0;
1287 
1288 	init_completion(&fcomp.comp);
1289 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1290 	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
1291 				iocmd->cfg.mode, iocmd->cfg.max_pf,
1292 				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
1293 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1294 	if (iocmd->status != BFA_STATUS_OK)
1295 		goto out;
1296 
1297 	wait_for_completion(&fcomp.comp);
1298 	iocmd->status = fcomp.status;
1299 out:
1300 	return 0;
1301 }
1302 
1303 static int
1304 bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
1305 {
1306 	struct bfa_bsg_port_cfg_mode_s *iocmd =
1307 			(struct bfa_bsg_port_cfg_mode_s *)cmd;
1308 	struct bfad_hal_comp fcomp;
1309 	unsigned long flags = 0;
1310 
1311 	init_completion(&fcomp.comp);
1312 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1313 	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
1314 				iocmd->instance, iocmd->cfg.mode,
1315 				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
1316 				bfad_hcb_comp, &fcomp);
1317 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1318 	if (iocmd->status != BFA_STATUS_OK)
1319 		goto out;
1320 
1321 	wait_for_completion(&fcomp.comp);
1322 	iocmd->status = fcomp.status;
1323 out:
1324 	return 0;
1325 }
1326 
1327 static int
1328 bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1329 {
1330 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1331 	struct bfad_hal_comp fcomp;
1332 	unsigned long   flags;
1333 
1334 	init_completion(&fcomp.comp);
1335 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1336 	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
1337 		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
1338 					bfad_hcb_comp, &fcomp);
1339 	else
1340 		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
1341 					bfad_hcb_comp, &fcomp);
1342 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1343 
1344 	if (iocmd->status != BFA_STATUS_OK)
1345 		goto out;
1346 
1347 	wait_for_completion(&fcomp.comp);
1348 	iocmd->status = fcomp.status;
1349 out:
1350 	return 0;
1351 }
1352 
1353 static int
1354 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
1355 {
1356 	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
1357 	struct bfad_hal_comp    fcomp;
1358 	unsigned long   flags;
1359 
1360 	init_completion(&fcomp.comp);
1361 	iocmd->status = BFA_STATUS_OK;
1362 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1363 	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
1364 				bfad_hcb_comp, &fcomp);
1365 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1366 
1367 	if (iocmd->status != BFA_STATUS_OK)
1368 		goto out;
1369 
1370 	wait_for_completion(&fcomp.comp);
1371 	iocmd->status = fcomp.status;
1372 out:
1373 	return 0;
1374 }
1375 
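/*
 * CEE attribute and statistics queries are additionally serialized with
 * bfad_mutex, in addition to the per-adapter bfad_lock, for the duration
 * of the firmware request and its completion.
 */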
1376 static int
1377 bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1378 {
1379 	struct bfa_bsg_cee_attr_s *iocmd =
1380 				(struct bfa_bsg_cee_attr_s *)cmd;
1381 	void	*iocmd_bufptr;
1382 	struct bfad_hal_comp	cee_comp;
1383 	unsigned long	flags;
1384 
1385 	if (bfad_chk_iocmd_sz(payload_len,
1386 			sizeof(struct bfa_bsg_cee_attr_s),
1387 			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
1388 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1389 		return 0;
1390 	}
1391 
1392 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
1393 
1394 	cee_comp.status = 0;
1395 	init_completion(&cee_comp.comp);
1396 	mutex_lock(&bfad_mutex);
1397 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1398 	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
1399 					 bfad_hcb_comp, &cee_comp);
1400 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1401 	if (iocmd->status != BFA_STATUS_OK) {
1402 		mutex_unlock(&bfad_mutex);
1403 		bfa_trc(bfad, 0x5555);
1404 		goto out;
1405 	}
1406 	wait_for_completion(&cee_comp.comp);
1407 	mutex_unlock(&bfad_mutex);
1408 out:
1409 	return 0;
1410 }
1411 
1412 static int
1413 bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
1414 			unsigned int payload_len)
1415 {
1416 	struct bfa_bsg_cee_stats_s *iocmd =
1417 				(struct bfa_bsg_cee_stats_s *)cmd;
1418 	void	*iocmd_bufptr;
1419 	struct bfad_hal_comp	cee_comp;
1420 	unsigned long	flags;
1421 
1422 	if (bfad_chk_iocmd_sz(payload_len,
1423 			sizeof(struct bfa_bsg_cee_stats_s),
1424 			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1425 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1426 		return 0;
1427 	}
1428 
1429 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1430 
1431 	cee_comp.status = 0;
1432 	init_completion(&cee_comp.comp);
1433 	mutex_lock(&bfad_mutex);
1434 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1435 	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1436 					bfad_hcb_comp, &cee_comp);
1437 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1438 	if (iocmd->status != BFA_STATUS_OK) {
1439 		mutex_unlock(&bfad_mutex);
1440 		bfa_trc(bfad, 0x5555);
1441 		goto out;
1442 	}
1443 	wait_for_completion(&cee_comp.comp);
1444 	mutex_unlock(&bfad_mutex);
1445 out:
1446 	return 0;
1447 }
1448 
1449 static int
1450 bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1451 {
1452 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1453 	unsigned long	flags;
1454 
1455 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1456 	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1457 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1458 	if (iocmd->status != BFA_STATUS_OK)
1459 		bfa_trc(bfad, 0x5555);
1460 	return 0;
1461 }
1462 
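/*
 * For the SFP handlers, BFA_STATUS_SFP_NOT_READY from the submit call
 * means the query was forwarded to the firmware and the handler must wait
 * for the completion; any other status is final and returned as is.
 */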
1463 static int
1464 bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1465 {
1466 	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1467 	struct bfad_hal_comp	fcomp;
1468 	unsigned long	flags;
1469 
1470 	init_completion(&fcomp.comp);
1471 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1472 	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1473 				bfad_hcb_comp, &fcomp);
1474 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1475 	bfa_trc(bfad, iocmd->status);
1476 	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1477 		goto out;
1478 
1479 	wait_for_completion(&fcomp.comp);
1480 	iocmd->status = fcomp.status;
1481 out:
1482 	return 0;
1483 }
1484 
1485 static int
1486 bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1487 {
1488 	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1489 	struct bfad_hal_comp	fcomp;
1490 	unsigned long	flags;
1491 
1492 	init_completion(&fcomp.comp);
1493 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1494 	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1495 				bfad_hcb_comp, &fcomp);
1496 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1497 	bfa_trc(bfad, iocmd->status);
1498 	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1499 		goto out;
1500 	wait_for_completion(&fcomp.comp);
1501 	iocmd->status = fcomp.status;
1502 out:
1503 	return 0;
1504 }
1505 
1506 static int
1507 bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1508 {
1509 	struct bfa_bsg_flash_attr_s *iocmd =
1510 			(struct bfa_bsg_flash_attr_s *)cmd;
1511 	struct bfad_hal_comp fcomp;
1512 	unsigned long	flags;
1513 
1514 	init_completion(&fcomp.comp);
1515 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1516 	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1517 				bfad_hcb_comp, &fcomp);
1518 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1519 	if (iocmd->status != BFA_STATUS_OK)
1520 		goto out;
1521 	wait_for_completion(&fcomp.comp);
1522 	iocmd->status = fcomp.status;
1523 out:
1524 	return 0;
1525 }
1526 
1527 static int
1528 bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1529 {
1530 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1531 	struct bfad_hal_comp fcomp;
1532 	unsigned long	flags;
1533 
1534 	init_completion(&fcomp.comp);
1535 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1536 	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1537 				iocmd->instance, bfad_hcb_comp, &fcomp);
1538 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1539 	if (iocmd->status != BFA_STATUS_OK)
1540 		goto out;
1541 	wait_for_completion(&fcomp.comp);
1542 	iocmd->status = fcomp.status;
1543 out:
1544 	return 0;
1545 }
1546 
1547 static int
1548 bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1549 			unsigned int payload_len)
1550 {
1551 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1552 	void	*iocmd_bufptr;
1553 	struct bfad_hal_comp fcomp;
1554 	unsigned long	flags;
1555 
1556 	if (bfad_chk_iocmd_sz(payload_len,
1557 			sizeof(struct bfa_bsg_flash_s),
1558 			iocmd->bufsz) != BFA_STATUS_OK) {
1559 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1560 		return 0;
1561 	}
1562 
1563 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1564 
1565 	init_completion(&fcomp.comp);
1566 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1567 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1568 				iocmd->type, iocmd->instance, iocmd_bufptr,
1569 				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1570 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1571 	if (iocmd->status != BFA_STATUS_OK)
1572 		goto out;
1573 	wait_for_completion(&fcomp.comp);
1574 	iocmd->status = fcomp.status;
1575 out:
1576 	return 0;
1577 }
1578 
1579 static int
1580 bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1581 			unsigned int payload_len)
1582 {
1583 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1584 	struct bfad_hal_comp fcomp;
1585 	void	*iocmd_bufptr;
1586 	unsigned long	flags;
1587 
1588 	if (bfad_chk_iocmd_sz(payload_len,
1589 			sizeof(struct bfa_bsg_flash_s),
1590 			iocmd->bufsz) != BFA_STATUS_OK) {
1591 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1592 		return 0;
1593 	}
1594 
1595 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1596 
1597 	init_completion(&fcomp.comp);
1598 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1599 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1600 				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1601 				bfad_hcb_comp, &fcomp);
1602 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1603 	if (iocmd->status != BFA_STATUS_OK)
1604 		goto out;
1605 	wait_for_completion(&fcomp.comp);
1606 	iocmd->status = fcomp.status;
1607 out:
1608 	return 0;
1609 }
1610 
1611 static int
1612 bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1613 {
1614 	struct bfa_bsg_diag_get_temp_s *iocmd =
1615 			(struct bfa_bsg_diag_get_temp_s *)cmd;
1616 	struct bfad_hal_comp fcomp;
1617 	unsigned long	flags;
1618 
1619 	init_completion(&fcomp.comp);
1620 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1621 	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1622 				&iocmd->result, bfad_hcb_comp, &fcomp);
1623 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1624 	bfa_trc(bfad, iocmd->status);
1625 	if (iocmd->status != BFA_STATUS_OK)
1626 		goto out;
1627 	wait_for_completion(&fcomp.comp);
1628 	iocmd->status = fcomp.status;
1629 out:
1630 	return 0;
1631 }
1632 
1633 static int
1634 bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1635 {
1636 	struct bfa_bsg_diag_memtest_s *iocmd =
1637 			(struct bfa_bsg_diag_memtest_s *)cmd;
1638 	struct bfad_hal_comp fcomp;
1639 	unsigned long   flags;
1640 
1641 	init_completion(&fcomp.comp);
1642 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1643 	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1644 				&iocmd->memtest, iocmd->pat,
1645 				&iocmd->result, bfad_hcb_comp, &fcomp);
1646 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1647 	bfa_trc(bfad, iocmd->status);
1648 	if (iocmd->status != BFA_STATUS_OK)
1649 		goto out;
1650 	wait_for_completion(&fcomp.comp);
1651 	iocmd->status = fcomp.status;
1652 out:
1653 	return 0;
1654 }
1655 
1656 static int
1657 bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1658 {
1659 	struct bfa_bsg_diag_loopback_s *iocmd =
1660 			(struct bfa_bsg_diag_loopback_s *)cmd;
1661 	struct bfad_hal_comp fcomp;
1662 	unsigned long   flags;
1663 
1664 	init_completion(&fcomp.comp);
1665 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1666 	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1667 				iocmd->speed, iocmd->lpcnt, iocmd->pat,
1668 				&iocmd->result, bfad_hcb_comp, &fcomp);
1669 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1670 	bfa_trc(bfad, iocmd->status);
1671 	if (iocmd->status != BFA_STATUS_OK)
1672 		goto out;
1673 	wait_for_completion(&fcomp.comp);
1674 	iocmd->status = fcomp.status;
1675 out:
1676 	return 0;
1677 }
1678 
1679 static int
1680 bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1681 {
1682 	struct bfa_bsg_diag_fwping_s *iocmd =
1683 			(struct bfa_bsg_diag_fwping_s *)cmd;
1684 	struct bfad_hal_comp fcomp;
1685 	unsigned long   flags;
1686 
1687 	init_completion(&fcomp.comp);
1688 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1689 	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1690 				iocmd->pattern, &iocmd->result,
1691 				bfad_hcb_comp, &fcomp);
1692 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1693 	bfa_trc(bfad, iocmd->status);
1694 	if (iocmd->status != BFA_STATUS_OK)
1695 		goto out;
1696 	bfa_trc(bfad, 0x77771);
1697 	wait_for_completion(&fcomp.comp);
1698 	iocmd->status = fcomp.status;
1699 out:
1700 	return 0;
1701 }
1702 
1703 static int
1704 bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1705 {
1706 	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1707 	struct bfad_hal_comp fcomp;
1708 	unsigned long   flags;
1709 
1710 	init_completion(&fcomp.comp);
1711 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1712 	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1713 				iocmd->queue, &iocmd->result,
1714 				bfad_hcb_comp, &fcomp);
1715 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1716 	if (iocmd->status != BFA_STATUS_OK)
1717 		goto out;
1718 	wait_for_completion(&fcomp.comp);
1719 	iocmd->status = fcomp.status;
1720 out:
1721 	return 0;
1722 }
1723 
1724 static int
1725 bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1726 {
1727 	struct bfa_bsg_sfp_show_s *iocmd =
1728 			(struct bfa_bsg_sfp_show_s *)cmd;
1729 	struct bfad_hal_comp fcomp;
1730 	unsigned long   flags;
1731 
1732 	init_completion(&fcomp.comp);
1733 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1734 	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1735 				bfad_hcb_comp, &fcomp);
1736 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1737 	bfa_trc(bfad, iocmd->status);
1738 	if (iocmd->status != BFA_STATUS_OK)
1739 		goto out;
1740 	wait_for_completion(&fcomp.comp);
1741 	iocmd->status = fcomp.status;
1742 	bfa_trc(bfad, iocmd->status);
1743 out:
1744 	return 0;
1745 }
1746 
1747 static int
1748 bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1749 {
1750 	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1751 	unsigned long   flags;
1752 
1753 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1754 	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1755 				&iocmd->ledtest);
1756 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1757 	return 0;
1758 }
1759 
1760 static int
1761 bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1762 {
1763 	struct bfa_bsg_diag_beacon_s *iocmd =
1764 			(struct bfa_bsg_diag_beacon_s *)cmd;
1765 	unsigned long	flags;
1766 
1767 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1768 	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1769 				iocmd->beacon, iocmd->link_e2e_beacon,
1770 				iocmd->second);
1771 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1772 	return 0;
1773 }
1774 
1775 static int
1776 bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1777 {
1778 	struct bfa_bsg_diag_lb_stat_s *iocmd =
1779 			(struct bfa_bsg_diag_lb_stat_s *)cmd;
1780 	unsigned long	flags;
1781 
1782 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1783 	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1784 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1785 	bfa_trc(bfad, iocmd->status);
1786 
1787 	return 0;
1788 }
1789 
1790 static int
1791 bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
1792 {
1793 	struct bfa_bsg_dport_enable_s *iocmd =
1794 				(struct bfa_bsg_dport_enable_s *)pcmd;
1795 	unsigned long	flags;
1796 	struct bfad_hal_comp fcomp;
1797 
1798 	init_completion(&fcomp.comp);
1799 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1800 	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
1801 					iocmd->pat, bfad_hcb_comp, &fcomp);
1802 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1803 	if (iocmd->status != BFA_STATUS_OK)
1804 		bfa_trc(bfad, iocmd->status);
1805 	else {
1806 		wait_for_completion(&fcomp.comp);
1807 		iocmd->status = fcomp.status;
1808 	}
1809 	return 0;
1810 }
1811 
1812 static int
1813 bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
1814 {
1815 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1816 	unsigned long	flags;
1817 	struct bfad_hal_comp fcomp;
1818 
1819 	init_completion(&fcomp.comp);
1820 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1821 	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
1822 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1823 	if (iocmd->status != BFA_STATUS_OK)
1824 		bfa_trc(bfad, iocmd->status);
1825 	else {
1826 		wait_for_completion(&fcomp.comp);
1827 		iocmd->status = fcomp.status;
1828 	}
1829 	return 0;
1830 }
1831 
1832 static int
1833 bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
1834 {
1835 	struct bfa_bsg_dport_enable_s *iocmd =
1836 				(struct bfa_bsg_dport_enable_s *)pcmd;
1837 	unsigned long   flags;
1838 	struct bfad_hal_comp fcomp;
1839 
1840 	init_completion(&fcomp.comp);
1841 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1842 	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
1843 					iocmd->pat, bfad_hcb_comp,
1844 					&fcomp);
1845 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1846 
1847 	if (iocmd->status != BFA_STATUS_OK) {
1848 		bfa_trc(bfad, iocmd->status);
1849 	} else {
1850 		wait_for_completion(&fcomp.comp);
1851 		iocmd->status = fcomp.status;
1852 	}
1853 
1854 	return 0;
1855 }
1856 
1857 static int
1858 bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
1859 {
1860 	struct bfa_bsg_diag_dport_show_s *iocmd =
1861 				(struct bfa_bsg_diag_dport_show_s *)pcmd;
1862 	unsigned long   flags;
1863 
1864 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1865 	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
1866 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1867 
1868 	return 0;
1869 }
1870 
1872 static int
1873 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1874 {
1875 	struct bfa_bsg_phy_attr_s *iocmd =
1876 			(struct bfa_bsg_phy_attr_s *)cmd;
1877 	struct bfad_hal_comp fcomp;
1878 	unsigned long	flags;
1879 
1880 	init_completion(&fcomp.comp);
1881 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1882 	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1883 				&iocmd->attr, bfad_hcb_comp, &fcomp);
1884 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1885 	if (iocmd->status != BFA_STATUS_OK)
1886 		goto out;
1887 	wait_for_completion(&fcomp.comp);
1888 	iocmd->status = fcomp.status;
1889 out:
1890 	return 0;
1891 }
1892 
1893 static int
1894 bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1895 {
1896 	struct bfa_bsg_phy_stats_s *iocmd =
1897 			(struct bfa_bsg_phy_stats_s *)cmd;
1898 	struct bfad_hal_comp fcomp;
1899 	unsigned long	flags;
1900 
1901 	init_completion(&fcomp.comp);
1902 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1903 	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1904 				&iocmd->stats, bfad_hcb_comp, &fcomp);
1905 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1906 	if (iocmd->status != BFA_STATUS_OK)
1907 		goto out;
1908 	wait_for_completion(&fcomp.comp);
1909 	iocmd->status = fcomp.status;
1910 out:
1911 	return 0;
1912 }
1913 
1914 static int
1915 bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1916 {
1917 	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1918 	struct bfad_hal_comp fcomp;
1919 	void	*iocmd_bufptr;
1920 	unsigned long	flags;
1921 
1922 	if (bfad_chk_iocmd_sz(payload_len,
1923 			sizeof(struct bfa_bsg_phy_s),
1924 			iocmd->bufsz) != BFA_STATUS_OK) {
1925 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1926 		return 0;
1927 	}
1928 
1929 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1930 	init_completion(&fcomp.comp);
1931 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1932 	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1933 				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1934 				0, bfad_hcb_comp, &fcomp);
1935 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1936 	if (iocmd->status != BFA_STATUS_OK)
1937 		goto out;
1938 	wait_for_completion(&fcomp.comp);
1939 	iocmd->status = fcomp.status;
1942 out:
1943 	return 0;
1944 }
1945 
1946 static int
1947 bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
1948 {
1949 	struct bfa_bsg_vhba_attr_s *iocmd =
1950 			(struct bfa_bsg_vhba_attr_s *)cmd;
1951 	struct bfa_vhba_attr_s *attr = &iocmd->attr;
1952 	unsigned long flags;
1953 
1954 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1955 	attr->pwwn =  bfad->bfa.ioc.attr->pwwn;
1956 	attr->nwwn =  bfad->bfa.ioc.attr->nwwn;
1957 	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
1958 	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
1959 	attr->path_tov  = bfa_fcpim_path_tov_get(&bfad->bfa);
1960 	iocmd->status = BFA_STATUS_OK;
1961 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1962 	return 0;
1963 }
1964 
1965 static int
1966 bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1967 {
1968 	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1969 	void	*iocmd_bufptr;
1970 	struct bfad_hal_comp fcomp;
1971 	unsigned long	flags;
1972 
1973 	if (bfad_chk_iocmd_sz(payload_len,
1974 			sizeof(struct bfa_bsg_phy_s),
1975 			iocmd->bufsz) != BFA_STATUS_OK) {
1976 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1977 		return 0;
1978 	}
1979 
1980 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1981 	init_completion(&fcomp.comp);
1982 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1983 	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1984 				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1985 				0, bfad_hcb_comp, &fcomp);
1986 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1987 	if (iocmd->status != BFA_STATUS_OK)
1988 		goto out;
1989 	wait_for_completion(&fcomp.comp);
1990 	iocmd->status = fcomp.status;
1991 out:
1992 	return 0;
1993 }
1994 
1995 static int
1996 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
1997 {
1998 	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1999 	void *iocmd_bufptr;
2000 
2001 	if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
2002 		bfa_trc(bfad, sizeof(struct bfa_plog_s));
2003 		iocmd->status = BFA_STATUS_EINVAL;
2004 		goto out;
2005 	}
2006 
2007 	iocmd->status = BFA_STATUS_OK;
2008 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2009 	memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
2010 out:
2011 	return 0;
2012 }
2013 
2014 #define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
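/*
 * Read one chunk of the firmware core, starting at iocmd->offset, into the
 * buffer that follows the bsg header. The caller's buffer must hold at
 * least one chunk and be suitably aligned.
 */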
2015 static int
2016 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
2017 			unsigned int payload_len)
2018 {
2019 	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
2020 	void	*iocmd_bufptr;
2021 	unsigned long	flags;
2022 	u32 offset;
2023 
2024 	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
2025 			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
2026 		iocmd->status = BFA_STATUS_VERSION_FAIL;
2027 		return 0;
2028 	}
2029 
2030 	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
2031 			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
2032 			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
2033 		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
2034 		iocmd->status = BFA_STATUS_EINVAL;
2035 		goto out;
2036 	}
2037 
2038 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2039 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2040 	offset = iocmd->offset;
2041 	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
2042 				&offset, &iocmd->bufsz);
2043 	iocmd->offset = offset;
2044 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2045 out:
2046 	return 0;
2047 }
2048 
2049 static int
2050 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2051 {
2052 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2053 	unsigned long	flags;
2054 
2055 	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
2056 		spin_lock_irqsave(&bfad->bfad_lock, flags);
2057 		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
2058 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2059 	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
2060 		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
2061 	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
2062 		bfa_trc_init(bfad->trcmod);
2063 	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
2064 		bfa_trc_stop(bfad->trcmod);
2065 
2066 	iocmd->status = BFA_STATUS_OK;
2067 	return 0;
2068 }
2069 
2070 static int
2071 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
2072 {
2073 	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
2074 
2075 	if (iocmd->ctl == BFA_TRUE)
2076 		bfad->plog_buf.plog_enabled = 1;
2077 	else
2078 		bfad->plog_buf.plog_enabled = 0;
2079 
2080 	iocmd->status = BFA_STATUS_OK;
2081 	return 0;
2082 }
2083 
2084 static int
2085 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2086 {
2087 	struct bfa_bsg_fcpim_profile_s *iocmd =
2088 				(struct bfa_bsg_fcpim_profile_s *)cmd;
2089 	unsigned long	flags;
2090 
2091 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2092 	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
2093 		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
2094 	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
2095 		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
2096 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2097 
2098 	return 0;
2099 }
2100 
2101 static int
2102 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
2103 {
2104 	struct bfa_bsg_itnim_ioprofile_s *iocmd =
2105 				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
2106 	struct bfa_fcs_lport_s *fcs_port;
2107 	struct bfa_fcs_itnim_s *itnim;
2108 	unsigned long   flags;
2109 
2110 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2111 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
2112 				iocmd->vf_id, iocmd->lpwwn);
2113 	if (!fcs_port)
2114 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
2115 	else {
2116 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
2117 		if (itnim == NULL)
2118 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
2119 		else
2120 			iocmd->status = bfa_itnim_get_ioprofile(
2121 						bfa_fcs_itnim_get_halitn(itnim),
2122 						&iocmd->ioprofile);
2123 	}
2124 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2125 	return 0;
2126 }
2127 
2128 static int
2129 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
2130 {
2131 	struct bfa_bsg_fcport_stats_s *iocmd =
2132 				(struct bfa_bsg_fcport_stats_s *)cmd;
2133 	struct bfad_hal_comp fcomp;
2134 	unsigned long	flags;
2135 	struct bfa_cb_pending_q_s cb_qe;
2136 
2137 	init_completion(&fcomp.comp);
2138 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2139 			   &fcomp, &iocmd->stats);
2140 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2141 	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2142 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2143 	if (iocmd->status != BFA_STATUS_OK) {
2144 		bfa_trc(bfad, iocmd->status);
2145 		goto out;
2146 	}
2147 	wait_for_completion(&fcomp.comp);
2148 	iocmd->status = fcomp.status;
2149 out:
2150 	return 0;
2151 }
2152 
2153 static int
2154 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
2155 {
2156 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2157 	struct bfad_hal_comp fcomp;
2158 	unsigned long	flags;
2159 	struct bfa_cb_pending_q_s cb_qe;
2160 
2161 	init_completion(&fcomp.comp);
2162 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
2163 
2164 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2165 	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2166 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2167 	if (iocmd->status != BFA_STATUS_OK) {
2168 		bfa_trc(bfad, iocmd->status);
2169 		goto out;
2170 	}
2171 	wait_for_completion(&fcomp.comp);
2172 	iocmd->status = fcomp.status;
2173 out:
2174 	return 0;
2175 }
2176 
2177 static int
2178 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2179 {
2180 	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2181 	struct bfad_hal_comp fcomp;
2182 	unsigned long	flags;
2183 
2184 	init_completion(&fcomp.comp);
2185 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2186 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2187 			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2188 			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2189 			bfad_hcb_comp, &fcomp);
2190 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2191 	if (iocmd->status != BFA_STATUS_OK)
2192 		goto out;
2193 	wait_for_completion(&fcomp.comp);
2194 	iocmd->status = fcomp.status;
2195 out:
2196 	return 0;
2197 }
2198 
2199 static int
2200 bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2201 {
2202 	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2203 	struct bfad_hal_comp fcomp;
2204 	unsigned long	flags;
2205 
2206 	init_completion(&fcomp.comp);
2207 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2208 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2209 			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2210 			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2211 			bfad_hcb_comp, &fcomp);
2212 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2213 	if (iocmd->status != BFA_STATUS_OK)
2214 		goto out;
2215 	wait_for_completion(&fcomp.comp);
2216 	iocmd->status = fcomp.status;
2217 out:
2218 	return 0;
2219 }
2220 
2221 static int
2222 bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2223 {
2224 	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2225 	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2226 	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2227 	unsigned long	flags;
2228 
2229 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2230 	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2231 	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2232 	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2233 	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2234 	iocmd->status = BFA_STATUS_OK;
2235 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2236 
2237 	return 0;
2238 }
2239 
2240 static int
2241 bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
2242 {
2243 	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2244 	struct bfad_hal_comp fcomp;
2245 	unsigned long	flags;
2246 
2247 	init_completion(&fcomp.comp);
2248 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2249 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2250 				BFA_FLASH_PART_PXECFG,
2251 				bfad->bfa.ioc.port_id, &iocmd->cfg,
2252 				sizeof(struct bfa_ethboot_cfg_s), 0,
2253 				bfad_hcb_comp, &fcomp);
2254 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2255 	if (iocmd->status != BFA_STATUS_OK)
2256 		goto out;
2257 	wait_for_completion(&fcomp.comp);
2258 	iocmd->status = fcomp.status;
2259 out:
2260 	return 0;
2261 }
2262 
2263 static int
2264 bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
2265 {
2266 	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2267 	struct bfad_hal_comp fcomp;
2268 	unsigned long	flags;
2269 
2270 	init_completion(&fcomp.comp);
2271 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2272 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2273 				BFA_FLASH_PART_PXECFG,
2274 				bfad->bfa.ioc.port_id, &iocmd->cfg,
2275 				sizeof(struct bfa_ethboot_cfg_s), 0,
2276 				bfad_hcb_comp, &fcomp);
2277 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2278 	if (iocmd->status != BFA_STATUS_OK)
2279 		goto out;
2280 	wait_for_completion(&fcomp.comp);
2281 	iocmd->status = fcomp.status;
2282 out:
2283 	return 0;
2284 }
2285 
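/*
 * Enable or disable FC trunking on the base port. Trunking cannot be
 * reconfigured while the port is in dport mode or in loop topology.
 */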
2286 static int
2287 bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2288 {
2289 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2290 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2291 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2292 	unsigned long	flags;
2293 
2294 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2295 
2296 	if (bfa_fcport_is_dport(&bfad->bfa)) {
2297 		iocmd->status = BFA_STATUS_DPORT_ERR;
2298 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return 0;
2299 	}
2300 
2301 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2302 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2303 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2304 	else {
2305 		if (v_cmd == IOCMD_TRUNK_ENABLE) {
2306 			trunk->attr.state = BFA_TRUNK_OFFLINE;
2307 			bfa_fcport_disable(&bfad->bfa);
2308 			fcport->cfg.trunked = BFA_TRUE;
2309 		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2310 			trunk->attr.state = BFA_TRUNK_DISABLED;
2311 			bfa_fcport_disable(&bfad->bfa);
2312 			fcport->cfg.trunked = BFA_FALSE;
2313 		}
2314 
2315 		if (!bfa_fcport_is_disabled(&bfad->bfa))
2316 			bfa_fcport_enable(&bfad->bfa);
2317 
2318 		iocmd->status = BFA_STATUS_OK;
2319 	}
2320 
2321 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2322 
2323 	return 0;
2324 }
2325 
2326 static int
2327 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2328 {
2329 	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2330 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2331 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2332 	unsigned long	flags;
2333 
2334 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2335 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2336 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2337 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2338 	else {
2339 		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2340 			sizeof(struct bfa_trunk_attr_s));
2341 		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2342 		iocmd->status = BFA_STATUS_OK;
2343 	}
2344 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2345 
2346 	return 0;
2347 }
2348 
2349 static int
2350 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2351 {
2352 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2353 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2354 	unsigned long	flags;
2355 
2356 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2357 	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2358 		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2359 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2360 			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2361 		else {
2362 			if (v_cmd == IOCMD_QOS_ENABLE)
2363 				fcport->cfg.qos_enabled = BFA_TRUE;
2364 			else if (v_cmd == IOCMD_QOS_DISABLE) {
2365 				fcport->cfg.qos_enabled = BFA_FALSE;
2366 				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
2367 				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
2368 				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
2369 			}
2370 		}
2371 	}
2372 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2373 
2374 	return 0;
2375 }
2376 
2377 static int
2378 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2379 {
2380 	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2381 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2382 	unsigned long	flags;
2383 
2384 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2385 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2386 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2387 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2388 	else {
2389 		iocmd->attr.state = fcport->qos_attr.state;
2390 		iocmd->attr.total_bb_cr =
2391 			be32_to_cpu(fcport->qos_attr.total_bb_cr);
2392 		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
2393 		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
2394 		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
2395 		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
2396 		iocmd->status = BFA_STATUS_OK;
2397 	}
2398 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2399 
2400 	return 0;
2401 }
2402 
2403 static int
2404 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2405 {
2406 	struct bfa_bsg_qos_vc_attr_s *iocmd =
2407 				(struct bfa_bsg_qos_vc_attr_s *)cmd;
2408 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2409 	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2410 	unsigned long	flags;
2411 	u32	i = 0;
2412 
2413 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2414 	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2415 	iocmd->attr.shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
2416 	iocmd->attr.elp_opmode_flags  =
2417 				be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2418 
2419 	/* Individual VC info */
2420 	while (i < iocmd->attr.total_vc_count) {
2421 		iocmd->attr.vc_info[i].vc_credit =
2422 				bfa_vc_attr->vc_info[i].vc_credit;
2423 		iocmd->attr.vc_info[i].borrow_credit =
2424 				bfa_vc_attr->vc_info[i].borrow_credit;
2425 		iocmd->attr.vc_info[i].priority =
2426 				bfa_vc_attr->vc_info[i].priority;
2427 		i++;
2428 	}
2429 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2430 
2431 	iocmd->status = BFA_STATUS_OK;
2432 	return 0;
2433 }
2434 
2435 static int
2436 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2437 {
2438 	struct bfa_bsg_fcport_stats_s *iocmd =
2439 				(struct bfa_bsg_fcport_stats_s *)cmd;
2440 	struct bfad_hal_comp fcomp;
2441 	unsigned long	flags;
2442 	struct bfa_cb_pending_q_s cb_qe;
2443 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2444 
2445 	init_completion(&fcomp.comp);
2446 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2447 			   &fcomp, &iocmd->stats);
2448 
2449 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2450 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2451 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2452 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2453 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2454 	else
2455 		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2456 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2457 	if (iocmd->status != BFA_STATUS_OK) {
2458 		bfa_trc(bfad, iocmd->status);
2459 		goto out;
2460 	}
2461 	wait_for_completion(&fcomp.comp);
2462 	iocmd->status = fcomp.status;
2463 out:
2464 	return 0;
2465 }
2466 
2467 static int
2468 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2469 {
2470 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2471 	struct bfad_hal_comp fcomp;
2472 	unsigned long	flags;
2473 	struct bfa_cb_pending_q_s cb_qe;
2474 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2475 
2476 	init_completion(&fcomp.comp);
2477 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2478 			   &fcomp, NULL);
2479 
2480 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2481 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2482 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2483 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2484 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2485 	else
2486 		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2487 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2488 	if (iocmd->status != BFA_STATUS_OK) {
2489 		bfa_trc(bfad, iocmd->status);
2490 		goto out;
2491 	}
2492 	wait_for_completion(&fcomp.comp);
2493 	iocmd->status = fcomp.status;
2494 out:
2495 	return 0;
2496 }
2497 
2498 static int
2499 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2500 {
2501 	struct bfa_bsg_vf_stats_s *iocmd =
2502 			(struct bfa_bsg_vf_stats_s *)cmd;
2503 	struct bfa_fcs_fabric_s	*fcs_vf;
2504 	unsigned long	flags;
2505 
2506 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2507 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2508 	if (fcs_vf == NULL) {
2509 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2510 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2511 		goto out;
2512 	}
2513 	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2514 		sizeof(struct bfa_vf_stats_s));
2515 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2516 	iocmd->status = BFA_STATUS_OK;
2517 out:
2518 	return 0;
2519 }
2520 
2521 static int
2522 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2523 {
2524 	struct bfa_bsg_vf_reset_stats_s *iocmd =
2525 			(struct bfa_bsg_vf_reset_stats_s *)cmd;
2526 	struct bfa_fcs_fabric_s	*fcs_vf;
2527 	unsigned long	flags;
2528 
2529 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2530 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2531 	if (fcs_vf == NULL) {
2532 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2533 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2534 		goto out;
2535 	}
2536 	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2537 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2538 	iocmd->status = BFA_STATUS_OK;
2539 out:
2540 	return 0;
2541 }
2542 
2543 /*
2544  * Set the SCSI device sdev_bflags - sdev_bflags are used by the SCSI
2545  * mid-layer to choose the LUN scanning mode, REPORT_LUNS vs. sequential scan.
2546  *
2547  * Internally iterates over all the ITNIMs that are part of the im_port and
2548  * sets the sdev_bflags for the scsi_device associated with LUN #0.
2549  */
2550 static void bfad_reset_sdev_bflags(struct bfad_im_port_s *im_port,
2551 				   int lunmask_cfg)
2552 {
2553 	const u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;
2554 	struct bfad_itnim_s *itnim;
2555 	struct scsi_device *sdev;
2556 	unsigned long flags;
2557 
2558 	spin_lock_irqsave(im_port->shost->host_lock, flags);
2559 	list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
2560 		sdev = __scsi_device_lookup(im_port->shost, itnim->channel,
2561 					    itnim->scsi_tgt_id, 0);
2562 		if (sdev) {
2563 			if (lunmask_cfg == BFA_TRUE)
2564 				sdev->sdev_bflags |= scan_flags;
2565 			else
2566 				sdev->sdev_bflags &= ~scan_flags;
2567 		}
2568 	}
2569 	spin_unlock_irqrestore(im_port->shost->host_lock, flags);
2570 }
2571 
2572 /* Function to reset the LUN SCAN mode */
2573 static void
2574 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
2575 {
2576 	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
2577 	struct bfad_vport_s *vport = NULL;
2578 
2579 	/* Set the scsi device LUN SCAN flags for base port */
2580 	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
2581 
2582 	/* Set the scsi device LUN SCAN flags for the vports */
2583 	list_for_each_entry(vport, &bfad->vport_list, list_entry)
2584 		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
2585 }
2586 
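/*
 * Enable, disable or clear the FCP-IM LUN mask. Enabling switches the
 * attached scsi_devices to sequential LUN scan; disabling restores the
 * default REPORT_LUNS based scan.
 */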
2587 static int
2588 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2589 {
2590 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2591 	unsigned long	flags;
2592 
2593 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2594 	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
2595 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2596 		/* Set the LUN Scanning mode to be Sequential scan */
2597 		if (iocmd->status == BFA_STATUS_OK)
2598 			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
2599 	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
2600 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2601 		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
2602 		if (iocmd->status == BFA_STATUS_OK)
2603 			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
2604 	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2605 		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2606 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2607 	return 0;
2608 }
2609 
2610 static int
2611 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2612 {
2613 	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2614 			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2615 	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2616 	unsigned long	flags;
2617 
2618 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2619 	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2620 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2621 	return 0;
2622 }
2623 
2624 static int
2625 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2626 {
2627 	struct bfa_bsg_fcpim_lunmask_s *iocmd =
2628 				(struct bfa_bsg_fcpim_lunmask_s *)cmd;
2629 	unsigned long	flags;
2630 
2631 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2632 	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2633 		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2634 					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2635 	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2636 		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2637 					iocmd->vf_id, &iocmd->pwwn,
2638 					iocmd->rpwwn, iocmd->lun);
2639 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2640 	return 0;
2641 }
2642 
2643 static int
2644 bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
2645 {
2646 	struct bfa_bsg_fcpim_throttle_s *iocmd =
2647 			(struct bfa_bsg_fcpim_throttle_s *)cmd;
2648 	unsigned long   flags;
2649 
2650 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2651 	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
2652 				(void *)&iocmd->throttle);
2653 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2654 
2655 	return 0;
2656 }
2657 
2658 static int
2659 bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
2660 {
2661 	struct bfa_bsg_fcpim_throttle_s *iocmd =
2662 			(struct bfa_bsg_fcpim_throttle_s *)cmd;
2663 	unsigned long	flags;
2664 
2665 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2666 	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
2667 				iocmd->throttle.cfg_value);
2668 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2669 
2670 	return 0;
2671 }
2672 
2673 static int
2674 bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
2675 {
2676 	struct bfa_bsg_tfru_s *iocmd =
2677 			(struct bfa_bsg_tfru_s *)cmd;
2678 	struct bfad_hal_comp fcomp;
2679 	unsigned long flags = 0;
2680 
2681 	init_completion(&fcomp.comp);
2682 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2683 	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
2684 				&iocmd->data, iocmd->len, iocmd->offset,
2685 				bfad_hcb_comp, &fcomp);
2686 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2687 	if (iocmd->status == BFA_STATUS_OK) {
2688 		wait_for_completion(&fcomp.comp);
2689 		iocmd->status = fcomp.status;
2690 	}
2691 
2692 	return 0;
2693 }
2694 
2695 static int
2696 bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
2697 {
2698 	struct bfa_bsg_tfru_s *iocmd =
2699 			(struct bfa_bsg_tfru_s *)cmd;
2700 	struct bfad_hal_comp fcomp;
2701 	unsigned long flags = 0;
2702 
2703 	init_completion(&fcomp.comp);
2704 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2705 	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
2706 				&iocmd->data, iocmd->len, iocmd->offset,
2707 				bfad_hcb_comp, &fcomp);
2708 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2709 	if (iocmd->status == BFA_STATUS_OK) {
2710 		wait_for_completion(&fcomp.comp);
2711 		iocmd->status = fcomp.status;
2712 	}
2713 
2714 	return 0;
2715 }
2716 
2717 static int
2718 bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
2719 {
2720 	struct bfa_bsg_fruvpd_s *iocmd =
2721 			(struct bfa_bsg_fruvpd_s *)cmd;
2722 	struct bfad_hal_comp fcomp;
2723 	unsigned long flags = 0;
2724 
2725 	init_completion(&fcomp.comp);
2726 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2727 	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
2728 				&iocmd->data, iocmd->len, iocmd->offset,
2729 				bfad_hcb_comp, &fcomp);
2730 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2731 	if (iocmd->status == BFA_STATUS_OK) {
2732 		wait_for_completion(&fcomp.comp);
2733 		iocmd->status = fcomp.status;
2734 	}
2735 
2736 	return 0;
2737 }
2738 
2739 static int
2740 bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
2741 {
2742 	struct bfa_bsg_fruvpd_s *iocmd =
2743 			(struct bfa_bsg_fruvpd_s *)cmd;
2744 	struct bfad_hal_comp fcomp;
2745 	unsigned long flags = 0;
2746 
2747 	init_completion(&fcomp.comp);
2748 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2749 	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
2750 				&iocmd->data, iocmd->len, iocmd->offset,
2751 				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
2752 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2753 	if (iocmd->status == BFA_STATUS_OK) {
2754 		wait_for_completion(&fcomp.comp);
2755 		iocmd->status = fcomp.status;
2756 	}
2757 
2758 	return 0;
2759 }
2760 
2761 static int
2762 bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
2763 {
2764 	struct bfa_bsg_fruvpd_max_size_s *iocmd =
2765 			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
2766 	unsigned long flags = 0;
2767 
2768 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2769 	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
2770 						&iocmd->max_size);
2771 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2772 
2773 	return 0;
2774 }
2775 
2776 static int
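/*
 * Dispatch a vendor-specific IOCMD to the matching bfad_iocmd_* handler.
 * Handlers report adapter status through iocmd->status and normally
 * return 0; unrecognized commands return -EINVAL.
 */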
2777 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2778 		unsigned int payload_len)
2779 {
2780 	int rc = -EINVAL;
2781 
2782 	switch (cmd) {
2783 	case IOCMD_IOC_ENABLE:
2784 		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
2785 		break;
2786 	case IOCMD_IOC_DISABLE:
2787 		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
2788 		break;
2789 	case IOCMD_IOC_GET_INFO:
2790 		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
2791 		break;
2792 	case IOCMD_IOC_GET_ATTR:
2793 		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
2794 		break;
2795 	case IOCMD_IOC_GET_STATS:
2796 		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
2797 		break;
2798 	case IOCMD_IOC_GET_FWSTATS:
2799 		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
2800 		break;
2801 	case IOCMD_IOC_RESET_STATS:
2802 	case IOCMD_IOC_RESET_FWSTATS:
2803 		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2804 		break;
2805 	case IOCMD_IOC_SET_ADAPTER_NAME:
2806 	case IOCMD_IOC_SET_PORT_NAME:
2807 		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2808 		break;
2809 	case IOCMD_IOCFC_GET_ATTR:
2810 		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
2811 		break;
2812 	case IOCMD_IOCFC_SET_INTR:
2813 		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
2814 		break;
2815 	case IOCMD_PORT_ENABLE:
2816 		rc = bfad_iocmd_port_enable(bfad, iocmd);
2817 		break;
2818 	case IOCMD_PORT_DISABLE:
2819 		rc = bfad_iocmd_port_disable(bfad, iocmd);
2820 		break;
2821 	case IOCMD_PORT_GET_ATTR:
2822 		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
2823 		break;
2824 	case IOCMD_PORT_GET_STATS:
2825 		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
2826 		break;
2827 	case IOCMD_PORT_RESET_STATS:
2828 		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2829 		break;
2830 	case IOCMD_PORT_CFG_TOPO:
2831 	case IOCMD_PORT_CFG_SPEED:
2832 	case IOCMD_PORT_CFG_ALPA:
2833 	case IOCMD_PORT_CLR_ALPA:
2834 		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2835 		break;
2836 	case IOCMD_PORT_CFG_MAXFRSZ:
2837 		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2838 		break;
2839 	case IOCMD_PORT_BBCR_ENABLE:
2840 	case IOCMD_PORT_BBCR_DISABLE:
2841 		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
2842 		break;
2843 	case IOCMD_PORT_BBCR_GET_ATTR:
2844 		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
2845 		break;
2846 	case IOCMD_LPORT_GET_ATTR:
2847 		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
2848 		break;
2849 	case IOCMD_LPORT_GET_STATS:
2850 		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
2851 		break;
2852 	case IOCMD_LPORT_RESET_STATS:
2853 		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2854 		break;
2855 	case IOCMD_LPORT_GET_IOSTATS:
2856 		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
2857 		break;
2858 	case IOCMD_LPORT_GET_RPORTS:
2859 		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
2860 		break;
2861 	case IOCMD_RPORT_GET_ATTR:
2862 		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
2863 		break;
2864 	case IOCMD_RPORT_GET_ADDR:
2865 		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
2866 		break;
2867 	case IOCMD_RPORT_GET_STATS:
2868 		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
2869 		break;
2870 	case IOCMD_RPORT_RESET_STATS:
2871 		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2872 		break;
2873 	case IOCMD_RPORT_SET_SPEED:
2874 		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2875 		break;
2876 	case IOCMD_VPORT_GET_ATTR:
2877 		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2878 		break;
2879 	case IOCMD_VPORT_GET_STATS:
2880 		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2881 		break;
2882 	case IOCMD_VPORT_RESET_STATS:
2883 		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2884 		break;
2885 	case IOCMD_FABRIC_GET_LPORTS:
2886 		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
2887 		break;
2888 	case IOCMD_RATELIM_ENABLE:
2889 	case IOCMD_RATELIM_DISABLE:
2890 		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2891 		break;
2892 	case IOCMD_RATELIM_DEF_SPEED:
2893 		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2894 		break;
2895 	case IOCMD_FCPIM_FAILOVER:
2896 		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2897 		break;
2898 	case IOCMD_FCPIM_MODSTATS:
2899 		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
2900 		break;
2901 	case IOCMD_FCPIM_MODSTATSCLR:
2902 		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2903 		break;
2904 	case IOCMD_FCPIM_DEL_ITN_STATS:
2905 		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
2906 		break;
2907 	case IOCMD_ITNIM_GET_ATTR:
2908 		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
2909 		break;
2910 	case IOCMD_ITNIM_GET_IOSTATS:
2911 		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
2912 		break;
2913 	case IOCMD_ITNIM_RESET_STATS:
2914 		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2915 		break;
2916 	case IOCMD_ITNIM_GET_ITNSTATS:
2917 		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
2918 		break;
2919 	case IOCMD_FCPORT_ENABLE:
2920 		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
2921 		break;
2922 	case IOCMD_FCPORT_DISABLE:
2923 		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
2924 		break;
2925 	case IOCMD_IOC_PCIFN_CFG:
2926 		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
2927 		break;
2928 	case IOCMD_IOC_FW_SIG_INV:
2929 		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
2930 		break;
2931 	case IOCMD_PCIFN_CREATE:
2932 		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
2933 		break;
2934 	case IOCMD_PCIFN_DELETE:
2935 		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
2936 		break;
2937 	case IOCMD_PCIFN_BW:
2938 		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
2939 		break;
2940 	case IOCMD_ADAPTER_CFG_MODE:
2941 		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
2942 		break;
2943 	case IOCMD_PORT_CFG_MODE:
2944 		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
2945 		break;
2946 	case IOCMD_FLASH_ENABLE_OPTROM:
2947 	case IOCMD_FLASH_DISABLE_OPTROM:
2948 		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
2949 		break;
2950 	case IOCMD_FAA_QUERY:
2951 		rc = bfad_iocmd_faa_query(bfad, iocmd);
2952 		break;
2953 	case IOCMD_CEE_GET_ATTR:
2954 		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
2955 		break;
2956 	case IOCMD_CEE_GET_STATS:
2957 		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
2958 		break;
2959 	case IOCMD_CEE_RESET_STATS:
2960 		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
2961 		break;
2962 	case IOCMD_SFP_MEDIA:
2963 		rc = bfad_iocmd_sfp_media(bfad, iocmd);
2964 		break;
2965 	case IOCMD_SFP_SPEED:
2966 		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
2967 		break;
2968 	case IOCMD_FLASH_GET_ATTR:
2969 		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
2970 		break;
2971 	case IOCMD_FLASH_ERASE_PART:
2972 		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
2973 		break;
2974 	case IOCMD_FLASH_UPDATE_PART:
2975 		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
2976 		break;
2977 	case IOCMD_FLASH_READ_PART:
2978 		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
2979 		break;
2980 	case IOCMD_DIAG_TEMP:
2981 		rc = bfad_iocmd_diag_temp(bfad, iocmd);
2982 		break;
2983 	case IOCMD_DIAG_MEMTEST:
2984 		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
2985 		break;
2986 	case IOCMD_DIAG_LOOPBACK:
2987 		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
2988 		break;
2989 	case IOCMD_DIAG_FWPING:
2990 		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
2991 		break;
2992 	case IOCMD_DIAG_QUEUETEST:
2993 		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
2994 		break;
2995 	case IOCMD_DIAG_SFP:
2996 		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
2997 		break;
2998 	case IOCMD_DIAG_LED:
2999 		rc = bfad_iocmd_diag_led(bfad, iocmd);
3000 		break;
3001 	case IOCMD_DIAG_BEACON_LPORT:
3002 		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
3003 		break;
3004 	case IOCMD_DIAG_LB_STAT:
3005 		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
3006 		break;
3007 	case IOCMD_DIAG_DPORT_ENABLE:
3008 		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
3009 		break;
3010 	case IOCMD_DIAG_DPORT_DISABLE:
3011 		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
3012 		break;
3013 	case IOCMD_DIAG_DPORT_SHOW:
3014 		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
3015 		break;
3016 	case IOCMD_DIAG_DPORT_START:
3017 		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
3018 		break;
3019 	case IOCMD_PHY_GET_ATTR:
3020 		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
3021 		break;
3022 	case IOCMD_PHY_GET_STATS:
3023 		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
3024 		break;
3025 	case IOCMD_PHY_UPDATE_FW:
3026 		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
3027 		break;
3028 	case IOCMD_PHY_READ_FW:
3029 		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
3030 		break;
3031 	case IOCMD_VHBA_QUERY:
3032 		rc = bfad_iocmd_vhba_query(bfad, iocmd);
3033 		break;
3034 	case IOCMD_DEBUG_PORTLOG:
3035 		rc = bfad_iocmd_porglog_get(bfad, iocmd);
3036 		break;
3037 	case IOCMD_DEBUG_FW_CORE:
3038 		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
3039 		break;
3040 	case IOCMD_DEBUG_FW_STATE_CLR:
3041 	case IOCMD_DEBUG_PORTLOG_CLR:
3042 	case IOCMD_DEBUG_START_DTRC:
3043 	case IOCMD_DEBUG_STOP_DTRC:
3044 		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
3045 		break;
3046 	case IOCMD_DEBUG_PORTLOG_CTL:
3047 		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
3048 		break;
3049 	case IOCMD_FCPIM_PROFILE_ON:
3050 	case IOCMD_FCPIM_PROFILE_OFF:
3051 		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
3052 		break;
3053 	case IOCMD_ITNIM_GET_IOPROFILE:
3054 		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
3055 		break;
3056 	case IOCMD_FCPORT_GET_STATS:
3057 		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
3058 		break;
3059 	case IOCMD_FCPORT_RESET_STATS:
3060 		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
3061 		break;
3062 	case IOCMD_BOOT_CFG:
3063 		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
3064 		break;
3065 	case IOCMD_BOOT_QUERY:
3066 		rc = bfad_iocmd_boot_query(bfad, iocmd);
3067 		break;
3068 	case IOCMD_PREBOOT_QUERY:
3069 		rc = bfad_iocmd_preboot_query(bfad, iocmd);
3070 		break;
3071 	case IOCMD_ETHBOOT_CFG:
3072 		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
3073 		break;
3074 	case IOCMD_ETHBOOT_QUERY:
3075 		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
3076 		break;
3077 	case IOCMD_TRUNK_ENABLE:
3078 	case IOCMD_TRUNK_DISABLE:
3079 		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
3080 		break;
3081 	case IOCMD_TRUNK_GET_ATTR:
3082 		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
3083 		break;
3084 	case IOCMD_QOS_ENABLE:
3085 	case IOCMD_QOS_DISABLE:
3086 		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
3087 		break;
3088 	case IOCMD_QOS_GET_ATTR:
3089 		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
3090 		break;
3091 	case IOCMD_QOS_GET_VC_ATTR:
3092 		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
3093 		break;
3094 	case IOCMD_QOS_GET_STATS:
3095 		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
3096 		break;
3097 	case IOCMD_QOS_RESET_STATS:
3098 		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
3099 		break;
3100 	case IOCMD_QOS_SET_BW:
3101 		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
3102 		break;
3103 	case IOCMD_VF_GET_STATS:
3104 		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
3105 		break;
3106 	case IOCMD_VF_RESET_STATS:
3107 		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
3108 		break;
3109 	case IOCMD_FCPIM_LUNMASK_ENABLE:
3110 	case IOCMD_FCPIM_LUNMASK_DISABLE:
3111 	case IOCMD_FCPIM_LUNMASK_CLEAR:
3112 		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
3113 		break;
3114 	case IOCMD_FCPIM_LUNMASK_QUERY:
3115 		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
3116 		break;
3117 	case IOCMD_FCPIM_LUNMASK_ADD:
3118 	case IOCMD_FCPIM_LUNMASK_DELETE:
3119 		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
3120 		break;
3121 	case IOCMD_FCPIM_THROTTLE_QUERY:
3122 		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
3123 		break;
3124 	case IOCMD_FCPIM_THROTTLE_SET:
3125 		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
3126 		break;
3127 	/* TFRU */
3128 	case IOCMD_TFRU_READ:
3129 		rc = bfad_iocmd_tfru_read(bfad, iocmd);
3130 		break;
3131 	case IOCMD_TFRU_WRITE:
3132 		rc = bfad_iocmd_tfru_write(bfad, iocmd);
3133 		break;
3134 	/* FRU */
3135 	case IOCMD_FRUVPD_READ:
3136 		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
3137 		break;
3138 	case IOCMD_FRUVPD_UPDATE:
3139 		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
3140 		break;
3141 	case IOCMD_FRUVPD_GET_MAX_SIZE:
3142 		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
3143 		break;
3144 	default:
3145 		rc = -EINVAL;
3146 		break;
3147 	}
3148 	return rc;
3149 }
3150 
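/*
 * Handle a BSG HST vendor request: copy the request payload into a linear
 * kernel buffer, run the IOCMD handler in place on that buffer, then copy
 * the updated buffer back into the reply payload and complete the job.
 */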
3151 static int
3152 bfad_im_bsg_vendor_request(struct bsg_job *job)
3153 {
3154 	struct fc_bsg_request *bsg_request = job->request;
3155 	struct fc_bsg_reply *bsg_reply = job->reply;
3156 	uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
3157 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
3158 	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3159 	struct bfad_s *bfad = im_port->bfad;
3160 	void *payload_kbuf;
3161 	int rc = -EINVAL;
3162 
3163 	/* Allocate a temp buffer to hold the passed in user space command */
3164 	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3165 	if (!payload_kbuf) {
3166 		rc = -ENOMEM;
3167 		goto out;
3168 	}
3169 
3170 	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
3171 	sg_copy_to_buffer(job->request_payload.sg_list,
3172 			  job->request_payload.sg_cnt, payload_kbuf,
3173 			  job->request_payload.payload_len);
3174 
3175 	/* Invoke IOCMD handler - to handle all the vendor command requests */
3176 	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
3177 				job->request_payload.payload_len);
3178 	if (rc != BFA_STATUS_OK)
3179 		goto error;
3180 
3181 	/* Copy the response data to the job->reply_payload sg_list */
3182 	sg_copy_from_buffer(job->reply_payload.sg_list,
3183 			    job->reply_payload.sg_cnt,
3184 			    payload_kbuf,
3185 			    job->reply_payload.payload_len);
3186 
3187 	/* free the command buffer */
3188 	kfree(payload_kbuf);
3189 
3190 	/* Fill the BSG job reply data */
3191 	job->reply_len = job->reply_payload.payload_len;
3192 	bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
3193 	bsg_reply->result = rc;
3194 
3195 	bsg_job_done(job, bsg_reply->result,
3196 		       bsg_reply->reply_payload_rcv_len);
3197 	return rc;
3198 error:
3199 	/* free the command buffer */
3200 	kfree(payload_kbuf);
3201 out:
3202 	bsg_reply->result = rc;
3203 	job->reply_len = sizeof(uint32_t);
3204 	bsg_reply->reply_payload_rcv_len = 0;
3205 	return rc;
3206 }
3207 
3208 /* FC passthru call backs */
3209 static u64
3210 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
3211 {
3212 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3213 	struct bfa_sge_s  *sge;
3214 	u64	addr;
3215 
3216 	sge = drv_fcxp->req_sge + sgeid;
3217 	addr = (u64)(size_t) sge->sg_addr;
3218 	return addr;
3219 }
3220 
3221 static u32
3222 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
3223 {
3224 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3225 	struct bfa_sge_s	*sge;
3226 
3227 	sge = drv_fcxp->req_sge + sgeid;
3228 	return sge->sg_len;
3229 }
3230 
3231 static u64
3232 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
3233 {
3234 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3235 	struct bfa_sge_s	*sge;
3236 	u64	addr;
3237 
3238 	sge = drv_fcxp->rsp_sge + sgeid;
3239 	addr = (u64)(size_t) sge->sg_addr;
3240 	return addr;
3241 }
3242 
3243 static u32
3244 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
3245 {
3246 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3247 	struct bfa_sge_s	*sge;
3248 
3249 	sge = drv_fcxp->rsp_sge + sgeid;
3250 	return sge->sg_len;
3251 }
3252 
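/*
 * FCXP send completion callback: record the request status and response
 * length, then wake up the waiting passthru request.
 */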
3253 static void
3254 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
3255 		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
3256 		struct fchs_s *rsp_fchs)
3257 {
3258 	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3259 
3260 	drv_fcxp->req_status = req_status;
3261 	drv_fcxp->rsp_len = rsp_len;
3262 
3263 	/* bfa_fcxp will be automatically freed by BFA */
3264 	drv_fcxp->bfa_fcxp = NULL;
3265 	complete(&drv_fcxp->comp);
3266 }
3267 
3268 static struct bfad_buf_info *
3269 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
3270 		 uint32_t payload_len, uint32_t *num_sgles)
3271 {
3272 	struct bfad_buf_info	*buf_base, *buf_info;
3273 	struct bfa_sge_s	*sg_table;
3274 	int sge_num = 1;
3275 
3276 	buf_base = kcalloc(sge_num,
3277 			   sizeof(struct bfad_buf_info) +
3278 				sizeof(struct bfa_sge_s), GFP_KERNEL);
3279 	if (!buf_base)
3280 		return NULL;
3281 
3282 	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
3283 			(sizeof(struct bfad_buf_info) * sge_num));
3284 
3285 	/* Allocate dma coherent memory */
3286 	buf_info = buf_base;
3287 	buf_info->size = payload_len;
3288 	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
3289 					    buf_info->size, &buf_info->phys,
3290 					    GFP_KERNEL);
3291 	if (!buf_info->virt)
3292 		goto out_free_mem;
3293 
3294 	/* copy the linear bsg buffer to buf_info */
3295 	memcpy(buf_info->virt, payload_kbuf, buf_info->size);
3296 
3297 	/*
3298 	 * Setup SG table
3299 	 */
3300 	sg_table->sg_len = buf_info->size;
3301 	sg_table->sg_addr = (void *)(size_t) buf_info->phys;
3302 
3303 	*num_sgles = sge_num;
3304 
3305 	return buf_base;
3306 
3307 out_free_mem:
3308 	kfree(buf_base);
3309 	return NULL;
3310 }
3311 
3312 static void
3313 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
3314 		   uint32_t num_sgles)
3315 {
3316 	int i;
3317 	struct bfad_buf_info *buf_info = buf_base;
3318 
3319 	if (buf_base) {
3320 		for (i = 0; i < num_sgles; buf_info++, i++) {
3321 			if (buf_info->virt != NULL)
3322 				dma_free_coherent(&bfad->pcidev->dev,
3323 					buf_info->size, buf_info->virt,
3324 					buf_info->phys);
3325 		}
3326 		kfree(buf_base);
3327 	}
3328 }
3329 
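/*
 * Allocate a hal FCXP with the driver's request/response SG callbacks and
 * send the passthru frame while holding bfad_lock; completion is reported
 * through bfad_send_fcpt_cb. Returns BFA_STATUS_OK, or BFA_STATUS_ENOMEM
 * if no FCXP is available.
 */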
3330 static int
3331 bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
3332 		   bfa_bsg_fcpt_t *bsg_fcpt)
3333 {
3334 	struct bfa_fcxp_s *hal_fcxp;
3335 	struct bfad_s	*bfad = drv_fcxp->port->bfad;
3336 	unsigned long	flags;
3337 	uint8_t	lp_tag;
3338 
3339 	spin_lock_irqsave(&bfad->bfad_lock, flags);
3340 
3341 	/* Allocate bfa_fcxp structure */
3342 	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
3343 				  drv_fcxp->num_req_sgles,
3344 				  drv_fcxp->num_rsp_sgles,
3345 				  bfad_fcxp_get_req_sgaddr_cb,
3346 				  bfad_fcxp_get_req_sglen_cb,
3347 				  bfad_fcxp_get_rsp_sgaddr_cb,
3348 				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
3349 	if (!hal_fcxp) {
3350 		bfa_trc(bfad, 0);
3351 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3352 		return BFA_STATUS_ENOMEM;
3353 	}
3354 
3355 	drv_fcxp->bfa_fcxp = hal_fcxp;
3356 
3357 	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
3358 
3359 	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
3360 		      bsg_fcpt->cts, bsg_fcpt->cos,
3361 		      job->request_payload.payload_len,
3362 		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
3363 		      job->reply_payload.payload_len, bsg_fcpt->tsecs);
3364 
3365 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3366 
3367 	return BFA_STATUS_OK;
3368 }
3369 
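/*
 * Handle an ELS/CT passthru request: copy the bfa_bsg_fcpt_t from user
 * space, look up the local port (and the remote port for RPT commands),
 * map the request/response payloads into DMA-able buffers, send the FCXP
 * and wait for its completion, then copy the response and the updated
 * bsg_fcpt back to user space.
 */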
3370 static int
3371 bfad_im_bsg_els_ct_request(struct bsg_job *job)
3372 {
3373 	struct bfa_bsg_data *bsg_data;
3374 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
3375 	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3376 	struct bfad_s *bfad = im_port->bfad;
3377 	bfa_bsg_fcpt_t *bsg_fcpt;
3378 	struct bfad_fcxp    *drv_fcxp;
3379 	struct bfa_fcs_lport_s *fcs_port;
3380 	struct bfa_fcs_rport_s *fcs_rport;
3381 	struct fc_bsg_request *bsg_request = job->request;
3382 	struct fc_bsg_reply *bsg_reply = job->reply;
3383 	uint32_t command_type = bsg_request->msgcode;
3384 	unsigned long flags;
3385 	struct bfad_buf_info *rsp_buf_info;
3386 	void *req_kbuf = NULL, *rsp_kbuf = NULL;
3387 	int rc = -EINVAL;
3388 
3389 	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
3390 	bsg_reply->reply_payload_rcv_len = 0;
3391 
3392 	/* Get the payload passed in from userspace */
3393 	bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
3394 					    sizeof(struct fc_bsg_request));
3395 	if (bsg_data == NULL)
3396 		goto out;
3397 
3398 	/*
3399 	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
3400 	 * buffer of size bsg_data->payload_len
3401 	 */
3402 	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
3403 	if (!bsg_fcpt) {
3404 		rc = -ENOMEM;
3405 		goto out;
3406 	}
3407 
3408 	if (copy_from_user((uint8_t *)bsg_fcpt,
3409 				(void *)(unsigned long)bsg_data->payload,
3410 				bsg_data->payload_len)) {
3411 		kfree(bsg_fcpt);
3412 		rc = -EIO;
3413 		goto out;
3414 	}
3415 
3416 	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
3417 	if (drv_fcxp == NULL) {
3418 		kfree(bsg_fcpt);
3419 		rc = -ENOMEM;
3420 		goto out;
3421 	}
3422 
3423 	spin_lock_irqsave(&bfad->bfad_lock, flags);
3424 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
3425 					bsg_fcpt->lpwwn);
3426 	if (fcs_port == NULL) {
3427 		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
3428 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3429 		goto out_free_mem;
3430 	}
3431 
3432 	/* Check if the port is online before sending FC Passthru cmd */
3433 	if (!bfa_fcs_lport_is_online(fcs_port)) {
3434 		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
3435 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3436 		goto out_free_mem;
3437 	}
3438 
3439 	drv_fcxp->port = fcs_port->bfad_port;
3440 
3441 	if (!drv_fcxp->port->bfad)
3442 		drv_fcxp->port->bfad = bfad;
3443 
3444 	/* Fetch the bfa_rport - if nexus needed */
3445 	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
3446 	    command_type == FC_BSG_HST_CT) {
3447 		/* BSG HST commands: no nexus needed */
3448 		drv_fcxp->bfa_rport = NULL;
3449 
3450 	} else if (command_type == FC_BSG_RPT_ELS ||
3451 		   command_type == FC_BSG_RPT_CT) {
3452 		/* BSG RPT commands: nexus needed */
3453 		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
3454 							    bsg_fcpt->dpwwn);
3455 		if (fcs_rport == NULL) {
3456 			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
3457 			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3458 			goto out_free_mem;
3459 		}
3460 
3461 		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
3462 
3463 	} else { /* Unknown BSG msgcode; return -EINVAL */
3464 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3465 		goto out_free_mem;
3466 	}
3467 
3468 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3469 
3470 	/* allocate memory for req / rsp buffers */
3471 	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3472 	if (!req_kbuf) {
3473 		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
3474 				bfad->pci_name);
3475 		rc = -ENOMEM;
3476 		goto out_free_mem;
3477 	}
3478 
3479 	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
3480 	if (!rsp_kbuf) {
3481 		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
3482 				bfad->pci_name);
3483 		rc = -ENOMEM;
3484 		goto out_free_mem;
3485 	}
3486 
3487 	/* map req sg - copy the sg_list passed in to the linear buffer */
3488 	sg_copy_to_buffer(job->request_payload.sg_list,
3489 			  job->request_payload.sg_cnt, req_kbuf,
3490 			  job->request_payload.payload_len);
3491 
3492 	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
3493 					job->request_payload.payload_len,
3494 					&drv_fcxp->num_req_sgles);
3495 	if (!drv_fcxp->reqbuf_info) {
3496 		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
3497 				bfad->pci_name);
3498 		rc = -ENOMEM;
3499 		goto out_free_mem;
3500 	}
3501 
3502 	drv_fcxp->req_sge = (struct bfa_sge_s *)
3503 			    (((uint8_t *)drv_fcxp->reqbuf_info) +
3504 			    (sizeof(struct bfad_buf_info) *
3505 					drv_fcxp->num_req_sgles));
3506 
3507 	/* map rsp sg */
3508 	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
3509 					job->reply_payload.payload_len,
3510 					&drv_fcxp->num_rsp_sgles);
3511 	if (!drv_fcxp->rspbuf_info) {
3512 		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
3513 				bfad->pci_name);
3514 		rc = -ENOMEM;
3515 		goto out_free_mem;
3516 	}
3517 
3518 	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
3519 	drv_fcxp->rsp_sge = (struct bfa_sge_s  *)
3520 			    (((uint8_t *)drv_fcxp->rspbuf_info) +
3521 			    (sizeof(struct bfad_buf_info) *
3522 					drv_fcxp->num_rsp_sgles));
3523 
3524 	/* fcxp send */
3525 	init_completion(&drv_fcxp->comp);
3526 	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
3527 	if (rc == BFA_STATUS_OK) {
3528 		wait_for_completion(&drv_fcxp->comp);
3529 		bsg_fcpt->status = drv_fcxp->req_status;
3530 	} else {
3531 		bsg_fcpt->status = rc;
3532 		goto out_free_mem;
3533 	}
3534 
3535 	/* fill the job->reply data */
3536 	if (drv_fcxp->req_status == BFA_STATUS_OK) {
3537 		job->reply_len = drv_fcxp->rsp_len;
3538 		bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
3539 		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
3540 	} else {
3541 		bsg_reply->reply_payload_rcv_len =
3542 					sizeof(struct fc_bsg_ctels_reply);
3543 		job->reply_len = sizeof(uint32_t);
3544 		bsg_reply->reply_data.ctels_reply.status =
3545 						FC_CTELS_STATUS_REJECT;
3546 	}
3547 
3548 	/* Copy the response data to the reply_payload sg list */
3549 	sg_copy_from_buffer(job->reply_payload.sg_list,
3550 			    job->reply_payload.sg_cnt,
3551 			    (uint8_t *)rsp_buf_info->virt,
3552 			    job->reply_payload.payload_len);
3553 
3554 out_free_mem:
3555 	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
3556 			   drv_fcxp->num_rsp_sgles);
3557 	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
3558 			   drv_fcxp->num_req_sgles);
3559 	kfree(req_kbuf);
3560 	kfree(rsp_kbuf);
3561 
3562 	/* Copy the updated bsg_fcpt back to user space */
3563 	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
3564 			(void *)bsg_fcpt, bsg_data->payload_len))
3565 		rc = -EIO;
3566 
3567 	kfree(bsg_fcpt);
3568 	kfree(drv_fcxp);
3569 out:
3570 	bsg_reply->result = rc;
3571 
3572 	if (rc == BFA_STATUS_OK)
3573 		bsg_job_done(job, bsg_reply->result,
3574 			       bsg_reply->reply_payload_rcv_len);
3575 
3576 	return rc;
3577 }
3578 
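/*
 * bsg_job entry point: route HST vendor requests to the IOCMD path and
 * ELS/CT requests to the FC passthru path.
 */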
3579 int
3580 bfad_im_bsg_request(struct bsg_job *job)
3581 {
3582 	struct fc_bsg_request *bsg_request = job->request;
3583 	struct fc_bsg_reply *bsg_reply = job->reply;
3584 	int rc = BFA_STATUS_OK;
3585 
3586 	switch (bsg_request->msgcode) {
3587 	case FC_BSG_HST_VENDOR:
3588 		/* Process BSG HST Vendor requests */
3589 		rc = bfad_im_bsg_vendor_request(job);
3590 		break;
3591 	case FC_BSG_HST_ELS_NOLOGIN:
3592 	case FC_BSG_RPT_ELS:
3593 	case FC_BSG_HST_CT:
3594 	case FC_BSG_RPT_CT:
3595 		/* Process BSG ELS/CT commands */
3596 		rc = bfad_im_bsg_els_ct_request(job);
3597 		break;
3598 	default:
3599 		bsg_reply->result = rc = -EINVAL;
3600 		bsg_reply->reply_payload_rcv_len = 0;
3601 		break;
3602 	}
3603 
3604 	return rc;
3605 }
3606 
3607 int
3608 bfad_im_bsg_timeout(struct bsg_job *job)
3609 {
3610 	/* Don't complete the BSG job request - return -EAGAIN
3611 	 * to reset the bsg job timeout: for ELS/CT passthru we
3612 	 * already have a timer to track the request.
3613 	 */
3614 	return -EAGAIN;
3615 }
3616