1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2009-2012 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  *                                                                 *
8  * This program is free software; you can redistribute it and/or   *
9  * modify it under the terms of version 2 of the GNU General       *
10  * Public License as published by the Free Software Foundation.    *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
17  * more details, a copy of which can be found in the file COPYING  *
18  * included with this package.                                     *
19  *******************************************************************/
20 
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/list.h>
27 
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include <scsi/fc/fc_fs.h>
33 
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_bsg.h"
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
42 #include "lpfc.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_debugfs.h"
46 #include "lpfc_vport.h"
47 #include "lpfc_version.h"
48 
49 struct lpfc_bsg_event {
50 	struct list_head node;
51 	struct kref kref;
52 	wait_queue_head_t wq;
53 
54 	/* Event type and waiter identifiers */
55 	uint32_t type_mask;
56 	uint32_t req_id;
57 	uint32_t reg_id;
58 
59 	/* next two fields are here for the auto-delete logic */
60 	unsigned long wait_time_stamp;
61 	int waiting;
62 
63 	/* seen and not seen events */
64 	struct list_head events_to_get;
65 	struct list_head events_to_see;
66 
67 	/* job waiting for this event to finish */
68 	struct fc_bsg_job *set_job;
69 };
70 
71 struct lpfc_bsg_iocb {
72 	struct lpfc_iocbq *cmdiocbq;
73 	struct lpfc_iocbq *rspiocbq;
74 	struct lpfc_dmabuf *bmp;
75 	struct lpfc_nodelist *ndlp;
76 
77 	/* job waiting for this iocb to finish */
78 	struct fc_bsg_job *set_job;
79 };
80 
81 struct lpfc_bsg_mbox {
82 	LPFC_MBOXQ_t *pmboxq;
83 	MAILBOX_t *mb;
84 	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
85 	uint8_t *ext; /* extended mailbox data */
86 	uint32_t mbOffset; /* from app */
87 	uint32_t inExtWLen; /* from app */
88 	uint32_t outExtWLen; /* from app */
89 
90 	/* job waiting for this mbox command to finish */
91 	struct fc_bsg_job *set_job;
92 };
93 
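/* Fixed destination ID used by the driver to address management commands to the Menlo chip */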
94 #define MENLO_DID 0x0000FC0E
95 
96 struct lpfc_bsg_menlo {
97 	struct lpfc_iocbq *cmdiocbq;
98 	struct lpfc_iocbq *rspiocbq;
99 	struct lpfc_dmabuf *bmp;
100 
101 	/* job waiting for this iocb to finish */
102 	struct fc_bsg_job *set_job;
103 };
104 
105 #define TYPE_EVT	1
106 #define TYPE_IOCB	2
107 #define TYPE_MBOX	3
108 #define TYPE_MENLO	4
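/*
 * Per-job driver context stashed in fc_bsg_job->dd_data; the type field
 * selects which member of the context_un union is valid for the job.
 */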
109 struct bsg_job_data {
110 	uint32_t type;
111 	union {
112 		struct lpfc_bsg_event *evt;
113 		struct lpfc_bsg_iocb iocb;
114 		struct lpfc_bsg_mbox mbox;
115 		struct lpfc_bsg_menlo menlo;
116 	} context_un;
117 };
118 
119 struct event_data {
120 	struct list_head node;
121 	uint32_t type;
122 	uint32_t immed_dat;
123 	void *data;
124 	uint32_t len;
125 };
126 
127 #define BUF_SZ_4K 4096
128 #define SLI_CT_ELX_LOOPBACK 0x10
129 
130 enum ELX_LOOPBACK_CMD {
131 	ELX_LOOPBACK_XRI_SETUP,
132 	ELX_LOOPBACK_DATA,
133 };
134 
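/*
 * Size of the fixed CT request header: the offset of the 'un' payload
 * union within struct lpfc_sli_ct_request, computed with the classic
 * offsetof-on-NULL idiom.
 */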
135 #define ELX_LOOPBACK_HEADER_SZ \
136 	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
137 
138 struct lpfc_dmabufext {
139 	struct lpfc_dmabuf dma;
140 	uint32_t size;
141 	uint32_t flag;
142 };
143 
144 /**
145  * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
146  * @phba: Pointer to HBA context object.
147  * @cmdiocbq: Pointer to command iocb.
148  * @rspiocbq: Pointer to response iocb.
149  *
150  * This function is the completion handler for iocbs issued using
151  * the lpfc_bsg_send_mgmt_cmd function. It is called by the
152  * ring event handler function without any lock held. It
153  * can be called from both worker thread context and interrupt
154  * context, and also from another thread which cleans up the SLI
155  * layer objects.
156  * This function unmaps the request and reply payload buffers,
157  * sets the job result from the response iocb status, releases
158  * the driver resources held for the command, and completes the
159  * bsg job back to userspace.
160  **/
161 static void
162 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
163 			struct lpfc_iocbq *cmdiocbq,
164 			struct lpfc_iocbq *rspiocbq)
165 {
166 	struct bsg_job_data *dd_data;
167 	struct fc_bsg_job *job;
168 	IOCB_t *rsp;
169 	struct lpfc_dmabuf *bmp;
170 	struct lpfc_nodelist *ndlp;
171 	struct lpfc_bsg_iocb *iocb;
172 	unsigned long flags;
173 	int rc = 0;
174 
175 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
176 	dd_data = cmdiocbq->context2;
177 	if (!dd_data) {
178 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
179 		lpfc_sli_release_iocbq(phba, cmdiocbq);
180 		return;
181 	}
182 
183 	iocb = &dd_data->context_un.iocb;
184 	job = iocb->set_job;
185 	job->dd_data = NULL; /* so timeout handler does not reply */
186 
187 	bmp = iocb->bmp;
188 	rsp = &rspiocbq->iocb;
189 	ndlp = cmdiocbq->context1;
190 
191 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
192 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
193 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
194 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
195 
196 	if (rsp->ulpStatus) {
197 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
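			/* The low byte of word 4 carries the IOERR reason
			 * for a LOCAL_REJECT; map the common reasons to
			 * distinct errnos for userspace.
			 */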
198 			switch (rsp->un.ulpWord[4] & 0xff) {
199 			case IOERR_SEQUENCE_TIMEOUT:
200 				rc = -ETIMEDOUT;
201 				break;
202 			case IOERR_INVALID_RPI:
203 				rc = -EFAULT;
204 				break;
205 			default:
206 				rc = -EACCES;
207 				break;
208 			}
209 		} else
210 			rc = -EACCES;
211 	} else
212 		job->reply->reply_payload_rcv_len =
213 			rsp->un.genreq64.bdl.bdeSize;
214 
215 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
216 	lpfc_sli_release_iocbq(phba, cmdiocbq);
217 	lpfc_nlp_put(ndlp);
218 	kfree(bmp);
219 	kfree(dd_data);
220 	/* make error code available to userspace */
221 	job->reply->result = rc;
222 	/* complete the job back to userspace */
223 	job->job_done(job);
224 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
225 	return;
226 }
227 
228 /**
229  * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
230  * @job: fc_bsg_job to handle
231  **/
232 static int
233 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
234 {
235 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
236 	struct lpfc_hba *phba = vport->phba;
237 	struct lpfc_rport_data *rdata = job->rport->dd_data;
238 	struct lpfc_nodelist *ndlp = rdata->pnode;
239 	struct ulp_bde64 *bpl = NULL;
240 	uint32_t timeout;
241 	struct lpfc_iocbq *cmdiocbq = NULL;
242 	IOCB_t *cmd;
243 	struct lpfc_dmabuf *bmp = NULL;
244 	int request_nseg;
245 	int reply_nseg;
246 	struct scatterlist *sgel = NULL;
247 	int numbde;
248 	dma_addr_t busaddr;
249 	struct bsg_job_data *dd_data;
250 	uint32_t creg_val;
251 	int rc = 0;
252 	int iocb_stat;
253 
254 	/* in case no data is transferred */
255 	job->reply->reply_payload_rcv_len = 0;
256 
257 	/* allocate our bsg tracking structure */
258 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
259 	if (!dd_data) {
260 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
261 				"2733 Failed allocation of dd_data\n");
262 		rc = -ENOMEM;
263 		goto no_dd_data;
264 	}
265 
266 	if (!lpfc_nlp_get(ndlp)) {
267 		rc = -ENODEV;
268 		goto no_ndlp;
269 	}
270 
271 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
272 	if (!bmp) {
273 		rc = -ENOMEM;
274 		goto free_ndlp;
275 	}
276 
277 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
278 		rc = -ENODEV;
279 		goto free_bmp;
280 	}
281 
282 	cmdiocbq = lpfc_sli_get_iocbq(phba);
283 	if (!cmdiocbq) {
284 		rc = -ENOMEM;
285 		goto free_bmp;
286 	}
287 
288 	cmd = &cmdiocbq->iocb;
289 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
290 	if (!bmp->virt) {
291 		rc = -ENOMEM;
292 		goto free_cmdiocbq;
293 	}
294 
295 	INIT_LIST_HEAD(&bmp->list);
296 	bpl = (struct ulp_bde64 *) bmp->virt;
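	/* Build the buffer pointer list: one little-endian 64-bit BDE per
	 * DMA-mapped scatterlist segment, request entries followed by reply
	 * entries.
	 */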
297 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
298 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
299 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
300 		busaddr = sg_dma_address(sgel);
301 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
302 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
303 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
304 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
305 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
306 		bpl++;
307 	}
308 
309 	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
310 				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
311 	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
312 		busaddr = sg_dma_address(sgel);
313 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
314 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
315 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
316 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
317 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
318 		bpl++;
319 	}
320 
321 	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
322 	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
323 	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
324 	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
325 	cmd->un.genreq64.bdl.bdeSize =
326 		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
327 	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
328 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
329 	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
330 	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
331 	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
332 	cmd->ulpBdeCount = 1;
333 	cmd->ulpLe = 1;
334 	cmd->ulpClass = CLASS3;
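	/* SLI4 virtualizes RPIs, so translate the driver's logical rpi into
	 * the adapter's physical rpi id before issuing the command.
	 */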
335 	cmd->ulpContext = ndlp->nlp_rpi;
336 	if (phba->sli_rev == LPFC_SLI_REV4)
337 		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
338 	cmd->ulpOwner = OWN_CHIP;
339 	cmdiocbq->vport = phba->pport;
340 	cmdiocbq->context3 = bmp;
341 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
342 	timeout = phba->fc_ratov * 2;
343 	cmd->ulpTimeout = timeout;
344 
345 	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
346 	cmdiocbq->context1 = ndlp;
347 	cmdiocbq->context2 = dd_data;
348 	dd_data->type = TYPE_IOCB;
349 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
350 	dd_data->context_un.iocb.set_job = job;
351 	dd_data->context_un.iocb.bmp = bmp;
352 
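	/* If FCP ring interrupts are disabled for polled I/O, re-enable the
	 * ring's host attention interrupt so the completion can be seen.
	 */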
353 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
354 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
355 			rc = -EIO;
356 			goto free_cmdiocbq;
357 		}
358 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
359 		writel(creg_val, phba->HCregaddr);
360 		readl(phba->HCregaddr); /* flush */
361 	}
362 
363 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
364 	if (iocb_stat == IOCB_SUCCESS)
365 		return 0; /* done for now */
366 	else if (iocb_stat == IOCB_BUSY)
367 		rc = -EAGAIN;
368 	else
369 		rc = -EIO;
370 
371 
372 	/* iocb failed so cleanup */
373 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
374 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
375 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
376 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
377 
378 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
379 
380 free_cmdiocbq:
381 	lpfc_sli_release_iocbq(phba, cmdiocbq);
382 free_bmp:
383 	kfree(bmp);
384 free_ndlp:
385 	lpfc_nlp_put(ndlp);
386 no_ndlp:
387 	kfree(dd_data);
388 no_dd_data:
389 	/* make error code available to userspace */
390 	job->reply->result = rc;
391 	job->dd_data = NULL;
392 	return rc;
393 }
394 
395 /**
396  * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
397  * @phba: Pointer to HBA context object.
398  * @cmdiocbq: Pointer to command iocb.
399  * @rspiocbq: Pointer to response iocb.
400  *
401  * This function is the completion handler for iocbs issued using
402  * the lpfc_bsg_rport_els function. It is called by the
403  * ring event handler function without any lock held. It
404  * can be called from both worker thread context and interrupt
405  * context, and also from another thread which cleans up the SLI
406  * layer objects.
407  * This function copies the response iocb into the response iocb
408  * object saved in the job context, decodes any LS_RJT reject
409  * data for userspace, releases the driver resources held for the
410  * command, and completes the bsg job back to userspace.
411  **/
412 static void
413 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
414 			struct lpfc_iocbq *cmdiocbq,
415 			struct lpfc_iocbq *rspiocbq)
416 {
417 	struct bsg_job_data *dd_data;
418 	struct fc_bsg_job *job;
419 	IOCB_t *rsp;
420 	struct lpfc_nodelist *ndlp;
421 	struct lpfc_dmabuf *pbuflist = NULL;
422 	struct fc_bsg_ctels_reply *els_reply;
423 	uint8_t *rjt_data;
424 	unsigned long flags;
425 	int rc = 0;
426 
427 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
428 	dd_data = cmdiocbq->context1;
429 	/* normal completion and timeout crossed paths, already done */
430 	if (!dd_data) {
431 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
432 		return;
433 	}
434 
435 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
436 	if (cmdiocbq->context2 && rspiocbq)
437 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
438 		       &rspiocbq->iocb, sizeof(IOCB_t));
439 
440 	job = dd_data->context_un.iocb.set_job;
441 	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
442 	rspiocbq = dd_data->context_un.iocb.rspiocbq;
443 	rsp = &rspiocbq->iocb;
444 	ndlp = dd_data->context_un.iocb.ndlp;
445 
446 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
447 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
448 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
449 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
450 
451 	if (job->reply->result == -EAGAIN)
452 		rc = -EAGAIN;
453 	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
454 		job->reply->reply_payload_rcv_len =
455 			rsp->un.elsreq64.bdl.bdeSize;
456 	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
457 		job->reply->reply_payload_rcv_len =
458 			sizeof(struct fc_bsg_ctels_reply);
459 		/* LS_RJT data returned in word 4 */
460 		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
461 		els_reply = &job->reply->reply_data.ctels_reply;
462 		els_reply->status = FC_CTELS_STATUS_REJECT;
463 		els_reply->rjt_data.action = rjt_data[3];
464 		els_reply->rjt_data.reason_code = rjt_data[2];
465 		els_reply->rjt_data.reason_explanation = rjt_data[1];
466 		els_reply->rjt_data.vendor_unique = rjt_data[0];
467 	} else
468 		rc = -EIO;
469 
470 	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
471 	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
472 	lpfc_sli_release_iocbq(phba, rspiocbq);
473 	lpfc_sli_release_iocbq(phba, cmdiocbq);
474 	lpfc_nlp_put(ndlp);
475 	kfree(dd_data);
476 	/* make error code available to userspace */
477 	job->reply->result = rc;
478 	job->dd_data = NULL;
479 	/* complete the job back to userspace */
480 	job->job_done(job);
481 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
482 	return;
483 }
484 
485 /**
486  * lpfc_bsg_rport_els - send an ELS command from a bsg request
487  * @job: fc_bsg_job to handle
488  **/
489 static int
490 lpfc_bsg_rport_els(struct fc_bsg_job *job)
491 {
492 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
493 	struct lpfc_hba *phba = vport->phba;
494 	struct lpfc_rport_data *rdata = job->rport->dd_data;
495 	struct lpfc_nodelist *ndlp = rdata->pnode;
496 	uint32_t elscmd;
497 	uint32_t cmdsize;
498 	uint32_t rspsize;
499 	struct lpfc_iocbq *rspiocbq;
500 	struct lpfc_iocbq *cmdiocbq;
501 	IOCB_t *rsp;
502 	uint16_t rpi = 0;
503 	struct lpfc_dmabuf *pcmd;
504 	struct lpfc_dmabuf *prsp;
505 	struct lpfc_dmabuf *pbuflist = NULL;
506 	struct ulp_bde64 *bpl;
507 	int request_nseg;
508 	int reply_nseg;
509 	struct scatterlist *sgel = NULL;
510 	int numbde;
511 	dma_addr_t busaddr;
512 	struct bsg_job_data *dd_data;
513 	uint32_t creg_val;
514 	int rc = 0;
515 
516 	/* in case no data is transferred */
517 	job->reply->reply_payload_rcv_len = 0;
518 
519 	/* allocate our bsg tracking structure */
520 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
521 	if (!dd_data) {
522 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
523 				"2735 Failed allocation of dd_data\n");
524 		rc = -ENOMEM;
525 		goto no_dd_data;
526 	}
527 
528 	if (!lpfc_nlp_get(ndlp)) {
529 		rc = -ENODEV;
530 		goto free_dd_data;
531 	}
532 
533 	elscmd = job->request->rqst_data.r_els.els_code;
534 	cmdsize = job->request_payload.payload_len;
535 	rspsize = job->reply_payload.payload_len;
536 	rspiocbq = lpfc_sli_get_iocbq(phba);
537 	if (!rspiocbq) {
538 		lpfc_nlp_put(ndlp);
539 		rc = -ENOMEM;
540 		goto free_dd_data;
541 	}
542 
543 	rsp = &rspiocbq->iocb;
544 	rpi = ndlp->nlp_rpi;
545 
546 	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
547 				      ndlp->nlp_DID, elscmd);
548 	if (!cmdiocbq) {
549 		rc = -EIO;
550 		goto free_rspiocbq;
551 	}
552 
553 	/* lpfc_prep_els_iocb() set context1 to the ndlp, context2 to the
554 	 * command dmabuf, and context3 to the data dmabuf
555 	 */
556 	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
557 	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
558 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
559 	kfree(pcmd);
560 	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
561 	kfree(prsp);
562 	cmdiocbq->context2 = NULL;
563 
564 	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
565 	bpl = (struct ulp_bde64 *) pbuflist->virt;
566 
567 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
568 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
569 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
570 		busaddr = sg_dma_address(sgel);
571 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
572 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
573 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
574 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
575 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
576 		bpl++;
577 	}
578 
579 	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
580 				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
581 	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
582 		busaddr = sg_dma_address(sgel);
583 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
584 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
585 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
586 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
587 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
588 		bpl++;
589 	}
590 	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
591 		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
592 	if (phba->sli_rev == LPFC_SLI_REV4)
593 		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
594 	else
595 		cmdiocbq->iocb.ulpContext = rpi;
596 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
597 	cmdiocbq->context1 = NULL;
598 	cmdiocbq->context2 = NULL;
599 
600 	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
601 	cmdiocbq->context1 = dd_data;
602 	cmdiocbq->context_un.ndlp = ndlp;
603 	cmdiocbq->context2 = rspiocbq;
604 	dd_data->type = TYPE_IOCB;
605 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
606 	dd_data->context_un.iocb.rspiocbq = rspiocbq;
607 	dd_data->context_un.iocb.set_job = job;
608 	dd_data->context_un.iocb.bmp = NULL;
609 	dd_data->context_un.iocb.ndlp = ndlp;
610 
611 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
612 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
613 			rc = -EIO;
614 			goto linkdown_err;
615 		}
616 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
617 		writel(creg_val, phba->HCregaddr);
618 		readl(phba->HCregaddr); /* flush */
619 	}
620 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
621 	lpfc_nlp_put(ndlp);
622 	if (rc == IOCB_SUCCESS)
623 		return 0; /* done for now */
624 	else if (rc == IOCB_BUSY)
625 		rc = -EAGAIN;
626 	else
627 		rc = -EIO;
628 
629 linkdown_err:
630 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
631 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
632 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
633 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
634 
635 	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
636 
637 	lpfc_sli_release_iocbq(phba, cmdiocbq);
638 
639 free_rspiocbq:
640 	lpfc_sli_release_iocbq(phba, rspiocbq);
641 
642 free_dd_data:
643 	kfree(dd_data);
644 
645 no_dd_data:
646 	/* make error code available to userspace */
647 	job->reply->result = rc;
648 	job->dd_data = NULL;
649 	return rc;
650 }
651 
652 /**
653  * lpfc_bsg_event_free - frees an allocated event structure
654  * @kref: Pointer to a kref.
655  *
656  * Called from kref_put. Back cast the kref into an event structure address.
657  * Free any events to get, delete associated nodes, free any events to see,
658  * free any data then free the event itself.
659  **/
660 static void
661 lpfc_bsg_event_free(struct kref *kref)
662 {
663 	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
664 						  kref);
665 	struct event_data *ed;
666 
667 	list_del(&evt->node);
668 
669 	while (!list_empty(&evt->events_to_get)) {
670 		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
671 		list_del(&ed->node);
672 		kfree(ed->data);
673 		kfree(ed);
674 	}
675 
676 	while (!list_empty(&evt->events_to_see)) {
677 		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
678 		list_del(&ed->node);
679 		kfree(ed->data);
680 		kfree(ed);
681 	}
682 
683 	kfree(evt);
684 }
685 
686 /**
687  * lpfc_bsg_event_ref - increments the kref for an event
688  * @evt: Pointer to an event structure.
689  **/
690 static inline void
691 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
692 {
693 	kref_get(&evt->kref);
694 }
695 
696 /**
697  * lpfc_bsg_event_unref - Uses kref_put to free an event structure
698  * @evt: Pointer to an event structure.
699  **/
700 static inline void
701 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
702 {
703 	kref_put(&evt->kref, lpfc_bsg_event_free);
704 }
705 
706 /**
707  * lpfc_bsg_event_new - allocate and initialize an event structure
708  * @ev_mask: Mask of events.
709  * @ev_reg_id: Event reg id.
710  * @ev_req_id: Event request id.
711  **/
712 static struct lpfc_bsg_event *
713 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
714 {
715 	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
716 
717 	if (!evt)
718 		return NULL;
719 
720 	INIT_LIST_HEAD(&evt->events_to_get);
721 	INIT_LIST_HEAD(&evt->events_to_see);
722 	evt->type_mask = ev_mask;
723 	evt->req_id = ev_req_id;
724 	evt->reg_id = ev_reg_id;
725 	evt->wait_time_stamp = jiffies;
726 	init_waitqueue_head(&evt->wq);
727 	kref_init(&evt->kref);
728 	return evt;
729 }
730 
731 /**
732  * diag_cmd_data_free - Frees a list of lpfc dma buffer extensions
733  * @phba: Pointer to HBA context object.
734  * @mlist: Pointer to the head of an lpfc dma buffer extension list.
735  **/
736 static int
737 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
738 {
739 	struct lpfc_dmabufext *mlast;
740 	struct pci_dev *pcidev;
741 	struct list_head head, *curr, *next;
742 
743 	if ((!mlist) || (!lpfc_is_link_up(phba) &&
744 		(phba->link_flag & LS_LOOPBACK_MODE))) {
745 		return 0;
746 	}
747 
748 	pcidev = phba->pcidev;
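	/* Splice a local list head into the chain so the walk below visits
	 * and frees every buffer extension, including the first.
	 */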
749 	list_add_tail(&head, &mlist->dma.list);
750 
751 	list_for_each_safe(curr, next, &head) {
752 		mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
753 		if (mlast->dma.virt)
754 			dma_free_coherent(&pcidev->dev,
755 					  mlast->size,
756 					  mlast->dma.virt,
757 					  mlast->dma.phys);
758 		kfree(mlast);
759 	}
760 	return 0;
761 }
762 
763 /**
764  * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
765  * @phba: Pointer to HBA context object.
766  * @pring: Pointer to the driver SLI ring object.
767  * @piocbq: Pointer to the unsolicited receive iocb.
768  *
769  * This function is called when an unsolicited CT command is received.  It
770  * forwards the event to any processes registered to receive CT events.
771  **/
772 int
773 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
774 			struct lpfc_iocbq *piocbq)
775 {
776 	uint32_t evt_req_id = 0;
777 	uint32_t cmd;
778 	uint32_t len;
779 	struct lpfc_dmabuf *dmabuf = NULL;
780 	struct lpfc_bsg_event *evt;
781 	struct event_data *evt_dat = NULL;
782 	struct lpfc_iocbq *iocbq;
783 	size_t offset = 0;
784 	struct list_head head;
785 	struct ulp_bde64 *bde;
786 	dma_addr_t dma_addr;
787 	int i;
788 	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
789 	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
790 	struct lpfc_hbq_entry *hbqe;
791 	struct lpfc_sli_ct_request *ct_req;
792 	struct fc_bsg_job *job = NULL;
793 	unsigned long flags;
794 	int size = 0;
795 
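	/* Chain a local list head onto the iocbq list so every iocbq of
	 * this unsolicited sequence can be walked below.
	 */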
796 	INIT_LIST_HEAD(&head);
797 	list_add_tail(&head, &piocbq->list);
798 
799 	if (piocbq->iocb.ulpBdeCount == 0 ||
800 	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
801 		goto error_ct_unsol_exit;
802 
803 	if (phba->link_state == LPFC_HBA_ERROR ||
804 		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
805 		goto error_ct_unsol_exit;
806 
807 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
808 		dmabuf = bdeBuf1;
809 	else {
810 		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
811 				    piocbq->iocb.un.cont64[0].addrLow);
812 		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
813 	}
814 	if (dmabuf == NULL)
815 		goto error_ct_unsol_exit;
816 	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
817 	evt_req_id = ct_req->FsType;
818 	cmd = ct_req->CommandResponse.bits.CmdRsp;
819 	len = ct_req->CommandResponse.bits.Size;
820 	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
821 		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
822 
823 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
824 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
825 		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
826 			evt->req_id != evt_req_id)
827 			continue;
828 
829 		lpfc_bsg_event_ref(evt);
830 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
831 		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
832 		if (evt_dat == NULL) {
833 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
834 			lpfc_bsg_event_unref(evt);
835 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
836 					"2614 Memory allocation failed for "
837 					"CT event\n");
838 			break;
839 		}
840 
841 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
842 			/* take accumulated byte count from the last iocbq */
843 			iocbq = list_entry(head.prev, typeof(*iocbq), list);
844 			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
845 		} else {
846 			list_for_each_entry(iocbq, &head, list) {
847 				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
848 					evt_dat->len +=
849 					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
850 			}
851 		}
852 
853 		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
854 		if (evt_dat->data == NULL) {
855 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
856 					"2615 Memory allocation failed for "
857 					"CT event data, size %d\n",
858 					evt_dat->len);
859 			kfree(evt_dat);
860 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
861 			lpfc_bsg_event_unref(evt);
862 			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
863 			goto error_ct_unsol_exit;
864 		}
865 
866 		list_for_each_entry(iocbq, &head, list) {
867 			size = 0;
868 			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
869 				bdeBuf1 = iocbq->context2;
870 				bdeBuf2 = iocbq->context3;
871 			}
872 			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
873 				if (phba->sli3_options &
874 				    LPFC_SLI3_HBQ_ENABLED) {
875 					if (i == 0) {
876 						hbqe = (struct lpfc_hbq_entry *)
877 						  &iocbq->iocb.un.ulpWord[0];
878 						size = hbqe->bde.tus.f.bdeSize;
879 						dmabuf = bdeBuf1;
880 					} else if (i == 1) {
881 						hbqe = (struct lpfc_hbq_entry *)
882 							&iocbq->iocb.unsli3.
883 							sli3Words[4];
884 						size = hbqe->bde.tus.f.bdeSize;
885 						dmabuf = bdeBuf2;
886 					}
887 					if ((offset + size) > evt_dat->len)
888 						size = evt_dat->len - offset;
889 				} else {
890 					size = iocbq->iocb.un.cont64[i].
891 						tus.f.bdeSize;
892 					bde = &iocbq->iocb.un.cont64[i];
893 					dma_addr = getPaddr(bde->addrHigh,
894 							    bde->addrLow);
895 					dmabuf = lpfc_sli_ringpostbuf_get(phba,
896 							pring, dma_addr);
897 				}
898 				if (!dmabuf) {
899 					lpfc_printf_log(phba, KERN_ERR,
900 						LOG_LIBDFC, "2616 No dmabuf "
901 						"found for iocbq 0x%p\n",
902 						iocbq);
903 					kfree(evt_dat->data);
904 					kfree(evt_dat);
905 					spin_lock_irqsave(&phba->ct_ev_lock,
906 						flags);
907 					lpfc_bsg_event_unref(evt);
908 					spin_unlock_irqrestore(
909 						&phba->ct_ev_lock, flags);
910 					goto error_ct_unsol_exit;
911 				}
912 				memcpy((char *)(evt_dat->data) + offset,
913 				       dmabuf->virt, size);
914 				offset += size;
915 				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
916 				    !(phba->sli3_options &
917 				      LPFC_SLI3_HBQ_ENABLED)) {
918 					lpfc_sli_ringpostbuf_put(phba, pring,
919 								 dmabuf);
920 				} else {
921 					switch (cmd) {
922 					case ELX_LOOPBACK_DATA:
923 						if (phba->sli_rev <
924 						    LPFC_SLI_REV4)
925 							diag_cmd_data_free(phba,
926 							(struct lpfc_dmabufext
927 							 *)dmabuf);
928 						break;
929 					case ELX_LOOPBACK_XRI_SETUP:
930 						if ((phba->sli_rev ==
931 							LPFC_SLI_REV2) ||
932 							(phba->sli3_options &
933 							LPFC_SLI3_HBQ_ENABLED
934 							)) {
935 							lpfc_in_buf_free(phba,
936 									dmabuf);
937 						} else {
938 							lpfc_post_buffer(phba,
939 									 pring,
940 									 1);
941 						}
942 						break;
943 					default:
944 						if (!(phba->sli3_options &
945 						      LPFC_SLI3_HBQ_ENABLED))
946 							lpfc_post_buffer(phba,
947 									 pring,
948 									 1);
949 						break;
950 					}
951 				}
952 			}
953 		}
954 
955 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
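	/* On SLI4 the response must be transmitted later on this same
	 * exchange, so save the rxid/oxid/SID in the next free ct_ctx slot
	 * and hand its index to the application as the immediate data.
	 */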
956 		if (phba->sli_rev == LPFC_SLI_REV4) {
957 			evt_dat->immed_dat = phba->ctx_idx;
958 			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
959 			/* Provide warning for over-run of the ct_ctx array */
960 			if (phba->ct_ctx[evt_dat->immed_dat].flags &
961 			    UNSOL_VALID)
962 				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
963 						"2717 CT context array entry "
964 						"[%d] over-run: oxid:x%x, "
965 						"sid:x%x\n", phba->ctx_idx,
966 						phba->ct_ctx[
967 						    evt_dat->immed_dat].oxid,
968 						phba->ct_ctx[
969 						    evt_dat->immed_dat].SID);
970 			phba->ct_ctx[evt_dat->immed_dat].rxid =
971 				piocbq->iocb.ulpContext;
972 			phba->ct_ctx[evt_dat->immed_dat].oxid =
973 				piocbq->iocb.unsli3.rcvsli3.ox_id;
974 			phba->ct_ctx[evt_dat->immed_dat].SID =
975 				piocbq->iocb.un.rcvels.remoteID;
976 			phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
977 		} else
978 			evt_dat->immed_dat = piocbq->iocb.ulpContext;
979 
980 		evt_dat->type = FC_REG_CT_EVENT;
981 		list_add(&evt_dat->node, &evt->events_to_see);
982 		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
983 			wake_up_interruptible(&evt->wq);
984 			lpfc_bsg_event_unref(evt);
985 			break;
986 		}
987 
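		/* Move the event from the to-see list to the to-get list,
		 * where a subsequent GET_EVENT request can consume it.
		 */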
988 		list_move(evt->events_to_see.prev, &evt->events_to_get);
989 		lpfc_bsg_event_unref(evt);
990 
991 		job = evt->set_job;
992 		evt->set_job = NULL;
993 		if (job) {
994 			job->reply->reply_payload_rcv_len = size;
995 			/* make error code available to userspace */
996 			job->reply->result = 0;
997 			job->dd_data = NULL;
998 			/* complete the job back to userspace */
999 			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1000 			job->job_done(job);
1001 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
1002 		}
1003 	}
1004 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1005 
1006 error_ct_unsol_exit:
1007 	if (!list_empty(&head))
1008 		list_del(&head);
1009 	if ((phba->sli_rev < LPFC_SLI_REV4) &&
1010 	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
1011 		return 0;
1012 	return 1;
1013 }
1014 
1015 /**
1016  * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1017  * @job: SET_EVENT fc_bsg_job
1018  **/
1019 static int
1020 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1021 {
1022 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1023 	struct lpfc_hba *phba = vport->phba;
1024 	struct set_ct_event *event_req;
1025 	struct lpfc_bsg_event *evt;
1026 	int rc = 0;
1027 	struct bsg_job_data *dd_data = NULL;
1028 	uint32_t ev_mask;
1029 	unsigned long flags;
1030 
1031 	if (job->request_len <
1032 	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1033 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1034 				"2612 Received SET_CT_EVENT below minimum "
1035 				"size\n");
1036 		rc = -EINVAL;
1037 		goto job_error;
1038 	}
1039 
1040 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1041 	if (dd_data == NULL) {
1042 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1043 				"2734 Failed allocation of dd_data\n");
1044 		rc = -ENOMEM;
1045 		goto job_error;
1046 	}
1047 
1048 	event_req = (struct set_ct_event *)
1049 		job->request->rqst_data.h_vendor.vendor_cmd;
1050 	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1051 				FC_REG_EVENT_MASK);
1052 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1053 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1054 		if (evt->reg_id == event_req->ev_reg_id) {
1055 			lpfc_bsg_event_ref(evt);
1056 			evt->wait_time_stamp = jiffies;
1057 			break;
1058 		}
1059 	}
1060 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1061 
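	/* If the search ran off the end of the list, evt points at the list
	 * head itself: no waiter with this reg_id exists yet.
	 */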
1062 	if (&evt->node == &phba->ct_ev_waiters) {
1063 		/* no event waiting struct yet - first call */
1064 		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1065 					event_req->ev_req_id);
1066 		if (!evt) {
1067 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1068 					"2617 Failed allocation of event "
1069 					"waiter\n");
1070 			rc = -ENOMEM;
1071 			goto job_error;
1072 		}
1073 
1074 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
1075 		list_add(&evt->node, &phba->ct_ev_waiters);
1076 		lpfc_bsg_event_ref(evt);
1077 		evt->wait_time_stamp = jiffies;
1078 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1079 	}
1080 
1081 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1082 	evt->waiting = 1;
1083 	dd_data->type = TYPE_EVT;
1084 	dd_data->context_un.evt = evt;
1085 	evt->set_job = job; /* for unsolicited command */
1086 	job->dd_data = dd_data; /* for fc transport timeout callback*/
1087 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1088 	return 0; /* call job done later */
1089 
1090 job_error:
1091 	if (dd_data != NULL)
1092 		kfree(dd_data);
1093 
1094 	job->dd_data = NULL;
1095 	return rc;
1096 }
1097 
1098 /**
1099  * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1100  * @job: GET_EVENT fc_bsg_job
1101  **/
1102 static int
1103 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1104 {
1105 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1106 	struct lpfc_hba *phba = vport->phba;
1107 	struct get_ct_event *event_req;
1108 	struct get_ct_event_reply *event_reply;
1109 	struct lpfc_bsg_event *evt;
1110 	struct event_data *evt_dat = NULL;
1111 	unsigned long flags;
1112 	uint32_t rc = 0;
1113 
1114 	if (job->request_len <
1115 	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1116 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1117 				"2613 Received GET_CT_EVENT request below "
1118 				"minimum size\n");
1119 		rc = -EINVAL;
1120 		goto job_error;
1121 	}
1122 
1123 	event_req = (struct get_ct_event *)
1124 		job->request->rqst_data.h_vendor.vendor_cmd;
1125 
1126 	event_reply = (struct get_ct_event_reply *)
1127 		job->reply->reply_data.vendor_reply.vendor_rsp;
1128 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1129 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1130 		if (evt->reg_id == event_req->ev_reg_id) {
1131 			if (list_empty(&evt->events_to_get))
1132 				break;
1133 			lpfc_bsg_event_ref(evt);
1134 			evt->wait_time_stamp = jiffies;
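			/* consume the oldest queued event, at the tail of
			 * the events_to_get list
			 */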
1135 			evt_dat = list_entry(evt->events_to_get.prev,
1136 					     struct event_data, node);
1137 			list_del(&evt_dat->node);
1138 			break;
1139 		}
1140 	}
1141 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1142 
1143 	/* The app may continue to ask for event data until it gets
1144 	 * an error indicating that there isn't any more
1145 	 */
1146 	if (evt_dat == NULL) {
1147 		job->reply->reply_payload_rcv_len = 0;
1148 		rc = -ENOENT;
1149 		goto job_error;
1150 	}
1151 
1152 	if (evt_dat->len > job->request_payload.payload_len) {
1153 		evt_dat->len = job->request_payload.payload_len;
1154 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1155 				"2618 Truncated event data at %d "
1156 				"bytes\n",
1157 				job->request_payload.payload_len);
1158 	}
1159 
1160 	event_reply->type = evt_dat->type;
1161 	event_reply->immed_data = evt_dat->immed_dat;
1162 	if (evt_dat->len > 0)
1163 		job->reply->reply_payload_rcv_len =
1164 			sg_copy_from_buffer(job->request_payload.sg_list,
1165 					    job->request_payload.sg_cnt,
1166 					    evt_dat->data, evt_dat->len);
1167 	else
1168 		job->reply->reply_payload_rcv_len = 0;
1169 
1170 	if (evt_dat) {
1171 		kfree(evt_dat->data);
1172 		kfree(evt_dat);
1173 	}
1174 
1175 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1176 	lpfc_bsg_event_unref(evt);
1177 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1178 	job->dd_data = NULL;
1179 	job->reply->result = 0;
1180 	job->job_done(job);
1181 	return 0;
1182 
1183 job_error:
1184 	job->dd_data = NULL;
1185 	job->reply->result = rc;
1186 	return rc;
1187 }
1188 
1189 /**
1190  * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1191  * @phba: Pointer to HBA context object.
1192  * @cmdiocbq: Pointer to command iocb.
1193  * @rspiocbq: Pointer to response iocb.
1194  *
1195  * This function is the completion handler for iocbs issued using
1196  * the lpfc_issue_ct_rsp function. It is called by the
1197  * ring event handler function without any lock held. It
1198  * can be called from both worker thread context and interrupt
1199  * context, and also from another thread which cleans up the SLI
1200  * layer objects.
1201  * This function unmaps the request payload buffer, sets the job
1202  * result from the response iocb status, releases the driver
1203  * resources held for the response, and completes the bsg job
1204  * back to userspace.
1205  **/
1206 static void
1207 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1208 			struct lpfc_iocbq *cmdiocbq,
1209 			struct lpfc_iocbq *rspiocbq)
1210 {
1211 	struct bsg_job_data *dd_data;
1212 	struct fc_bsg_job *job;
1213 	IOCB_t *rsp;
1214 	struct lpfc_dmabuf *bmp;
1215 	struct lpfc_nodelist *ndlp;
1216 	unsigned long flags;
1217 	int rc = 0;
1218 
1219 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
1220 	dd_data = cmdiocbq->context2;
1221 	/* normal completion and timeout crossed paths, already done */
1222 	if (!dd_data) {
1223 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1224 		return;
1225 	}
1226 
1227 	job = dd_data->context_un.iocb.set_job;
1228 	bmp = dd_data->context_un.iocb.bmp;
1229 	rsp = &rspiocbq->iocb;
1230 	ndlp = dd_data->context_un.iocb.ndlp;
1231 
1232 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1233 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
1234 
1235 	if (rsp->ulpStatus) {
1236 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1237 			switch (rsp->un.ulpWord[4] & 0xff) {
1238 			case IOERR_SEQUENCE_TIMEOUT:
1239 				rc = -ETIMEDOUT;
1240 				break;
1241 			case IOERR_INVALID_RPI:
1242 				rc = -EFAULT;
1243 				break;
1244 			default:
1245 				rc = -EACCES;
1246 				break;
1247 			}
1248 		} else
1249 			rc = -EACCES;
1250 	} else
1251 		job->reply->reply_payload_rcv_len =
1252 			rsp->un.genreq64.bdl.bdeSize;
1253 
1254 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1255 	lpfc_sli_release_iocbq(phba, cmdiocbq);
1256 	lpfc_nlp_put(ndlp);
1257 	kfree(bmp);
1258 	kfree(dd_data);
1259 	/* make error code available to userspace */
1260 	job->reply->result = rc;
1261 	job->dd_data = NULL;
1262 	/* complete the job back to userspace */
1263 	job->job_done(job);
1264 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1265 	return;
1266 }
1267 
1268 /**
1269  * lpfc_issue_ct_rsp - issue a ct response
1270  * @phba: Pointer to HBA context object.
1271  * @job: Pointer to the job object.
1272  * @tag: tag index value into the ports context exchange array.
1273  * @bmp: Pointer to a dma buffer descriptor.
1274  * @num_entry: Number of entries in the BDE.
1275  **/
1276 static int
1277 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1278 		  struct lpfc_dmabuf *bmp, int num_entry)
1279 {
1280 	IOCB_t *icmd;
1281 	struct lpfc_iocbq *ctiocb = NULL;
1282 	int rc = 0;
1283 	struct lpfc_nodelist *ndlp = NULL;
1284 	struct bsg_job_data *dd_data;
1285 	uint32_t creg_val;
1286 
1287 	/* allocate our bsg tracking structure */
1288 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1289 	if (!dd_data) {
1290 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1291 				"2736 Failed allocation of dd_data\n");
1292 		rc = -ENOMEM;
1293 		goto no_dd_data;
1294 	}
1295 
1296 	/* Allocate buffer for command iocb */
1297 	ctiocb = lpfc_sli_get_iocbq(phba);
1298 	if (!ctiocb) {
1299 		rc = -ENOMEM;
1300 		goto no_ctiocb;
1301 	}
1302 
1303 	icmd = &ctiocb->iocb;
1304 	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1305 	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1306 	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1307 	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1308 	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1309 	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1310 	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1311 	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1312 	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1313 
1314 	/* Fill in rest of iocb */
1315 	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1316 	icmd->ulpBdeCount = 1;
1317 	icmd->ulpLe = 1;
1318 	icmd->ulpClass = CLASS3;
1319 	if (phba->sli_rev == LPFC_SLI_REV4) {
1320 		/* Do not issue unsol response if oxid not marked as valid */
1321 		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1322 			rc = IOCB_ERROR;
1323 			goto issue_ct_rsp_exit;
1324 		}
1325 		icmd->ulpContext = phba->ct_ctx[tag].rxid;
1326 		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1327 		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1328 		if (!ndlp) {
1329 			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1330 				 "2721 ndlp null for oxid %x SID %x\n",
1331 					icmd->ulpContext,
1332 					phba->ct_ctx[tag].SID);
1333 			rc = IOCB_ERROR;
1334 			goto issue_ct_rsp_exit;
1335 		}
1336 
1337 		/* Check if the ndlp is active */
1338 		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1339 			rc = -IOCB_ERROR;
1340 			goto issue_ct_rsp_exit;
1341 		}
1342 
1343 		/* get a reference count so the ndlp doesn't go away while
1344 		 * we respond
1345 		 */
1346 		if (!lpfc_nlp_get(ndlp)) {
1347 			rc = -IOCB_ERROR;
1348 			goto issue_ct_rsp_exit;
1349 		}
1350 
1351 		icmd->un.ulpWord[3] =
1352 				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1353 
1354 		/* The exchange is done, mark the entry as invalid */
1355 		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1356 	} else
1357 		icmd->ulpContext = (ushort) tag;
1358 
1359 	icmd->ulpTimeout = phba->fc_ratov * 2;
1360 
1361 	/* Xmit CT response on exchange <xid> */
1362 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1363 		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1364 		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1365 
1366 	ctiocb->iocb_cmpl = NULL;
1367 	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1368 	ctiocb->vport = phba->pport;
1369 	ctiocb->context3 = bmp;
1370 
1371 	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1372 	ctiocb->context2 = dd_data;
1373 	ctiocb->context1 = ndlp;
1374 	dd_data->type = TYPE_IOCB;
1375 	dd_data->context_un.iocb.cmdiocbq = ctiocb;
1376 	dd_data->context_un.iocb.rspiocbq = NULL;
1377 	dd_data->context_un.iocb.set_job = job;
1378 	dd_data->context_un.iocb.bmp = bmp;
1379 	dd_data->context_un.iocb.ndlp = ndlp;
1380 
1381 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1382 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1383 			rc = -IOCB_ERROR;
1384 			goto issue_ct_rsp_exit;
1385 		}
1386 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1387 		writel(creg_val, phba->HCregaddr);
1388 		readl(phba->HCregaddr); /* flush */
1389 	}
1390 
1391 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1392 
1393 	if (rc == IOCB_SUCCESS)
1394 		return 0; /* done for now */
1395 
1396 issue_ct_rsp_exit:
1397 	lpfc_sli_release_iocbq(phba, ctiocb);
1398 no_ctiocb:
1399 	kfree(dd_data);
1400 no_dd_data:
1401 	return rc;
1402 }
1403 
1404 /**
1405  * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1406  * @job: SEND_MGMT_RESP fc_bsg_job
1407  **/
1408 static int
1409 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1410 {
1411 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1412 	struct lpfc_hba *phba = vport->phba;
1413 	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1414 		job->request->rqst_data.h_vendor.vendor_cmd;
1415 	struct ulp_bde64 *bpl;
1416 	struct lpfc_dmabuf *bmp = NULL;
1417 	struct scatterlist *sgel = NULL;
1418 	int request_nseg;
1419 	int numbde;
1420 	dma_addr_t busaddr;
1421 	uint32_t tag = mgmt_resp->tag;
1422 	unsigned long reqbfrcnt =
1423 			(unsigned long)job->request_payload.payload_len;
1424 	int rc = 0;
1425 
1426 	/* in case no data is transferred */
1427 	job->reply->reply_payload_rcv_len = 0;
1428 
1429 	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1430 		rc = -ERANGE;
1431 		goto send_mgmt_rsp_exit;
1432 	}
1433 
1434 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1435 	if (!bmp) {
1436 		rc = -ENOMEM;
1437 		goto send_mgmt_rsp_exit;
1438 	}
1439 
1440 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1441 	if (!bmp->virt) {
1442 		rc = -ENOMEM;
1443 		goto send_mgmt_rsp_free_bmp;
1444 	}
1445 
1446 	INIT_LIST_HEAD(&bmp->list);
1447 	bpl = (struct ulp_bde64 *) bmp->virt;
1448 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1449 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
1450 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1451 		busaddr = sg_dma_address(sgel);
1452 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1453 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
1454 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
1455 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1456 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1457 		bpl++;
1458 	}
1459 
1460 	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1461 
1462 	if (rc == IOCB_SUCCESS)
1463 		return 0; /* done for now */
1464 
1465 	/* TBD need to handle a timeout */
1466 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1467 			  job->request_payload.sg_cnt, DMA_TO_DEVICE);
1468 	rc = -EACCES;
1469 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1470 
1471 send_mgmt_rsp_free_bmp:
1472 	kfree(bmp);
1473 send_mgmt_rsp_exit:
1474 	/* make error code available to userspace */
1475 	job->reply->result = rc;
1476 	job->dd_data = NULL;
1477 	return rc;
1478 }
1479 
1480 /**
1481  * lpfc_bsg_diag_mode_enter - prepare the device for diag loopback mode
1482  * @phba: Pointer to HBA context object.
1483  *
1484  * This function is responsible for preparing the driver for diag loopback
1485  * on the device.
1486  */
1487 static int
1488 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1489 {
1490 	struct lpfc_vport **vports;
1491 	struct Scsi_Host *shost;
1492 	struct lpfc_sli *psli;
1493 	struct lpfc_sli_ring *pring;
1494 	int i = 0;
1495 
1496 	psli = &phba->sli;
1497 	if (!psli)
1498 		return -ENODEV;
1499 
1500 	pring = &psli->ring[LPFC_FCP_RING];
1501 	if (!pring)
1502 		return -ENODEV;
1503 
1504 	if ((phba->link_state == LPFC_HBA_ERROR) ||
1505 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1506 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1507 		return -EACCES;
1508 
1509 	vports = lpfc_create_vport_work_array(phba);
1510 	if (vports) {
1511 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1512 			shost = lpfc_shost_from_vport(vports[i]);
1513 			scsi_block_requests(shost);
1514 		}
1515 		lpfc_destroy_vport_work_array(phba, vports);
1516 	} else {
1517 		shost = lpfc_shost_from_vport(phba->pport);
1518 		scsi_block_requests(shost);
1519 	}
1520 
1521 	while (pring->txcmplq_cnt) {
1522 		if (i++ > 500)  /* wait up to 5 seconds */
1523 			break;
1524 		msleep(10);
1525 	}
1526 	return 0;
1527 }
1528 
1529 /**
1530  * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1531  * @phba: Pointer to HBA context object.
1532  *
1533  * This function is responsible for driver exit processing of setting up
1534  * diag loopback mode on device.
1535  */
1536 static void
1537 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1538 {
1539 	struct Scsi_Host *shost;
1540 	struct lpfc_vport **vports;
1541 	int i;
1542 
1543 	vports = lpfc_create_vport_work_array(phba);
1544 	if (vports) {
1545 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1546 			shost = lpfc_shost_from_vport(vports[i]);
1547 			scsi_unblock_requests(shost);
1548 		}
1549 		lpfc_destroy_vport_work_array(phba, vports);
1550 	} else {
1551 		shost = lpfc_shost_from_vport(phba->pport);
1552 		scsi_unblock_requests(shost);
1553 	}
1554 	return;
1555 }
1556 
1557 /**
1558  * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1559  * @phba: Pointer to HBA context object.
1560  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1561  *
1562  * This function is responsible for placing an sli3 port into diagnostic
1563  * loopback mode in order to perform a diagnostic loopback test.
1564  * All new scsi requests are blocked and a small delay is used to allow
1565  * outstanding scsi requests to complete before the link is brought down.
1566  * Once the link is placed in loopback mode, scsi requests are again
1567  * allowed so the scsi mid-layer doesn't give up on the port.
1568  * All of this is done in-line.
1569  */
1570 static int
1571 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1572 {
1573 	struct diag_mode_set *loopback_mode;
1574 	uint32_t link_flags;
1575 	uint32_t timeout;
1576 	LPFC_MBOXQ_t *pmboxq  = NULL;
1577 	int mbxstatus = MBX_SUCCESS;
1578 	int i = 0;
1579 	int rc = 0;
1580 
1581 	/* no data to return just the return code */
1582 	job->reply->reply_payload_rcv_len = 0;
1583 
1584 	if (job->request_len < sizeof(struct fc_bsg_request) +
1585 	    sizeof(struct diag_mode_set)) {
1586 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1587 				"2738 Received DIAG MODE request size:%d "
1588 				"below the minimum size:%d\n",
1589 				job->request_len,
1590 				(int)(sizeof(struct fc_bsg_request) +
1591 				sizeof(struct diag_mode_set)));
1592 		rc = -EINVAL;
1593 		goto job_error;
1594 	}
1595 
1596 	rc = lpfc_bsg_diag_mode_enter(phba);
1597 	if (rc)
1598 		goto job_error;
1599 
1600 	/* bring the link to diagnostic mode */
1601 	loopback_mode = (struct diag_mode_set *)
1602 		job->request->rqst_data.h_vendor.vendor_cmd;
1603 	link_flags = loopback_mode->type;
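	/* The application supplies the timeout in seconds; the wait loops
	 * below poll every 10 ms, so scale it by 100.
	 */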
1604 	timeout = loopback_mode->timeout * 100;
1605 
1606 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1607 	if (!pmboxq) {
1608 		rc = -ENOMEM;
1609 		goto loopback_mode_exit;
1610 	}
1611 	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1612 	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1613 	pmboxq->u.mb.mbxOwner = OWN_HOST;
1614 
1615 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1616 
1617 	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1618 		/* wait for link down before proceeding */
1619 		i = 0;
1620 		while (phba->link_state != LPFC_LINK_DOWN) {
1621 			if (i++ > timeout) {
1622 				rc = -ETIMEDOUT;
1623 				goto loopback_mode_exit;
1624 			}
1625 			msleep(10);
1626 		}
1627 
1628 		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1629 		if (link_flags == INTERNAL_LOOP_BACK)
1630 			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1631 		else
1632 			pmboxq->u.mb.un.varInitLnk.link_flags =
1633 				FLAGS_TOPOLOGY_MODE_LOOP;
1634 
1635 		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1636 		pmboxq->u.mb.mbxOwner = OWN_HOST;
1637 
1638 		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1639 						     LPFC_MBOX_TMO);
1640 
1641 		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1642 			rc = -ENODEV;
1643 		else {
1644 			spin_lock_irq(&phba->hbalock);
1645 			phba->link_flag |= LS_LOOPBACK_MODE;
1646 			spin_unlock_irq(&phba->hbalock);
1647 			/* wait for the link attention interrupt */
1648 			msleep(100);
1649 
1650 			i = 0;
1651 			while (phba->link_state != LPFC_HBA_READY) {
1652 				if (i++ > timeout) {
1653 					rc = -ETIMEDOUT;
1654 					break;
1655 				}
1656 
1657 				msleep(10);
1658 			}
1659 		}
1660 
1661 	} else
1662 		rc = -ENODEV;
1663 
1664 loopback_mode_exit:
1665 	lpfc_bsg_diag_mode_exit(phba);
1666 
1667 	/*
1668 	 * Let SLI layer release mboxq if mbox command completed after timeout.
1669 	 */
1670 	if (pmboxq && mbxstatus != MBX_TIMEOUT)
1671 		mempool_free(pmboxq, phba->mbox_mem_pool);
1672 
1673 job_error:
1674 	/* make error code available to userspace */
1675 	job->reply->result = rc;
1676 	/* complete the job back to userspace if no error */
1677 	if (rc == 0)
1678 		job->job_done(job);
1679 	return rc;
1680 }
1681 
1682 /**
1683  * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1684  * @phba: Pointer to HBA context object.
1685  * @diag: Flag to set the link to diag or normal operation state.
1686  *
1687  * This function is responsible for issuing an sli4 mailbox command for
1688  * setting the link to either diag state or normal operation state.
1689  */
1690 static int
1691 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1692 {
1693 	LPFC_MBOXQ_t *pmboxq;
1694 	struct lpfc_mbx_set_link_diag_state *link_diag_state;
1695 	uint32_t req_len, alloc_len;
1696 	int mbxstatus = MBX_SUCCESS, rc;
1697 
1698 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1699 	if (!pmboxq)
1700 		return -ENOMEM;
1701 
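	/* The embedded request length excludes the common SLI4 config
	 * header, which lpfc_sli4_config() accounts for separately.
	 */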
1702 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1703 		   sizeof(struct lpfc_sli4_cfg_mhdr));
1704 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1705 				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1706 				req_len, LPFC_SLI4_MBX_EMBED);
1707 	if (alloc_len != req_len) {
1708 		rc = -ENOMEM;
1709 		goto link_diag_state_set_out;
1710 	}
1711 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1712 			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1713 			diag, phba->sli4_hba.lnk_info.lnk_tp,
1714 			phba->sli4_hba.lnk_info.lnk_no);
1715 
1716 	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1717 	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1718 	       phba->sli4_hba.lnk_info.lnk_no);
1719 	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1720 	       phba->sli4_hba.lnk_info.lnk_tp);
1721 	if (diag)
1722 		bf_set(lpfc_mbx_set_diag_state_diag,
1723 		       &link_diag_state->u.req, 1);
1724 	else
1725 		bf_set(lpfc_mbx_set_diag_state_diag,
1726 		       &link_diag_state->u.req, 0);
1727 
1728 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1729 
1730 	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1731 		rc = 0;
1732 	else
1733 		rc = -ENODEV;
1734 
1735 link_diag_state_set_out:
1736 	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1737 		mempool_free(pmboxq, phba->mbox_mem_pool);
1738 
1739 	return rc;
1740 }
1741 
1742 /**
1743  * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
1744  * @phba: Pointer to HBA context object.
1745  *
1746  * This function is responsible for issuing an sli4 mailbox command for
1747  * setting up an internal loopback diagnostic.
1748  */
1749 static int
1750 lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
1751 {
1752 	LPFC_MBOXQ_t *pmboxq;
1753 	uint32_t req_len, alloc_len;
1754 	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1755 	int mbxstatus = MBX_SUCCESS, rc = 0;
1756 
1757 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1758 	if (!pmboxq)
1759 		return -ENOMEM;
1760 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1761 		   sizeof(struct lpfc_sli4_cfg_mhdr));
1762 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1763 				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1764 				req_len, LPFC_SLI4_MBX_EMBED);
1765 	if (alloc_len != req_len) {
1766 		mempool_free(pmboxq, phba->mbox_mem_pool);
1767 		return -ENOMEM;
1768 	}
1769 	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1770 	bf_set(lpfc_mbx_set_diag_state_link_num,
1771 	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
1772 	bf_set(lpfc_mbx_set_diag_state_link_type,
1773 	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
1774 	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1775 	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1776 
1777 	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1778 	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1779 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1780 				"3127 Failed setup loopback mode mailbox "
1781 				"command, rc:x%x, status:x%x\n", mbxstatus,
1782 				pmboxq->u.mb.mbxStatus);
1783 		rc = -ENODEV;
1784 	}
1785 	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1786 		mempool_free(pmboxq, phba->mbox_mem_pool);
1787 	return rc;
1788 }
1789 
1790 /**
1791  * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
1792  * @phba: Pointer to HBA context object.
1793  *
1794  * This function sets up SLI4 FC port registrations for a diagnostic run,
1795  * which includes all the rpis, the vfi, and the vpi.
1796  */
1797 static int
1798 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1799 {
1800 	int rc;
1801 
1802 	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
1803 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1804 				"3136 Port still had vfi registered: "
1805 				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
1806 				phba->pport->fc_myDID, phba->fcf.fcfi,
1807 				phba->sli4_hba.vfi_ids[phba->pport->vfi],
1808 				phba->vpi_ids[phba->pport->vpi]);
1809 		return -EINVAL;
1810 	}
1811 	rc = lpfc_issue_reg_vfi(phba->pport);
1812 	return rc;
1813 }
1814 
1815 /**
1816  * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1817  * @phba: Pointer to HBA context object.
1818  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1819  *
1820  * This function is responsible for placing an sli4 port into diagnostic
1821  * loopback mode in order to perform a diagnostic loopback test.
1822  */
1823 static int
1824 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1825 {
1826 	struct diag_mode_set *loopback_mode;
1827 	uint32_t link_flags, timeout;
1828 	int i, rc = 0;
1829 
1830 	/* no data to return, just the return code */
1831 	job->reply->reply_payload_rcv_len = 0;
1832 
1833 	if (job->request_len < sizeof(struct fc_bsg_request) +
1834 	    sizeof(struct diag_mode_set)) {
1835 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1836 				"3011 Received DIAG MODE request size:%d "
1837 				"below the minimum size:%d\n",
1838 				job->request_len,
1839 				(int)(sizeof(struct fc_bsg_request) +
1840 				sizeof(struct diag_mode_set)));
1841 		rc = -EINVAL;
1842 		goto job_error;
1843 	}
1844 
1845 	rc = lpfc_bsg_diag_mode_enter(phba);
1846 	if (rc)
1847 		goto job_error;
1848 
1849 	/* indicate we are in loopback diagnostic mode */
1850 	spin_lock_irq(&phba->hbalock);
1851 	phba->link_flag |= LS_LOOPBACK_MODE;
1852 	spin_unlock_irq(&phba->hbalock);
1853 
1854 	/* reset port to start from scratch */
1855 	rc = lpfc_selective_reset(phba);
1856 	if (rc)
1857 		goto job_error;
1858 
1859 	/* bring the link to diagnostic mode */
1860 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1861 			"3129 Bring link to diagnostic state.\n");
1862 	loopback_mode = (struct diag_mode_set *)
1863 		job->request->rqst_data.h_vendor.vendor_cmd;
1864 	link_flags = loopback_mode->type;
1865 	timeout = loopback_mode->timeout * 100;
1866 
1867 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1868 	if (rc) {
1869 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1870 				"3130 Failed to bring link to diagnostic "
1871 				"state, rc:x%x\n", rc);
1872 		goto loopback_mode_exit;
1873 	}
1874 
1875 	/* wait for link down before proceeding */
1876 	i = 0;
1877 	while (phba->link_state != LPFC_LINK_DOWN) {
1878 		if (i++ > timeout) {
1879 			rc = -ETIMEDOUT;
1880 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1881 					"3131 Timeout waiting for link to "
1882 					"diagnostic mode, timeout:%d ms\n",
1883 					timeout * 10);
1884 			goto loopback_mode_exit;
1885 		}
1886 		msleep(10);
1887 	}
1888 
1889 	/* set up loopback mode */
1890 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1891 			"3132 Set up loopback mode:x%x\n", link_flags);
1892 
1893 	if (link_flags == INTERNAL_LOOP_BACK)
1894 		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
1895 	else if (link_flags == EXTERNAL_LOOP_BACK)
1896 		rc = lpfc_hba_init_link_fc_topology(phba,
1897 						    FLAGS_TOPOLOGY_MODE_PT_PT,
1898 						    MBX_NOWAIT);
1899 	else {
1900 		rc = -EINVAL;
1901 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
1902 				"3141 Loopback mode:x%x not supported\n",
1903 				link_flags);
1904 		goto loopback_mode_exit;
1905 	}
1906 
1907 	if (!rc) {
1908 		/* wait for the link attention interrupt */
1909 		msleep(100);
1910 		i = 0;
1911 		while (phba->link_state < LPFC_LINK_UP) {
1912 			if (i++ > timeout) {
1913 				rc = -ETIMEDOUT;
1914 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1915 					"3137 Timeout waiting for link up "
1916 					"in loopback mode, timeout:%d ms\n",
1917 					timeout * 10);
1918 				break;
1919 			}
1920 			msleep(10);
1921 		}
1922 	}
1923 
1924 	/* port resource registration setup for loopback diagnostic */
1925 	if (!rc) {
1926 		/* set up a non-zero myDID for loopback test */
1927 		phba->pport->fc_myDID = 1;
1928 		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
1929 	} else
1930 		goto loopback_mode_exit;
1931 
1932 	if (!rc) {
1933 		/* wait for the port ready */
1934 		msleep(100);
1935 		i = 0;
1936 		while (phba->link_state != LPFC_HBA_READY) {
1937 			if (i++ > timeout) {
1938 				rc = -ETIMEDOUT;
1939 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1940 					"3133 Timeout waiting for port "
1941 					"loopback mode ready, timeout:%d ms\n",
1942 					timeout * 10);
1943 				break;
1944 			}
1945 			msleep(10);
1946 		}
1947 	}
1948 
1949 loopback_mode_exit:
1950 	/* clear loopback diagnostic mode */
1951 	if (rc) {
1952 		spin_lock_irq(&phba->hbalock);
1953 		phba->link_flag &= ~LS_LOOPBACK_MODE;
1954 		spin_unlock_irq(&phba->hbalock);
1955 	}
1956 	lpfc_bsg_diag_mode_exit(phba);
1957 
1958 job_error:
1959 	/* make error code available to userspace */
1960 	job->reply->result = rc;
1961 	/* complete the job back to userspace if no error */
1962 	if (rc == 0)
1963 		job->job_done(job);
1964 	return rc;
1965 }
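
/*
 * Illustrative sketch (hypothetical helper, not in the driver): the polling
 * wait pattern used repeatedly by the loopback routines above, factored out.
 * @state is the link state to wait for; @timeout is in 10ms ticks, matching
 * the loopback_mode->timeout * 100 convention used by the callers.
 */
static int __maybe_unused
lpfc_bsg_wait_link_state_sketch(struct lpfc_hba *phba, uint32_t state,
				uint32_t timeout)
{
	uint32_t i = 0;

	while (phba->link_state != state) {
		if (i++ > timeout)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}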
1966 
1967 /**
1968  * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
1969  * @job: LPFC_BSG_VENDOR_DIAG_MODE
1970  *
1971  * This function is responsible for checking and dispatching the bsg diag
1972  * loopback mode command from the user to the proper driver action routine.
1973  */
1974 static int
1975 lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
1976 {
1977 	struct Scsi_Host *shost;
1978 	struct lpfc_vport *vport;
1979 	struct lpfc_hba *phba;
1980 	int rc;
1981 
1982 	shost = job->shost;
1983 	if (!shost)
1984 		return -ENODEV;
1985 	vport = (struct lpfc_vport *)job->shost->hostdata;
1986 	if (!vport)
1987 		return -ENODEV;
1988 	phba = vport->phba;
1989 	if (!phba)
1990 		return -ENODEV;
1991 
1992 	if (phba->sli_rev < LPFC_SLI_REV4)
1993 		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
1994 	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1995 		 LPFC_SLI_INTF_IF_TYPE_2)
1996 		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
1997 	else
1998 		rc = -ENODEV;
1999 
2000 	return rc;
2001 }
2002 
2003 /**
2004  * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2005  * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2006  *
2007  * This function is responsible for checking and dispatching the bsg diag
2008  * mode end command from the user to the proper driver action routine.
2009  */
2010 static int
2011 lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2012 {
2013 	struct Scsi_Host *shost;
2014 	struct lpfc_vport *vport;
2015 	struct lpfc_hba *phba;
2016 	struct diag_mode_set *loopback_mode_end_cmd;
2017 	uint32_t timeout;
2018 	int rc, i;
2019 
2020 	shost = job->shost;
2021 	if (!shost)
2022 		return -ENODEV;
2023 	vport = (struct lpfc_vport *)job->shost->hostdata;
2024 	if (!vport)
2025 		return -ENODEV;
2026 	phba = vport->phba;
2027 	if (!phba)
2028 		return -ENODEV;
2029 
2030 	if (phba->sli_rev < LPFC_SLI_REV4)
2031 		return -ENODEV;
2032 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2033 	    LPFC_SLI_INTF_IF_TYPE_2)
2034 		return -ENODEV;
2035 
2036 	/* clear loopback diagnostic mode */
2037 	spin_lock_irq(&phba->hbalock);
2038 	phba->link_flag &= ~LS_LOOPBACK_MODE;
2039 	spin_unlock_irq(&phba->hbalock);
2040 	loopback_mode_end_cmd = (struct diag_mode_set *)
2041 			job->request->rqst_data.h_vendor.vendor_cmd;
2042 	timeout = loopback_mode_end_cmd->timeout * 100;
2043 
2044 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2045 	if (rc) {
2046 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2047 				"3139 Failed to bring link to diagnostic "
2048 				"state, rc:x%x\n", rc);
2049 		goto loopback_mode_end_exit;
2050 	}
2051 
2052 	/* wait for link down before proceeding */
2053 	i = 0;
2054 	while (phba->link_state != LPFC_LINK_DOWN) {
2055 		if (i++ > timeout) {
2056 			rc = -ETIMEDOUT;
2057 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2058 					"3140 Timeout waiting for link to "
2059 					"diagnostic mode_end, timeout:%d ms\n",
2060 					timeout * 10);
2061 			/* there is not much we can do here */
2062 			break;
2063 		}
2064 		msleep(10);
2065 	}
2066 
2067 	/* reset port resource registrations */
2068 	rc = lpfc_selective_reset(phba);
2069 	phba->pport->fc_myDID = 0;
2070 
2071 loopback_mode_end_exit:
2072 	/* make return code available to userspace */
2073 	job->reply->result = rc;
2074 	/* complete the job back to userspace if no error */
2075 	if (rc == 0)
2076 		job->job_done(job);
2077 	return rc;
2078 }
2079 
2080 /**
2081  * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2082  * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2083  *
2084  * This function performs an SLI4 diag link test request from the user
2085  * application.
2086  */
2087 static int
2088 lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2089 {
2090 	struct Scsi_Host *shost;
2091 	struct lpfc_vport *vport;
2092 	struct lpfc_hba *phba;
2093 	LPFC_MBOXQ_t *pmboxq;
2094 	struct sli4_link_diag *link_diag_test_cmd;
2095 	uint32_t req_len, alloc_len;
2096 	uint32_t timeout;
2097 	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2098 	union lpfc_sli4_cfg_shdr *shdr;
2099 	uint32_t shdr_status, shdr_add_status;
2100 	struct diag_status *diag_status_reply;
2101 	int mbxstatus, rc = 0;
2102 
2103 	shost = job->shost;
2104 	if (!shost) {
2105 		rc = -ENODEV;
2106 		goto job_error;
2107 	}
2108 	vport = (struct lpfc_vport *)job->shost->hostdata;
2109 	if (!vport) {
2110 		rc = -ENODEV;
2111 		goto job_error;
2112 	}
2113 	phba = vport->phba;
2114 	if (!phba) {
2115 		rc = -ENODEV;
2116 		goto job_error;
2117 	}
2118 
2119 	if (phba->sli_rev < LPFC_SLI_REV4) {
2120 		rc = -ENODEV;
2121 		goto job_error;
2122 	}
2123 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2124 	    LPFC_SLI_INTF_IF_TYPE_2) {
2125 		rc = -ENODEV;
2126 		goto job_error;
2127 	}
2128 
2129 	if (job->request_len < sizeof(struct fc_bsg_request) +
2130 	    sizeof(struct sli4_link_diag)) {
2131 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2132 				"3013 Received LINK DIAG TEST request "
2133 				"size:%d below the minimum size:%d\n",
2134 				job->request_len,
2135 				(int)(sizeof(struct fc_bsg_request) +
2136 				sizeof(struct sli4_link_diag)));
2137 		rc = -EINVAL;
2138 		goto job_error;
2139 	}
2140 
2141 	rc = lpfc_bsg_diag_mode_enter(phba);
2142 	if (rc)
2143 		goto job_error;
2144 
2145 	link_diag_test_cmd = (struct sli4_link_diag *)
2146 			 job->request->rqst_data.h_vendor.vendor_cmd;
2147 	timeout = link_diag_test_cmd->timeout * 100;
2148 
2149 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2150 
2151 	if (rc)
2152 		goto job_error;
2153 
2154 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2155 	if (!pmboxq) {
2156 		rc = -ENOMEM;
2157 		goto link_diag_test_exit;
2158 	}
2159 
2160 	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2161 		   sizeof(struct lpfc_sli4_cfg_mhdr));
2162 	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2163 				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2164 				     req_len, LPFC_SLI4_MBX_EMBED);
2165 	if (alloc_len != req_len) {
2166 		rc = -ENOMEM;
2167 		goto link_diag_test_exit;
2168 	}
2169 	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2170 	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2171 	       phba->sli4_hba.lnk_info.lnk_no);
2172 	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2173 	       phba->sli4_hba.lnk_info.lnk_tp);
2174 	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2175 	       link_diag_test_cmd->test_id);
2176 	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2177 	       link_diag_test_cmd->loops);
2178 	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2179 	       link_diag_test_cmd->test_version);
2180 	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2181 	       link_diag_test_cmd->error_action);
2182 
2183 	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2184 
2185 	shdr = (union lpfc_sli4_cfg_shdr *)
2186 		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2187 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2188 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2189 	if (shdr_status || shdr_add_status || mbxstatus) {
2190 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2191 				"3010 Run link diag test mailbox failed with "
2192 				"mbx_status x%x status x%x, add_status x%x\n",
2193 				mbxstatus, shdr_status, shdr_add_status);
2194 	}
2195 
2196 	diag_status_reply = (struct diag_status *)
2197 			    job->reply->reply_data.vendor_reply.vendor_rsp;
2198 
2199 	if (job->reply_len <
2200 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2201 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2202 				"3012 Received Run link diag test reply "
2203 				"below minimum size (%d): reply_len:%d\n",
2204 				(int)(sizeof(struct fc_bsg_request) +
2205 				sizeof(struct diag_status)),
2206 				job->reply_len);
2207 		rc = -EINVAL;
2208 		goto job_error;
2209 	}
2210 
2211 	diag_status_reply->mbox_status = mbxstatus;
2212 	diag_status_reply->shdr_status = shdr_status;
2213 	diag_status_reply->shdr_add_status = shdr_add_status;
2214 
2215 link_diag_test_exit:
2216 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2217 
2218 	if (pmboxq)
2219 		mempool_free(pmboxq, phba->mbox_mem_pool);
2220 
2221 	lpfc_bsg_diag_mode_exit(phba);
2222 
2223 job_error:
2224 	/* make error code available to userspace */
2225 	job->reply->result = rc;
2226 	/* complete the job back to userspace if no error */
2227 	if (rc == 0)
2228 		job->job_done(job);
2229 	return rc;
2230 }
2231 
2232 /**
2233  * lpfcdiag_loop_self_reg - obtains a remote port login id
2234  * @phba: Pointer to HBA context object
2235  * @rpi: Pointer to a remote port login id
2236  *
2237  * This function obtains a remote port login id so the diag loopback test
2238  * can send and receive its own unsolicited CT command.
2239  **/
2240 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2241 {
2242 	LPFC_MBOXQ_t *mbox;
2243 	struct lpfc_dmabuf *dmabuff;
2244 	int status;
2245 
2246 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2247 	if (!mbox)
2248 		return -ENOMEM;
2249 
2250 	if (phba->sli_rev < LPFC_SLI_REV4)
2251 		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2252 				(uint8_t *)&phba->pport->fc_sparam,
2253 				mbox, *rpi);
2254 	else {
2255 		*rpi = lpfc_sli4_alloc_rpi(phba);
2256 		status = lpfc_reg_rpi(phba, phba->pport->vpi,
2257 				phba->pport->fc_myDID,
2258 				(uint8_t *)&phba->pport->fc_sparam,
2259 				mbox, *rpi);
2260 	}
2261 
2262 	if (status) {
2263 		mempool_free(mbox, phba->mbox_mem_pool);
2264 		if (phba->sli_rev == LPFC_SLI_REV4)
2265 			lpfc_sli4_free_rpi(phba, *rpi);
2266 		return -ENOMEM;
2267 	}
2268 
2269 	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2270 	mbox->context1 = NULL;
2271 	mbox->context2 = NULL;
2272 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2273 
2274 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2275 		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2276 		kfree(dmabuff);
2277 		if (status != MBX_TIMEOUT)
2278 			mempool_free(mbox, phba->mbox_mem_pool);
2279 		if (phba->sli_rev == LPFC_SLI_REV4)
2280 			lpfc_sli4_free_rpi(phba, *rpi);
2281 		return -ENODEV;
2282 	}
2283 
2284 	if (phba->sli_rev < LPFC_SLI_REV4)
2285 		*rpi = mbox->u.mb.un.varWords[0];
2286 
2287 	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2288 	kfree(dmabuff);
2289 	mempool_free(mbox, phba->mbox_mem_pool);
2290 	return 0;
2291 }
2292 
2293 /**
2294  * lpfcdiag_loop_self_unreg - unregs from the rpi
2295  * @phba: Pointer to HBA context object
2296  * @rpi: Remote port login id
2297  *
2298  * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2299  **/
2300 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2301 {
2302 	LPFC_MBOXQ_t *mbox;
2303 	int status;
2304 
2305 	/* Allocate mboxq structure */
2306 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2307 	if (mbox == NULL)
2308 		return -ENOMEM;
2309 
2310 	if (phba->sli_rev < LPFC_SLI_REV4)
2311 		lpfc_unreg_login(phba, 0, rpi, mbox);
2312 	else
2313 		lpfc_unreg_login(phba, phba->pport->vpi,
2314 				 phba->sli4_hba.rpi_ids[rpi], mbox);
2315 
2316 	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2317 
2318 	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2319 		if (status != MBX_TIMEOUT)
2320 			mempool_free(mbox, phba->mbox_mem_pool);
2321 		return -EIO;
2322 	}
2323 	mempool_free(mbox, phba->mbox_mem_pool);
2324 	if (phba->sli_rev == LPFC_SLI_REV4)
2325 		lpfc_sli4_free_rpi(phba, rpi);
2326 	return 0;
2327 }
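
/*
 * Illustrative pairing (sketch only, not part of the driver): how the self
 * reg/unreg helpers above are intended to bracket a loopback operation. The
 * rpi obtained from lpfcdiag_loop_self_reg must always be released with
 * lpfcdiag_loop_self_unreg, as lpfc_bsg_diag_loopback_run does below.
 */
static int __maybe_unused
lpfcdiag_loop_self_reg_sketch(struct lpfc_hba *phba)
{
	uint16_t rpi = 0;
	int rc;

	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		return rc;
	/* ... send and receive the loopback CT command using @rpi ... */
	return lpfcdiag_loop_self_unreg(phba, rpi);
}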
2328 
2329 /**
2330  * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2331  * @phba: Pointer to HBA context object
2332  * @rpi: Remote port login id
2333  * @txxri: Pointer to transmit exchange id
2334  * @rxxri: Pointer to receive exchange id
2335  *
2336  * This function obtains the transmit and receive ids required to send
2337  * an unsolicited ct command with a payload. Special lpfc FsType and CmdRsp
2338  * flags are used so that the unsolicited response handler is able to process
2339  * the ct command sent on the same port.
2340  **/
2341 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2342 			 uint16_t *txxri, uint16_t *rxxri)
2343 {
2344 	struct lpfc_bsg_event *evt;
2345 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2346 	IOCB_t *cmd, *rsp;
2347 	struct lpfc_dmabuf *dmabuf;
2348 	struct ulp_bde64 *bpl = NULL;
2349 	struct lpfc_sli_ct_request *ctreq = NULL;
2350 	int ret_val = 0;
2351 	int time_left;
2352 	int iocb_stat = 0;
2353 	unsigned long flags;
2354 
2355 	*txxri = 0;
2356 	*rxxri = 0;
2357 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2358 				SLI_CT_ELX_LOOPBACK);
2359 	if (!evt)
2360 		return -ENOMEM;
2361 
2362 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2363 	list_add(&evt->node, &phba->ct_ev_waiters);
2364 	lpfc_bsg_event_ref(evt);
2365 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2366 
2367 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2368 	rspiocbq = lpfc_sli_get_iocbq(phba);
2369 
2370 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2371 	if (dmabuf) {
2372 		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2373 		if (dmabuf->virt) {
2374 			INIT_LIST_HEAD(&dmabuf->list);
2375 			bpl = (struct ulp_bde64 *) dmabuf->virt;
2376 			memset(bpl, 0, sizeof(*bpl));
2377 			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2378 			bpl->addrHigh =
2379 				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2380 					sizeof(*bpl)));
2381 			bpl->addrLow =
2382 				le32_to_cpu(putPaddrLow(dmabuf->phys +
2383 					sizeof(*bpl)));
2384 			bpl->tus.f.bdeFlags = 0;
2385 			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2386 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2387 		}
2388 	}
2389 
2390 	if (cmdiocbq == NULL || rspiocbq == NULL ||
2391 	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2392 		dmabuf->virt == NULL) {
2393 		ret_val = -ENOMEM;
2394 		goto err_get_xri_exit;
2395 	}
2396 
2397 	cmd = &cmdiocbq->iocb;
2398 	rsp = &rspiocbq->iocb;
2399 
2400 	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2401 
2402 	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2403 	ctreq->RevisionId.bits.InId = 0;
2404 	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2405 	ctreq->FsSubType = 0;
2406 	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2407 	ctreq->CommandResponse.bits.Size = 0;
2408 
2409 
2410 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2411 	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2412 	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2413 	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2414 
2415 	cmd->un.xseq64.w5.hcsw.Fctl = LA;
2416 	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2417 	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2418 	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2419 
2420 	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2421 	cmd->ulpBdeCount = 1;
2422 	cmd->ulpLe = 1;
2423 	cmd->ulpClass = CLASS3;
2424 	cmd->ulpContext = rpi;
2425 
2426 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2427 	cmdiocbq->vport = phba->pport;
2428 
2429 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2430 				rspiocbq,
2431 				(phba->fc_ratov * 2)
2432 				+ LPFC_DRVR_TIMEOUT);
2433 	if (iocb_stat) {
2434 		ret_val = -EIO;
2435 		goto err_get_xri_exit;
2436 	}
2437 	*txxri =  rsp->ulpContext;
2438 
2439 	evt->waiting = 1;
2440 	evt->wait_time_stamp = jiffies;
2441 	time_left = wait_event_interruptible_timeout(
2442 		evt->wq, !list_empty(&evt->events_to_see),
2443 		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2444 	if (list_empty(&evt->events_to_see))
2445 		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2446 	else {
2447 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2448 		list_move(evt->events_to_see.prev, &evt->events_to_get);
2449 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2450 		*rxxri = (list_entry(evt->events_to_get.prev,
2451 				     typeof(struct event_data),
2452 				     node))->immed_dat;
2453 	}
2454 	evt->waiting = 0;
2455 
2456 err_get_xri_exit:
2457 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2458 	lpfc_bsg_event_unref(evt); /* release ref */
2459 	lpfc_bsg_event_unref(evt); /* delete */
2460 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2461 
2462 	if (dmabuf) {
2463 		if (dmabuf->virt)
2464 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2465 		kfree(dmabuf);
2466 	}
2467 
2468 	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2469 		lpfc_sli_release_iocbq(phba, cmdiocbq);
2470 	if (rspiocbq)
2471 		lpfc_sli_release_iocbq(phba, rspiocbq);
2472 	return ret_val;
2473 }
2474 
2475 /**
2476  * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2477  * @phba: Pointer to HBA context object
2478  *
2479  * This function allocates a BSG_MBOX_SIZE (4KB) page size dma buffer and
2480  * returns a pointer to the buffer descriptor.
2481  **/
2482 static struct lpfc_dmabuf *
2483 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2484 {
2485 	struct lpfc_dmabuf *dmabuf;
2486 	struct pci_dev *pcidev = phba->pcidev;
2487 
2488 	/* allocate dma buffer struct */
2489 	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2490 	if (!dmabuf)
2491 		return NULL;
2492 
2493 	INIT_LIST_HEAD(&dmabuf->list);
2494 
2495 	/* now, allocate dma buffer */
2496 	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2497 					  &(dmabuf->phys), GFP_KERNEL);
2498 
2499 	if (!dmabuf->virt) {
2500 		kfree(dmabuf);
2501 		return NULL;
2502 	}
2503 	memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2504 
2505 	return dmabuf;
2506 }
2507 
2508 /**
2509  * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2510  * @phba: Pointer to HBA context object.
2511  * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2512  *
2513  * This routine simply frees a dma buffer and its associated buffer
2514  * descriptor referred to by @dmabuf.
2515  **/
2516 static void
2517 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2518 {
2519 	struct pci_dev *pcidev = phba->pcidev;
2520 
2521 	if (!dmabuf)
2522 		return;
2523 
2524 	if (dmabuf->virt)
2525 		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2526 				  dmabuf->virt, dmabuf->phys);
2527 	kfree(dmabuf);
2528 	return;
2529 }
2530 
2531 /**
2532  * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2533  * @phba: Pointer to HBA context object.
2534  * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2535  *
2536  * This routine simply frees all dma buffers and their associated buffer
2537  * descriptors referred to by @dmabuf_list.
2538  **/
2539 static void
2540 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2541 			    struct list_head *dmabuf_list)
2542 {
2543 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2544 
2545 	if (list_empty(dmabuf_list))
2546 		return;
2547 
2548 	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2549 		list_del_init(&dmabuf->list);
2550 		lpfc_bsg_dma_page_free(phba, dmabuf);
2551 	}
2552 	return;
2553 }
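
/*
 * Usage sketch (hypothetical, not part of the driver): allocating a bsg mbox
 * page sized dma buffer with lpfc_bsg_dma_page_alloc, chaining it on a local
 * list, and releasing everything with lpfc_bsg_dma_page_list_free.
 */
static int __maybe_unused
lpfc_bsg_dma_page_usage_sketch(struct lpfc_hba *phba)
{
	LIST_HEAD(tmp_list);
	struct lpfc_dmabuf *dmabuf;

	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf)
		return -ENOMEM;
	list_add_tail(&dmabuf->list, &tmp_list);
	/* ... use dmabuf->virt/dmabuf->phys for a mailbox data transfer ... */
	lpfc_bsg_dma_page_list_free(phba, &tmp_list);
	return 0;
}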
2554 
2555 /**
2556  * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2557  * @phba: Pointer to HBA context object
2558  * @bpl: Pointer to 64 bit bde structure
2559  * @size: Number of bytes to process
2560  * @nocopydata: Flag to skip copying user data into the allocated buffers
2561  *
2562  * This function allocates page size buffers and populates an lpfc_dmabufext.
2563  * If allowed, the user data is copied into the kernel memory. The chained
2564  * list of page size buffers is returned.
2565  **/
2566 static struct lpfc_dmabufext *
2567 diag_cmd_data_alloc(struct lpfc_hba *phba,
2568 		   struct ulp_bde64 *bpl, uint32_t size,
2569 		   int nocopydata)
2570 {
2571 	struct lpfc_dmabufext *mlist = NULL;
2572 	struct lpfc_dmabufext *dmp;
2573 	int cnt, offset = 0, i = 0;
2574 	struct pci_dev *pcidev;
2575 
2576 	pcidev = phba->pcidev;
2577 
2578 	while (size) {
2579 		/* We get chunks of 4K */
2580 		if (size > BUF_SZ_4K)
2581 			cnt = BUF_SZ_4K;
2582 		else
2583 			cnt = size;
2584 
2585 		/* allocate struct lpfc_dmabufext buffer header */
2586 		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2587 		if (!dmp)
2588 			goto out;
2589 
2590 		INIT_LIST_HEAD(&dmp->dma.list);
2591 
2592 		/* Queue it to a linked list */
2593 		if (mlist)
2594 			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2595 		else
2596 			mlist = dmp;
2597 
2598 		/* allocate buffer */
2599 		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2600 						   cnt,
2601 						   &(dmp->dma.phys),
2602 						   GFP_KERNEL);
2603 
2604 		if (!dmp->dma.virt)
2605 			goto out;
2606 
2607 		dmp->size = cnt;
2608 
2609 		if (nocopydata) {
2610 			bpl->tus.f.bdeFlags = 0;
2611 			pci_dma_sync_single_for_device(phba->pcidev,
2612 				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2613 
2614 		} else {
2615 			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2616 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2617 		}
2618 
2619 		/* build buffer ptr list for IOCB */
2620 		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2621 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2622 		bpl->tus.f.bdeSize = (ushort) cnt;
2623 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2624 		bpl++;
2625 
2626 		i++;
2627 		offset += cnt;
2628 		size -= cnt;
2629 	}
2630 
2631 	mlist->flag = i;
2632 	return mlist;
2633 out:
2634 	diag_cmd_data_free(phba, mlist);
2635 	return NULL;
2636 }
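
/*
 * Sketch of the size-to-BDE relationship implemented by the allocation loop
 * above (hypothetical helper, for illustration only): one ulp_bde64 entry is
 * consumed per BUF_SZ_4K chunk, rounding up, and that count is what ends up
 * in mlist->flag.
 */
static inline uint32_t __maybe_unused
diag_cmd_data_bde_count_sketch(uint32_t size)
{
	return (size + BUF_SZ_4K - 1) / BUF_SZ_4K;
}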
2637 
2638 /**
2639  * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2640  * @phba: Pointer to HBA context object
2641  * @rxxri: Receive exchange id
2642  * @len: Number of data bytes
2643  *
2644  * This function allocates and posts data buffers of sufficient size to
2645  * receive an unsolicited CT command.
2646  **/
2647 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2648 			     size_t len)
2649 {
2650 	struct lpfc_sli *psli = &phba->sli;
2651 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2652 	struct lpfc_iocbq *cmdiocbq;
2653 	IOCB_t *cmd = NULL;
2654 	struct list_head head, *curr, *next;
2655 	struct lpfc_dmabuf *rxbmp;
2656 	struct lpfc_dmabuf *dmp;
2657 	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2658 	struct ulp_bde64 *rxbpl = NULL;
2659 	uint32_t num_bde;
2660 	struct lpfc_dmabufext *rxbuffer = NULL;
2661 	int ret_val = 0;
2662 	int iocb_stat;
2663 	int i = 0;
2664 
2665 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2666 	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2667 	if (rxbmp != NULL) {
2668 		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2669 		if (rxbmp->virt) {
2670 			INIT_LIST_HEAD(&rxbmp->list);
2671 			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2672 			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2673 		}
2674 	}
2675 
2676 	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
2677 		ret_val = -ENOMEM;
2678 		goto err_post_rxbufs_exit;
2679 	}
2680 
2681 	/* Queue buffers for the receive exchange */
2682 	num_bde = (uint32_t)rxbuffer->flag;
2683 	dmp = &rxbuffer->dma;
2684 
2685 	cmd = &cmdiocbq->iocb;
2686 	i = 0;
2687 
2688 	INIT_LIST_HEAD(&head);
2689 	list_add_tail(&head, &dmp->list);
2690 	list_for_each_safe(curr, next, &head) {
2691 		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2692 		list_del(curr);
2693 
2694 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2695 			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2696 			cmd->un.quexri64cx.buff.bde.addrHigh =
2697 				putPaddrHigh(mp[i]->phys);
2698 			cmd->un.quexri64cx.buff.bde.addrLow =
2699 				putPaddrLow(mp[i]->phys);
2700 			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2701 				((struct lpfc_dmabufext *)mp[i])->size;
2702 			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2703 			cmd->ulpCommand = CMD_QUE_XRI64_CX;
2704 			cmd->ulpPU = 0;
2705 			cmd->ulpLe = 1;
2706 			cmd->ulpBdeCount = 1;
2707 			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2708 
2709 		} else {
2710 			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2711 			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2712 			cmd->un.cont64[i].tus.f.bdeSize =
2713 				((struct lpfc_dmabufext *)mp[i])->size;
2714 			cmd->ulpBdeCount = ++i;
2715 
2716 			if ((--num_bde > 0) && (i < 2))
2717 				continue;
2718 
2719 			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2720 			cmd->ulpLe = 1;
2721 		}
2722 
2723 		cmd->ulpClass = CLASS3;
2724 		cmd->ulpContext = rxxri;
2725 
2726 		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2727 						0);
2728 		if (iocb_stat == IOCB_ERROR) {
2729 			diag_cmd_data_free(phba,
2730 				(struct lpfc_dmabufext *)mp[0]);
2731 			if (mp[1])
2732 				diag_cmd_data_free(phba,
2733 					  (struct lpfc_dmabufext *)mp[1]);
2734 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2735 			ret_val = -EIO;
2736 			goto err_post_rxbufs_exit;
2737 		}
2738 
2739 		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2740 		if (mp[1]) {
2741 			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2742 			mp[1] = NULL;
2743 		}
2744 
2745 		/* The iocb was freed by lpfc_sli_issue_iocb */
2746 		cmdiocbq = lpfc_sli_get_iocbq(phba);
2747 		if (!cmdiocbq) {
2748 			dmp = list_entry(next, struct lpfc_dmabuf, list);
2749 			ret_val = -EIO;
2750 			goto err_post_rxbufs_exit;
2751 		}
2752 
2753 		cmd = &cmdiocbq->iocb;
2754 		i = 0;
2755 	}
2756 	list_del(&head);
2757 
2758 err_post_rxbufs_exit:
2759 
2760 	if (rxbmp) {
2761 		if (rxbmp->virt)
2762 			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2763 		kfree(rxbmp);
2764 	}
2765 
2766 	if (cmdiocbq)
2767 		lpfc_sli_release_iocbq(phba, cmdiocbq);
2768 	return ret_val;
2769 }
2770 
2771 /**
2772  * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
2773  * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2774  *
2775  * This function receives a user data buffer to be transmitted and received on
2776  * the same port. The link must be up and in loopback mode prior
2777  * to this function being called.
2778  * 1. A kernel buffer is allocated to copy the user data into.
2779  * 2. The port registers with "itself".
2780  * 3. The transmit and receive exchange ids are obtained.
2781  * 4. The receive exchange id is posted.
2782  * 5. A new els loopback event is created.
2783  * 6. The command and response iocbs are allocated.
2784  * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2785  *
2786  * This function is meant to be called n times while the port is in loopback
2787  * so it is the app's responsibility to issue a reset to take the port out
2788  * of loopback mode.
2789  **/
2790 static int
2791 lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2792 {
2793 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2794 	struct lpfc_hba *phba = vport->phba;
2795 	struct diag_mode_test *diag_mode;
2796 	struct lpfc_bsg_event *evt;
2797 	struct event_data *evdat;
2798 	struct lpfc_sli *psli = &phba->sli;
2799 	uint32_t size;
2800 	uint32_t full_size;
2801 	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2802 	uint16_t rpi = 0;
2803 	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
2804 	IOCB_t *cmd, *rsp = NULL;
2805 	struct lpfc_sli_ct_request *ctreq;
2806 	struct lpfc_dmabuf *txbmp;
2807 	struct ulp_bde64 *txbpl = NULL;
2808 	struct lpfc_dmabufext *txbuffer = NULL;
2809 	struct list_head head;
2810 	struct lpfc_dmabuf  *curr;
2811 	uint16_t txxri = 0, rxxri;
2812 	uint32_t num_bde;
2813 	uint8_t *ptr = NULL, *rx_databuf = NULL;
2814 	int rc = 0;
2815 	int time_left;
2816 	int iocb_stat;
2817 	unsigned long flags;
2818 	void *dataout = NULL;
2819 	uint32_t total_mem;
2820 
2821 	/* in case no data is returned, just return the return code */
2822 	job->reply->reply_payload_rcv_len = 0;
2823 
2824 	if (job->request_len <
2825 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2826 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2827 				"2739 Received DIAG TEST request below minimum "
2828 				"size\n");
2829 		rc = -EINVAL;
2830 		goto loopback_test_exit;
2831 	}
2832 
2833 	if (job->request_payload.payload_len !=
2834 		job->reply_payload.payload_len) {
2835 		rc = -EINVAL;
2836 		goto loopback_test_exit;
2837 	}
2838 	diag_mode = (struct diag_mode_test *)
2839 		job->request->rqst_data.h_vendor.vendor_cmd;
2840 
2841 	if ((phba->link_state == LPFC_HBA_ERROR) ||
2842 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2843 	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2844 		rc = -EACCES;
2845 		goto loopback_test_exit;
2846 	}
2847 
2848 	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2849 		rc = -EACCES;
2850 		goto loopback_test_exit;
2851 	}
2852 
2853 	size = job->request_payload.payload_len;
2854 	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2855 
2856 	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2857 		rc = -ERANGE;
2858 		goto loopback_test_exit;
2859 	}
2860 
2861 	if (full_size >= BUF_SZ_4K) {
2862 		/*
2863 		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2864 		 * then we allocate 64k and re-use that buffer over and over to
2865 		 * xfer the whole block. This is because the Linux kernel has a
2866 		 * problem allocating more than 120k of kernel space memory. Saw
2867 		 * a problem with GET_FCPTARGETMAPPING...
2868 		 */
2869 		if (size <= (64 * 1024))
2870 			total_mem = full_size;
2871 		else
2872 			total_mem = 64 * 1024;
2873 	} else
2874 		/* Allocate memory for ioctl data */
2875 		total_mem = BUF_SZ_4K;
2876 
2877 	dataout = kmalloc(total_mem, GFP_KERNEL);
2878 	if (dataout == NULL) {
2879 		rc = -ENOMEM;
2880 		goto loopback_test_exit;
2881 	}
2882 
2883 	ptr = dataout;
2884 	ptr += ELX_LOOPBACK_HEADER_SZ;
2885 	sg_copy_to_buffer(job->request_payload.sg_list,
2886 				job->request_payload.sg_cnt,
2887 				ptr, size);
2888 	rc = lpfcdiag_loop_self_reg(phba, &rpi);
2889 	if (rc)
2890 		goto loopback_test_exit;
2891 
2892 	if (phba->sli_rev < LPFC_SLI_REV4) {
2893 		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2894 		if (rc) {
2895 			lpfcdiag_loop_self_unreg(phba, rpi);
2896 			goto loopback_test_exit;
2897 		}
2898 
2899 		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2900 		if (rc) {
2901 			lpfcdiag_loop_self_unreg(phba, rpi);
2902 			goto loopback_test_exit;
2903 		}
2904 	}
2905 	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2906 				SLI_CT_ELX_LOOPBACK);
2907 	if (!evt) {
2908 		lpfcdiag_loop_self_unreg(phba, rpi);
2909 		rc = -ENOMEM;
2910 		goto loopback_test_exit;
2911 	}
2912 
2913 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2914 	list_add(&evt->node, &phba->ct_ev_waiters);
2915 	lpfc_bsg_event_ref(evt);
2916 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2917 
2918 	cmdiocbq = lpfc_sli_get_iocbq(phba);
2919 	if (phba->sli_rev < LPFC_SLI_REV4)
2920 		rspiocbq = lpfc_sli_get_iocbq(phba);
2921 	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2922 
2923 	if (txbmp) {
2924 		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2925 		if (txbmp->virt) {
2926 			INIT_LIST_HEAD(&txbmp->list);
2927 			txbpl = (struct ulp_bde64 *) txbmp->virt;
2928 			txbuffer = diag_cmd_data_alloc(phba,
2929 							txbpl, full_size, 0);
2930 		}
2931 	}
2932 
2933 	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
2934 		rc = -ENOMEM;
2935 		goto err_loopback_test_exit;
2936 	}
2937 	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
2938 		rc = -ENOMEM;
2939 		goto err_loopback_test_exit;
2940 	}
2941 
2942 	cmd = &cmdiocbq->iocb;
2943 	if (phba->sli_rev < LPFC_SLI_REV4)
2944 		rsp = &rspiocbq->iocb;
2945 
2946 	INIT_LIST_HEAD(&head);
2947 	list_add_tail(&head, &txbuffer->dma.list);
2948 	list_for_each_entry(curr, &head, list) {
2949 		segment_len = ((struct lpfc_dmabufext *)curr)->size;
2950 		if (current_offset == 0) {
2951 			ctreq = curr->virt;
2952 			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2953 			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2954 			ctreq->RevisionId.bits.InId = 0;
2955 			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2956 			ctreq->FsSubType = 0;
2957 			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2958 			ctreq->CommandResponse.bits.Size   = size;
2959 			segment_offset = ELX_LOOPBACK_HEADER_SZ;
2960 		} else
2961 			segment_offset = 0;
2962 
2963 		BUG_ON(segment_offset >= segment_len);
2964 		memcpy(curr->virt + segment_offset,
2965 			ptr + current_offset,
2966 			segment_len - segment_offset);
2967 
2968 		current_offset += segment_len - segment_offset;
2969 		BUG_ON(current_offset > size);
2970 	}
2971 	list_del(&head);
2972 
2973 	/* Build the XMIT_SEQUENCE iocb */
2974 	num_bde = (uint32_t)txbuffer->flag;
2975 
2976 	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2977 	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2978 	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2979 	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2980 
2981 	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2982 	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2983 	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2984 	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2985 
2986 	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2987 	cmd->ulpBdeCount = 1;
2988 	cmd->ulpLe = 1;
2989 	cmd->ulpClass = CLASS3;
2990 
2991 	if (phba->sli_rev < LPFC_SLI_REV4) {
2992 		cmd->ulpContext = txxri;
2993 	} else {
2994 		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
2995 		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
2996 		cmdiocbq->context3 = txbmp;
2997 		cmdiocbq->sli4_xritag = NO_XRI;
2998 		cmd->unsli3.rcvsli3.ox_id = 0xffff;
2999 	}
3000 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3001 	cmdiocbq->vport = phba->pport;
3002 	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3003 					     rspiocbq, (phba->fc_ratov * 2) +
3004 					     LPFC_DRVR_TIMEOUT);
3005 
3006 	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
3007 					   (rsp->ulpStatus != IOCB_SUCCESS))) {
3008 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3009 				"3126 Failed loopback test issue iocb: "
3010 				"iocb_stat:x%x\n", iocb_stat);
3011 		rc = -EIO;
3012 		goto err_loopback_test_exit;
3013 	}
3014 
3015 	evt->waiting = 1;
3016 	time_left = wait_event_interruptible_timeout(
3017 		evt->wq, !list_empty(&evt->events_to_see),
3018 		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
3019 	evt->waiting = 0;
3020 	if (list_empty(&evt->events_to_see)) {
3021 		rc = (time_left) ? -EINTR : -ETIMEDOUT;
3022 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3023 				"3125 Not receiving unsolicited event, "
3024 				"rc:x%x\n", rc);
3025 	} else {
3026 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
3027 		list_move(evt->events_to_see.prev, &evt->events_to_get);
3028 		evdat = list_entry(evt->events_to_get.prev,
3029 				   typeof(*evdat), node);
3030 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3031 		rx_databuf = evdat->data;
3032 		if (evdat->len != full_size) {
3033 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3034 				"1603 Loopback test did not receive expected "
3035 				"data length. actual length 0x%x expected "
3036 				"length 0x%x\n",
3037 				evdat->len, full_size);
3038 			rc = -EIO;
3039 		} else if (rx_databuf == NULL)
3040 			rc = -EIO;
3041 		else {
3042 			rc = IOCB_SUCCESS;
3043 			/* skip over elx loopback header */
3044 			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3045 			job->reply->reply_payload_rcv_len =
3046 				sg_copy_from_buffer(job->reply_payload.sg_list,
3047 						    job->reply_payload.sg_cnt,
3048 						    rx_databuf, size);
3049 			job->reply->reply_payload_rcv_len = size;
3050 		}
3051 	}
3052 
3053 err_loopback_test_exit:
3054 	lpfcdiag_loop_self_unreg(phba, rpi);
3055 
3056 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3057 	lpfc_bsg_event_unref(evt); /* release ref */
3058 	lpfc_bsg_event_unref(evt); /* delete */
3059 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3060 
3061 	if (cmdiocbq != NULL)
3062 		lpfc_sli_release_iocbq(phba, cmdiocbq);
3063 
3064 	if (rspiocbq != NULL)
3065 		lpfc_sli_release_iocbq(phba, rspiocbq);
3066 
3067 	if (txbmp != NULL) {
3068 		if (txbpl != NULL) {
3069 			if (txbuffer != NULL)
3070 				diag_cmd_data_free(phba, txbuffer);
3071 			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3072 		}
3073 		kfree(txbmp);
3074 	}
3075 
3076 loopback_test_exit:
3077 	kfree(dataout);
3078 	/* make error code available to userspace */
3079 	job->reply->result = rc;
3080 	job->dd_data = NULL;
3081 	/* complete the job back to userspace if no error */
3082 	if (rc == IOCB_SUCCESS)
3083 		job->job_done(job);
3084 	return rc;
3085 }
3086 
3087 /**
3088  * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3089  * @job: GET_DFC_REV fc_bsg_job
3090  **/
3091 static int
3092 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
3093 {
3094 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3095 	struct lpfc_hba *phba = vport->phba;
3096 	struct get_mgmt_rev *event_req;
3097 	struct get_mgmt_rev_reply *event_reply;
3098 	int rc = 0;
3099 
3100 	if (job->request_len <
3101 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3102 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3103 				"2740 Received GET_DFC_REV request below "
3104 				"minimum size\n");
3105 		rc = -EINVAL;
3106 		goto job_error;
3107 	}
3108 
3109 	event_req = (struct get_mgmt_rev *)
3110 		job->request->rqst_data.h_vendor.vendor_cmd;
3111 
3112 	event_reply = (struct get_mgmt_rev_reply *)
3113 		job->reply->reply_data.vendor_reply.vendor_rsp;
3114 
3115 	if (job->reply_len <
3116 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
3117 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3118 				"2741 Received GET_DFC_REV reply below "
3119 				"minimum size\n");
3120 		rc = -EINVAL;
3121 		goto job_error;
3122 	}
3123 
3124 	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3125 	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3126 job_error:
3127 	job->reply->result = rc;
3128 	if (rc == 0)
3129 		job->job_done(job);
3130 	return rc;
3131 }
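
/*
 * Sketch (hypothetical helper): the minimum-size validation pattern used by
 * the bsg vendor command handlers in this file, such as lpfc_bsg_get_dfc_rev
 * above. @cmd_size is the size of the vendor-specific command structure.
 */
static inline int __maybe_unused
lpfc_bsg_check_request_len_sketch(struct fc_bsg_job *job, size_t cmd_size)
{
	if (job->request_len < sizeof(struct fc_bsg_request) + cmd_size)
		return -EINVAL;
	return 0;
}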
3132 
3133 /**
3134  * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3135  * @phba: Pointer to HBA context object.
3136  * @pmboxq: Pointer to mailbox command.
3137  *
3138  * This is the completion handler function for mailbox commands issued from
3139  * the lpfc_bsg_issue_mbox function. This function is called by the
3140  * mailbox event handler function with no lock held. This function
3141  * will wake up the thread waiting on the wait queue pointed to by context1
3142  * of the mailbox.
3143  **/
3144 void
3145 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3146 {
3147 	struct bsg_job_data *dd_data;
3148 	struct fc_bsg_job *job;
3149 	uint32_t size;
3150 	unsigned long flags;
3151 	uint8_t *pmb, *pmb_buf;
3152 
3153 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3154 	dd_data = pmboxq->context1;
3155 	/* job already timed out? */
3156 	if (!dd_data) {
3157 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3158 		return;
3159 	}
3160 
3161 	/*
3162 	 * The outgoing buffer is readily referenced from the dma buffer;
3163 	 * we just need to get the header part from the mailboxq structure.
3164 	 */
3165 	pmb = (uint8_t *)&pmboxq->u.mb;
3166 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3167 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3168 
3169 	job = dd_data->context_un.mbox.set_job;
3170 	if (job) {
3171 		size = job->reply_payload.payload_len;
3172 		job->reply->reply_payload_rcv_len =
3173 			sg_copy_from_buffer(job->reply_payload.sg_list,
3174 					    job->reply_payload.sg_cnt,
3175 					    pmb_buf, size);
3176 		/* need to hold the lock until we set job->dd_data to NULL
3177 		 * to hold off the timeout handler returning to the mid-layer
3178 		 * while we are still processing the job.
3179 		 */
3180 		job->dd_data = NULL;
3181 		dd_data->context_un.mbox.set_job = NULL;
3182 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3183 	} else {
3184 		dd_data->context_un.mbox.set_job = NULL;
3185 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3186 	}
3187 
3188 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3189 	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3190 	kfree(dd_data);
3191 
3192 	if (job) {
3193 		job->reply->result = 0;
3194 		job->job_done(job);
3195 	}
3196 	return;
3197 }
3198 
3199 /**
3200  * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3201  * @phba: Pointer to HBA context object.
3202  * @mb: Pointer to a mailbox object.
3203  * @vport: Pointer to a vport object.
3204  *
3205  * Some commands require the port to be offline; some may not be called from
3206  * the application.
3207  **/
3208 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3209 	MAILBOX_t *mb, struct lpfc_vport *vport)
3210 {
3211 	/* return negative error values for bsg job */
3212 	switch (mb->mbxCommand) {
3213 	/* Offline only */
3214 	case MBX_INIT_LINK:
3215 	case MBX_DOWN_LINK:
3216 	case MBX_CONFIG_LINK:
3217 	case MBX_CONFIG_RING:
3218 	case MBX_RESET_RING:
3219 	case MBX_UNREG_LOGIN:
3220 	case MBX_CLEAR_LA:
3221 	case MBX_DUMP_CONTEXT:
3222 	case MBX_RUN_DIAGS:
3223 	case MBX_RESTART:
3224 	case MBX_SET_MASK:
3225 		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3226 			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3227 				"2743 Command 0x%x is illegal in on-line "
3228 				"state\n",
3229 				mb->mbxCommand);
3230 			return -EPERM;
3231 		}
3232 	case MBX_WRITE_NV:
3233 	case MBX_WRITE_VPARMS:
3234 	case MBX_LOAD_SM:
3235 	case MBX_READ_NV:
3236 	case MBX_READ_CONFIG:
3237 	case MBX_READ_RCONFIG:
3238 	case MBX_READ_STATUS:
3239 	case MBX_READ_XRI:
3240 	case MBX_READ_REV:
3241 	case MBX_READ_LNK_STAT:
3242 	case MBX_DUMP_MEMORY:
3243 	case MBX_DOWN_LOAD:
3244 	case MBX_UPDATE_CFG:
3245 	case MBX_KILL_BOARD:
3246 	case MBX_LOAD_AREA:
3247 	case MBX_LOAD_EXP_ROM:
3248 	case MBX_BEACON:
3249 	case MBX_DEL_LD_ENTRY:
3250 	case MBX_SET_DEBUG:
3251 	case MBX_WRITE_WWN:
3252 	case MBX_SLI4_CONFIG:
3253 	case MBX_READ_EVENT_LOG:
3254 	case MBX_READ_EVENT_LOG_STATUS:
3255 	case MBX_WRITE_EVENT_LOG:
3256 	case MBX_PORT_CAPABILITIES:
3257 	case MBX_PORT_IOV_CONTROL:
3258 	case MBX_RUN_BIU_DIAG64:
3259 		break;
3260 	case MBX_SET_VARIABLE:
3261 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3262 			"1226 mbox: set_variable 0x%x, 0x%x\n",
3263 			mb->un.varWords[0],
3264 			mb->un.varWords[1]);
3265 		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3266 			&& (mb->un.varWords[1] == 1)) {
3267 			phba->wait_4_mlo_maint_flg = 1;
3268 		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
3269 			spin_lock_irq(&phba->hbalock);
3270 			phba->link_flag &= ~LS_LOOPBACK_MODE;
3271 			spin_unlock_irq(&phba->hbalock);
3272 			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3273 		}
3274 		break;
3275 	case MBX_READ_SPARM64:
3276 	case MBX_READ_TOPOLOGY:
3277 	case MBX_REG_LOGIN:
3278 	case MBX_REG_LOGIN64:
3279 	case MBX_CONFIG_PORT:
3280 	case MBX_RUN_BIU_DIAG:
3281 	default:
3282 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3283 			"2742 Unknown Command 0x%x\n",
3284 			mb->mbxCommand);
3285 		return -EPERM;
3286 	}
3287 
3288 	return 0; /* ok */
3289 }
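
/*
 * Usage sketch (hypothetical, not part of the driver): gating a mailbox
 * command with lpfc_bsg_check_cmd_access before issuing it, as the bsg
 * mailbox pass-through path is expected to do.
 */
static int __maybe_unused
lpfc_bsg_checked_mbox_issue_sketch(struct lpfc_hba *phba,
				   LPFC_MBOXQ_t *pmboxq,
				   struct lpfc_vport *vport)
{
	int rc;

	rc = lpfc_bsg_check_cmd_access(phba, &pmboxq->u.mb, vport);
	if (rc)
		return rc; /* -EPERM for disallowed commands */
	return lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
}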
3290 
3291 /**
3292  * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
3293  * @phba: Pointer to HBA context object.
3294  *
3295  * This routine cleans up and resets the BSG handling of a multi-buffer
3296  * mbox command session.
3297  **/
3298 static void
3299 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3300 {
3301 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3302 		return;
3303 
3304 	/* free all memory, including dma buffers */
3305 	lpfc_bsg_dma_page_list_free(phba,
3306 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3307 	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3308 	/* multi-buffer write mailbox command pass-through complete */
3309 	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3310 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3311 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3312 
3313 	return;
3314 }
3315 
3316 /**
3317  * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3318  * @phba: Pointer to HBA context object.
3319  * @pmboxq: Pointer to mailbox command.
3320  *
3321  * This routine handles BSG job completion for mailbox commands with
3322  * multiple external buffers.
3323  **/
3324 static struct fc_bsg_job *
3325 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3326 {
3327 	struct bsg_job_data *dd_data;
3328 	struct fc_bsg_job *job;
3329 	uint8_t *pmb, *pmb_buf;
3330 	unsigned long flags;
3331 	uint32_t size;
3332 	int rc = 0;
3333 	struct lpfc_dmabuf *dmabuf;
3334 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3335 	uint8_t *pmbx;
3336 
3337 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3338 	dd_data = pmboxq->context1;
3339 	/* has the job already timed out? */
3340 	if (!dd_data) {
3341 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3342 		job = NULL;
3343 		goto job_done_out;
3344 	}
3345 
3346 	/*
3347 	 * The outgoing buffer is readily referenced from the dma buffer;
3348 	 * we just need to get the header part from the mailboxq structure.
3349 	 */
3350 	pmb = (uint8_t *)&pmboxq->u.mb;
3351 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3352 	/* Copy the byte swapped response mailbox back to the user */
3353 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3354 	/* if there is any non-embedded extended data copy that too */
3355 	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3356 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3357 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3358 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3359 		pmbx = (uint8_t *)dmabuf->virt;
3360 		/* byte swap the extended data following the mailbox command */
3361 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3362 			&pmbx[sizeof(MAILBOX_t)],
3363 			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3364 	}
3365 
3366 	job = dd_data->context_un.mbox.set_job;
3367 	if (job) {
3368 		size = job->reply_payload.payload_len;
3369 		job->reply->reply_payload_rcv_len =
3370 			sg_copy_from_buffer(job->reply_payload.sg_list,
3371 					    job->reply_payload.sg_cnt,
3372 					    pmb_buf, size);
3373 		/* result for successful */
3374 		job->reply->result = 0;
3375 		job->dd_data = NULL;
3376 		/* need to hold the lock until we set job->dd_data to NULL
3377 		 * to hold off the timeout handler from the midlayer taking
3378 		 * any action.
3379 		 */
3380 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3381 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3382 				"2937 SLI_CONFIG ext-buffer mailbox command "
3383 				"(x%x/x%x) complete bsg job done, bsize:%d\n",
3384 				phba->mbox_ext_buf_ctx.nembType,
3385 				phba->mbox_ext_buf_ctx.mboxType, size);
3386 		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3387 					phba->mbox_ext_buf_ctx.nembType,
3388 					phba->mbox_ext_buf_ctx.mboxType,
3389 					dma_ebuf, sta_pos_addr,
3390 					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3391 	} else
3392 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3393 
3394 job_done_out:
3395 	if (!job)
3396 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3397 				"2938 SLI_CONFIG ext-buffer mailbox "
3398 				"command (x%x/x%x) failure, rc:x%x\n",
3399 				phba->mbox_ext_buf_ctx.nembType,
3400 				phba->mbox_ext_buf_ctx.mboxType, rc);
3401 	/* state change */
3402 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3403 	kfree(dd_data);
3404 
3405 	return job;
3406 }
3407 
3408 /**
3409  * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3410  * @phba: Pointer to HBA context object.
3411  * @pmboxq: Pointer to mailbox command.
3412  *
3413  * This is completion handler function for mailbox read commands with multiple
3414  * external buffers.
3415  **/
3416 static void
3417 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3418 {
3419 	struct fc_bsg_job *job;
3420 
3421 	/* handle the BSG job with mailbox command */
3422 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3423 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3424 
3425 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3426 			"2939 SLI_CONFIG ext-buffer rd mailbox command "
3427 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3428 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3429 
3430 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3431 
3432 	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3433 		lpfc_bsg_mbox_ext_session_reset(phba);
3434 
3435 	/* free base driver mailbox structure memory */
3436 	mempool_free(pmboxq, phba->mbox_mem_pool);
3437 
3438 	/* complete the bsg job if we have it */
3439 	if (job)
3440 		job->job_done(job);
3441 
3442 	return;
3443 }
3444 
3445 /**
3446  * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3447  * @phba: Pointer to HBA context object.
3448  * @pmboxq: Pointer to mailbox command.
3449  *
3450  * This is completion handler function for mailbox write commands with multiple
3451  * external buffers.
3452  **/
3453 static void
3454 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3455 {
3456 	struct fc_bsg_job *job;
3457 
3458 	/* handle the BSG job with the mailbox command */
3459 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3460 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3461 
3462 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3463 			"2940 SLI_CONFIG ext-buffer wr mailbox command "
3464 			"complete, ctxState:x%x, mbxStatus:x%x\n",
3465 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3466 
3467 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3468 
3469 	/* free all memory, including dma buffers */
3470 	mempool_free(pmboxq, phba->mbox_mem_pool);
3471 	lpfc_bsg_mbox_ext_session_reset(phba);
3472 
3473 	/* complete the bsg job if we have it */
3474 	if (job)
3475 		job->job_done(job);
3476 
3477 	return;
3478 }
3479 
3480 static void
3481 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3482 				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3483 				struct lpfc_dmabuf *ext_dmabuf)
3484 {
3485 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3486 
3487 	/* pointer to the start of mailbox command */
3488 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3489 
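	/* descriptor 0 describes the data area that follows the MAILBOX_t
	 * header inside the mailbox DMA page itself; descriptors 1 and up
	 * point at separately allocated external buffer pages
	 */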
3490 	if (nemb_tp == nemb_mse) {
3491 		if (index == 0) {
3492 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3493 				mse[index].pa_hi =
3494 				putPaddrHigh(mbx_dmabuf->phys +
3495 					     sizeof(MAILBOX_t));
3496 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3497 				mse[index].pa_lo =
3498 				putPaddrLow(mbx_dmabuf->phys +
3499 					    sizeof(MAILBOX_t));
3500 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3501 					"2943 SLI_CONFIG(mse)[%d], "
3502 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3503 					index,
3504 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3505 					mse[index].buf_len,
3506 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3507 					mse[index].pa_hi,
3508 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3509 					mse[index].pa_lo);
3510 		} else {
3511 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3512 				mse[index].pa_hi =
3513 				putPaddrHigh(ext_dmabuf->phys);
3514 			sli_cfg_mbx->un.sli_config_emb0_subsys.
3515 				mse[index].pa_lo =
3516 				putPaddrLow(ext_dmabuf->phys);
3517 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3518 					"2944 SLI_CONFIG(mse)[%d], "
3519 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3520 					index,
3521 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3522 					mse[index].buf_len,
3523 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3524 					mse[index].pa_hi,
3525 					sli_cfg_mbx->un.sli_config_emb0_subsys.
3526 					mse[index].pa_lo);
3527 		}
3528 	} else {
3529 		if (index == 0) {
3530 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3531 				hbd[index].pa_hi =
3532 				putPaddrHigh(mbx_dmabuf->phys +
3533 					     sizeof(MAILBOX_t));
3534 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3535 				hbd[index].pa_lo =
3536 				putPaddrLow(mbx_dmabuf->phys +
3537 					    sizeof(MAILBOX_t));
3538 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3539 					"3007 SLI_CONFIG(hbd)[%d], "
3540 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3541 				index,
3542 				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3543 				&sli_cfg_mbx->un.
3544 				sli_config_emb1_subsys.hbd[index]),
3545 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3546 				hbd[index].pa_hi,
3547 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3548 				hbd[index].pa_lo);
3549 
3550 		} else {
3551 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3552 				hbd[index].pa_hi =
3553 				putPaddrHigh(ext_dmabuf->phys);
3554 			sli_cfg_mbx->un.sli_config_emb1_subsys.
3555 				hbd[index].pa_lo =
3556 				putPaddrLow(ext_dmabuf->phys);
3557 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3558 					"3008 SLI_CONFIG(hbd)[%d], "
3559 					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3560 				index,
3561 				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3562 				&sli_cfg_mbx->un.
3563 				sli_config_emb1_subsys.hbd[index]),
3564 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3565 				hbd[index].pa_hi,
3566 				sli_cfg_mbx->un.sli_config_emb1_subsys.
3567 				hbd[index].pa_lo);
3568 		}
3569 	}
3570 	return;
3571 }
3572 
3573 /**
3574  * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3575  * @phba: Pointer to HBA context object.
3576  * @job: Pointer to the fc_bsg_job for this request.
3577  * @nemb_tp: Enumerated type of the non-embedded mailbox command.
3578  * @dmabuf: Pointer to a DMA buffer descriptor.
3579  *
3580  * This routine performs the SLI_CONFIG (0x9B) read mailbox command operation
3581  * with non-embedded external buffers.
3582  **/
3583 static int
3584 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3585 			      enum nemb_type nemb_tp,
3586 			      struct lpfc_dmabuf *dmabuf)
3587 {
3588 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3589 	struct dfc_mbox_req *mbox_req;
3590 	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3591 	uint32_t ext_buf_cnt, ext_buf_index;
3592 	struct lpfc_dmabuf *ext_dmabuf = NULL;
3593 	struct bsg_job_data *dd_data = NULL;
3594 	LPFC_MBOXQ_t *pmboxq = NULL;
3595 	MAILBOX_t *pmb;
3596 	uint8_t *pmbx;
3597 	int rc, i;
3598 
3599 	mbox_req =
3600 	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3601 
3602 	/* pointer to the start of mailbox command */
3603 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3604 
3605 	if (nemb_tp == nemb_mse) {
3606 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3607 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3608 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3609 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3610 					"2945 Handled SLI_CONFIG(mse) rd, "
3611 					"ext_buf_cnt(%d) out of range(%d)\n",
3612 					ext_buf_cnt,
3613 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3614 			rc = -ERANGE;
3615 			goto job_error;
3616 		}
3617 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3618 				"2941 Handled SLI_CONFIG(mse) rd, "
3619 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3620 	} else {
3621 		/* sanity check on interface type for support */
3622 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3623 		    LPFC_SLI_INTF_IF_TYPE_2) {
3624 			rc = -ENODEV;
3625 			goto job_error;
3626 		}
3627 		/* nemb_tp == nemb_hbd */
3628 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3629 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3630 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3631 					"2946 Handled SLI_CONFIG(hbd) rd, "
3632 					"ext_buf_cnt(%d) out of range(%d)\n",
3633 					ext_buf_cnt,
3634 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3635 			rc = -ERANGE;
3636 			goto job_error;
3637 		}
3638 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3639 				"2942 Handled SLI_CONFIG(hbd) rd, "
3640 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3641 	}
3642 
3643 	/* before dma descriptor setup */
3644 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3645 					sta_pre_addr, dmabuf, ext_buf_cnt);
3646 
3647 	/* reject a non-embedded mailbox command with no external buffer */
3648 	if (ext_buf_cnt == 0) {
3649 		rc = -EPERM;
3650 		goto job_error;
3651 	} else if (ext_buf_cnt > 1) {
3652 		/* additional external read buffers */
3653 		for (i = 1; i < ext_buf_cnt; i++) {
3654 			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3655 			if (!ext_dmabuf) {
3656 				rc = -ENOMEM;
3657 				goto job_error;
3658 			}
3659 			list_add_tail(&ext_dmabuf->list,
3660 				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3661 		}
3662 	}
3663 
3664 	/* bsg tracking structure */
3665 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3666 	if (!dd_data) {
3667 		rc = -ENOMEM;
3668 		goto job_error;
3669 	}
3670 
3671 	/* mailbox command structure for base driver */
3672 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3673 	if (!pmboxq) {
3674 		rc = -ENOMEM;
3675 		goto job_error;
3676 	}
3677 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3678 
3679 	/* for the first external buffer */
3680 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
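	/* (index 0's data area immediately follows the MAILBOX_t header in
	 * the same DMA page, hence dmabuf is passed as both the mailbox and
	 * the external buffer)
	 */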
3681 
3682 	/* for the rest of external buffer descriptors if any */
3683 	if (ext_buf_cnt > 1) {
3684 		ext_buf_index = 1;
3685 		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3686 				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3687 			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3688 						ext_buf_index, dmabuf,
3689 						curr_dmabuf);
3690 			ext_buf_index++;
3691 		}
3692 	}
3693 
3694 	/* after dma descriptor setup */
3695 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3696 					sta_pos_addr, dmabuf, ext_buf_cnt);
3697 
3698 	/* construct base driver mbox command */
3699 	pmb = &pmboxq->u.mb;
3700 	pmbx = (uint8_t *)dmabuf->virt;
3701 	memcpy(pmb, pmbx, sizeof(*pmb));
3702 	pmb->mbxOwner = OWN_HOST;
3703 	pmboxq->vport = phba->pport;
3704 
3705 	/* multi-buffer handling context */
3706 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3707 	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3708 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3709 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3710 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3711 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3712 
3713 	/* callback for multi-buffer read mailbox command */
3714 	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3715 
3716 	/* context fields to callback function */
3717 	pmboxq->context1 = dd_data;
3718 	dd_data->type = TYPE_MBOX;
3719 	dd_data->context_un.mbox.pmboxq = pmboxq;
3720 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3721 	dd_data->context_un.mbox.set_job = job;
3722 	job->dd_data = dd_data;
3723 
3724 	/* state change */
3725 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3726 
3727 	/*
3728 	 * Non-embedded mailbox subcommand data gets byte swapped here because
3729 	 * the lower level driver code only does the first 64 mailbox words.
3730 	 */
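	/* Note: source and destination here are the same address, so this
	 * lpfc_sli_pcimem_bcopy() performs an in-place byte swap of the
	 * first MSE buffer contents.
	 */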
3731 	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3732 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3733 		(nemb_tp == nemb_mse))
3734 		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3735 			&pmbx[sizeof(MAILBOX_t)],
3736 				sli_cfg_mbx->un.sli_config_emb0_subsys.
3737 					mse[0].buf_len);
3738 
3739 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3740 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3741 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3742 				"2947 Issued SLI_CONFIG ext-buffer "
3743 				"mailbox command, rc:x%x\n", rc);
3744 		return SLI_CONFIG_HANDLED;
3745 	}
3746 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3747 			"2948 Failed to issue SLI_CONFIG ext-buffer "
3748 			"mailbox command, rc:x%x\n", rc);
3749 	rc = -EPIPE;
3750 
3751 job_error:
3752 	if (pmboxq)
3753 		mempool_free(pmboxq, phba->mbox_mem_pool);
3754 	lpfc_bsg_dma_page_list_free(phba,
3755 				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3756 	kfree(dd_data);
3757 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3758 	return rc;
3759 }
3760 
3761 /**
3762  * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3763  * @phba: Pointer to HBA context object.
3764  * @job: Pointer to the fc_bsg_job for this request.
3765  * @dmabuf: Pointer to a DMA buffer descriptor.
3766  *
3767  * This routine performs the SLI_CONFIG (0x9B) write mailbox command operation
3768  * with non-embedded external buffers.
3769  **/
3770 static int
3771 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3772 			       enum nemb_type nemb_tp,
3773 			       struct lpfc_dmabuf *dmabuf)
3774 {
3775 	struct dfc_mbox_req *mbox_req;
3776 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3777 	uint32_t ext_buf_cnt;
3778 	struct bsg_job_data *dd_data = NULL;
3779 	LPFC_MBOXQ_t *pmboxq = NULL;
3780 	MAILBOX_t *pmb;
3781 	uint8_t *mbx;
3782 	int rc = SLI_CONFIG_NOT_HANDLED, i;
3783 
3784 	mbox_req =
3785 	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3786 
3787 	/* pointer to the start of mailbox command */
3788 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3789 
3790 	if (nemb_tp == nemb_mse) {
3791 		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3792 			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3793 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3794 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3795 					"2953 Failed SLI_CONFIG(mse) wr, "
3796 					"ext_buf_cnt(%d) out of range(%d)\n",
3797 					ext_buf_cnt,
3798 					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3799 			return -ERANGE;
3800 		}
3801 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3802 				"2949 Handled SLI_CONFIG(mse) wr, "
3803 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3804 	} else {
3805 		/* sanity check on interface type for support */
3806 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3807 		    LPFC_SLI_INTF_IF_TYPE_2)
3808 			return -ENODEV;
3809 		/* nemb_tp == nemb_hbd */
3810 		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3811 		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3812 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3813 					"2954 Failed SLI_CONFIG(hbd) wr, "
3814 					"ext_buf_cnt(%d) out of range(%d)\n",
3815 					ext_buf_cnt,
3816 					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3817 			return -ERANGE;
3818 		}
3819 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3820 				"2950 Handled SLI_CONFIG(hbd) wr, "
3821 				"ext_buf_cnt:%d\n", ext_buf_cnt);
3822 	}
3823 
3824 	/* before dma buffer descriptor setup */
3825 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3826 					sta_pre_addr, dmabuf, ext_buf_cnt);
3827 
3828 	if (ext_buf_cnt == 0)
3829 		return -EPERM;
3830 
3831 	/* for the first external buffer */
3832 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3833 
3834 	/* after dma descriptor setup */
3835 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3836 					sta_pos_addr, dmabuf, ext_buf_cnt);
3837 
3838 	/* log the lengths of the remaining external buffers */
3839 	for (i = 1; i < ext_buf_cnt; i++) {
3840 		if (nemb_tp == nemb_mse)
3841 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3842 				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3843 				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3844 				mse[i].buf_len);
3845 		else
3846 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3847 				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3848 				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3849 				&sli_cfg_mbx->un.sli_config_emb1_subsys.
3850 				hbd[i]));
3851 	}
3852 
3853 	/* multi-buffer handling context */
3854 	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3855 	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3856 	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3857 	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3858 	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3859 	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3860 
3861 	if (ext_buf_cnt == 1) {
3862 		/* bsg tracking structure */
3863 		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3864 		if (!dd_data) {
3865 			rc = -ENOMEM;
3866 			goto job_error;
3867 		}
3868 
3869 		/* mailbox command structure for base driver */
3870 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3871 		if (!pmboxq) {
3872 			rc = -ENOMEM;
3873 			goto job_error;
3874 		}
3875 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3876 		pmb = &pmboxq->u.mb;
3877 		mbx = (uint8_t *)dmabuf->virt;
3878 		memcpy(pmb, mbx, sizeof(*pmb));
3879 		pmb->mbxOwner = OWN_HOST;
3880 		pmboxq->vport = phba->pport;
3881 
3882 		/* callback for multi-buffer write mailbox command */
3883 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3884 
3885 		/* context fields to callback function */
3886 		pmboxq->context1 = dd_data;
3887 		dd_data->type = TYPE_MBOX;
3888 		dd_data->context_un.mbox.pmboxq = pmboxq;
3889 		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3890 		dd_data->context_un.mbox.set_job = job;
3891 		job->dd_data = dd_data;
3892 
3893 		/* state change */
3894 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3895 
3896 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3897 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3898 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3899 					"2955 Issued SLI_CONFIG ext-buffer "
3900 					"mailbox command, rc:x%x\n", rc);
3901 			return SLI_CONFIG_HANDLED;
3902 		}
3903 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3904 				"2956 Failed to issue SLI_CONFIG ext-buffer "
3905 				"mailbox command, rc:x%x\n", rc);
3906 		rc = -EPIPE;
3907 		goto job_error;
3908 	}
3909 
3910 	/* wait for additional external buffers */
3911 	job->reply->result = 0;
3912 	job->job_done(job);
3913 	return SLI_CONFIG_HANDLED;
3914 
3915 job_error:
3916 	if (pmboxq)
3917 		mempool_free(pmboxq, phba->mbox_mem_pool);
3918 	kfree(dd_data);
3919 
3920 	return rc;
3921 }
3922 
3923 /**
3924  * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3925  * @phba: Pointer to HBA context object.
3926  * @job: Pointer to the fc_bsg_job for this request.
3927  * @dmabuf: Pointer to a DMA buffer descriptor.
3928  *
3929  * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
3930  * external buffers, covering both 0x9B commands with non-embedded MSEs and
3931  * 0x9B commands with embedded subsystem 0x1 and opcodes with external HBDs.
3932  **/
3933 static int
3934 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3935 			     struct lpfc_dmabuf *dmabuf)
3936 {
3937 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3938 	uint32_t subsys;
3939 	uint32_t opcode;
3940 	int rc = SLI_CONFIG_NOT_HANDLED;
3941 
3942 	/* state change on new multi-buffer pass-through mailbox command */
3943 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3944 
3945 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3946 
3947 	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3948 	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3949 		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3950 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
3951 		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3952 				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
3953 		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3954 			switch (opcode) {
3955 			case FCOE_OPCODE_READ_FCF:
3956 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3957 						"2957 Handled SLI_CONFIG "
3958 						"subsys_fcoe, opcode:x%x\n",
3959 						opcode);
3960 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3961 							nemb_mse, dmabuf);
3962 				break;
3963 			case FCOE_OPCODE_ADD_FCF:
3964 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3965 						"2958 Handled SLI_CONFIG "
3966 						"subsys_fcoe, opcode:x%x\n",
3967 						opcode);
3968 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3969 							nemb_mse, dmabuf);
3970 				break;
3971 			default:
3972 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3973 						"2959 Reject SLI_CONFIG "
3974 						"subsys_fcoe, opcode:x%x\n",
3975 						opcode);
3976 				rc = -EPERM;
3977 				break;
3978 			}
3979 		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
3980 			switch (opcode) {
3981 			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
3982 			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
3983 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3984 						"3106 Handled SLI_CONFIG "
3985 						"subsys_comn, opcode:x%x\n",
3986 						opcode);
3987 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3988 							nemb_mse, dmabuf);
3989 				break;
3990 			default:
3991 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3992 						"3107 Reject SLI_CONFIG "
3993 						"subsys_comn, opcode:x%x\n",
3994 						opcode);
3995 				rc = -EPERM;
3996 				break;
3997 			}
3998 		} else {
3999 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4000 					"2977 Reject SLI_CONFIG "
4001 					"subsys:x%d, opcode:x%x\n",
4002 					subsys, opcode);
4003 			rc = -EPERM;
4004 		}
4005 	} else {
4006 		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4007 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4008 		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4009 				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4010 		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4011 			switch (opcode) {
4012 			case COMN_OPCODE_READ_OBJECT:
4013 			case COMN_OPCODE_READ_OBJECT_LIST:
4014 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4015 						"2960 Handled SLI_CONFIG "
4016 						"subsys_comn, opcode:x%x\n",
4017 						opcode);
4018 				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4019 							nemb_hbd, dmabuf);
4020 				break;
4021 			case COMN_OPCODE_WRITE_OBJECT:
4022 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4023 						"2961 Handled SLI_CONFIG "
4024 						"subsys_comn, opcode:x%x\n",
4025 						opcode);
4026 				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4027 							nemb_hbd, dmabuf);
4028 				break;
4029 			default:
4030 				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4031 						"2962 Not handled SLI_CONFIG "
4032 						"subsys_comn, opcode:x%x\n",
4033 						opcode);
4034 				rc = SLI_CONFIG_NOT_HANDLED;
4035 				break;
4036 			}
4037 		} else {
4038 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4039 					"2978 Not handled SLI_CONFIG "
4040 					"subsys:x%d, opcode:x%x\n",
4041 					subsys, opcode);
4042 			rc = SLI_CONFIG_NOT_HANDLED;
4043 		}
4044 	}
4045 
4046 	/* state reset on not handled new multi-buffer mailbox command */
4047 	if (rc != SLI_CONFIG_HANDLED)
4048 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4049 
4050 	return rc;
4051 }
4052 
4053 /**
4054  * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4055  * @phba: Pointer to HBA context object.
4056  *
4057  * This routine requests the abort of a pass-through mailbox command with
4058  * multiple external buffers due to an error condition.
4059  **/
4060 static void
4061 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4062 {
4063 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4064 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4065 	else
4066 		lpfc_bsg_mbox_ext_session_reset(phba);
4067 	return;
4068 }
4069 
4070 /**
4071  * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4072  * @phba: Pointer to HBA context object.
4073  * @job: Pointer to the fc_bsg_job for this request.
4074  *
4075  * This routine returns the next mailbox read external buffer back to
4076  * user space through BSG.
4077  **/
4078 static int
4079 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
4080 {
4081 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4082 	struct lpfc_dmabuf *dmabuf;
4083 	uint8_t *pbuf;
4084 	uint32_t size;
4085 	uint32_t index;
4086 
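	/* seqNum counts the external buffers already returned to the app;
	 * use its current value as this buffer's index, then advance it
	 */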
4087 	index = phba->mbox_ext_buf_ctx.seqNum;
4088 	phba->mbox_ext_buf_ctx.seqNum++;
4089 
4090 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4091 			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4092 
4093 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4094 		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4095 			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4096 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4097 				"2963 SLI_CONFIG (mse) ext-buffer rd get "
4098 				"buffer[%d], size:%d\n", index, size);
4099 	} else {
4100 		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4101 			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4102 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4103 				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
4104 				"buffer[%d], size:%d\n", index, size);
4105 	}
4106 	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4107 		return -EPIPE;
4108 	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4109 				  struct lpfc_dmabuf, list);
4110 	list_del_init(&dmabuf->list);
4111 
4112 	/* dump the external read buffer before returning it to the app */
4113 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4114 					mbox_rd, dma_ebuf, sta_pos_addr,
4115 					dmabuf, index);
4116 
4117 	pbuf = (uint8_t *)dmabuf->virt;
4118 	job->reply->reply_payload_rcv_len =
4119 		sg_copy_from_buffer(job->reply_payload.sg_list,
4120 				    job->reply_payload.sg_cnt,
4121 				    pbuf, size);
4122 
4123 	lpfc_bsg_dma_page_free(phba, dmabuf);
4124 
4125 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4126 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4127 				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4128 				"command session done\n");
4129 		lpfc_bsg_mbox_ext_session_reset(phba);
4130 	}
4131 
4132 	job->reply->result = 0;
4133 	job->job_done(job);
4134 
4135 	return SLI_CONFIG_HANDLED;
4136 }
4137 
4138 /**
4139  * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4140  * @phba: Pointer to HBA context object.
4141  * @job: Pointer to the fc_bsg_job for this request.
4142  * @dmabuf: Pointer to a DMA buffer descriptor.
4143  *
4144  * This routine sets up the next mailbox write external buffer from user space.
4145  **/
4146 static int
4147 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
4148 			struct lpfc_dmabuf *dmabuf)
4149 {
4150 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4151 	struct bsg_job_data *dd_data = NULL;
4152 	LPFC_MBOXQ_t *pmboxq = NULL;
4153 	MAILBOX_t *pmb;
4154 	enum nemb_type nemb_tp;
4155 	uint8_t *pbuf;
4156 	uint32_t size;
4157 	uint32_t index;
4158 	int rc;
4159 
4160 	index = phba->mbox_ext_buf_ctx.seqNum;
4161 	phba->mbox_ext_buf_ctx.seqNum++;
4162 	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4163 
4164 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4165 			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4166 
4167 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4168 	if (!dd_data) {
4169 		rc = -ENOMEM;
4170 		goto job_error;
4171 	}
4172 
4173 	pbuf = (uint8_t *)dmabuf->virt;
4174 	size = job->request_payload.payload_len;
4175 	sg_copy_to_buffer(job->request_payload.sg_list,
4176 			  job->request_payload.sg_cnt,
4177 			  pbuf, size);
4178 
4179 	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4180 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4181 				"2966 SLI_CONFIG (mse) ext-buffer wr set "
4182 				"buffer[%d], size:%d\n",
4183 				phba->mbox_ext_buf_ctx.seqNum, size);
4184 
4185 	} else {
4186 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4187 				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
4188 				"buffer[%d], size:%d\n",
4189 				phba->mbox_ext_buf_ctx.seqNum, size);
4190 
4191 	}
4192 
4193 	/* set up external buffer descriptor and add to external buffer list */
4194 	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4195 					phba->mbox_ext_buf_ctx.mbx_dmabuf,
4196 					dmabuf);
4197 	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4198 
4199 	/* after write dma buffer */
4200 	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4201 					mbox_wr, dma_ebuf, sta_pos_addr,
4202 					dmabuf, index);
4203 
4204 	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4205 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4206 				"2968 SLI_CONFIG ext-buffer wr all %d "
4207 				"ebuffers received\n",
4208 				phba->mbox_ext_buf_ctx.numBuf);
4209 		/* mailbox command structure for base driver */
4210 		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4211 		if (!pmboxq) {
4212 			rc = -ENOMEM;
4213 			goto job_error;
4214 		}
4215 		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4216 		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4217 		pmb = &pmboxq->u.mb;
4218 		memcpy(pmb, pbuf, sizeof(*pmb));
4219 		pmb->mbxOwner = OWN_HOST;
4220 		pmboxq->vport = phba->pport;
4221 
4222 		/* callback for multi-buffer write mailbox command */
4223 		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4224 
4225 		/* context fields to callback function */
4226 		pmboxq->context1 = dd_data;
4227 		dd_data->type = TYPE_MBOX;
4228 		dd_data->context_un.mbox.pmboxq = pmboxq;
4229 		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4230 		dd_data->context_un.mbox.set_job = job;
4231 		job->dd_data = dd_data;
4232 
4233 		/* state change */
4234 		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4235 
4236 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4237 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4238 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4239 					"2969 Issued SLI_CONFIG ext-buffer "
4240 					"mailbox command, rc:x%x\n", rc);
4241 			return SLI_CONFIG_HANDLED;
4242 		}
4243 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4244 				"2970 Failed to issue SLI_CONFIG ext-buffer "
4245 				"mailbox command, rc:x%x\n", rc);
4246 		rc = -EPIPE;
4247 		goto job_error;
4248 	}
4249 
4250 	/* wait for additional external buffers */
4251 	job->reply->result = 0;
4252 	job->job_done(job);
4253 	return SLI_CONFIG_HANDLED;
4254 
4255 job_error:
4256 	lpfc_bsg_dma_page_free(phba, dmabuf);
4257 	kfree(dd_data);
4258 
4259 	return rc;
4260 }
4261 
4262 /**
4263  * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4264  * @phba: Pointer to HBA context object.
4265  * @job: Pointer to the fc_bsg_job for this request.
4266  * @dmabuf: Pointer to a DMA buffer descriptor.
4267  *
4268  * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4269  * command with multiple non-embedded external buffers.
4270  **/
4271 static int
4272 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
4273 			     struct lpfc_dmabuf *dmabuf)
4274 {
4275 	int rc;
4276 
4277 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4278 			"2971 SLI_CONFIG buffer (type:x%x)\n",
4279 			phba->mbox_ext_buf_ctx.mboxType);
4280 
4281 	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4282 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4283 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4284 					"2972 SLI_CONFIG rd buffer state "
4285 					"mismatch:x%x\n",
4286 					phba->mbox_ext_buf_ctx.state);
4287 			lpfc_bsg_mbox_ext_abort(phba);
4288 			return -EPIPE;
4289 		}
4290 		rc = lpfc_bsg_read_ebuf_get(phba, job);
4291 		if (rc == SLI_CONFIG_HANDLED)
4292 			lpfc_bsg_dma_page_free(phba, dmabuf);
4293 	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4294 		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4295 			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4296 					"2973 SLI_CONFIG wr buffer state "
4297 					"mismatch:x%x\n",
4298 					phba->mbox_ext_buf_ctx.state);
4299 			lpfc_bsg_mbox_ext_abort(phba);
4300 			return -EPIPE;
4301 		}
4302 		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4303 	}
4304 	return rc;
4305 }
4306 
4307 /**
4308  * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4309  * @phba: Pointer to HBA context object.
4310  * @job: Pointer to the fc_bsg_job for this request.
4311  * @dmabuf: Pointer to a DMA buffer descriptor.
4312  *
4313  * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4314  * (0x9B) mailbox commands and external buffers.
4315  **/
4316 static int
4317 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4318 			    struct lpfc_dmabuf *dmabuf)
4319 {
4320 	struct dfc_mbox_req *mbox_req;
4321 	int rc = SLI_CONFIG_NOT_HANDLED;
4322 
4323 	mbox_req =
4324 	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4325 
4326 	/* mbox command with/without single external buffer */
4327 	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4328 		return rc;
4329 
4330 	/* mbox command and first external buffer */
4331 	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4332 		if (mbox_req->extSeqNum == 1) {
4333 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4334 					"2974 SLI_CONFIG mailbox: tag:%d, "
4335 					"seq:%d\n", mbox_req->extMboxTag,
4336 					mbox_req->extSeqNum);
4337 			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4338 			return rc;
4339 		} else
4340 			goto sli_cfg_ext_error;
4341 	}
4342 
4343 	/*
4344 	 * handle additional external buffers
4345 	 */
4346 
4347 	/* check broken pipe conditions */
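	/* (the tag must match this session, and sequence numbers must stay
	 * within the advertised buffer count and arrive strictly in order)
	 */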
4348 	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4349 		goto sli_cfg_ext_error;
4350 	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4351 		goto sli_cfg_ext_error;
4352 	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4353 		goto sli_cfg_ext_error;
4354 
4355 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4356 			"2975 SLI_CONFIG mailbox external buffer: "
4357 			"extSta:x%x, tag:%d, seq:%d\n",
4358 			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4359 			mbox_req->extSeqNum);
4360 	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4361 	return rc;
4362 
4363 sli_cfg_ext_error:
4364 	/* all other cases, broken pipe */
4365 	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4366 			"2976 SLI_CONFIG mailbox broken pipe: "
4367 			"ctxSta:x%x, ctxNumBuf:%d "
4368 			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4369 			phba->mbox_ext_buf_ctx.state,
4370 			phba->mbox_ext_buf_ctx.numBuf,
4371 			phba->mbox_ext_buf_ctx.mbxTag,
4372 			phba->mbox_ext_buf_ctx.seqNum,
4373 			mbox_req->extMboxTag, mbox_req->extSeqNum);
4374 
4375 	lpfc_bsg_mbox_ext_session_reset(phba);
4376 
4377 	return -EPIPE;
4378 }
4379 
4380 /**
4381  * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4382  * @phba: Pointer to HBA context object.
4383  * @job: Pointer to the fc_bsg_job for this request.
4384  * @vport: Pointer to a vport object.
4385  *
4386  * Allocate a tracking object, mailbox command memory, get a mailbox
4387  * from the mailbox pool, copy the caller mailbox command.
4388  *
4389  * If the port is offline or SLI is not active, we need to poll for the
4390  * command (the port is being reset) and complete the job inline; otherwise
4391  * issue the mailbox command and let our completion handler finish it.
4392  **/
4393 static uint32_t
4394 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4395 	struct lpfc_vport *vport)
4396 {
4397 	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4398 	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4399 	/* a 4k buffer to hold the mb and extended data from/to the bsg */
4400 	uint8_t *pmbx = NULL;
4401 	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4402 	struct lpfc_dmabuf *dmabuf = NULL;
4403 	struct dfc_mbox_req *mbox_req;
4404 	struct READ_EVENT_LOG_VAR *rdEventLog;
4405 	uint32_t transmit_length, receive_length, mode;
4406 	struct lpfc_mbx_sli4_config *sli4_config;
4407 	struct lpfc_mbx_nembed_cmd *nembed_sge;
4408 	struct mbox_header *header;
4409 	struct ulp_bde64 *bde;
4410 	uint8_t *ext = NULL;
4411 	int rc = 0;
4412 	uint8_t *from;
4413 	uint32_t size;
4414 
4415 
4416 	/* in case no data is transferred */
4417 	job->reply->reply_payload_rcv_len = 0;
4418 
4419 	/* sanity check to protect driver */
4420 	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4421 	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
4422 		rc = -ERANGE;
4423 		goto job_done;
4424 	}
4425 
4426 	/*
4427 	 * Don't allow mailbox commands to be sent when blocked or when in
4428 	 * the middle of discovery
4429 	 */
4430 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4431 		rc = -EAGAIN;
4432 		goto job_done;
4433 	}
4434 
4435 	mbox_req =
4436 	    (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4437 
4438 	/* check if requested extended data lengths are valid */
4439 	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4440 	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4441 		rc = -ERANGE;
4442 		goto job_done;
4443 	}
4444 
4445 	dmabuf = lpfc_bsg_dma_page_alloc(phba);
4446 	if (!dmabuf || !dmabuf->virt) {
4447 		rc = -ENOMEM;
4448 		goto job_done;
4449 	}
4450 
4451 	/* Get the mailbox command or external buffer from BSG */
4452 	pmbx = (uint8_t *)dmabuf->virt;
4453 	size = job->request_payload.payload_len;
4454 	sg_copy_to_buffer(job->request_payload.sg_list,
4455 			  job->request_payload.sg_cnt, pmbx, size);
4456 
4457 	/* Handle possible SLI_CONFIG with non-embedded payloads */
4458 	if (phba->sli_rev == LPFC_SLI_REV4) {
4459 		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4460 		if (rc == SLI_CONFIG_HANDLED)
4461 			goto job_cont;
4462 		if (rc)
4463 			goto job_done;
4464 		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4465 	}
4466 
4467 	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4468 	if (rc != 0)
4469 		goto job_done; /* must be negative */
4470 
4471 	/* allocate our bsg tracking structure */
4472 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4473 	if (!dd_data) {
4474 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4475 				"2727 Failed allocation of dd_data\n");
4476 		rc = -ENOMEM;
4477 		goto job_done;
4478 	}
4479 
4480 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4481 	if (!pmboxq) {
4482 		rc = -ENOMEM;
4483 		goto job_done;
4484 	}
4485 	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4486 
4487 	pmb = &pmboxq->u.mb;
4488 	memcpy(pmb, pmbx, sizeof(*pmb));
4489 	pmb->mbxOwner = OWN_HOST;
4490 	pmboxq->vport = vport;
4491 
4492 	/* If HBA encountered an error attention, allow only DUMP
4493 	 * or RESTART mailbox commands until the HBA is restarted.
4494 	 */
4495 	if (phba->pport->stopped &&
4496 	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4497 	    pmb->mbxCommand != MBX_RESTART &&
4498 	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4499 	    pmb->mbxCommand != MBX_WRITE_WWN)
4500 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4501 				"2797 mbox: Issued mailbox cmd "
4502 				"0x%x while in stopped state.\n",
4503 				pmb->mbxCommand);
4504 
4505 	/* extended mailbox commands will need an extended buffer */
4506 	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4507 		from = pmbx;
4508 		ext = from + sizeof(MAILBOX_t);
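		/* the extended data area immediately follows the MAILBOX_t
		 * within the same DMA page copied in from the BSG request
		 */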
4509 		pmboxq->context2 = ext;
4510 		pmboxq->in_ext_byte_len =
4511 			mbox_req->inExtWLen * sizeof(uint32_t);
4512 		pmboxq->out_ext_byte_len =
4513 			mbox_req->outExtWLen * sizeof(uint32_t);
4514 		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4515 	}
4516 
4517 	/* BIU diag needs a kernel buffer to transfer the data;
4518 	 * allocate our own buffer and set up the mailbox command
4519 	 * to use it
4520 	 */
4521 	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4522 		transmit_length = pmb->un.varWords[1];
4523 		receive_length = pmb->un.varWords[4];
4524 		/* transmit length cannot be greater than receive length or
4525 		 * mailbox extension size
4526 		 */
4527 		if ((transmit_length > receive_length) ||
4528 			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4529 			rc = -ERANGE;
4530 			goto job_done;
4531 		}
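		/* the transmit data sits right after the MAILBOX_t in our
		 * DMA page; the receive area follows the transmit area
		 */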
4532 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4533 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4534 		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4535 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4536 
4537 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4538 			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4539 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4540 		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4541 			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4542 			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4543 	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4544 		rdEventLog = &pmb->un.varRdEventLog;
4545 		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4546 		mode = bf_get(lpfc_event_log, rdEventLog);
4547 
4548 		/* receive length cannot be greater than mailbox
4549 		 * extension size
4550 		 */
4551 		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4552 			rc = -ERANGE;
4553 			goto job_done;
4554 		}
4555 
4556 		/* mode zero uses a bde like biu diags command */
4557 		if (mode == 0) {
4558 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4559 							+ sizeof(MAILBOX_t));
4560 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4561 							+ sizeof(MAILBOX_t));
4562 		}
4563 	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4564 		/* Let type 4 (well known data) through because the data is
4565 		 * returned in varWords[4-8];
4566 		 * otherwise check the receive length and fetch the buffer addr
4567 		 */
4568 		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4569 			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4570 			/* rebuild the command for sli4 using our own buffers
4571 			 * like we do for biu diags
4572 			 */
4573 			receive_length = pmb->un.varWords[2];
4574 			/* the receive length must be non-zero to set up
4575 			 * the dump buffer address below
4576 			 */
4577 			if (receive_length == 0) {
4578 				rc = -ERANGE;
4579 				goto job_done;
4580 			}
4581 			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4582 						+ sizeof(MAILBOX_t));
4583 			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4584 						+ sizeof(MAILBOX_t));
4585 		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4586 			pmb->un.varUpdateCfg.co) {
4587 			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4588 
4589 			/* bde size cannot be greater than mailbox ext size */
4590 			if (bde->tus.f.bdeSize >
4591 			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4592 				rc = -ERANGE;
4593 				goto job_done;
4594 			}
4595 			bde->addrHigh = putPaddrHigh(dmabuf->phys
4596 						+ sizeof(MAILBOX_t));
4597 			bde->addrLow = putPaddrLow(dmabuf->phys
4598 						+ sizeof(MAILBOX_t));
4599 		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4600 			/* Handling non-embedded SLI_CONFIG mailbox command */
4601 			sli4_config = &pmboxq->u.mqe.un.sli4_config;
4602 			if (!bf_get(lpfc_mbox_hdr_emb,
4603 			    &sli4_config->header.cfg_mhdr)) {
4604 				/* rebuild the command for sli4 using our
4605 				 * own buffers like we do for biu diags
4606 				 */
4607 				header = (struct mbox_header *)
4608 						&pmb->un.varWords[0];
4609 				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4610 						&pmb->un.varWords[0];
4611 				receive_length = nembed_sge->sge[0].length;
4612 
4613 				/* receive length cannot be greater than
4614 				 * mailbox extension size
4615 				 */
4616 				if ((receive_length == 0) ||
4617 				    (receive_length >
4618 				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4619 					rc = -ERANGE;
4620 					goto job_done;
4621 				}
4622 
4623 				nembed_sge->sge[0].pa_hi =
4624 						putPaddrHigh(dmabuf->phys
4625 						   + sizeof(MAILBOX_t));
4626 				nembed_sge->sge[0].pa_lo =
4627 						putPaddrLow(dmabuf->phys
4628 						   + sizeof(MAILBOX_t));
4629 			}
4630 		}
4631 	}
4632 
4633 	dd_data->context_un.mbox.dmabuffers = dmabuf;
4634 
4635 	/* set up the mailbox completion callback */
4636 	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4637 
4638 	/* set up the context field to pass dd_data to the completion handler */
4639 	pmboxq->context1 = dd_data;
4640 	dd_data->type = TYPE_MBOX;
4641 	dd_data->context_un.mbox.pmboxq = pmboxq;
4642 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4643 	dd_data->context_un.mbox.set_job = job;
4644 	dd_data->context_un.mbox.ext = ext;
4645 	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4646 	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4647 	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4648 	job->dd_data = dd_data;
4649 
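	/* port offline or SLI not active: issue the mailbox in polled mode
	 * and complete the job inline rather than from the completion
	 * handler
	 */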
4650 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4651 	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4652 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4653 		if (rc != MBX_SUCCESS) {
4654 			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4655 			goto job_done;
4656 		}
4657 
4658 		/* job finished, copy the data */
4659 		memcpy(pmbx, pmb, sizeof(*pmb));
4660 		job->reply->reply_payload_rcv_len =
4661 			sg_copy_from_buffer(job->reply_payload.sg_list,
4662 					    job->reply_payload.sg_cnt,
4663 					    pmbx, size);
4664 		/* not waiting; the mailbox is already done */
4665 		rc = 0;
4666 		goto job_done;
4667 	}
4668 
4669 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4670 	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4671 		return 1; /* job started */
4672 
4673 job_done:
4674 	/* common exit for error or job completed inline */
4675 	if (pmboxq)
4676 		mempool_free(pmboxq, phba->mbox_mem_pool);
4677 	lpfc_bsg_dma_page_free(phba, dmabuf);
4678 	kfree(dd_data);
4679 
4680 job_cont:
4681 	return rc;
4682 }
4683 
4684 /**
4685  * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4686  * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4687  **/
4688 static int
4689 lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4690 {
4691 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4692 	struct lpfc_hba *phba = vport->phba;
4693 	struct dfc_mbox_req *mbox_req;
4694 	int rc = 0;
4695 
4696 	/* mix-and-match backward compatibility */
4697 	job->reply->reply_payload_rcv_len = 0;
4698 	if (job->request_len <
4699 	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4700 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4701 				"2737 Mix-and-match backward compatibility "
4702 				"between MBOX_REQ old size:%d and "
4703 				"new request size:%d\n",
4704 				(int)(job->request_len -
4705 				      sizeof(struct fc_bsg_request)),
4706 				(int)sizeof(struct dfc_mbox_req));
4707 		mbox_req = (struct dfc_mbox_req *)
4708 				job->request->rqst_data.h_vendor.vendor_cmd;
4709 		mbox_req->extMboxTag = 0;
4710 		mbox_req->extSeqNum = 0;
4711 	}
4712 
4713 	rc = lpfc_bsg_issue_mbox(phba, job, vport);
4714 
4715 	if (rc == 0) {
4716 		/* job done */
4717 		job->reply->result = 0;
4718 		job->dd_data = NULL;
4719 		job->job_done(job);
4720 	} else if (rc == 1)
4721 		/* job submitted, will complete later */
4722 		rc = 0; /* return zero, no error */
4723 	else {
4724 		/* some error occurred */
4725 		job->reply->result = rc;
4726 		job->dd_data = NULL;
4727 	}
4728 
4729 	return rc;
4730 }
4731 
4732 /**
4733  * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4734  * @phba: Pointer to HBA context object.
4735  * @cmdiocbq: Pointer to command iocb.
4736  * @rspiocbq: Pointer to response iocb.
4737  *
4738  * This function is the completion handler for iocbs issued using
4739  * lpfc_menlo_cmd function. This function is called by the
4740  * ring event handler function without any lock held. This function
4741  * can be called from both worker thread context and interrupt
4742  * context. This function also can be called from another thread which
4743  * cleans up the SLI layer objects.
4744  * This function copies the response iocb into the response iocb memory
4745  * object saved in the job tracking structure, unmaps the DMA buffers,
4746  * translates the completion status for user space, and then completes
4747  * the bsg job back to user space.
4748  **/
4749 static void
4750 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4751 			struct lpfc_iocbq *cmdiocbq,
4752 			struct lpfc_iocbq *rspiocbq)
4753 {
4754 	struct bsg_job_data *dd_data;
4755 	struct fc_bsg_job *job;
4756 	IOCB_t *rsp;
4757 	struct lpfc_dmabuf *bmp;
4758 	struct lpfc_bsg_menlo *menlo;
4759 	unsigned long flags;
4760 	struct menlo_response *menlo_resp;
4761 	int rc = 0;
4762 
4763 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
4764 	dd_data = cmdiocbq->context1;
4765 	if (!dd_data) {
4766 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4767 		return;
4768 	}
4769 
4770 	menlo = &dd_data->context_un.menlo;
4771 	job = menlo->set_job;
4772 	job->dd_data = NULL; /* so timeout handler does not reply */
4773 
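	/* mark the command complete and snapshot the response iocb under
	 * hbalock so we do not race with SLI-layer cleanup of the iocb
	 */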
4774 	spin_lock(&phba->hbalock);
4775 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
4776 	if (cmdiocbq->context2 && rspiocbq)
4777 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
4778 		       &rspiocbq->iocb, sizeof(IOCB_t));
4779 	spin_unlock(&phba->hbalock);
4780 
4781 	bmp = menlo->bmp;
4782 	rspiocbq = menlo->rspiocbq;
4783 	rsp = &rspiocbq->iocb;
4784 
4785 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
4786 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
4787 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
4788 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4789 
4790 	/* always return the xri; this would be used in the case
4791 	 * of a menlo download to allow the data to be sent as a continuation
4792 	 * of the exchange.
4793 	 */
4794 	menlo_resp = (struct menlo_response *)
4795 		job->reply->reply_data.vendor_reply.vendor_rsp;
4796 	menlo_resp->xri = rsp->ulpContext;
4797 	if (rsp->ulpStatus) {
4798 		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4799 			switch (rsp->un.ulpWord[4] & 0xff) {
4800 			case IOERR_SEQUENCE_TIMEOUT:
4801 				rc = -ETIMEDOUT;
4802 				break;
4803 			case IOERR_INVALID_RPI:
4804 				rc = -EFAULT;
4805 				break;
4806 			default:
4807 				rc = -EACCES;
4808 				break;
4809 			}
4810 		} else
4811 			rc = -EACCES;
4812 	} else
4813 		job->reply->reply_payload_rcv_len =
4814 			rsp->un.genreq64.bdl.bdeSize;
4815 
4816 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4817 	lpfc_sli_release_iocbq(phba, rspiocbq);
4818 	lpfc_sli_release_iocbq(phba, cmdiocbq);
4819 	kfree(bmp);
4820 	kfree(dd_data);
4821 	/* make error code available to userspace */
4822 	job->reply->result = rc;
4823 	/* complete the job back to userspace */
4824 	job->job_done(job);
4825 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4826 	return;
4827 }
4828 
4829 /**
4830  * lpfc_menlo_cmd - send an ioctl for menlo hardware
4831  * @job: fc_bsg_job to handle
4832  *
4833  * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
4834  * and all command completions will return the xri for the command.
4835  * For menlo data requests a gen request 64 CX is used to continue the exchange
4836  * supplied in the menlo request header xri field.
4837  **/
4838 static int
4839 lpfc_menlo_cmd(struct fc_bsg_job *job)
4840 {
4841 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4842 	struct lpfc_hba *phba = vport->phba;
4843 	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
4844 	IOCB_t *cmd, *rsp;
4845 	int rc = 0;
4846 	struct menlo_command *menlo_cmd;
4847 	struct menlo_response *menlo_resp;
4848 	struct lpfc_dmabuf *bmp = NULL;
4849 	int request_nseg;
4850 	int reply_nseg;
4851 	struct scatterlist *sgel = NULL;
4852 	int numbde;
4853 	dma_addr_t busaddr;
4854 	struct bsg_job_data *dd_data;
4855 	struct ulp_bde64 *bpl = NULL;
4856 
4857 	/* in case no data is returned, just return the return code */
4858 	job->reply->reply_payload_rcv_len = 0;
4859 
4860 	if (job->request_len <
4861 	    sizeof(struct fc_bsg_request) +
4862 		sizeof(struct menlo_command)) {
4863 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4864 				"2784 Received MENLO_CMD request below "
4865 				"minimum size\n");
4866 		rc = -ERANGE;
4867 		goto no_dd_data;
4868 	}
4869 
4870 	if (job->reply_len <
4871 	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
4872 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4873 				"2785 Received MENLO_CMD reply below "
4874 				"minimum size\n");
4875 		rc = -ERANGE;
4876 		goto no_dd_data;
4877 	}
4878 
4879 	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
4880 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4881 				"2786 Adapter does not support menlo "
4882 				"commands\n");
4883 		rc = -EPERM;
4884 		goto no_dd_data;
4885 	}
4886 
4887 	menlo_cmd = (struct menlo_command *)
4888 		job->request->rqst_data.h_vendor.vendor_cmd;
4889 
4890 	menlo_resp = (struct menlo_response *)
4891 		job->reply->reply_data.vendor_reply.vendor_rsp;
4892 
4893 	/* allocate our bsg tracking structure */
4894 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4895 	if (!dd_data) {
4896 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4897 				"2787 Failed allocation of dd_data\n");
4898 		rc = -ENOMEM;
4899 		goto no_dd_data;
4900 	}
4901 
4902 	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4903 	if (!bmp) {
4904 		rc = -ENOMEM;
4905 		goto free_dd;
4906 	}
4907 
4908 	cmdiocbq = lpfc_sli_get_iocbq(phba);
4909 	if (!cmdiocbq) {
4910 		rc = -ENOMEM;
4911 		goto free_bmp;
4912 	}
4913 
4914 	rspiocbq = lpfc_sli_get_iocbq(phba);
4915 	if (!rspiocbq) {
4916 		rc = -ENOMEM;
4917 		goto free_cmdiocbq;
4918 	}
4919 
4920 	rsp = &rspiocbq->iocb;
4921 
4922 	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
4923 	if (!bmp->virt) {
4924 		rc = -ENOMEM;
4925 		goto free_rspiocbq;
4926 	}
4927 
4928 	INIT_LIST_HEAD(&bmp->list);
4929 	bpl = (struct ulp_bde64 *) bmp->virt;
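	/* build the buffer pointer list: BDEs for the request payload first,
	 * then BDEs for the reply payload; the single BLP_64 BDE set up in
	 * the command below describes this whole list
	 */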
4930 	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
4931 				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
4932 	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
4933 		busaddr = sg_dma_address(sgel);
4934 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
4935 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
4936 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
4937 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4938 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4939 		bpl++;
4940 	}
4941 
4942 	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
4943 				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4944 	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
4945 		busaddr = sg_dma_address(sgel);
4946 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
4947 		bpl->tus.f.bdeSize = sg_dma_len(sgel);
4948 		bpl->tus.w = cpu_to_le32(bpl->tus.w);
4949 		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4950 		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4951 		bpl++;
4952 	}
4953 
4954 	cmd = &cmdiocbq->iocb;
4955 	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
4956 	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
4957 	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
4958 	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
4959 	cmd->un.genreq64.bdl.bdeSize =
4960 	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
4961 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
4962 	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
4963 	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
4964 	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
4965 	cmd->ulpBdeCount = 1;
4966 	cmd->ulpClass = CLASS3;
4967 	cmd->ulpOwner = OWN_CHIP;
4968 	cmd->ulpLe = 1; /* LE bit: last element of the BDL */
4969 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
4970 	cmdiocbq->vport = phba->pport;
4971 	/* We want the firmware to timeout before we do */
4972 	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
4973 	cmdiocbq->context3 = bmp;
4974 	cmdiocbq->context2 = rspiocbq;
4975 	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
4976 	cmdiocbq->context1 = dd_data;
4978 	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
4979 		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
4980 		cmd->ulpPU = MENLO_PU; /* 3 */
4981 		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
4982 		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
4983 	} else {
4984 		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
4985 		cmd->ulpPU = 1;
4986 		cmd->un.ulpWord[4] = 0;
4987 		cmd->ulpContext = menlo_cmd->xri;
4988 	}
4989 
4990 	dd_data->type = TYPE_MENLO;
4991 	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
4992 	dd_data->context_un.menlo.rspiocbq = rspiocbq;
4993 	dd_data->context_un.menlo.set_job = job;
4994 	dd_data->context_un.menlo.bmp = bmp;
4995 
4996 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
4997 		MENLO_TIMEOUT - 5);
4998 	if (rc == IOCB_SUCCESS)
4999 		return 0; /* done for now */
5000 
5001 	/* iocb failed so cleanup */
5002 	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
5003 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
5004 	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
5005 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
5006 
5007 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5008 
5009 free_rspiocbq:
5010 	lpfc_sli_release_iocbq(phba, rspiocbq);
5011 free_cmdiocbq:
5012 	lpfc_sli_release_iocbq(phba, cmdiocbq);
5013 free_bmp:
5014 	kfree(bmp);
5015 free_dd:
5016 	kfree(dd_data);
5017 no_dd_data:
5018 	/* make error code available to userspace */
5019 	job->reply->result = rc;
5020 	job->dd_data = NULL;
5021 	return rc;
5022 }
5023 
5024 /**
5025  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5026  * @job: fc_bsg_job to handle
5027  **/
5028 static int
5029 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
5030 {
5031 	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
5032 	int rc;
5033 
5034 	switch (command) {
5035 	case LPFC_BSG_VENDOR_SET_CT_EVENT:
5036 		rc = lpfc_bsg_hba_set_event(job);
5037 		break;
5038 	case LPFC_BSG_VENDOR_GET_CT_EVENT:
5039 		rc = lpfc_bsg_hba_get_event(job);
5040 		break;
5041 	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5042 		rc = lpfc_bsg_send_mgmt_rsp(job);
5043 		break;
5044 	case LPFC_BSG_VENDOR_DIAG_MODE:
5045 		rc = lpfc_bsg_diag_loopback_mode(job);
5046 		break;
5047 	case LPFC_BSG_VENDOR_DIAG_MODE_END:
5048 		rc = lpfc_sli4_bsg_diag_mode_end(job);
5049 		break;
5050 	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5051 		rc = lpfc_bsg_diag_loopback_run(job);
5052 		break;
5053 	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5054 		rc = lpfc_sli4_bsg_link_diag_test(job);
5055 		break;
5056 	case LPFC_BSG_VENDOR_GET_MGMT_REV:
5057 		rc = lpfc_bsg_get_dfc_rev(job);
5058 		break;
5059 	case LPFC_BSG_VENDOR_MBOX:
5060 		rc = lpfc_bsg_mbox_cmd(job);
5061 		break;
5062 	case LPFC_BSG_VENDOR_MENLO_CMD:
5063 	case LPFC_BSG_VENDOR_MENLO_DATA:
5064 		rc = lpfc_menlo_cmd(job);
5065 		break;
5066 	default:
5067 		rc = -EINVAL;
5068 		job->reply->reply_payload_rcv_len = 0;
5069 		/* make error code available to userspace */
5070 		job->reply->result = rc;
5071 		break;
5072 	}
5073 
5074 	return rc;
5075 }
5076 
5077 /**
5078  * lpfc_bsg_request - handle a bsg request from the FC transport
5079  * @job: fc_bsg_job to handle
5080  **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

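/*
 * Usage sketch (illustrative only, not part of the driver): userspace
 * typically reaches lpfc_bsg_request() by opening the fc_host's bsg node
 * and pushing a struct sg_io_v4 through the SG_IO ioctl, using the
 * definitions from <linux/bsg.h> and <scsi/scsi_bsg_fc.h>.  The node path
 * and the choice of vendor command below are assumptions made for the
 * example, not requirements of this file:
 *
 *	// room for one vendor command word past the request header,
 *	// since vendor_cmd[] is a flexible array member
 *	char rqst[sizeof(struct fc_bsg_request) + sizeof(__u32)] = { 0 };
 *	// extra room after the reply header for vendor-unique reply words
 *	char rsp[sizeof(struct fc_bsg_reply) + 64] = { 0 };
 *	struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
 *	struct fc_bsg_reply *reply = (struct fc_bsg_reply *)rsp;
 *	struct sg_io_v4 io = { 0 };
 *	int fd = open("/dev/bsg/fc_host0", O_RDWR);	// assumed node name
 *
 *	req->msgcode = FC_BSG_HST_VENDOR;
 *	req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;
 *
 *	io.guard = 'Q';
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request = (__u64)(uintptr_t)rqst;
 *	io.request_len = sizeof(rqst);
 *	io.response = (__u64)(uintptr_t)rsp;
 *	io.max_response_len = sizeof(rsp);
 *	ioctl(fd, SG_IO, &io);	// reply->result carries the rc set above
 */
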
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return
 * to the waiting function, which handles passing the error back to
 * userspace.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

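	/* each job type holds different resources; abort or detach by type */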
	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN, which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		/* the mbox completion handler can now be run */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
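		/* mark any in-flight port-owned extended mailbox buffer
		 * operation as aborted
		 */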
		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		break;
	case TYPE_MENLO:
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
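		/* unknown job type: nothing to abort, just drop the lock */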
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* The scsi transport's fc_bsg_job_timeout expects a zero return
	 * code; a non-zero return triggers an error message on the
	 * console, so always return success (zero).
	 */
	return 0;
}