/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
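/*
 * These handlers are reached from userspace through the FC BSG node of
 * the host (or rport), e.g. /dev/bsg/fc_host0.  A minimal sketch of a
 * caller, assuming the sg v4 and fc bsg headers are available; the
 * device path, the chosen QL_VND_* opcode and the vendor id line are
 * illustrative only and not checked by this driver:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/bsg.h>
 *	#include <scsi/sg.h>
 *	#include <scsi/scsi_bsg_fc.h>
 *
 *	struct fc_bsg_request req = { 0 };
 *	struct fc_bsg_reply rep;
 *	struct sg_io_v4 io = { 0 };
 *	int fd = open("/dev/bsg/fc_host0", O_RDWR);
 *
 *	req.msgcode = FC_BSG_HST_VENDOR;
 *	req.rqst_data.h_vendor.vendor_id = 0x1077;	// QLogic PCI vendor id
 *	req.rqst_data.h_vendor.vendor_cmd[0] = QL_VND_READ_FLASH;
 *	io.guard = 'Q';
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request = (__u64)(unsigned long)&req;
 *	io.request_len = sizeof(req);
 *	io.response = (__u64)(unsigned long)&rep;
 *	io.max_response_len = sizeof(rep);
 *	// din/dout buffers would be wired up here as needed.
 *	ioctl(fd, SG_IO, &io);
 */

/*
 * Allocate an srb from the hardware mempool together with a zeroed
 * srb_ctx of @size bytes.  Returns NULL if either allocation fails;
 * on success the caller owns both sp and sp->ctx and must release
 * them with kfree()/mempool_free().
 */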
inline srb_t *
qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx;

	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
	if (!sp)
		goto done;
	ctx = kzalloc(size, GFP_KERNEL);
	if (!ctx) {
		mempool_free(sp, ha->srb_mempool);
		sp = NULL;
		goto done;
	}

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->ctx = ctx;
done:
	return sp;
}

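/*
 * Validate an FCP priority configuration image read from flash.  An
 * all-ones first word means no configuration is present, and a valid
 * image must begin with the 'HQOS' signature.  When @flag is 1 the
 * entries are also scanned and at least one must carry a valid tag.
 * Returns 1 if the configuration is usable, 0 otherwise.
 */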
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
			bcode[3] != 'S') {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

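/*
 * Handle the QL_VND_FCP_PRIO_CFG_CMD vendor request.  The sub-command
 * in vendor_cmd[1] selects enable/disable of FCP priority handling or
 * get/set of the configuration image; a set is validated with
 * qla24xx_fcp_prio_cfg_valid() before it is applied to all ports.
 */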
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	bsg_job->reply->reply_payload_rcv_len = 0;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ret = -EBUSY;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the fcp_prio_cfg
			 * data is of no use; free it.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	bsg_job->job_done(bsg_job);
	return ret;
}

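/*
 * Pass an ELS frame through to the fabric.  For FC_BSG_RPT_ELS the
 * destination comes from the rport (a fabric login is attempted if the
 * port is not logged in); for FC_BSG_HST_ELS_NOLOGIN a temporary
 * fcport is built from the D_ID supplied in the request.
 */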
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;
	struct srb_ctx *els;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
		bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since the functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For host
		 * based ELS commands there is no fcport structure allocated.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->vp_idx = vha->vp_idx;
		fcport->d_id.b.al_pa =
			bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		/* The reply list was never mapped; undo only the
		 * request mapping before bailing out.
		 */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	els = sp->ctx;
	els->type =
		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
		SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	els->name =
		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
		"bsg_els_rpt" : "bsg_els_hst");
	els->u.bsg_job = bsg_job;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%02x%02x%02x.\n", type,
	    bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done_free_fcport:
	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
		kfree(fcport);
done:
	return rval;
}

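/*
 * Pass a CT (common transport) frame to the fabric on behalf of
 * userspace.  The GS type in the preamble selects the destination
 * (name server or management server); a temporary fcport carries the
 * addressing for the IOCB.
 */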
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
	srb_t *sp;
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";
	struct srb_ctx *ct;

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		/* The reply list was never mapped; undo only the
		 * request mapping before bailing out.
		 */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based CT commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->vp_idx = vha->vp_idx;
	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_ctx_bsg_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	ct = sp->ctx;
	ct->type = SRB_CT_CMD;
	ct->name = "bsg_ct";
	ct->u.bsg_job = bsg_job;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Set the port configuration to enable the
 * internal loopback on ISP81XX
 */
static inline int
qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config)
{
	int ret = 0;
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		goto done_set_internal;

	new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "State change notification not received.\n");
	} else
		ql_dbg(ql_dbg_user, vha, 0x7023,
		    "State change received.\n");

	ha->notify_dcbx_comp = 0;

done_set_internal:
	return rval;
}

/* Set the port configuration to disable the
 * internal loopback on ISP81XX
 */
static inline int
qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
    int wait)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
			ENABLE_INTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(20 * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "State change notification not received.\n");
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "State change received.\n");

		ha->notify_dcbx_comp = 0;
	}
done_reset_internal:
	return rval;
}

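/*
 * Run a loopback or echo diagnostic requested through QL_VND_LOOPBACK.
 * The request payload is copied into a DMA buffer and either echoed
 * off the fabric (external loopback on a fabric topology) or looped
 * back; on ISP81XX the port configuration is flipped around the test
 * and restored afterwards.  The mailbox status registers and the
 * command used are appended to the BSG sense buffer for userspace.
 */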
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
		return -EBUSY;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
		&req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
		&rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	if ((ha->current_topology == ISP_CFG_F ||
	    (IS_QLA81XX(ha) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
		elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));
			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				bsg_job->reply->reply_payload_rcv_len = 0;
				bsg_job->reply->result = (DID_ERROR << 16);
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if (elreq.options != EXTERNAL_LOOPBACK) {
				ql_dbg(ql_dbg_user, vha, 0x7020,
				    "Internal: current port config = %x\n",
				    config[0]);
				if (qla81xx_set_internal_loopback(vha, config,
					new_config)) {
					ql_log(ql_log_warn, vha, 0x7024,
					    "Internal loopback failed.\n");
					bsg_job->reply->reply_payload_rcv_len =
						0;
					bsg_job->reply->result =
						(DID_ERROR << 16);
					rval = -EPERM;
					goto done_free_dma_rsp;
				}
			} else {
				/* For external loopback to work
				 * ensure internal loopback is disabled
				 */
				if (qla81xx_reset_internal_loopback(vha,
					config, 1)) {
					bsg_job->reply->reply_payload_rcv_len =
						0;
					bsg_job->reply->result =
						(DID_ERROR << 16);
					rval = -EPERM;
					goto done_free_dma_rsp;
				}
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (new_config[0]) {
				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				qla81xx_reset_internal_loopback(vha,
				    new_config, 0);
			}

			if (response[0] == MBS_COMMAND_ERROR &&
					response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (qla81xx_restart_mpi_firmware(vha) !=
				    QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x702a,
					    "MPI reset failed.\n");
				}

				bsg_job->reply->reply_payload_rcv_len = 0;
				bsg_job->reply->result = (DID_ERROR << 16);
				rval = -EIO;
				goto done_free_dma_rsp;
			}
		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
		    sizeof(struct fc_bsg_reply);

		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		rval = 0;
		bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);

		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			sizeof(response) + sizeof(uint8_t);
		bsg_job->reply->reply_payload_rcv_len =
			bsg_job->reply_payload.payload_len;
		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		bsg_job->reply->result = DID_OK;
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, rsp_data,
			rsp_data_len);
	}
	bsg_job->job_done(bsg_job);

	/* Error paths taken after rsp_data was allocated land here so
	 * the response buffer is not leaked.
	 */
done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
		req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	return rval;
}

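/*
 * QL_VND_A84_RESET: reset the ISP84xx chip, bringing up the diagnostic
 * firmware when A84_ISSUE_RESET_DIAG_FW is requested.
 */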
static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
		return -EBUSY;
	}

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	return rval;
}

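/*
 * QL_VND_A84_UPDATE_FW: copy a firmware image for the ISP84xx into a
 * DMA buffer and hand it to the chip with a VERIFY CHIP IOCB; the
 * A84_ISSUE_UPDATE_DIAGFW_CMD flag selects the diagnostic image.
 */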
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	/* The firmware version lives in the third dword of the image */
	fw_ver = le32_to_cpu(*((uint32_t *)fw_buf + 2));

	memset(mn, 0, sizeof(*mn));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	return rval;
}

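/*
 * QL_VND_A84_MGMT_CMD: read/write ISP84xx memory, query chip info or
 * change a config parameter through an ACCESS CHIP IOCB.  The
 * qla_bsg_a84_mgmt header follows the fc_bsg_request in the BSG
 * request buffer.
 */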
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0x7039,
		    "Abort active or needed.\n");
		return -EBUSY;
	}

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
		sizeof(struct fc_bsg_request));
	if (!ql84_mgmt) {
		ql_log(ql_log_warn, vha, 0x703b,
		    "MGMT header not provided, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

	bsg_job->job_done(bsg_job);

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	return rval;
}

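/*
 * QL_VND_IIDMA: get or set the iiDMA speed of a logged-in target port
 * identified by the WWPN in the qla_port_param header that follows the
 * fc_bsg_request; on a get, the result is copied back after the reply.
 */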
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	bsg_job->reply->reply_payload_rcv_len = 0;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
		return -EBUSY;
	}

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (struct qla_port_param *)((char *)bsg_job->request +
		sizeof(struct fc_bsg_request));
	if (!port_param) {
		ql_log(ql_log_warn, vha, 0x7047,
		    "port_param header not provided.\n");
		return -EINVAL;
	}

	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	/* list_for_each_entry() never leaves the cursor NULL, so track
	 * a successful match explicitly.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
			fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in, flags = 0x%x.\n",
		    fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
			port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
			&port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
		    "%04x %x %04x %04x.\n", fcport->port_name[0],
		    fcport->port_name[1], fcport->port_name[2],
		    fcport->port_name[3], fcport->port_name[4],
		    fcport->port_name[5], fcport->port_name[6],
		    fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
				sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_job->reply) +
				sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
				sizeof(struct qla_port_param));
		}

		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	return rval;
}

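/*
 * Common setup for the flash read/update vendor commands: validate the
 * requested start offset against the option ROM layout, size the
 * region, set the optrom state and allocate the staging buffer.
 */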
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	bsg_job->reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_QLA8XXX_TYPE(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Unable to allocate memory for optrom region "
		    "(%x).\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
	return 0;
}

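/*
 * QL_VND_READ_FLASH: read the selected option ROM region into the
 * staging buffer and return it in the reply payload.
 */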
static int
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval)
		return rval;

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	bsg_job->job_done(bsg_job);
	return rval;
}

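/*
 * QL_VND_UPDATE_FLASH: copy the request payload into the staging
 * buffer and write it to the selected option ROM region.
 */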
static int
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval)
		return rval;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	bsg_job->job_done(bsg_job);
	return rval;
}

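/* Dispatch an FC_BSG_HST_VENDOR request on the QL_VND_* opcode held in
 * vendor_cmd[0] of the vendor request.
 */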
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	default:
		bsg_job->reply->result = (DID_ERROR << 16);
		bsg_job->job_done(bsg_job);
		return -ENOSYS;
	}
}

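/* Entry point from the FC transport for all BSG requests directed at
 * a qla2xxx host or one of its remote ports.
 */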
int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
	int ret = -EINVAL;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);

	switch (bsg_job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

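/* Called by the FC transport when a BSG request times out: find the
 * matching srb on the outstanding-command lists and abort it.
 */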
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;
	struct srb_ctx *sp_bsg;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				sp_bsg = sp->ctx;

				/* match CT and both flavours of ELS
				 * pass-through jobs
				 */
				if (((sp_bsg->type == SRB_CT_CMD) ||
					(sp_bsg->type == SRB_ELS_CMD_RPT) ||
					(sp_bsg->type == SRB_ELS_CMD_HST))
					&& (sp_bsg->u.bsg_job == bsg_job)) {
					spin_unlock_irqrestore(
					    &ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(
					    &ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}