// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

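/*
 * fc_port structures allocated for BSG pass-through are freed from the
 * host workqueue (see qla2x00_bsg_sp_free()) so teardown happens in
 * process context after the SRB has completed.
 */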
static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct completion *comp = sp->comp;

	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
	    __func__, sp->handle, res, bsg_job);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	if (comp)
		complete(comp);
}

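/*
 * Release DMA resources tied to a completed BSG SRB.  FX00 ISP commands
 * and remapped ELS exchanges need different unmap paths; for CT,
 * FX00 and host-based ELS commands the dummy fc_port allocated at
 * submit time is freed via the workqueue above.
 */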
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {

		if (sp->remap.remapped) {
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
				bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

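/*
 * Sanity-check FCP priority configuration read from (or destined for)
 * flash: all-ones means no data, the header must read "HQOS", and with
 * flag == 1 at least one entry must carry a valid tag.
 */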
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

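/*
 * BSG vendor command dispatcher for FCP priority configuration: the
 * sub-command in vendor_cmd[1] selects enable/disable or a get/set of
 * the config image, which is cached in ha->fcp_prio_cfg.
 */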
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the cached
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return ret;
}

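/*
 * ELS pass-through.  Rport-directed ELS reuses the session's fc_port;
 * host-based ELS (FC_BSG_HST_ELS_NOLOGIN) fabricates a temporary one
 * from the destination port id before building the IOCB.
 */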
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint32_t els_cmd = 0;
	int qla_port_allocated = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport) {
			rval = -ENOMEM;
			goto done;
		}
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
		bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -ENOBUFS;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (atomic_read(&fcport->state) != FCS_ONLINE) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Port %06X is not online for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since the functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For
		 * host-based ELS commands no fcport structure has been
		 * allocated beforehand.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		qla_port_allocated = 1;
		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
			bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (qla_port_allocated)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

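/*
 * A CT pass-through command IOCB carries two data segment descriptors;
 * each continuation IOCB carries five more, so for dsds segments this
 * returns 1 + ceil((dsds - 2) / 5).
 */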
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

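/*
 * CT pass-through.  The destination loop id comes from the CT preamble
 * (0xFC selects the SNS server, 0xFA the management server) and a
 * temporary fc_port carries it into the IOCB build.
 */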
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host-based CT commands no fcport
	 * structure has been allocated beforehand.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

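/*
 * Loopback helpers for 81xx/83xx-class adapters: the loopback mode is
 * carried in config[0] (see INTERNAL_LOOPBACK_MASK) while words 1-3 of
 * the four-word port configuration pass through unchanged.  The wait
 * flags select whether to block on the DCBX-complete and loopback
 * port-up completions after the new configuration is pushed.
 */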
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

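/*
 * Diagnostic echo/loopback.  The payload is staged through coherent
 * DMA buffers; an ECHO mailbox command is used when the topology
 * allows it, otherwise the adapter is flipped into loopback mode for
 * the test and restored (or the ISP aborted) afterwards.
 */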
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
		&req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
		&rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
					response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, rsp_data,
			rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
			sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
		req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

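/*
 * ISP84xx vendor commands: chip reset, firmware update via a VERIFY
 * CHIP IOCB, and management access (memory read/write, info query,
 * config change) via an ACCESS CHIP IOCB.
 */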
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

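/*
 * Stage a firmware image from the BSG request payload into a coherent
 * buffer and hand it to the ISP84xx via a VERIFY CHIP IOCB; the
 * firmware version is read from dword 2 of the image.
 */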
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

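/*
 * ACCESS CHIP IOCB for ISP84xx management: the sub-command embedded in
 * the BSG request selects memory dump/load, info query, or a config
 * parameter change; data moves through a coherent bounce buffer in the
 * direction implied by the sub-command.
 */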
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

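/*
 * Get or set the iiDMA (firmware-negotiated port speed) for a logged-in
 * target, looked up by WWPN from the request payload; on a get, the
 * current speed is copied back after the fc_bsg_reply.
 */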
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
			fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
			port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
			&port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
				sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
				sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
				sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

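/*
 * Option ROM access.  qla2x00_optrom_setup() validates the flash region
 * against the request, sets ha->optrom_state, and allocates the staging
 * buffer used by the read/update handlers below; callers hold
 * ha->optrom_mutex around the whole sequence.
 */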
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}

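/*
 * The FRU/status/i2c vendor commands below all follow the same shape:
 * bounce the caller's buffer through an s_dma_pool allocation, issue a
 * read/write SFP mailbox command, and report an EXT_STATUS_* code in
 * vendor_rsp[0] of the reply.
 */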
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
		i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

1801 static int
1802 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1803 {
1804 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1805 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1806 	scsi_qla_host_t *vha = shost_priv(host);
1807 	struct qla_hw_data *ha = vha->hw;
1808 	uint32_t rval = EXT_STATUS_OK;
1809 	uint16_t req_sg_cnt = 0;
1810 	uint16_t rsp_sg_cnt = 0;
1811 	uint16_t nextlid = 0;
1812 	uint32_t tot_dsds;
1813 	srb_t *sp = NULL;
1814 	uint32_t req_data_len;
1815 	uint32_t rsp_data_len;
1816 
1817 	/* Check the type of the adapter */
1818 	if (!IS_BIDI_CAPABLE(ha)) {
1819 		ql_log(ql_log_warn, vha, 0x70a0,
1820 			"This adapter is not supported\n");
1821 		rval = EXT_STATUS_NOT_SUPPORTED;
1822 		goto done;
1823 	}
1824 
1825 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1826 		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1827 		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1828 		rval = EXT_STATUS_BUSY;
1829 		goto done;
1830 	}
1831 
1832 	/* Check if host is online */
1833 	if (!vha->flags.online) {
1834 		ql_log(ql_log_warn, vha, 0x70a1,
1835 			"Host is not online\n");
1836 		rval = EXT_STATUS_DEVICE_OFFLINE;
1837 		goto done;
1838 	}
1839 
1840 	/* Check if cable is plugged in or not */
1841 	if (vha->device_flags & DFLG_NO_CABLE) {
1842 		ql_log(ql_log_warn, vha, 0x70a2,
1843 			"Cable is unplugged...\n");
1844 		rval = EXT_STATUS_INVALID_CFG;
1845 		goto done;
1846 	}
1847 
1848 	/* Check if the switch is connected or not */
1849 	if (ha->current_topology != ISP_CFG_F) {
1850 		ql_log(ql_log_warn, vha, 0x70a3,
1851 			"Host is not connected to the switch\n");
1852 		rval = EXT_STATUS_INVALID_CFG;
1853 		goto done;
1854 	}
1855 
1856 	/* Check if operating mode is P2P */
1857 	if (ha->operating_mode != P2P) {
1858 		ql_log(ql_log_warn, vha, 0x70a4,
1859 		    "Host operating mode is not P2p\n");
1860 		rval = EXT_STATUS_INVALID_CFG;
1861 		goto done;
1862 	}
1863 
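	/*
	 * The bidirectional IOCB is addressed to the port itself, so a
	 * one-time fabric login to our own port ID is performed and the
	 * resulting loop id is cached in self_login_loop_id for
	 * bidir_fcport to reuse.
	 */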
1864 	mutex_lock(&ha->selflogin_lock);
1865 	if (vha->self_login_loop_id == 0) {
1866 		/* Initialize all required fields of fcport */
1867 		vha->bidir_fcport.vha = vha;
1868 		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1869 		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1870 		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1871 		vha->bidir_fcport.loop_id = vha->loop_id;
1872 
1873 		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1874 			ql_log(ql_log_warn, vha, 0x70a7,
1875 			    "Failed to login port %06X for bidirectional IOCB\n",
1876 			    vha->bidir_fcport.d_id.b24);
1877 			mutex_unlock(&ha->selflogin_lock);
1878 			rval = EXT_STATUS_MAILBOX;
1879 			goto done;
1880 		}
1881 		vha->self_login_loop_id = nextlid - 1;
1882 
1883 	}
1884 	mutex_unlock(&ha->selflogin_lock);
1885 
1886 	/* Assign the self login loop id to fcport */
1887 	vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1888 
1889 	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1890 		bsg_job->request_payload.sg_list,
1891 		bsg_job->request_payload.sg_cnt,
1892 		DMA_TO_DEVICE);
1893 
1894 	if (!req_sg_cnt) {
1895 		rval = EXT_STATUS_NO_MEMORY;
1896 		goto done;
1897 	}
1898 
1899 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1900 		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1901 		DMA_FROM_DEVICE);
1902 
1903 	if (!rsp_sg_cnt) {
1904 		rval = EXT_STATUS_NO_MEMORY;
1905 		goto done_unmap_req_sg;
1906 	}
1907 
1908 	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1909 		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1910 		ql_dbg(ql_dbg_user, vha, 0x70a9,
1911 		    "Dma mapping resulted in different sg counts "
1912 		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1913 		    "%x dma_reply_sg_cnt: %x]\n",
1914 		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
1915 		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1916 		rval = EXT_STATUS_NO_MEMORY;
1917 		goto done_unmap_sg;
1918 	}
1919 
1920 	req_data_len = bsg_job->request_payload.payload_len;
1921 	rsp_data_len = bsg_job->reply_payload.payload_len;
1922 
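	/*
	 * The test presumably echoes the request payload back, so both
	 * transfer directions must be sized identically.
	 */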
1923 	if (req_data_len != rsp_data_len) {
1924 		rval = EXT_STATUS_BUSY;
1925 		ql_log(ql_log_warn, vha, 0x70aa,
1926 		    "req_data_len != rsp_data_len\n");
1927 		goto done_unmap_sg;
1928 	}
1929 
1930 	/* Alloc SRB structure */
1931 	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1932 	if (!sp) {
1933 		ql_dbg(ql_dbg_user, vha, 0x70ac,
1934 		    "Alloc SRB structure failed\n");
1935 		rval = EXT_STATUS_NO_MEMORY;
1936 		goto done_unmap_sg;
1937 	}
1938 
1939 	/* Populate srb->ctx with bidir ctx */
1940 	sp->u.bsg_job = bsg_job;
1941 	sp->free = qla2x00_bsg_sp_free;
1942 	sp->type = SRB_BIDI_CMD;
1943 	sp->done = qla2x00_bsg_job_done;
1944 
1945 	/* Add the read and write sg count */
1946 	tot_dsds = rsp_sg_cnt + req_sg_cnt;
1947 
1948 	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1949 	if (rval != EXT_STATUS_OK)
1950 		goto done_free_srb;
1951 	/* The bsg request will be completed in the interrupt handler */
1952 	return rval;
1953 
1954 done_free_srb:
1955 	mempool_free(sp, ha->srb_mempool);
1956 done_unmap_sg:
1957 	dma_unmap_sg(&ha->pdev->dev,
1958 	    bsg_job->reply_payload.sg_list,
1959 	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1960 done_unmap_req_sg:
1961 	dma_unmap_sg(&ha->pdev->dev,
1962 	    bsg_job->request_payload.sg_list,
1963 	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1964 done:
1965 
1966 	/* Return the vendor-specific error status in the response
1967 	 * and complete the bsg request.
1968 	 */
1969 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1970 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1971 	bsg_reply->reply_payload_rcv_len = 0;
1972 	bsg_reply->result = (DID_OK) << 16;
1973 	bsg_job_done(bsg_job, bsg_reply->result,
1974 		       bsg_reply->reply_payload_rcv_len);
1975 	/* Always return success, vendor rsp carries correct status */
1976 	return 0;
1977 }
1978 
1979 static int
1980 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1981 {
1982 	struct fc_bsg_request *bsg_request = bsg_job->request;
1983 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1984 	scsi_qla_host_t *vha = shost_priv(host);
1985 	struct qla_hw_data *ha = vha->hw;
1986 	int rval = (DID_ERROR << 16);
1987 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1988 	srb_t *sp;
1989 	int req_sg_cnt = 0, rsp_sg_cnt = 0;
1990 	struct fc_port *fcport;
1991 	char *type = "FC_BSG_HST_FX_MGMT";
1992 
1993 	/* Copy the IOCB specific information */
1994 	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1995 	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1996 
1997 	/* Dump the vendor information */
1998 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
1999 	    piocb_rqst, sizeof(*piocb_rqst));
2000 
2001 	if (!vha->flags.online) {
2002 		ql_log(ql_log_warn, vha, 0x70d0,
2003 		    "Host is not online.\n");
2004 		rval = -EIO;
2005 		goto done;
2006 	}
2007 
2008 	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
2009 		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
2010 		    bsg_job->request_payload.sg_list,
2011 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2012 		if (!req_sg_cnt) {
2013 			ql_log(ql_log_warn, vha, 0x70c7,
2014 			    "dma_map_sg return %d for request\n", req_sg_cnt);
2015 			rval = -ENOMEM;
2016 			goto done;
2017 		}
2018 	}
2019 
2020 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
2021 		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2022 		    bsg_job->reply_payload.sg_list,
2023 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2024 		if (!rsp_sg_cnt) {
2025 			ql_log(ql_log_warn, vha, 0x70c8,
2026 			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
2027 			rval = -ENOMEM;
2028 			goto done_unmap_req_sg;
2029 		}
2030 	}
2031 
2032 	ql_dbg(ql_dbg_user, vha, 0x70c9,
2033 	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2034 	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2035 	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2036 
2037 	/* Allocate a dummy fcport structure, since the functions preparing
2038 	 * the IOCB and mailbox command retrieve port-specific information
2039 	 * from the fcport structure. For host-based ELS commands there is
2040 	 * no fcport structure allocated.
2041 	 */
2042 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2043 	if (!fcport) {
2044 		ql_log(ql_log_warn, vha, 0x70ca,
2045 		    "Failed to allocate fcport.\n");
2046 		rval = -ENOMEM;
2047 		goto done_unmap_rsp_sg;
2048 	}
2049 
2050 	/* Alloc SRB structure */
2051 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2052 	if (!sp) {
2053 		ql_log(ql_log_warn, vha, 0x70cb,
2054 		    "qla2x00_get_sp failed.\n");
2055 		rval = -ENOMEM;
2056 		goto done_free_fcport;
2057 	}
2058 
2059 	/* Initialize all required fields of fcport */
2060 	fcport->vha = vha;
2061 	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
2062 
2063 	sp->type = SRB_FXIOCB_BCMD;
2064 	sp->name = "bsg_fx_mgmt";
2065 	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2066 	sp->u.bsg_job = bsg_job;
2067 	sp->free = qla2x00_bsg_sp_free;
2068 	sp->done = qla2x00_bsg_job_done;
2069 
2070 	ql_dbg(ql_dbg_user, vha, 0x70cc,
2071 	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2072 	    type, piocb_rqst->func_type, fcport->loop_id);
2073 
2074 	rval = qla2x00_start_sp(sp);
2075 	if (rval != QLA_SUCCESS) {
2076 		ql_log(ql_log_warn, vha, 0x70cd,
2077 		    "qla2x00_start_sp failed=%d.\n", rval);
2078 		mempool_free(sp, ha->srb_mempool);
2079 		rval = -EIO;
2080 		goto done_free_fcport;
2081 	}
2082 	return rval;
2083 
2084 done_free_fcport:
2085 	qla2x00_free_fcport(fcport);
2086 
2087 done_unmap_rsp_sg:
2088 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2089 		dma_unmap_sg(&ha->pdev->dev,
2090 		    bsg_job->reply_payload.sg_list,
2091 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2092 done_unmap_req_sg:
2093 	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2094 		dma_unmap_sg(&ha->pdev->dev,
2095 		    bsg_job->request_payload.sg_list,
2096 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2097 
2098 done:
2099 	return rval;
2100 }
2101 
2102 static int
2103 qla26xx_serdes_op(struct bsg_job *bsg_job)
2104 {
2105 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2106 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2107 	scsi_qla_host_t *vha = shost_priv(host);
2108 	int rval = 0;
2109 	struct qla_serdes_reg sr;
2110 
2111 	memset(&sr, 0, sizeof(sr));
2112 
2113 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2114 	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2115 
2116 	switch (sr.cmd) {
2117 	case INT_SC_SERDES_WRITE_REG:
2118 		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2119 		bsg_reply->reply_payload_rcv_len = 0;
2120 		break;
2121 	case INT_SC_SERDES_READ_REG:
2122 		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2123 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2124 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2125 		bsg_reply->reply_payload_rcv_len = sizeof(sr);
2126 		break;
2127 	default:
2128 		ql_dbg(ql_dbg_user, vha, 0x708c,
2129 		    "Unknown serdes cmd %x.\n", sr.cmd);
2130 		rval = -EINVAL;
2131 		break;
2132 	}
2133 
2134 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2135 	    rval ? EXT_STATUS_MAILBOX : 0;
2136 
2137 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2138 	bsg_reply->result = DID_OK << 16;
2139 	bsg_job_done(bsg_job, bsg_reply->result,
2140 		       bsg_reply->reply_payload_rcv_len);
2141 	return 0;
2142 }
2143 
2144 static int
2145 qla8044_serdes_op(struct bsg_job *bsg_job)
2146 {
2147 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2148 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2149 	scsi_qla_host_t *vha = shost_priv(host);
2150 	int rval = 0;
2151 	struct qla_serdes_reg_ex sr;
2152 
2153 	memset(&sr, 0, sizeof(sr));
2154 
2155 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2156 	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2157 
2158 	switch (sr.cmd) {
2159 	case INT_SC_SERDES_WRITE_REG:
2160 		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2161 		bsg_reply->reply_payload_rcv_len = 0;
2162 		break;
2163 	case INT_SC_SERDES_READ_REG:
2164 		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2165 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2166 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2167 		bsg_reply->reply_payload_rcv_len = sizeof(sr);
2168 		break;
2169 	default:
2170 		ql_dbg(ql_dbg_user, vha, 0x7020,
2171 		    "Unknown serdes cmd %x.\n", sr.cmd);
2172 		rval = -EINVAL;
2173 		break;
2174 	}
2175 
2176 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2177 	    rval ? EXT_STATUS_MAILBOX : 0;
2178 
2179 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2180 	bsg_reply->result = DID_OK << 16;
2181 	bsg_job_done(bsg_job, bsg_reply->result,
2182 		       bsg_reply->reply_payload_rcv_len);
2183 	return 0;
2184 }
2185 
2186 static int
2187 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2188 {
2189 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2190 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2191 	scsi_qla_host_t *vha = shost_priv(host);
2192 	struct qla_hw_data *ha = vha->hw;
2193 	struct qla_flash_update_caps cap;
2194 
2195 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2196 		return -EPERM;
2197 
2198 	memset(&cap, 0, sizeof(cap));
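	/*
	 * Pack the firmware attribute words into a single 64-bit
	 * capabilities value: fw_attributes_ext[1] lands in bits 63:48,
	 * fw_attributes_ext[0] in 47:32, fw_attributes_h in 31:16 and
	 * the base fw_attributes in 15:0.
	 */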
2199 	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2200 			   (uint64_t)ha->fw_attributes_ext[0] << 32 |
2201 			   (uint64_t)ha->fw_attributes_h << 16 |
2202 			   (uint64_t)ha->fw_attributes;
2203 
2204 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2205 	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2206 	bsg_reply->reply_payload_rcv_len = sizeof(cap);
2207 
2208 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2209 	    EXT_STATUS_OK;
2210 
2211 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2212 	bsg_reply->result = DID_OK << 16;
2213 	bsg_job_done(bsg_job, bsg_reply->result,
2214 		       bsg_reply->reply_payload_rcv_len);
2215 	return 0;
2216 }
2217 
2218 static int
2219 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2220 {
2221 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2222 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2223 	scsi_qla_host_t *vha = shost_priv(host);
2224 	struct qla_hw_data *ha = vha->hw;
2225 	uint64_t online_fw_attr = 0;
2226 	struct qla_flash_update_caps cap;
2227 
2228 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2229 		return -EPERM;
2230 
2231 	memset(&cap, 0, sizeof(cap));
2232 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2233 	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2234 
2235 	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2236 			 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2237 			 (uint64_t)ha->fw_attributes_h << 16 |
2238 			 (uint64_t)ha->fw_attributes;
2239 
2240 	if (online_fw_attr != cap.capabilities) {
2241 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2242 		    EXT_STATUS_INVALID_PARAM;
2243 		return -EINVAL;
2244 	}
2245 
2246 	if (cap.outage_duration < MAX_LOOP_TIMEOUT)  {
2247 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2248 		    EXT_STATUS_INVALID_PARAM;
2249 		return -EINVAL;
2250 	}
2251 
2252 	bsg_reply->reply_payload_rcv_len = 0;
2253 
2254 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2255 	    EXT_STATUS_OK;
2256 
2257 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2258 	bsg_reply->result = DID_OK << 16;
2259 	bsg_job_done(bsg_job, bsg_reply->result,
2260 		       bsg_reply->reply_payload_rcv_len);
2261 	return 0;
2262 }
2263 
2264 static int
2265 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2266 {
2267 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2268 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2269 	scsi_qla_host_t *vha = shost_priv(host);
2270 	struct qla_hw_data *ha = vha->hw;
2271 	struct qla_bbcr_data bbcr;
2272 	uint16_t loop_id, topo, sw_cap;
2273 	uint8_t domain, area, al_pa, state;
2274 	int rval;
2275 
2276 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2277 		return -EPERM;
2278 
2279 	memset(&bbcr, 0, sizeof(bbcr));
2280 
2281 	if (vha->flags.bbcr_enable)
2282 		bbcr.status = QLA_BBCR_STATUS_ENABLED;
2283 	else
2284 		bbcr.status = QLA_BBCR_STATUS_DISABLED;
2285 
2286 	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2287 		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2288 			&area, &domain, &topo, &sw_cap);
2289 		if (rval != QLA_SUCCESS) {
2290 			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2291 			bbcr.state = QLA_BBCR_STATE_OFFLINE;
2292 			bbcr.mbx1 = loop_id;
2293 			goto done;
2294 		}
2295 
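		/*
		 * vha->bbcr layout as decoded here: bit 12 is the offline
		 * flag, bits 11:8 carry the negotiated BB-SCN and bits 3:0
		 * the configured BB-SCN.
		 */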
2296 		state = (vha->bbcr >> 12) & 0x1;
2297 
2298 		if (state) {
2299 			bbcr.state = QLA_BBCR_STATE_OFFLINE;
2300 			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2301 		} else {
2302 			bbcr.state = QLA_BBCR_STATE_ONLINE;
2303 			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2304 		}
2305 
2306 		bbcr.configured_bbscn = vha->bbcr & 0xf;
2307 	}
2308 
2309 done:
2310 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2311 		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2312 	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2313 
2314 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2315 
2316 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2317 	bsg_reply->result = DID_OK << 16;
2318 	bsg_job_done(bsg_job, bsg_reply->result,
2319 		       bsg_reply->reply_payload_rcv_len);
2320 	return 0;
2321 }
2322 
2323 static int
2324 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2325 {
2326 	struct fc_bsg_request *bsg_request = bsg_job->request;
2327 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2328 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2329 	scsi_qla_host_t *vha = shost_priv(host);
2330 	struct qla_hw_data *ha = vha->hw;
2331 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2332 	struct link_statistics *stats = NULL;
2333 	dma_addr_t stats_dma;
2334 	int rval;
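	/*
	 * QL_VND_GET_PRIV_STATS_EX carries a stats-options word in
	 * vendor_cmd[1]; the plain QL_VND_GET_PRIV_STATS variant implies
	 * options == 0.
	 */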
2335 	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2336 	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2337 
2338 	if (test_bit(UNLOADING, &vha->dpc_flags))
2339 		return -ENODEV;
2340 
2341 	if (unlikely(pci_channel_offline(ha->pdev)))
2342 		return -ENODEV;
2343 
2344 	if (qla2x00_reset_active(vha))
2345 		return -EBUSY;
2346 
2347 	if (!IS_FWI2_CAPABLE(ha))
2348 		return -EPERM;
2349 
2350 	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2351 				   GFP_KERNEL);
2352 	if (!stats) {
2353 		ql_log(ql_log_warn, vha, 0x70e2,
2354 		    "Failed to allocate memory for stats.\n");
2355 		return -ENOMEM;
2356 	}
2357 
2358 	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2359 
2360 	if (rval == QLA_SUCCESS) {
2361 		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2362 			stats, sizeof(*stats));
2363 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2364 			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2365 	}
2366 
2367 	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2368 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2369 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2370 
2371 	bsg_job->reply_len = sizeof(*bsg_reply);
2372 	bsg_reply->result = DID_OK << 16;
2373 	bsg_job_done(bsg_job, bsg_reply->result,
2374 		       bsg_reply->reply_payload_rcv_len);
2375 
2376 	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2377 		stats, stats_dma);
2378 
2379 	return 0;
2380 }
2381 
2382 static int
2383 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2384 {
2385 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2386 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2387 	scsi_qla_host_t *vha = shost_priv(host);
2388 	int rval;
2389 	struct qla_dport_diag *dd;
2390 
2391 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2392 	    !IS_QLA28XX(vha->hw))
2393 		return -EPERM;
2394 
2395 	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2396 	if (!dd) {
2397 		ql_log(ql_log_warn, vha, 0x70db,
2398 		    "Failed to allocate memory for dport.\n");
2399 		return -ENOMEM;
2400 	}
2401 
2402 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2403 	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2404 
2405 	rval = qla26xx_dport_diagnostics(
2406 	    vha, dd->buf, sizeof(dd->buf), dd->options);
2407 	if (rval == QLA_SUCCESS) {
2408 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2409 		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2410 	}
2411 
2412 	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2413 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2414 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2415 
2416 	bsg_job->reply_len = sizeof(*bsg_reply);
2417 	bsg_reply->result = DID_OK << 16;
2418 	bsg_job_done(bsg_job, bsg_reply->result,
2419 		       bsg_reply->reply_payload_rcv_len);
2420 
2421 	kfree(dd);
2422 
2423 	return 0;
2424 }
2425 
2426 static int
2427 qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
2428 {
2429 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2430 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2431 	scsi_qla_host_t *vha = shost_priv(host);
2432 	int rval;
2433 	struct qla_dport_diag_v2 *dd;
2434 	mbx_cmd_t mc;
2435 	mbx_cmd_t *mcp = &mc;
2436 	uint16_t options;
2437 
2438 	if (!IS_DPORT_CAPABLE(vha->hw))
2439 		return -EPERM;
2440 
2441 	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
2442 	if (!dd)
2443 		return -ENOMEM;
2444 
2445 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2446 			bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2447 
2448 	options = dd->options;
2449 
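	/*
	 * dport_status drives this flow: a start/restart clears any stale
	 * chip-reset flag and, on success, sets DPORT_DIAG_IN_PROGRESS;
	 * a get-result request is refused while a test is running or
	 * after a chip reset has invalidated it.
	 */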
2450 	/* Check if a dport test is already in progress */
2451 	if (options == QLA_GET_DPORT_RESULT_V2 &&
2452 	    vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
2453 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2454 					EXT_STATUS_DPORT_DIAG_IN_PROCESS;
2455 		goto dportcomplete;
2456 	}
2457 
2458 	/* A start/restart request clears any stale chip-reset flag */
2459 	if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
2460 	    (options == QLA_START_DPORT_TEST_V2 ||
2461 	     options == QLA_RESTART_DPORT_TEST_V2)) {
2462 		vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
2463 	}
2464 
2465 	/* Reject a get-result request if a chip reset invalidated the test */
2466 	if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
2467 	    options == QLA_GET_DPORT_RESULT_V2) {
2468 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2469 					EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
2470 		goto dportcomplete;
2471 	}
2472 
2473 	rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);
2474 
2475 	if (rval == QLA_SUCCESS) {
2476 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2477 					EXT_STATUS_OK;
2478 		if (options == QLA_START_DPORT_TEST_V2 ||
2479 		    options == QLA_RESTART_DPORT_TEST_V2) {
2480 			dd->mbx1 = mcp->mb[0];
2481 			dd->mbx2 = mcp->mb[1];
2482 			vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
2483 		} else if (options == QLA_GET_DPORT_RESULT_V2) {
2484 			dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
2485 			dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
2486 		}
2487 	} else {
2488 		dd->mbx1 = mcp->mb[0];
2489 		dd->mbx2 = mcp->mb[1];
2490 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2491 				EXT_STATUS_DPORT_DIAG_ERR;
2492 	}
2493 
2494 dportcomplete:
2495 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2496 			    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2497 
2498 	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2499 	bsg_job->reply_len = sizeof(*bsg_reply);
2500 	bsg_reply->result = DID_OK << 16;
2501 	bsg_job_done(bsg_job, bsg_reply->result,
2502 		     bsg_reply->reply_payload_rcv_len);
2503 
2504 	kfree(dd);
2505 
2506 	return 0;
2507 }
2508 
2509 static int
2510 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2511 {
2512 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2513 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2514 	struct qla_hw_data *ha = vha->hw;
2515 	struct qla_active_regions regions = { };
2516 	struct active_regions active_regions = { };
2517 
2518 	qla27xx_get_active_image(vha, &active_regions);
2519 	regions.global_image = active_regions.global;
2520 
2521 	if (IS_QLA27XX(ha))
2522 		regions.nvme_params = QLA27XX_PRIMARY_IMAGE;
2523 
2524 	if (IS_QLA28XX(ha)) {
2525 		qla28xx_get_aux_images(vha, &active_regions);
2526 		regions.board_config = active_regions.aux.board_config;
2527 		regions.vpd_nvram = active_regions.aux.vpd_nvram;
2528 		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2529 		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2530 		regions.nvme_params = active_regions.aux.nvme_params;
2531 	}
2532 
2533 	ql_dbg(ql_dbg_user, vha, 0x70e1,
2534 	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
2535 	    __func__, vha->host_no, regions.global_image,
2536 	    regions.board_config, regions.vpd_nvram,
2537 	    regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);
2538 
2539 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2540 	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2541 
2542 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2543 	bsg_reply->reply_payload_rcv_len = sizeof(regions);
2544 	bsg_reply->result = DID_OK << 16;
2545 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2546 	bsg_job_done(bsg_job, bsg_reply->result,
2547 	    bsg_reply->reply_payload_rcv_len);
2548 
2549 	return 0;
2550 }
2551 
2552 static int
2553 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
2554 {
2555 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2556 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2557 	struct ql_vnd_mng_host_stats_param *req_data;
2558 	struct ql_vnd_mng_host_stats_resp rsp_data;
2559 	u32 req_data_len;
2560 	int ret = 0;
2561 
2562 	if (!vha->flags.online) {
2563 		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2564 		return -EIO;
2565 	}
2566 
2567 	req_data_len = bsg_job->request_payload.payload_len;
2568 
2569 	if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
2570 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2571 		return -EIO;
2572 	}
2573 
2574 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2575 	if (!req_data) {
2576 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2577 		return -ENOMEM;
2578 	}
2579 
2580 	/* Copy the request buffer into req_data */
2581 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2582 			  bsg_job->request_payload.sg_cnt, req_data,
2583 			  req_data_len);
2584 
2585 	switch (req_data->action) {
2586 	case QLA_STOP:
2587 		ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
2588 		break;
2589 	case QLA_START:
2590 		ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
2591 		break;
2592 	case QLA_CLEAR:
2593 		ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
2594 		break;
2595 	default:
2596 		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2597 		ret = -EIO;
2598 		break;
2599 	}
2600 
2601 	kfree(req_data);
2602 
2603 	/* Prepare response */
2604 	rsp_data.status = ret;
2605 	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2606 
2607 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2608 	bsg_reply->reply_payload_rcv_len =
2609 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2610 				    bsg_job->reply_payload.sg_cnt,
2611 				    &rsp_data,
2612 				    sizeof(struct ql_vnd_mng_host_stats_resp));
2613 
2614 	bsg_reply->result = DID_OK;
2615 	bsg_job_done(bsg_job, bsg_reply->result,
2616 		     bsg_reply->reply_payload_rcv_len);
2617 
2618 	return ret;
2619 }
2620 
2621 static int
2622 qla2x00_get_host_stats(struct bsg_job *bsg_job)
2623 {
2624 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2625 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2626 	struct ql_vnd_stats_param *req_data;
2627 	struct ql_vnd_host_stats_resp rsp_data;
2628 	u32 req_data_len;
2629 	int ret = 0;
2630 	u64 ini_entry_count = 0;
2631 	u64 entry_count = 0;
2632 	u64 tgt_num = 0;
2633 	u64 tmp_stat_type = 0;
2634 	u64 response_len = 0;
2635 	void *data;
2636 
2637 	req_data_len = bsg_job->request_payload.payload_len;
2638 
2639 	if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
2640 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2641 		return -EIO;
2642 	}
2643 
2644 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2645 	if (!req_data) {
2646 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2647 		return -ENOMEM;
2648 	}
2649 
2650 	/* Copy the request buffer into req_data */
2651 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2652 			  bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2653 
2654 	/* Copy stat type to work on it */
2655 	tmp_stat_type = req_data->stat_type;
2656 
2657 	if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
2658 		/* Num of tgts connected to this host */
2659 		tgt_num = qla2x00_get_num_tgts(vha);
2660 		/* unset BIT_17 */
2661 		tmp_stat_type &= ~(1 << 17);
2662 	}
2663 
2664 	/* Total ini stats */
2665 	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
2666 
2667 	/* Total number of entries */
2668 	entry_count = ini_entry_count + tgt_num;
2669 
2670 	response_len = sizeof(struct ql_vnd_host_stats_resp) +
2671 		(sizeof(struct ql_vnd_stat_entry) * entry_count);
2672 
2673 	if (response_len > bsg_job->reply_payload.payload_len) {
2674 		rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
2675 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2676 		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2677 
2678 		bsg_reply->reply_payload_rcv_len =
2679 			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2680 					    bsg_job->reply_payload.sg_cnt, &rsp_data,
2681 					    sizeof(struct ql_vnd_mng_host_stats_resp));
2682 
2683 		bsg_reply->result = DID_OK;
2684 		bsg_job_done(bsg_job, bsg_reply->result,
2685 			     bsg_reply->reply_payload_rcv_len);
2686 		goto host_stat_out;
2687 	}
2688 
2689 	data = kzalloc(response_len, GFP_KERNEL);
2690 	if (!data) {
2691 		ret = -ENOMEM;
2692 		goto host_stat_out;
2693 	}
2694 
2695 	ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2696 				    data, response_len);
2697 
2698 	rsp_data.status = EXT_STATUS_OK;
2699 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2700 
2701 	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2702 							       bsg_job->reply_payload.sg_cnt,
2703 							       data, response_len);
2704 	bsg_reply->result = DID_OK;
2705 	bsg_job_done(bsg_job, bsg_reply->result,
2706 		     bsg_reply->reply_payload_rcv_len);
2707 
2708 	kfree(data);
2709 host_stat_out:
2710 	kfree(req_data);
2711 	return ret;
2712 }
2713 
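/*
 * qla2xxx_find_rport() - look up the fc_rport bound to a target number.
 * @vha: host to search
 * @tgt_num: rport number to match
 *
 * Walks vha->vp_fcports and returns the matching rport, or NULL when
 * the target is not found.
 */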
2714 static struct fc_rport *
2715 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
2716 {
2717 	fc_port_t *fcport = NULL;
2718 
2719 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2720 		if (fcport->rport->number == tgt_num)
2721 			return fcport->rport;
2722 	}
2723 	return NULL;
2724 }
2725 
2726 static int
2727 qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
2728 {
2729 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2730 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2731 	struct ql_vnd_tgt_stats_param *req_data;
2732 	u32 req_data_len;
2733 	int ret = 0;
2734 	u64 response_len = 0;
2735 	struct ql_vnd_tgt_stats_resp *data = NULL;
2736 	struct fc_rport *rport = NULL;
2737 
2738 	if (!vha->flags.online) {
2739 		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2740 		return -EIO;
2741 	}
2742 
2743 	req_data_len = bsg_job->request_payload.payload_len;
2744 
2745 	if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
2746 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2747 		return -EIO;
2748 	}
2749 
2750 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2751 	if (!req_data) {
2752 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2753 		return -ENOMEM;
2754 	}
2755 
2756 	/* Copy the request buffer into req_data */
2757 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2758 			  bsg_job->request_payload.sg_cnt,
2759 			  req_data, req_data_len);
2760 
2761 	response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
2762 		sizeof(struct ql_vnd_stat_entry);
2763 
2764 	/* structure + size for one entry */
2765 	data = kzalloc(response_len, GFP_KERNEL);
2766 	if (!data) {
2767 		kfree(req_data);
2768 		return -ENOMEM;
2769 	}
2770 
2771 	if (response_len > bsg_job->reply_payload.payload_len) {
2772 		data->status = EXT_STATUS_BUFFER_TOO_SMALL;
2773 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2774 		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2775 
2776 		bsg_reply->reply_payload_rcv_len =
2777 			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2778 					    bsg_job->reply_payload.sg_cnt, data,
2779 					    sizeof(struct ql_vnd_tgt_stats_resp));
2780 
2781 		bsg_reply->result = DID_OK;
2782 		bsg_job_done(bsg_job, bsg_reply->result,
2783 			     bsg_reply->reply_payload_rcv_len);
2784 		goto tgt_stat_out;
2785 	}
2786 
2787 	rport = qla2xxx_find_rport(vha, req_data->tgt_id);
2788 	if (!rport) {
2789 		ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
2790 		ret = EXT_STATUS_INVALID_PARAM;
2791 		data->status = EXT_STATUS_INVALID_PARAM;
2792 		goto reply;
2793 	}
2794 
2795 	ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2796 				    rport, (void *)data, response_len);
2797 
2798 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2799 reply:
2800 	bsg_reply->reply_payload_rcv_len =
2801 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2802 				    bsg_job->reply_payload.sg_cnt, data,
2803 				    response_len);
2804 	bsg_reply->result = DID_OK;
2805 	bsg_job_done(bsg_job, bsg_reply->result,
2806 		     bsg_reply->reply_payload_rcv_len);
2807 
2808 tgt_stat_out:
2809 	kfree(data);
2810 	kfree(req_data);
2811 
2812 	return ret;
2813 }
2814 
2815 static int
2816 qla2x00_manage_host_port(struct bsg_job *bsg_job)
2817 {
2818 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2819 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2820 	struct ql_vnd_mng_host_port_param *req_data;
2821 	struct ql_vnd_mng_host_port_resp rsp_data;
2822 	u32 req_data_len;
2823 	int ret = 0;
2824 
2825 	req_data_len = bsg_job->request_payload.payload_len;
2826 
2827 	if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
2828 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2829 		return -EIO;
2830 	}
2831 
2832 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2833 	if (!req_data) {
2834 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2835 		return -ENOMEM;
2836 	}
2837 
2838 	/* Copy the request buffer into req_data */
2839 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2840 			  bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2841 
2842 	switch (req_data->action) {
2843 	case QLA_ENABLE:
2844 		ret = qla2xxx_enable_port(vha->host);
2845 		break;
2846 	case QLA_DISABLE:
2847 		ret = qla2xxx_disable_port(vha->host);
2848 		break;
2849 	default:
2850 		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2851 		ret = -EIO;
2852 		break;
2853 	}
2854 
2855 	kfree(req_data);
2856 
2857 	/* Prepare response */
2858 	rsp_data.status = ret;
2859 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2860 	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
2861 
2862 	bsg_reply->reply_payload_rcv_len =
2863 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2864 				    bsg_job->reply_payload.sg_cnt, &rsp_data,
2865 				    sizeof(struct ql_vnd_mng_host_port_resp));
2866 	bsg_reply->result = DID_OK;
2867 	bsg_job_done(bsg_job, bsg_reply->result,
2868 		     bsg_reply->reply_payload_rcv_len);
2869 
2870 	return ret;
2871 }
2872 
2873 static int
2874 qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
2875 {
2876 	struct fc_bsg_request *bsg_request = bsg_job->request;
2877 
2878 	ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
2879 	    __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
2880 
2881 	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2882 	case QL_VND_LOOPBACK:
2883 		return qla2x00_process_loopback(bsg_job);
2884 
2885 	case QL_VND_A84_RESET:
2886 		return qla84xx_reset(bsg_job);
2887 
2888 	case QL_VND_A84_UPDATE_FW:
2889 		return qla84xx_updatefw(bsg_job);
2890 
2891 	case QL_VND_A84_MGMT_CMD:
2892 		return qla84xx_mgmt_cmd(bsg_job);
2893 
2894 	case QL_VND_IIDMA:
2895 		return qla24xx_iidma(bsg_job);
2896 
2897 	case QL_VND_FCP_PRIO_CFG_CMD:
2898 		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2899 
2900 	case QL_VND_READ_FLASH:
2901 		return qla2x00_read_optrom(bsg_job);
2902 
2903 	case QL_VND_UPDATE_FLASH:
2904 		return qla2x00_update_optrom(bsg_job);
2905 
2906 	case QL_VND_SET_FRU_VERSION:
2907 		return qla2x00_update_fru_versions(bsg_job);
2908 
2909 	case QL_VND_READ_FRU_STATUS:
2910 		return qla2x00_read_fru_status(bsg_job);
2911 
2912 	case QL_VND_WRITE_FRU_STATUS:
2913 		return qla2x00_write_fru_status(bsg_job);
2914 
2915 	case QL_VND_WRITE_I2C:
2916 		return qla2x00_write_i2c(bsg_job);
2917 
2918 	case QL_VND_READ_I2C:
2919 		return qla2x00_read_i2c(bsg_job);
2920 
2921 	case QL_VND_DIAG_IO_CMD:
2922 		return qla24xx_process_bidir_cmd(bsg_job);
2923 
2924 	case QL_VND_FX00_MGMT_CMD:
2925 		return qlafx00_mgmt_cmd(bsg_job);
2926 
2927 	case QL_VND_SERDES_OP:
2928 		return qla26xx_serdes_op(bsg_job);
2929 
2930 	case QL_VND_SERDES_OP_EX:
2931 		return qla8044_serdes_op(bsg_job);
2932 
2933 	case QL_VND_GET_FLASH_UPDATE_CAPS:
2934 		return qla27xx_get_flash_upd_cap(bsg_job);
2935 
2936 	case QL_VND_SET_FLASH_UPDATE_CAPS:
2937 		return qla27xx_set_flash_upd_cap(bsg_job);
2938 
2939 	case QL_VND_GET_BBCR_DATA:
2940 		return qla27xx_get_bbcr_data(bsg_job);
2941 
2942 	case QL_VND_GET_PRIV_STATS:
2943 	case QL_VND_GET_PRIV_STATS_EX:
2944 		return qla2x00_get_priv_stats(bsg_job);
2945 
2946 	case QL_VND_DPORT_DIAGNOSTICS:
2947 		return qla2x00_do_dport_diagnostics(bsg_job);
2948 
2949 	case QL_VND_DPORT_DIAGNOSTICS_V2:
2950 		return qla2x00_do_dport_diagnostics_v2(bsg_job);
2951 
2952 	case QL_VND_EDIF_MGMT:
2953 		return qla_edif_app_mgmt(bsg_job);
2954 
2955 	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2956 		return qla2x00_get_flash_image_status(bsg_job);
2957 
2958 	case QL_VND_MANAGE_HOST_STATS:
2959 		return qla2x00_manage_host_stats(bsg_job);
2960 
2961 	case QL_VND_GET_HOST_STATS:
2962 		return qla2x00_get_host_stats(bsg_job);
2963 
2964 	case QL_VND_GET_TGT_STATS:
2965 		return qla2x00_get_tgt_stats(bsg_job);
2966 
2967 	case QL_VND_MANAGE_HOST_PORT:
2968 		return qla2x00_manage_host_port(bsg_job);
2969 
2970 	case QL_VND_MBX_PASSTHRU:
2971 		return qla2x00_mailbox_passthru(bsg_job);
2972 
2973 	default:
2974 		return -ENOSYS;
2975 	}
2976 }
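
/*
 * Illustrative userspace sketch, not part of the driver: the vendor
 * commands dispatched above arrive through the fc_host bsg node as an
 * SG_IO/sg_io_v4 transaction whose request buffer is a struct
 * fc_bsg_request.  The device path, the QLogic PCI vendor id 0x1077
 * and the two-word vendor command area are assumptions made for the
 * example.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/bsg.h>
 *	#include <scsi/sg.h>
 *	#include <scsi/scsi_bsg_fc.h>
 *
 *	static int send_vendor_cmd(const char *node, uint32_t cmd)
 *	{
 *		// room for the header plus two vendor command words
 *		size_t len = sizeof(struct fc_bsg_request) + 2 * sizeof(uint32_t);
 *		struct fc_bsg_request *req = calloc(1, len);
 *		struct fc_bsg_reply rep;
 *		struct sg_io_v4 io;
 *		int fd, ret;
 *
 *		if (!req)
 *			return -1;
 *		fd = open(node, O_RDWR);	// e.g. "/dev/bsg/fc_host0"
 *		if (fd < 0) {
 *			free(req);
 *			return -1;
 *		}
 *		req->msgcode = FC_BSG_HST_VENDOR;
 *		req->rqst_data.h_vendor.vendor_id = 0x1077;
 *		req->rqst_data.h_vendor.vendor_cmd[0] = cmd;
 *
 *		memset(&io, 0, sizeof(io));
 *		io.guard = 'Q';
 *		io.protocol = BSG_PROTOCOL_SCSI;
 *		io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *		io.request = (uintptr_t)req;
 *		io.request_len = len;
 *		io.response = (uintptr_t)&rep;
 *		io.max_response_len = sizeof(rep);
 *
 *		ret = ioctl(fd, SG_IO, &io);	// rep.reply_data carries vendor_rsp[0]
 *		close(fd);
 *		free(req);
 *		return ret;
 *	}
 */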
2977 
2978 int
2979 qla24xx_bsg_request(struct bsg_job *bsg_job)
2980 {
2981 	struct fc_bsg_request *bsg_request = bsg_job->request;
2982 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2983 	int ret = -EINVAL;
2984 	struct fc_rport *rport;
2985 	struct Scsi_Host *host;
2986 	scsi_qla_host_t *vha;
2987 
2988 	/* In case no data is transferred. */
2989 	bsg_reply->reply_payload_rcv_len = 0;
2990 
2991 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2992 		rport = fc_bsg_to_rport(bsg_job);
2993 		if (!rport)
2994 			return ret;
2995 		host = rport_to_shost(rport);
2996 		vha = shost_priv(host);
2997 	} else {
2998 		host = fc_bsg_to_shost(bsg_job);
2999 		vha = shost_priv(host);
3000 	}
3001 
3002 	/* A port disable brings down the chip; still let the enable and host-stats commands through */
3003 	if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
3004 	    bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
3005 		goto skip_chip_chk;
3006 
3007 	if (vha->hw->flags.port_isolated) {
3008 		bsg_reply->result = DID_ERROR;
3009 		/* operation not permitted */
3010 		return -EPERM;
3011 	}
3012 
3013 	if (qla2x00_chip_is_down(vha)) {
3014 		ql_dbg(ql_dbg_user, vha, 0x709f,
3015 		    "BSG: ISP abort active/needed -- cmd=%d.\n",
3016 		    bsg_request->msgcode);
3017 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
3018 		return -EBUSY;
3019 	}
3020 
3021 	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
3022 		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
3023 		return -EIO;
3024 	}
3025 
3026 skip_chip_chk:
3027 	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
3028 	    "Entered %s msgcode=0x%x. bsg ptr %px\n",
3029 	    __func__, bsg_request->msgcode, bsg_job);
3030 
3031 	switch (bsg_request->msgcode) {
3032 	case FC_BSG_RPT_ELS:
3033 	case FC_BSG_HST_ELS_NOLOGIN:
3034 		ret = qla2x00_process_els(bsg_job);
3035 		break;
3036 	case FC_BSG_HST_CT:
3037 		ret = qla2x00_process_ct(bsg_job);
3038 		break;
3039 	case FC_BSG_HST_VENDOR:
3040 		ret = qla2x00_process_vendor_specific(vha, bsg_job);
3041 		break;
3042 	case FC_BSG_HST_ADD_RPORT:
3043 	case FC_BSG_HST_DEL_RPORT:
3044 	case FC_BSG_RPT_CT:
3045 	default:
3046 		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
3047 		break;
3048 	}
3049 
3050 	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
3051 	    "%s done with return %x\n", __func__, ret);
3052 
3053 	return ret;
3054 }
3055 
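/*
 * qla_bsg_found() - abort a timed-out bsg job on one queue pair.
 *
 * Scans the qpair's outstanding commands for the srb owning bsg_job,
 * asks the firmware to abort it and waits up to four R_A_TOV periods
 * for the completion.  If the abort fails or times out, the srb is
 * retired here and the bsg request is completed with -ENXIO.  Returns
 * true when the job belonged to this qpair.
 */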
3056 static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
3057 {
3058 	bool found, do_bsg_done;
3059 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3060 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3061 	struct qla_hw_data *ha = vha->hw;
3062 	srb_t *sp = NULL;
3063 	int cnt;
3064 	unsigned long flags;
3065 	struct req_que *req;
3066 	int rval;
3067 	DECLARE_COMPLETION_ONSTACK(comp);
3068 	uint32_t ratov_j;
3069 
3070 	found = do_bsg_done = false;
3071 
3072 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3073 	req = qpair->req;
3074 
3075 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
3076 		sp = req->outstanding_cmds[cnt];
3077 		if (sp &&
3078 		    (sp->type == SRB_CT_CMD ||
3079 		     sp->type == SRB_ELS_CMD_HST ||
3080 		     sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
3081 		    sp->u.bsg_job == bsg_job) {
3082 
3083 			found = true;
3084 			sp->comp = &comp;
3085 			break;
3086 		}
3087 	}
3088 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3089 
3090 	if (!found)
3091 		return false;
3092 
3093 	if (ha->flags.eeh_busy) {
3094 		/* Skip the abort; EEH handling will return the bsg. Wait for it. */
3095 		rval = QLA_SUCCESS;
3096 		ql_dbg(ql_dbg_user, vha, 0x802c,
3097 			"eeh encounter. bsg %p sp=%p handle=%x \n",
3098 			bsg_job, sp, sp->handle);
3099 	} else {
3100 		rval = ha->isp_ops->abort_command(sp);
3101 		ql_dbg(ql_dbg_user, vha, 0x802c,
3102 			"Aborting bsg %p sp=%p handle=%x rval=%x\n",
3103 			bsg_job, sp, sp->handle, rval);
3104 	}
3105 
3106 	switch (rval) {
3107 	case QLA_SUCCESS:
3108 		/* Wait for the command completion. */
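		/*
		 * r_a_tov appears to be kept in 100 ms units here:
		 * r_a_tov / 10 gives seconds and the final * 1000 gives
		 * milliseconds, so this waits up to 4 * R_A_TOV.
		 */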
3109 		ratov_j = ha->r_a_tov / 10 * 4 * 1000;
3110 		ratov_j = msecs_to_jiffies(ratov_j);
3111 
3112 		if (!wait_for_completion_timeout(&comp, ratov_j)) {
3113 			ql_log(ql_log_info, vha, 0x7089,
3114 				"bsg abort timeout.  bsg=%p sp=%p handle %#x .\n",
3115 				bsg_job, sp, sp->handle);
3116 
3117 			do_bsg_done = true;
3118 		} else {
3119 			/* fw had returned the bsg */
3120 			ql_dbg(ql_dbg_user, vha, 0x708a,
3121 				"bsg abort success. bsg %p sp=%p handle=%#x\n",
3122 				bsg_job, sp, sp->handle);
3123 			do_bsg_done = false;
3124 		}
3125 		break;
3126 	default:
3127 		ql_log(ql_log_info, vha, 0x704f,
3128 			"bsg abort fail.  bsg=%p sp=%p rval=%x.\n",
3129 			bsg_job, sp, rval);
3130 
3131 		do_bsg_done = true;
3132 		break;
3133 	}
3134 
3135 	if (!do_bsg_done)
3136 		return true;
3137 
3138 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3139 	/*
3140 	 * Recheck to make sure it's still the same bsg_job, because
3141 	 * qp_lock_ptr was released earlier.
3142 	 */
3143 	if (req->outstanding_cmds[cnt] &&
3144 	    req->outstanding_cmds[cnt]->u.bsg_job != bsg_job) {
3145 		/* fw had returned the bsg */
3146 		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3147 		return true;
3148 	}
3149 	req->outstanding_cmds[cnt] = NULL;
3150 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3151 
3152 	/* ref: INIT */
3153 	sp->comp = NULL;
3154 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
3155 	bsg_reply->result = -ENXIO;
3156 	bsg_reply->reply_payload_rcv_len = 0;
3157 
3158 	ql_dbg(ql_dbg_user, vha, 0x7051,
3159 	       "%s bsg_job_done : bsg %p result %#x sp %p.\n",
3160 	       __func__, bsg_job, bsg_reply->result, sp);
3161 
3162 	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
3163 
3164 	return true;
3165 }
3166 
3167 int
3168 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
3169 {
3170 	struct fc_bsg_request *bsg_request = bsg_job->request;
3171 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3172 	struct qla_hw_data *ha = vha->hw;
3173 	int i;
3174 	struct qla_qpair *qpair;
3175 
3176 	ql_log(ql_log_info, vha, 0x708b,
3177 	       "%s CMD timeout. bsg ptr %p msgcode %x vendor cmd %x\n",
3178 	       __func__, bsg_job, bsg_request->msgcode,
3179 	       bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
3180 
3181 	if (qla2x00_isp_reg_stat(ha)) {
3182 		ql_log(ql_log_info, vha, 0x9007,
3183 		    "PCI/Register disconnect.\n");
3184 		qla_pci_set_eeh_busy(vha);
3185 	}
3186 
3187 	if (qla_bsg_found(ha->base_qpair, bsg_job))
3188 		goto done;
3189 
3190 	/* find the bsg job from the active list of commands */
3191 	for (i = 0; i < ha->max_qpairs; i++) {
3192 		qpair = vha->hw->queue_pair_map[i];
3193 		if (!qpair)
3194 			continue;
3195 		if (qla_bsg_found(qpair, bsg_job))
3196 			goto done;
3197 	}
3198 
3199 	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
3200 
3201 done:
3202 	return 0;
3203 }
3204 
3205 int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
3206 {
3207 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3208 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3209 	int ret = -EINVAL;
3210 	int ptsize = sizeof(struct qla_mbx_passthru);
3211 	struct qla_mbx_passthru *req_data = NULL;
3212 	uint32_t req_data_len;
3213 
3214 	req_data_len = bsg_job->request_payload.payload_len;
3215 	if (req_data_len != ptsize) {
3216 		ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
3217 		return -EIO;
3218 	}
3219 	req_data = kzalloc(ptsize, GFP_KERNEL);
3220 	if (!req_data) {
3221 		ql_log(ql_log_warn, vha, 0xf0a4,
3222 		       "req_data memory allocation failure.\n");
3223 		return -ENOMEM;
3224 	}
3225 
3226 	/* Copy the request buffer into req_data */
3227 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
3228 			  bsg_job->request_payload.sg_cnt, req_data, ptsize);
3229 	ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);
3230 
3231 	/* Copy the req_data into the reply buffer */
3232 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
3233 			    bsg_job->reply_payload.sg_cnt, req_data, ptsize);
3234 
3235 	bsg_reply->reply_payload_rcv_len = ptsize;
3236 	if (ret == QLA_SUCCESS)
3237 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
3238 	else
3239 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;
3240 
3241 	bsg_job->reply_len = sizeof(*bsg_job->reply);
3242 	bsg_reply->result = DID_OK << 16;
3243 	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
3244 
3245 	kfree(req_data);
3246 
3247 	return ret;
3248 }
3249