// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static int qla28xx_validate_flash_image(struct bsg_job *bsg_job);
15
qla2xxx_free_fcport_work(struct work_struct * work)16 static void qla2xxx_free_fcport_work(struct work_struct *work)
17 {
18 struct fc_port *fcport = container_of(work, typeof(*fcport),
19 free_work);
20
21 qla2x00_free_fcport(fcport);
22 }
23
24 /* BSG support for ELS/CT pass through */
/*
 * Common completion callback for BSG (ELS/CT pass-through) SRBs.
 *
 * @sp:  the completed SRB carrying the bsg_job
 * @res: result code to report in the bsg reply
 *
 * Drops the SRB's INIT reference, records the result, completes the
 * bsg_job toward the transport layer, and wakes any waiter parked on
 * sp->comp.
 */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	/* Snapshot the waiter before the sp reference is dropped below. */
	struct completion *comp = sp->comp;

	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
	    __func__, sp->handle, res, bsg_job);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	if (comp)
		complete(comp);
}
45
/*
 * SRB free callback for BSG commands.
 *
 * Undoes whatever DMA setup was done at issue time — either sg-list
 * mappings or purex-pool bounce buffers — queues any temporary fcport
 * for deferred release, and returns the SRB to its pool.
 */
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		/* ISPFx00 pass-through: per-direction flags in the request
		 * record which payloads were actually DMA-mapped. */
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {

		if (sp->remap.remapped) {
			/* Payloads were bounced through the purex DMA pool;
			 * free both bounce buffers instead of unmapping. */
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	/* These SRB types carry a temporary fcport allocated for the
	 * command; free it from workqueue context.  NOTE(review):
	 * presumably deferred to avoid freeing from this (possibly
	 * atomic) completion context — confirm. */
	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}
91
/*
 * Validate an FCP priority configuration blob (read from flash or supplied
 * via a SET_CONFIG bsg request).
 *
 * @vha:     host context (logging only)
 * @pri_cfg: configuration buffer to validate
 * @flag:    1 = additionally require at least one valid priority entry;
 *           any other value = header check only
 *
 * Returns 1 if the data is usable, 0 otherwise.
 */
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	/* First 4 bytes double as both the "HQOS" magic and an
	 * erased-flash sentinel. */
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header*/
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	/* Count entries tagged valid.  NOTE(review): num_entries comes
	 * from flash/user data and is not bounded here against the
	 * buffer size — confirm callers guarantee it fits within
	 * FCP_PRIO_CFG_SIZE. */
	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}
145
/*
 * Handle the FCP priority configuration vendor command.
 *
 * Sub-command in vendor_cmd[1]: QLFC_FCP_PRIO_DISABLE/ENABLE toggle the
 * feature, GET_CONFIG copies the cached config to userspace, SET_CONFIG
 * validates and installs a new config.  Only 24xx/25xx/P3P adapters
 * support FCP priority.
 *
 * Returns 0 on success (bsg_job completed here), negative errno on failure.
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		/* Disabling when already disabled is an error. */
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		/* Enable only works if a config has been installed;
		 * enabling when already enabled is a silent no-op. */
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		/* Zero first so a payload shorter than FCP_PRIO_CFG_SIZE
		 * cannot leave stale bytes behind it. */
		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid then
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	/* On the error paths the job is left for the caller; on success
	 * complete it here. */
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}
273
/*
 * Handle an ELS pass-through request: FC_BSG_RPT_ELS (directed at an
 * existing rport) or FC_BSG_HST_ELS_NOLOGIN (host-based, no login).
 *
 * For rport-directed ELS the existing fcport must be online; for
 * host-based ELS a dummy fcport is allocated and populated from the
 * request (authentication ELS is diverted to the EDIF path).  Payloads
 * are DMA-mapped, an SRB is allocated and the command is started; on
 * success, completion and cleanup happen via qla2x00_bsg_job_done /
 * qla2x00_bsg_sp_free.
 *
 * Returns 0 on successful submission, negative errno otherwise.
 */
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint32_t els_cmd = 0;
	int qla_port_allocated = 0;	/* set when we own a dummy fcport */

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport) {
			rval = -ENOMEM;
			goto done;
		}
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		/* Authentication ELS frames take the EDIF path. */
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -ENOBUFS;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (atomic_read(&fcport->state) != FCS_ONLINE) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Port %06X is not online for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		qla_port_allocated = 1;
		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		/* NOTE(review): mapping failed, yet this unmaps with the
		 * original sg_cnt; dma_unmap_sg() after a failed map looks
		 * like a DMA API violation — confirm intent. */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		/* NOTE(review): same unmap-after-failed-map pattern as
		 * above — and the mapped request payload is not unmapped
		 * on this path; verify against mainline. */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* A partial mapping cannot be described to the firmware. */
	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	    "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	/* Success: sp->free/sp->done now own all cleanup. */
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	/* Only free the fcport if we allocated the dummy one above. */
	if (qla_port_allocated)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}
451
/*
 * Number of IOCBs needed for a CT command carrying @dsds data-segment
 * descriptors: the command IOCB holds up to two descriptors, and each
 * continuation IOCB holds up to five more.
 */
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t cont = 0;

	if (dsds > 2)
		cont = (dsds - 2 + 4) / 5;	/* ceil((dsds - 2) / 5) */

	return 1 + cont;
}
465
466 static int
qla2x00_process_ct(struct bsg_job * bsg_job)467 qla2x00_process_ct(struct bsg_job *bsg_job)
468 {
469 srb_t *sp;
470 struct fc_bsg_request *bsg_request = bsg_job->request;
471 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
472 scsi_qla_host_t *vha = shost_priv(host);
473 struct qla_hw_data *ha = vha->hw;
474 int rval = (DID_ERROR << 16);
475 int req_sg_cnt, rsp_sg_cnt;
476 uint16_t loop_id;
477 struct fc_port *fcport;
478 char *type = "FC_BSG_HST_CT";
479
480 req_sg_cnt =
481 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
482 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
483 if (!req_sg_cnt) {
484 ql_log(ql_log_warn, vha, 0x700f,
485 "dma_map_sg return %d for request\n", req_sg_cnt);
486 rval = -ENOMEM;
487 goto done;
488 }
489
490 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
491 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
492 if (!rsp_sg_cnt) {
493 ql_log(ql_log_warn, vha, 0x7010,
494 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
495 rval = -ENOMEM;
496 goto done;
497 }
498
499 if (!vha->flags.online) {
500 ql_log(ql_log_warn, vha, 0x7012,
501 "Host is not online.\n");
502 rval = -EIO;
503 goto done_unmap_sg;
504 }
505
506 loop_id =
507 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
508 >> 24;
509 switch (loop_id) {
510 case 0xFC:
511 loop_id = NPH_SNS;
512 break;
513 case 0xFA:
514 loop_id = vha->mgmt_svr_loop_id;
515 break;
516 default:
517 ql_dbg(ql_dbg_user, vha, 0x7013,
518 "Unknown loop id: %x.\n", loop_id);
519 rval = -EINVAL;
520 goto done_unmap_sg;
521 }
522
523 /* Allocate a dummy fcport structure, since functions preparing the
524 * IOCB and mailbox command retrieves port specific information
525 * from fcport structure. For Host based ELS commands there will be
526 * no fcport structure allocated
527 */
528 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
529 if (!fcport) {
530 ql_log(ql_log_warn, vha, 0x7014,
531 "Failed to allocate fcport.\n");
532 rval = -ENOMEM;
533 goto done_unmap_sg;
534 }
535
536 /* Initialize all required fields of fcport */
537 fcport->vha = vha;
538 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
539 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
540 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
541 fcport->loop_id = loop_id;
542
543 /* Alloc SRB structure */
544 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
545 if (!sp) {
546 ql_log(ql_log_warn, vha, 0x7015,
547 "qla2x00_get_sp failed.\n");
548 rval = -ENOMEM;
549 goto done_free_fcport;
550 }
551
552 sp->type = SRB_CT_CMD;
553 sp->name = "bsg_ct";
554 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
555 sp->u.bsg_job = bsg_job;
556 sp->free = qla2x00_bsg_sp_free;
557 sp->done = qla2x00_bsg_job_done;
558
559 ql_dbg(ql_dbg_user, vha, 0x7016,
560 "bsg rqst type: %s else type: %x - "
561 "loop-id=%x portid=%02x%02x%02x.\n", type,
562 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
563 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
564 fcport->d_id.b.al_pa);
565
566 rval = qla2x00_start_sp(sp);
567 if (rval != QLA_SUCCESS) {
568 ql_log(ql_log_warn, vha, 0x7017,
569 "qla2x00_start_sp failed=%d.\n", rval);
570 qla2x00_rel_sp(sp);
571 rval = -EIO;
572 goto done_free_fcport;
573 }
574 return rval;
575
576 done_free_fcport:
577 qla2x00_free_fcport(fcport);
578 done_unmap_sg:
579 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
580 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
581 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
582 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
583 done:
584 return rval;
585 }
586
587 /* Disable loopback mode */
/*
 * Restore the port configuration to non-loopback operation on
 * 81xx/8031/8044 CNA adapters.
 *
 * @config: current 4-word port configuration
 * @wait:   wait for the DCBX-complete event after reprogramming
 * @wait2:  additionally wait for the loopback port-up event
 *
 * Returns 0 on success (or when the adapter type needs no reset),
 * -EINVAL on mailbox failure or wait timeout.
 */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0 , sizeof(new_config));
	/* Only act if a loopback mode is currently programmed. */
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;

		/* Arm the async-event notifications before issuing the
		 * mailbox command so the completions cannot be missed. */
		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		/* Optionally also wait for the port to come back up. */
		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
654
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 *
 * @config:     current 4-word port configuration (input)
 * @new_config: receives the configuration that was programmed
 * @mode:       INTERNAL_LOOPBACK or EXTERNAL_LOOPBACK
 *
 * Waits for the DCBX-complete event, honoring firmware-requested timeout
 * extensions (ha->idc_extend_tmo).  If the event never arrives the change
 * is reverted; if even the revert fails the firmware is dumped and an ISP
 * abort is scheduled.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	/* Arm the DCBX notification before issuing the mailbox command. */
	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		/* Keep waiting only while firmware keeps extending the
		 * timeout and the completion has not fired. */
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
733
/*
 * Handle the loopback/echo diagnostic vendor command.
 *
 * The request payload is bounced through coherent DMA buffers and sent
 * either as an ECHO command (fabric topology, or a CNA receiving an ELS
 * frame in external loopback) or as a LOOPBACK test.  On CNA parts
 * (81xx/8031/8044) the port configuration is reprogrammed into loopback
 * mode first and reverted afterwards; a firmware error during the test
 * triggers an ISP abort (and MPI reset on 81xx).
 *
 * The mailbox response registers and the command opcode are appended to
 * the bsg reply after the fc_bsg_reply structure for userspace to inspect.
 *
 * Returns 0 when the job was completed (even with DID_ERROR inside the
 * reply), negative errno when setup failed.
 */
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	/* A partial mapping cannot be described to the firmware. */
	if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	/* Bounce buffers: firmware echo/loopback works on single
	 * contiguous DMA regions, not on the caller's sg lists. */
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
		&req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
		&rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	/* ECHO path: fabric topology with a non-loopback option, or a CNA
	 * asked to external-loopback a max-size ELS frame. */
	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	     ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	      get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	      req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	      elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			/* External loopback on 8031/8044 still needs the
			 * port reprogrammed; on 81xx only a reset. */
			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			/* A loopback-reset mailbox error leaves the chip in
			 * a bad state: force an ISP abort and wait it out. */
			if (response[0] == MBS_COMMAND_ERROR &&
					response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		/* Test failure is reported via bsg_reply->result, not as
		 * an errno, so the job still gets completed below. */
		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, rsp_data,
			rsp_data_len);
	}

	/* Append the raw mailbox registers plus the opcode that was sent
	 * after the fc_bsg_reply for userspace diagnostics. */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
			sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
		req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
976
977 static int
qla84xx_reset(struct bsg_job * bsg_job)978 qla84xx_reset(struct bsg_job *bsg_job)
979 {
980 struct fc_bsg_request *bsg_request = bsg_job->request;
981 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
982 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
983 scsi_qla_host_t *vha = shost_priv(host);
984 struct qla_hw_data *ha = vha->hw;
985 int rval = 0;
986 uint32_t flag;
987
988 if (!IS_QLA84XX(ha)) {
989 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
990 return -EINVAL;
991 }
992
993 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
994
995 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
996
997 if (rval) {
998 ql_log(ql_log_warn, vha, 0x7030,
999 "Vendor request 84xx reset failed.\n");
1000 rval = (DID_ERROR << 16);
1001
1002 } else {
1003 ql_dbg(ql_dbg_user, vha, 0x7031,
1004 "Vendor request 84xx reset completed.\n");
1005 bsg_reply->result = DID_OK;
1006 bsg_job_done(bsg_job, bsg_reply->result,
1007 bsg_reply->reply_payload_rcv_len);
1008 }
1009
1010 return rval;
1011 }
1012
/*
 * BSG vendor command: download new firmware to an ISP84xx.
 *
 * The firmware image arrives in the request payload; it is copied into a
 * coherent DMA buffer, a VERIFY_CHIP IOCB is built in an s_dma_pool slab
 * and issued with a 120-second timeout.  Flag A84_ISSUE_UPDATE_DIAGFW_CMD
 * selects the diagnostic-firmware option.
 *
 * Returns 0 on success (bsg_job completed), DID_ERROR<<16 on IOCB failure,
 * or negative errno on setup failure.
 */
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	/* Firmware version is the third little-endian dword of the image. */
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	/* Build the VERIFY_CHIP IOCB describing the firmware image. */
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1122
/*
 * qla84xx_mgmt_cmd() - BSG vendor command: ISP84xx management access
 * (memory read/write, info query, config change) via an ACCESS_CHIP
 * IOCB.
 *
 * The management sub-command selects the DMA direction: READ_MEM and
 * GET_INFO map the reply payload FROM the device, WRITE_MEM maps the
 * request payload TO the device, and CHNG_CONFIG needs no data buffer.
 * dma_direction records which mapping (if any) must be undone on the
 * common exit path.  On success the job is completed here; otherwise
 * -errno or (DID_ERROR << 16) is returned.
 */
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	/* Vendor-specific request data follows the fc_bsg_request header. */
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Device-to-host: data lands in the reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Host-to-device: data comes from the request payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Pure register/config change: no data buffer or DMA. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* All data-carrying sub-commands use a single DSD. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		/* Copy device data out to the reply payload on reads. */
		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Undo whichever payload mapping was established above. */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			bsg_reply->reply_payload_rcv_len);
	return rval;
}
1318
1319 static int
qla24xx_iidma(struct bsg_job * bsg_job)1320 qla24xx_iidma(struct bsg_job *bsg_job)
1321 {
1322 struct fc_bsg_request *bsg_request = bsg_job->request;
1323 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1324 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1325 scsi_qla_host_t *vha = shost_priv(host);
1326 int rval = 0;
1327 struct qla_port_param *port_param = NULL;
1328 fc_port_t *fcport = NULL;
1329 int found = 0;
1330 uint16_t mb[MAILBOX_REGISTER_COUNT];
1331 uint8_t *rsp_ptr = NULL;
1332
1333 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1334 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1335 return -EINVAL;
1336 }
1337
1338 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1339 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1340 ql_log(ql_log_warn, vha, 0x7048,
1341 "Invalid destination type.\n");
1342 return -EINVAL;
1343 }
1344
1345 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1346 if (fcport->port_type != FCT_TARGET)
1347 continue;
1348
1349 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1350 fcport->port_name, sizeof(fcport->port_name)))
1351 continue;
1352
1353 found = 1;
1354 break;
1355 }
1356
1357 if (!found) {
1358 ql_log(ql_log_warn, vha, 0x7049,
1359 "Failed to find port.\n");
1360 return -EINVAL;
1361 }
1362
1363 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1364 ql_log(ql_log_warn, vha, 0x704a,
1365 "Port is not online.\n");
1366 return -EINVAL;
1367 }
1368
1369 if (fcport->flags & FCF_LOGIN_NEEDED) {
1370 ql_log(ql_log_warn, vha, 0x704b,
1371 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1372 return -EINVAL;
1373 }
1374
1375 if (port_param->mode)
1376 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1377 port_param->speed, mb);
1378 else
1379 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1380 &port_param->speed, mb);
1381
1382 if (rval) {
1383 ql_log(ql_log_warn, vha, 0x704c,
1384 "iiDMA cmd failed for %8phN -- "
1385 "%04x %x %04x %04x.\n", fcport->port_name,
1386 rval, fcport->fp_speed, mb[0], mb[1]);
1387 rval = (DID_ERROR << 16);
1388 } else {
1389 if (!port_param->mode) {
1390 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1391 sizeof(struct qla_port_param);
1392
1393 rsp_ptr = ((uint8_t *)bsg_reply) +
1394 sizeof(struct fc_bsg_reply);
1395
1396 memcpy(rsp_ptr, port_param,
1397 sizeof(struct qla_port_param));
1398 }
1399
1400 bsg_reply->result = DID_OK;
1401 bsg_job_done(bsg_job, bsg_reply->result,
1402 bsg_reply->reply_payload_rcv_len);
1403 }
1404
1405 return rval;
1406 }
1407
/*
 * qla2x00_optrom_setup() - validate an option-ROM read/update request
 * and prepare the staging state.
 *
 * @bsg_job:  the bsg request carrying the region start address in
 *            vendor_cmd[1] and the payload length.
 * @vha:      host the operation targets.
 * @is_update: non-zero for a flash update (write), zero for a read.
 *
 * On success ha->optrom_region_start/size, ha->optrom_state
 * (QLA_SWRITING or QLA_SREADING) and ha->optrom_buffer are set up and
 * 0 is returned; callers must reset the state and free the buffer when
 * done.  Caller is expected to hold ha->optrom_mutex.
 */
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	/*
	 * NOTE(review): this rejects start > optrom_size only, so
	 * start == optrom_size passes and yields a zero-length region
	 * below — confirm that is intended.
	 */
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	/* Only one option-ROM access may be staged at a time. */
	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		/*
		 * Updates are restricted to known-safe regions (boot
		 * block / firmware) except on ISP types where any
		 * offset is allowed.
		 */
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		/* Clamp the region so it never runs past the flash end. */
		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}
1477
1478 static int
qla2x00_read_optrom(struct bsg_job * bsg_job)1479 qla2x00_read_optrom(struct bsg_job *bsg_job)
1480 {
1481 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1482 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1483 scsi_qla_host_t *vha = shost_priv(host);
1484 struct qla_hw_data *ha = vha->hw;
1485 int rval = 0;
1486
1487 if (ha->flags.nic_core_reset_hdlr_active)
1488 return -EBUSY;
1489
1490 mutex_lock(&ha->optrom_mutex);
1491 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1492 if (rval) {
1493 mutex_unlock(&ha->optrom_mutex);
1494 return rval;
1495 }
1496
1497 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1498 ha->optrom_region_start, ha->optrom_region_size);
1499
1500 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1501 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1502 ha->optrom_region_size);
1503
1504 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1505 bsg_reply->result = DID_OK;
1506 vfree(ha->optrom_buffer);
1507 ha->optrom_buffer = NULL;
1508 ha->optrom_state = QLA_SWAITING;
1509 mutex_unlock(&ha->optrom_mutex);
1510 bsg_job_done(bsg_job, bsg_reply->result,
1511 bsg_reply->reply_payload_rcv_len);
1512 return rval;
1513 }
1514
/*
 * qla2x00_update_optrom() - BSG vendor command: write the request
 * payload into a region of the adapter option ROM.
 *
 * Serialised via ha->optrom_mutex.  On success the job is completed
 * here with DID_OK; on a write failure -EINVAL is returned and the bsg
 * layer completes the job.
 */
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	/* Stage user data, then burn it into flash. */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	/* Release staging state regardless of outcome. */
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			bsg_reply->reply_payload_rcv_len);
	return rval;
}
1556
1557 static int
qla2x00_update_fru_versions(struct bsg_job * bsg_job)1558 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1559 {
1560 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1561 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1562 scsi_qla_host_t *vha = shost_priv(host);
1563 struct qla_hw_data *ha = vha->hw;
1564 int rval = 0;
1565 uint8_t bsg[DMA_POOL_SIZE];
1566 struct qla_image_version_list *list = (void *)bsg;
1567 struct qla_image_version *image;
1568 uint32_t count;
1569 dma_addr_t sfp_dma;
1570 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1571
1572 if (!sfp) {
1573 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1574 EXT_STATUS_NO_MEMORY;
1575 goto done;
1576 }
1577
1578 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1579 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1580
1581 image = list->version;
1582 count = list->count;
1583 while (count--) {
1584 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1585 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1586 image->field_address.device, image->field_address.offset,
1587 sizeof(image->field_info), image->field_address.option);
1588 if (rval) {
1589 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1590 EXT_STATUS_MAILBOX;
1591 goto dealloc;
1592 }
1593 image++;
1594 }
1595
1596 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1597
1598 dealloc:
1599 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1600
1601 done:
1602 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1603 bsg_reply->result = DID_OK << 16;
1604 bsg_job_done(bsg_job, bsg_reply->result,
1605 bsg_reply->reply_payload_rcv_len);
1606
1607 return 0;
1608 }
1609
1610 static int
qla2x00_read_fru_status(struct bsg_job * bsg_job)1611 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1612 {
1613 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1614 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1615 scsi_qla_host_t *vha = shost_priv(host);
1616 struct qla_hw_data *ha = vha->hw;
1617 int rval = 0;
1618 uint8_t bsg[DMA_POOL_SIZE];
1619 struct qla_status_reg *sr = (void *)bsg;
1620 dma_addr_t sfp_dma;
1621 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1622
1623 if (!sfp) {
1624 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1625 EXT_STATUS_NO_MEMORY;
1626 goto done;
1627 }
1628
1629 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1630 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1631
1632 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1633 sr->field_address.device, sr->field_address.offset,
1634 sizeof(sr->status_reg), sr->field_address.option);
1635 sr->status_reg = *sfp;
1636
1637 if (rval) {
1638 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1639 EXT_STATUS_MAILBOX;
1640 goto dealloc;
1641 }
1642
1643 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1644 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1645
1646 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1647
1648 dealloc:
1649 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1650
1651 done:
1652 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1653 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1654 bsg_reply->result = DID_OK << 16;
1655 bsg_job_done(bsg_job, bsg_reply->result,
1656 bsg_reply->reply_payload_rcv_len);
1657
1658 return 0;
1659 }
1660
1661 static int
qla2x00_write_fru_status(struct bsg_job * bsg_job)1662 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1663 {
1664 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1665 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1666 scsi_qla_host_t *vha = shost_priv(host);
1667 struct qla_hw_data *ha = vha->hw;
1668 int rval = 0;
1669 uint8_t bsg[DMA_POOL_SIZE];
1670 struct qla_status_reg *sr = (void *)bsg;
1671 dma_addr_t sfp_dma;
1672 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1673
1674 if (!sfp) {
1675 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1676 EXT_STATUS_NO_MEMORY;
1677 goto done;
1678 }
1679
1680 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1681 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1682
1683 *sfp = sr->status_reg;
1684 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1685 sr->field_address.device, sr->field_address.offset,
1686 sizeof(sr->status_reg), sr->field_address.option);
1687
1688 if (rval) {
1689 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1690 EXT_STATUS_MAILBOX;
1691 goto dealloc;
1692 }
1693
1694 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1695
1696 dealloc:
1697 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1698
1699 done:
1700 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1701 bsg_reply->result = DID_OK << 16;
1702 bsg_job_done(bsg_job, bsg_reply->result,
1703 bsg_reply->reply_payload_rcv_len);
1704
1705 return 0;
1706 }
1707
1708 static int
qla2x00_write_i2c(struct bsg_job * bsg_job)1709 qla2x00_write_i2c(struct bsg_job *bsg_job)
1710 {
1711 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1712 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1713 scsi_qla_host_t *vha = shost_priv(host);
1714 struct qla_hw_data *ha = vha->hw;
1715 int rval = 0;
1716 uint8_t bsg[DMA_POOL_SIZE];
1717 struct qla_i2c_access *i2c = (void *)bsg;
1718 dma_addr_t sfp_dma;
1719 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1720
1721 if (!sfp) {
1722 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1723 EXT_STATUS_NO_MEMORY;
1724 goto done;
1725 }
1726
1727 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1728 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1729
1730 memcpy(sfp, i2c->buffer, i2c->length);
1731 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1732 i2c->device, i2c->offset, i2c->length, i2c->option);
1733
1734 if (rval) {
1735 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1736 EXT_STATUS_MAILBOX;
1737 goto dealloc;
1738 }
1739
1740 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1741
1742 dealloc:
1743 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1744
1745 done:
1746 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1747 bsg_reply->result = DID_OK << 16;
1748 bsg_job_done(bsg_job, bsg_reply->result,
1749 bsg_reply->reply_payload_rcv_len);
1750
1751 return 0;
1752 }
1753
1754 static int
qla2x00_read_i2c(struct bsg_job * bsg_job)1755 qla2x00_read_i2c(struct bsg_job *bsg_job)
1756 {
1757 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1758 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1759 scsi_qla_host_t *vha = shost_priv(host);
1760 struct qla_hw_data *ha = vha->hw;
1761 int rval = 0;
1762 uint8_t bsg[DMA_POOL_SIZE];
1763 struct qla_i2c_access *i2c = (void *)bsg;
1764 dma_addr_t sfp_dma;
1765 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1766
1767 if (!sfp) {
1768 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1769 EXT_STATUS_NO_MEMORY;
1770 goto done;
1771 }
1772
1773 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1774 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1775
1776 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1777 i2c->device, i2c->offset, i2c->length, i2c->option);
1778
1779 if (rval) {
1780 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1781 EXT_STATUS_MAILBOX;
1782 goto dealloc;
1783 }
1784
1785 memcpy(i2c->buffer, sfp, i2c->length);
1786 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1787 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1788
1789 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1790
1791 dealloc:
1792 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1793
1794 done:
1795 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1796 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1797 bsg_reply->result = DID_OK << 16;
1798 bsg_job_done(bsg_job, bsg_reply->result,
1799 bsg_reply->reply_payload_rcv_len);
1800
1801 return 0;
1802 }
1803
/*
 * qla24xx_process_bidir_cmd() - BSG vendor command: issue a
 * bidirectional IOCB to the fabric using a self-login port.
 *
 * Requires a bidi-capable adapter in P2P mode connected to a switch,
 * with equal request and reply payload lengths.  A self-login fcport is
 * established on first use and cached in vha->self_login_loop_id.  If
 * the IOCB is started successfully the job completes from the
 * interrupt handler; on any earlier failure the job is completed here
 * with DID_OK and the EXT_STATUS_* code in the vendor response word.
 */
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
			"This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	/* Refuse while an ISP abort/reset is pending or in progress. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval =  EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
			"Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
			"Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
			"Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
			"Host operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Perform the fabric self-login once and cache the loop id. */
	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required  fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	/* Map both payloads; write direction first, then read. */
	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	/* Bidirectional transfers must move equal amounts each way. */
	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
1981
/*
 * qlafx00_mgmt_cmd() - BSG vendor command: ISPFX00 host management
 * pass-through.
 *
 * The vendor command embeds a qla_mt_iocb_rqst_fx00 whose flags say
 * whether the request and/or reply payloads carry DMA data; only the
 * flagged payloads are mapped (and later unmapped by
 * qla2x00_bsg_sp_free()).  A dummy fcport is allocated because the IOCB
 * build path reads port data from one.  On successful submission the
 * job completes asynchronously via qla2x00_bsg_job_done().
 */
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char  *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	/* Map the request payload only if the IOCB flags ask for it. */
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	/* Likewise for the reply payload. */
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required  fields of fcport */
	fcport->vha = vha;
	/* Loop id is supplied by the caller in the IOCB data word. */
	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		/*
		 * NOTE(review): sp is released with mempool_free() rather
		 * than sp->free()/kref_put() — confirm no sp/fcport
		 * reference is leaked on this failure path.
		 */
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
2104
2105 static int
qla26xx_serdes_op(struct bsg_job * bsg_job)2106 qla26xx_serdes_op(struct bsg_job *bsg_job)
2107 {
2108 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2109 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2110 scsi_qla_host_t *vha = shost_priv(host);
2111 int rval = 0;
2112 struct qla_serdes_reg sr;
2113
2114 memset(&sr, 0, sizeof(sr));
2115
2116 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2117 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2118
2119 switch (sr.cmd) {
2120 case INT_SC_SERDES_WRITE_REG:
2121 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2122 bsg_reply->reply_payload_rcv_len = 0;
2123 break;
2124 case INT_SC_SERDES_READ_REG:
2125 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2126 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2127 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2128 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2129 break;
2130 default:
2131 ql_dbg(ql_dbg_user, vha, 0x708c,
2132 "Unknown serdes cmd %x.\n", sr.cmd);
2133 rval = -EINVAL;
2134 break;
2135 }
2136
2137 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2138 rval ? EXT_STATUS_MAILBOX : 0;
2139
2140 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2141 bsg_reply->result = DID_OK << 16;
2142 bsg_job_done(bsg_job, bsg_reply->result,
2143 bsg_reply->reply_payload_rcv_len);
2144 return 0;
2145 }
2146
2147 static int
qla8044_serdes_op(struct bsg_job * bsg_job)2148 qla8044_serdes_op(struct bsg_job *bsg_job)
2149 {
2150 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2151 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2152 scsi_qla_host_t *vha = shost_priv(host);
2153 int rval = 0;
2154 struct qla_serdes_reg_ex sr;
2155
2156 memset(&sr, 0, sizeof(sr));
2157
2158 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2159 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2160
2161 switch (sr.cmd) {
2162 case INT_SC_SERDES_WRITE_REG:
2163 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2164 bsg_reply->reply_payload_rcv_len = 0;
2165 break;
2166 case INT_SC_SERDES_READ_REG:
2167 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2168 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2169 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2170 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2171 break;
2172 default:
2173 ql_dbg(ql_dbg_user, vha, 0x7020,
2174 "Unknown serdes cmd %x.\n", sr.cmd);
2175 rval = -EINVAL;
2176 break;
2177 }
2178
2179 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2180 rval ? EXT_STATUS_MAILBOX : 0;
2181
2182 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2183 bsg_reply->result = DID_OK << 16;
2184 bsg_job_done(bsg_job, bsg_reply->result,
2185 bsg_reply->reply_payload_rcv_len);
2186 return 0;
2187 }
2188
2189 static int
qla27xx_get_flash_upd_cap(struct bsg_job * bsg_job)2190 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2191 {
2192 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2193 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2194 scsi_qla_host_t *vha = shost_priv(host);
2195 struct qla_hw_data *ha = vha->hw;
2196 struct qla_flash_update_caps cap;
2197
2198 if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
2199 return -EPERM;
2200
2201 memset(&cap, 0, sizeof(cap));
2202 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2203 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2204 (uint64_t)ha->fw_attributes_h << 16 |
2205 (uint64_t)ha->fw_attributes;
2206
2207 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2208 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2209 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2210
2211 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2212 EXT_STATUS_OK;
2213
2214 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2215 bsg_reply->result = DID_OK << 16;
2216 bsg_job_done(bsg_job, bsg_reply->result,
2217 bsg_reply->reply_payload_rcv_len);
2218 return 0;
2219 }
2220
2221 static int
qla27xx_set_flash_upd_cap(struct bsg_job * bsg_job)2222 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2223 {
2224 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2225 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2226 scsi_qla_host_t *vha = shost_priv(host);
2227 struct qla_hw_data *ha = vha->hw;
2228 uint64_t online_fw_attr = 0;
2229 struct qla_flash_update_caps cap;
2230
2231 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2232 return -EPERM;
2233
2234 memset(&cap, 0, sizeof(cap));
2235 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2236 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2237
2238 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2239 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2240 (uint64_t)ha->fw_attributes_h << 16 |
2241 (uint64_t)ha->fw_attributes;
2242
2243 if (online_fw_attr != cap.capabilities) {
2244 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2245 EXT_STATUS_INVALID_PARAM;
2246 return -EINVAL;
2247 }
2248
2249 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2250 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2251 EXT_STATUS_INVALID_PARAM;
2252 return -EINVAL;
2253 }
2254
2255 bsg_reply->reply_payload_rcv_len = 0;
2256
2257 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2258 EXT_STATUS_OK;
2259
2260 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2261 bsg_reply->result = DID_OK << 16;
2262 bsg_job_done(bsg_job, bsg_reply->result,
2263 bsg_reply->reply_payload_rcv_len);
2264 return 0;
2265 }
2266
/*
 * QL_VND_GET_BBCR_DATA handler: report Buffer-to-Buffer Credit Recovery
 * (BBCR) status and state for this port into the bsg reply payload.
 * Returns 0 after completing the bsg job, or -EPERM on unsupported ISPs.
 */
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	/* BBCR is only supported on ISP27xx/ISP28xx. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		/* Refresh the adapter id via mailbox; on failure report
		 * UNKNOWN/OFFLINE instead of partial data. */
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
			&area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			/* NOTE(review): assumes qla2x00_get_adapter_id wrote
			 * loop_id (mailbox status word) even on failure —
			 * verify, otherwise this reads an uninitialized
			 * stack variable. */
			bbcr.mbx1 = loop_id;
			goto done;
		}

		/* Per the shifts below, vha->bbcr layout: bit 12 = offline
		 * flag, bits 11:8 = negotiated BB_SC_N, bits 3:0 =
		 * configured BB_SC_N. */
		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
2325
/*
 * QL_VND_GET_PRIV_STATS[_EX] handler: fetch ISP link statistics into a
 * coherent DMA buffer and copy them into the bsg reply payload.
 * Returns 0 after completing the bsg job, or a negative errno when the
 * adapter state prevents collecting stats.
 */
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	/* Only the _EX variant carries mailbox options (in vendor_cmd[1]). */
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	/* Refuse while unloading, PCI-offlined, resetting, or pre-FWI2. */
	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	/* Stats are gathered on the base port, not the (possibly virtual)
	 * port this job arrived on. */
	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
			stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	/* NOTE(review): rcv_len is reported as sizeof(*stats) even when the
	 * mailbox failed and nothing was copied; the vendor_rsp status below
	 * is what signals failure to the caller. */
	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
2384
2385 static int
qla2x00_do_dport_diagnostics(struct bsg_job * bsg_job)2386 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2387 {
2388 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2389 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2390 scsi_qla_host_t *vha = shost_priv(host);
2391 int rval;
2392 struct qla_dport_diag *dd;
2393
2394 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2395 !IS_QLA28XX(vha->hw))
2396 return -EPERM;
2397
2398 dd = kmalloc_obj(*dd);
2399 if (!dd) {
2400 ql_log(ql_log_warn, vha, 0x70db,
2401 "Failed to allocate memory for dport.\n");
2402 return -ENOMEM;
2403 }
2404
2405 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2406 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2407
2408 rval = qla26xx_dport_diagnostics(
2409 vha, dd->buf, sizeof(dd->buf), dd->options);
2410 if (rval == QLA_SUCCESS) {
2411 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2412 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2413 }
2414
2415 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2416 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2417 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2418
2419 bsg_job->reply_len = sizeof(*bsg_reply);
2420 bsg_reply->result = DID_OK << 16;
2421 bsg_job_done(bsg_job, bsg_reply->result,
2422 bsg_reply->reply_payload_rcv_len);
2423
2424 kfree(dd);
2425
2426 return 0;
2427 }
2428
/*
 * QL_VND_DPORT_DIAGNOSTICS_V2 handler: start/restart a D-Port diagnostic
 * test or collect its result, tracking progress via vha->dport_status
 * flags. Always completes the bsg job with DID_OK; the per-operation
 * outcome is reported in vendor_rsp[0]. Returns 0 after completion,
 * -EPERM on non-D-Port-capable hardware, -ENOMEM on allocation failure.
 */
static int
qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag_v2 *dd;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint16_t options;

	if (!IS_DPORT_CAPABLE(vha->hw))
		return -EPERM;

	dd = kzalloc_obj(*dd);
	if (!dd)
		return -ENOMEM;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	options = dd->options;

	/* Check dport Test in progress */
	if (options == QLA_GET_DPORT_RESULT_V2 &&
	    vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_DPORT_DIAG_IN_PROCESS;
		goto dportcomplete;
	}

	/* Check chip reset in progress and start/restart requests arrive */
	if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
	    (options == QLA_START_DPORT_TEST_V2 ||
	     options == QLA_RESTART_DPORT_TEST_V2)) {
		/* A (re)start supersedes a pending chip reset; clear it. */
		vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
	}

	/* Check chip reset in progress and get result request arrive */
	if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
	    options == QLA_GET_DPORT_RESULT_V2) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
		goto dportcomplete;
	}

	rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);

	if (rval == QLA_SUCCESS) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_OK;
		if (options == QLA_START_DPORT_TEST_V2 ||
		    options == QLA_RESTART_DPORT_TEST_V2) {
			/* Return the start-status mailbox words and mark the
			 * test as running. */
			dd->mbx1 = mcp->mb[0];
			dd->mbx2 = mcp->mb[1];
			vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
		} else if (options == QLA_GET_DPORT_RESULT_V2) {
			/* Result words were captured asynchronously into
			 * vha->dport_data (little-endian) by the completion
			 * path. */
			dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
			dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
		}
	} else {
		/* NOTE(review): assumes the mailbox layer filled mcp->mb[]
		 * even on failure — verify qla26xx_dport_diagnostics_v2. */
		dd->mbx1 = mcp->mb[0];
		dd->mbx2 = mcp->mb[1];
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_DPORT_DIAG_ERR;
	}

dportcomplete:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}
2511
2512 static int
qla2x00_get_flash_image_status(struct bsg_job * bsg_job)2513 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2514 {
2515 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2516 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2517 struct qla_hw_data *ha = vha->hw;
2518 struct qla_active_regions regions = { };
2519 struct active_regions active_regions = { };
2520
2521 qla27xx_get_active_image(vha, &active_regions);
2522 regions.global_image = active_regions.global;
2523
2524 if (IS_QLA27XX(ha))
2525 regions.nvme_params = QLA27XX_PRIMARY_IMAGE;
2526
2527 if (IS_QLA28XX(ha)) {
2528 qla28xx_get_aux_images(vha, &active_regions);
2529 regions.board_config = active_regions.aux.board_config;
2530 regions.vpd_nvram = active_regions.aux.vpd_nvram;
2531 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2532 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2533 regions.nvme_params = active_regions.aux.nvme_params;
2534 }
2535
2536 ql_dbg(ql_dbg_user, vha, 0x70e1,
2537 "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
2538 __func__, vha->host_no, regions.global_image,
2539 regions.board_config, regions.vpd_nvram,
2540 regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);
2541
2542 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2543 bsg_job->reply_payload.sg_cnt, ®ions, sizeof(regions));
2544
2545 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2546 bsg_reply->reply_payload_rcv_len = sizeof(regions);
2547 bsg_reply->result = DID_OK << 16;
2548 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2549 bsg_job_done(bsg_job, bsg_reply->result,
2550 bsg_reply->reply_payload_rcv_len);
2551
2552 return 0;
2553 }
2554
2555 static int
qla2x00_get_drv_attr(struct bsg_job * bsg_job)2556 qla2x00_get_drv_attr(struct bsg_job *bsg_job)
2557 {
2558 struct qla_drv_attr drv_attr;
2559 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2560
2561 memset(&drv_attr, 0, sizeof(struct qla_drv_attr));
2562 drv_attr.ext_attributes |= QLA_IMG_SET_VALID_SUPPORT;
2563
2564
2565 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2566 bsg_job->reply_payload.sg_cnt, &drv_attr,
2567 sizeof(struct qla_drv_attr));
2568
2569 bsg_reply->reply_payload_rcv_len = sizeof(struct qla_drv_attr);
2570 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2571
2572 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2573 bsg_reply->result = DID_OK << 16;
2574 bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
2575
2576 return 0;
2577 }
2578
2579 static int
qla2x00_manage_host_stats(struct bsg_job * bsg_job)2580 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
2581 {
2582 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2583 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2584 struct ql_vnd_mng_host_stats_param *req_data;
2585 struct ql_vnd_mng_host_stats_resp rsp_data;
2586 u32 req_data_len;
2587 int ret = 0;
2588
2589 if (!vha->flags.online) {
2590 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2591 return -EIO;
2592 }
2593
2594 req_data_len = bsg_job->request_payload.payload_len;
2595
2596 if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
2597 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2598 return -EIO;
2599 }
2600
2601 req_data = kzalloc_obj(*req_data);
2602 if (!req_data) {
2603 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2604 return -ENOMEM;
2605 }
2606
2607 /* Copy the request buffer in req_data */
2608 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2609 bsg_job->request_payload.sg_cnt, req_data,
2610 req_data_len);
2611
2612 switch (req_data->action) {
2613 case QLA_STOP:
2614 ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
2615 break;
2616 case QLA_START:
2617 ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
2618 break;
2619 case QLA_CLEAR:
2620 ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
2621 break;
2622 default:
2623 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2624 ret = -EIO;
2625 break;
2626 }
2627
2628 kfree(req_data);
2629
2630 /* Prepare response */
2631 rsp_data.status = ret;
2632 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2633
2634 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2635 bsg_reply->reply_payload_rcv_len =
2636 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2637 bsg_job->reply_payload.sg_cnt,
2638 &rsp_data,
2639 sizeof(struct ql_vnd_mng_host_stats_resp));
2640
2641 bsg_reply->result = DID_OK;
2642 if (!ret)
2643 bsg_job_done(bsg_job, bsg_reply->result,
2644 bsg_reply->reply_payload_rcv_len);
2645
2646 return ret;
2647 }
2648
/*
 * QL_VND_GET_HOST_STATS handler: gather initiator-side statistics (and,
 * when QLA2XX_TGT_SHT_LNK_DOWN is requested, per-target link-down
 * entries) into a variable-length response sized from the requested
 * stat-type bitmask. Completes the bsg job unless the collection call
 * itself failed; returns 0 or a negative errno.
 */
static int
qla2x00_get_host_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_stats_param *req_data;
	struct ql_vnd_host_stats_resp rsp_data;
	u32 req_data_len;
	int ret = 0;
	u64 ini_entry_count = 0;
	u64 entry_count = 0;
	u64 tgt_num = 0;
	u64 tmp_stat_type = 0;
	u64 response_len = 0;
	void *data;

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc_obj(*req_data);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	/* Copy stat type to work on it */
	tmp_stat_type = req_data->stat_type;

	if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
		/* Num of tgts connected to this host */
		tgt_num = qla2x00_get_num_tgts(vha);
		/* unset BIT_17 */
		tmp_stat_type &= ~(1 << 17);
	}

	/* Total ini stats */
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	/* Total number of entries */
	entry_count = ini_entry_count + tgt_num;

	/* One stat entry per requested bit plus one per target. */
	response_len = sizeof(struct ql_vnd_host_stats_resp) +
	    (sizeof(struct ql_vnd_stat_entry) * entry_count);

	/* Reply buffer too small: return only a status block telling the
	 * caller to retry with a larger buffer. */
	if (response_len > bsg_job->reply_payload.payload_len) {
		rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, &rsp_data,
			sizeof(struct ql_vnd_mng_host_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
		goto host_stat_out;
	}

	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto host_stat_out;
	}

	ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
	    data, response_len);

	rsp_data.status = EXT_STATUS_OK;
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	/* NOTE(review): the payload is copied back even when
	 * qla2xxx_get_ini_stats failed; only bsg_job_done is skipped. */
	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt,
		data, response_len);
	bsg_reply->result = DID_OK;
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);

	kfree(data);
host_stat_out:
	kfree(req_data);
	return ret;
}
2742
2743 static struct fc_rport *
qla2xxx_find_rport(scsi_qla_host_t * vha,uint32_t tgt_num)2744 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
2745 {
2746 fc_port_t *fcport = NULL;
2747
2748 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2749 if (fcport->rport->number == tgt_num)
2750 return fcport->rport;
2751 }
2752 return NULL;
2753 }
2754
/*
 * QL_VND_GET_TGT_STATS handler: collect statistics for one remote target
 * (identified by its FC transport rport number) into the bsg reply
 * payload. Completes the bsg job on success paths; returns 0, a negative
 * errno, or a positive EXT_STATUS_* value when the target is not found.
 */
static int
qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_tgt_stats_param *req_data;
	u32 req_data_len;
	int ret = 0;
	u64 response_len = 0;
	struct ql_vnd_tgt_stats_resp *data = NULL;
	struct fc_rport *rport = NULL;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
		return -EIO;
	}

	req_data_len = bsg_job->request_payload.payload_len;

	/* NOTE(review): the length is validated against ql_vnd_stat_entry
	 * while the buffer is parsed as ql_vnd_tgt_stats_param — confirm
	 * the two structs are the same size by design. */
	if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc_obj(*req_data);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    req_data, req_data_len);

	response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
	    sizeof(struct ql_vnd_stat_entry);

	/* structure + size for one entry */
	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		kfree(req_data);
		return -ENOMEM;
	}

	/* Reply buffer too small: return only a status block telling the
	 * caller to retry with a larger buffer. */
	if (response_len > bsg_job->reply_payload.payload_len) {
		data->status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, data,
			sizeof(struct ql_vnd_tgt_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
		goto tgt_stat_out;
	}

	rport = qla2xxx_find_rport(vha, req_data->tgt_id);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
		/* NOTE(review): ret is a positive EXT_STATUS here, so
		 * bsg_job_done below is skipped and the (positive) value is
		 * returned to the bsg layer — verify callers expect this. */
		ret = EXT_STATUS_INVALID_PARAM;
		data->status = EXT_STATUS_INVALID_PARAM;
		goto reply;
	}

	ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
	    rport, (void *)data, response_len);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
reply:
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, data,
		response_len);
	bsg_reply->result = DID_OK;
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);

tgt_stat_out:
	kfree(data);
	kfree(req_data);

	return ret;
}
2844
2845 static int
qla2x00_manage_host_port(struct bsg_job * bsg_job)2846 qla2x00_manage_host_port(struct bsg_job *bsg_job)
2847 {
2848 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2849 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2850 struct ql_vnd_mng_host_port_param *req_data;
2851 struct ql_vnd_mng_host_port_resp rsp_data;
2852 u32 req_data_len;
2853 int ret = 0;
2854
2855 req_data_len = bsg_job->request_payload.payload_len;
2856
2857 if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
2858 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2859 return -EIO;
2860 }
2861
2862 req_data = kzalloc_obj(*req_data);
2863 if (!req_data) {
2864 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2865 return -ENOMEM;
2866 }
2867
2868 /* Copy the request buffer in req_data */
2869 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2870 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2871
2872 switch (req_data->action) {
2873 case QLA_ENABLE:
2874 ret = qla2xxx_enable_port(vha->host);
2875 break;
2876 case QLA_DISABLE:
2877 ret = qla2xxx_disable_port(vha->host);
2878 break;
2879 default:
2880 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2881 ret = -EIO;
2882 break;
2883 }
2884
2885 kfree(req_data);
2886
2887 /* Prepare response */
2888 rsp_data.status = ret;
2889 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2890 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
2891
2892 bsg_reply->reply_payload_rcv_len =
2893 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2894 bsg_job->reply_payload.sg_cnt, &rsp_data,
2895 sizeof(struct ql_vnd_mng_host_port_resp));
2896 bsg_reply->result = DID_OK;
2897 if (!ret)
2898 bsg_job_done(bsg_job, bsg_reply->result,
2899 bsg_reply->reply_payload_rcv_len);
2900
2901 return ret;
2902 }
2903
2904 static int
qla2x00_process_vendor_specific(struct scsi_qla_host * vha,struct bsg_job * bsg_job)2905 qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
2906 {
2907 struct fc_bsg_request *bsg_request = bsg_job->request;
2908
2909 ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
2910 __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
2911
2912 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2913 case QL_VND_LOOPBACK:
2914 return qla2x00_process_loopback(bsg_job);
2915
2916 case QL_VND_A84_RESET:
2917 return qla84xx_reset(bsg_job);
2918
2919 case QL_VND_A84_UPDATE_FW:
2920 return qla84xx_updatefw(bsg_job);
2921
2922 case QL_VND_A84_MGMT_CMD:
2923 return qla84xx_mgmt_cmd(bsg_job);
2924
2925 case QL_VND_IIDMA:
2926 return qla24xx_iidma(bsg_job);
2927
2928 case QL_VND_FCP_PRIO_CFG_CMD:
2929 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2930
2931 case QL_VND_READ_FLASH:
2932 return qla2x00_read_optrom(bsg_job);
2933
2934 case QL_VND_UPDATE_FLASH:
2935 return qla2x00_update_optrom(bsg_job);
2936
2937 case QL_VND_SET_FRU_VERSION:
2938 return qla2x00_update_fru_versions(bsg_job);
2939
2940 case QL_VND_READ_FRU_STATUS:
2941 return qla2x00_read_fru_status(bsg_job);
2942
2943 case QL_VND_WRITE_FRU_STATUS:
2944 return qla2x00_write_fru_status(bsg_job);
2945
2946 case QL_VND_WRITE_I2C:
2947 return qla2x00_write_i2c(bsg_job);
2948
2949 case QL_VND_READ_I2C:
2950 return qla2x00_read_i2c(bsg_job);
2951
2952 case QL_VND_DIAG_IO_CMD:
2953 return qla24xx_process_bidir_cmd(bsg_job);
2954
2955 case QL_VND_FX00_MGMT_CMD:
2956 return qlafx00_mgmt_cmd(bsg_job);
2957
2958 case QL_VND_SERDES_OP:
2959 return qla26xx_serdes_op(bsg_job);
2960
2961 case QL_VND_SERDES_OP_EX:
2962 return qla8044_serdes_op(bsg_job);
2963
2964 case QL_VND_GET_FLASH_UPDATE_CAPS:
2965 return qla27xx_get_flash_upd_cap(bsg_job);
2966
2967 case QL_VND_GET_DRV_ATTR:
2968 return qla2x00_get_drv_attr(bsg_job);
2969
2970 case QL_VND_IMG_SET_VALID:
2971 return qla28xx_validate_flash_image(bsg_job);
2972
2973 case QL_VND_SET_FLASH_UPDATE_CAPS:
2974 return qla27xx_set_flash_upd_cap(bsg_job);
2975
2976 case QL_VND_GET_BBCR_DATA:
2977 return qla27xx_get_bbcr_data(bsg_job);
2978
2979 case QL_VND_GET_PRIV_STATS:
2980 case QL_VND_GET_PRIV_STATS_EX:
2981 return qla2x00_get_priv_stats(bsg_job);
2982
2983 case QL_VND_DPORT_DIAGNOSTICS:
2984 return qla2x00_do_dport_diagnostics(bsg_job);
2985
2986 case QL_VND_DPORT_DIAGNOSTICS_V2:
2987 return qla2x00_do_dport_diagnostics_v2(bsg_job);
2988
2989 case QL_VND_EDIF_MGMT:
2990 return qla_edif_app_mgmt(bsg_job);
2991
2992 case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2993 return qla2x00_get_flash_image_status(bsg_job);
2994
2995 case QL_VND_MANAGE_HOST_STATS:
2996 return qla2x00_manage_host_stats(bsg_job);
2997
2998 case QL_VND_GET_HOST_STATS:
2999 return qla2x00_get_host_stats(bsg_job);
3000
3001 case QL_VND_GET_TGT_STATS:
3002 return qla2x00_get_tgt_stats(bsg_job);
3003
3004 case QL_VND_MANAGE_HOST_PORT:
3005 return qla2x00_manage_host_port(bsg_job);
3006
3007 case QL_VND_MBX_PASSTHRU:
3008 return qla2x00_mailbox_passthru(bsg_job);
3009
3010 default:
3011 return -ENOSYS;
3012 }
3013 }
3014
/*
 * Main bsg entry point for the qla2xxx FC transport: resolve the owning
 * scsi_qla_host, gate on adapter state, then dispatch ELS/CT/vendor
 * requests to their handlers. Returns the handler's result or a
 * negative errno when the adapter cannot accept work.
 */
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	/* Rport-directed ELS jobs resolve the vha through the rport's
	 * shost; everything else goes through the job's shost directly. */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport)
			return ret;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	/* Disable port will bring down the chip, allow enable command */
	/* NOTE(review): vendor_cmd[0] is read from the request union even
	 * for non-vendor msgcodes before the check — relies on the union
	 * layout being safe to read; verify. */
	if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
	    bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
		goto skip_chip_chk;

	if (vha->hw->flags.port_isolated) {
		bsg_reply->result = DID_ERROR;
		/* operation not permitted */
		return -EPERM;
	}

	/* Refuse work while an ISP abort is active or needed. */
	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EBUSY;
	}

	/* Driver teardown in progress; refuse new work. */
	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EIO;
	}

skip_chip_chk:
	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "Entered %s msgcode=0x%x. bsg ptr %px\n",
	    __func__, bsg_request->msgcode, bsg_job);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(vha, bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}

	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "%s done with return %x\n", __func__, ret);

	return ret;
}
3092
/*
 * Search @qpair's outstanding-command array for the srb that carries
 * @bsg_job and attempt to abort it.
 *
 * Returns true when the job was found on this qpair (whether the abort
 * succeeded, timed out, or was skipped for EEH — in every found case the
 * bsg_job's completion has been arranged here or by the firmware).
 * Returns false when the job is not on this qpair, so the caller can try
 * the next one.
 */
static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
{
	bool found, do_bsg_done;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp = NULL;
	int cnt;
	unsigned long flags;
	struct req_que *req;
	int rval;
	DECLARE_COMPLETION_ONSTACK(comp);
	uint32_t ratov_j;

	found = do_bsg_done = false;

	/* Scan under the qpair lock; slot 0 is never used for commands. */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	req = qpair->req;

	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
		sp = req->outstanding_cmds[cnt];
		if (sp &&
		    (sp->type == SRB_CT_CMD ||
		     sp->type == SRB_ELS_CMD_HST ||
		     sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
		    sp->u.bsg_job == bsg_job) {

			/*
			 * Hook our on-stack completion so the normal
			 * done path (qla2x00_bsg_job_done) signals us.
			 */
			found = true;
			sp->comp = ∁
			break;
		}
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (!found)
		return false;

	if (ha->flags.eeh_busy) {
		/* skip over abort. EEH handling will return the bsg. Wait for it */
		rval = QLA_SUCCESS;
		ql_dbg(ql_dbg_user, vha, 0x802c,
		    "eeh encounter. bsg %p sp=%p handle=%x \n",
		    bsg_job, sp, sp->handle);
	} else {
		rval = ha->isp_ops->abort_command(sp);
		ql_dbg(ql_dbg_user, vha, 0x802c,
		    "Aborting bsg %p sp=%p handle=%x rval=%x\n",
		    bsg_job, sp, sp->handle, rval);
	}

	switch (rval) {
	case QLA_SUCCESS:
		/* Wait for the command completion. */
		/* 4x R_A_TOV allows the firmware time to flush the exchange. */
		ratov_j = ha->r_a_tov / 10 * 4;
		ratov_j = secs_to_jiffies(ratov_j);

		if (!wait_for_completion_timeout(&comp, ratov_j)) {
			ql_log(ql_log_info, vha, 0x7089,
			    "bsg abort timeout.  bsg=%p sp=%p handle %#x .\n",
			    bsg_job, sp, sp->handle);

			/* Firmware never returned it; complete it ourselves. */
			do_bsg_done = true;
		} else {
			/* fw had returned the bsg */
			ql_dbg(ql_dbg_user, vha, 0x708a,
			    "bsg abort success. bsg %p sp=%p handle=%#x\n",
			    bsg_job, sp, sp->handle);
			do_bsg_done = false;
		}
		break;
	default:
		ql_log(ql_log_info, vha, 0x704f,
		    "bsg abort fail.  bsg=%p sp=%p rval=%x.\n",
		    bsg_job, sp, rval);

		do_bsg_done = true;
		break;
	}

	if (!do_bsg_done)
		return true;

	/* Manual completion path: detach the srb and finish the bsg_job. */
	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	/*
	 * recheck to make sure it's still the same bsg_job due to
	 * qp_lock_ptr was released earlier.
	 */
	if (req->outstanding_cmds[cnt] &&
	    req->outstanding_cmds[cnt]->u.bsg_job != bsg_job) {
		/* fw had returned the bsg */
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return true;
	}
	req->outstanding_cmds[cnt] = NULL;
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	/* ref: INIT */
	/* Clear comp first so the release path cannot signal our dead stack. */
	sp->comp = NULL;
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	bsg_reply->result = -ENXIO;
	bsg_reply->reply_payload_rcv_len = 0;

	ql_dbg(ql_dbg_user, vha, 0x7051,
	    "%s bsg_job_done : bsg %p result %#x sp %p.\n",
	    __func__, bsg_job, bsg_reply->result, sp);

	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);

	return true;
}
3203
3204 int
qla24xx_bsg_timeout(struct bsg_job * bsg_job)3205 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
3206 {
3207 struct fc_bsg_request *bsg_request = bsg_job->request;
3208 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3209 struct qla_hw_data *ha = vha->hw;
3210 int i;
3211 struct qla_qpair *qpair;
3212
3213 ql_log(ql_log_info, vha, 0x708b,
3214 "%s CMD timeout. bsg ptr %p msgcode %x vendor cmd %x\n",
3215 __func__, bsg_job, bsg_request->msgcode,
3216 bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
3217
3218 if (qla2x00_isp_reg_stat(ha)) {
3219 ql_log(ql_log_info, vha, 0x9007,
3220 "PCI/Register disconnect.\n");
3221 qla_pci_set_eeh_busy(vha);
3222 }
3223
3224 if (qla_bsg_found(ha->base_qpair, bsg_job))
3225 goto done;
3226
3227 /* find the bsg job from the active list of commands */
3228 for (i = 0; i < ha->max_qpairs; i++) {
3229 qpair = vha->hw->queue_pair_map[i];
3230 if (!qpair)
3231 continue;
3232 if (qla_bsg_found(qpair, bsg_job))
3233 goto done;
3234 }
3235
3236 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
3237
3238 done:
3239 return 0;
3240 }
3241
qla2x00_mailbox_passthru(struct bsg_job * bsg_job)3242 int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
3243 {
3244 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3245 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3246 int ret = -EINVAL;
3247 int ptsize = sizeof(struct qla_mbx_passthru);
3248 struct qla_mbx_passthru *req_data = NULL;
3249 uint32_t req_data_len;
3250
3251 req_data_len = bsg_job->request_payload.payload_len;
3252 if (req_data_len != ptsize) {
3253 ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
3254 return -EIO;
3255 }
3256 req_data = kzalloc(ptsize, GFP_KERNEL);
3257 if (!req_data) {
3258 ql_log(ql_log_warn, vha, 0xf0a4,
3259 "req_data memory allocation failure.\n");
3260 return -ENOMEM;
3261 }
3262
3263 /* Copy the request buffer in req_data */
3264 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
3265 bsg_job->request_payload.sg_cnt, req_data, ptsize);
3266 ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);
3267
3268 /* Copy the req_data in request buffer */
3269 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
3270 bsg_job->reply_payload.sg_cnt, req_data, ptsize);
3271
3272 bsg_reply->reply_payload_rcv_len = ptsize;
3273 if (ret == QLA_SUCCESS)
3274 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
3275 else
3276 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;
3277
3278 bsg_job->reply_len = sizeof(*bsg_job->reply);
3279 bsg_reply->result = DID_OK << 16;
3280 if (!ret)
3281 bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
3282
3283 kfree(req_data);
3284
3285 return ret;
3286 }
3287
3288 static int
qla28xx_do_validate_flash_image(struct bsg_job * bsg_job,uint16_t * state)3289 qla28xx_do_validate_flash_image(struct bsg_job *bsg_job, uint16_t *state)
3290 {
3291 struct fc_bsg_request *bsg_request = bsg_job->request;
3292 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3293 uint16_t mstate[16];
3294 uint16_t mpi_state = 0;
3295 uint16_t img_idx;
3296 int rval = QLA_SUCCESS;
3297
3298 memset(mstate, 0, sizeof(mstate));
3299
3300 rval = qla2x00_get_firmware_state(vha, mstate);
3301 if (rval != QLA_SUCCESS) {
3302 ql_log(ql_log_warn, vha, 0xffff,
3303 "MBC to get MPI state failed (%d)\n", rval);
3304 rval = -EINVAL;
3305 goto exit_flash_img;
3306 }
3307
3308 mpi_state = mstate[11];
3309
3310 if (!(mpi_state & BIT_9 && mpi_state & BIT_8 && mpi_state & BIT_15)) {
3311 ql_log(ql_log_warn, vha, 0xffff,
3312 "MPI firmware state failed (0x%02x)\n", mpi_state);
3313 rval = -EINVAL;
3314 goto exit_flash_img;
3315 }
3316
3317 rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK);
3318 if (rval != QLA_SUCCESS) {
3319 ql_log(ql_log_warn, vha, 0xffff,
3320 "Unable to lock flash semaphore.");
3321 goto exit_flash_img;
3322 }
3323
3324 img_idx = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3325
3326 rval = qla_mpipt_validate_fw(vha, img_idx, state);
3327 if (rval != QLA_SUCCESS) {
3328 ql_log(ql_log_warn, vha, 0xffff,
3329 "Failed to validate Firmware image index [0x%x].\n",
3330 img_idx);
3331 }
3332
3333 qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK);
3334
3335 exit_flash_img:
3336 return rval;
3337 }
3338
qla28xx_validate_flash_image(struct bsg_job * bsg_job)3339 static int qla28xx_validate_flash_image(struct bsg_job *bsg_job)
3340 {
3341 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3342 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3343 struct qla_hw_data *ha = vha->hw;
3344 uint16_t state = 0;
3345 int rval = 0;
3346
3347 if (!IS_QLA28XX(ha) || vha->vp_idx != 0)
3348 return -EPERM;
3349
3350 mutex_lock(&ha->optrom_mutex);
3351 rval = qla28xx_do_validate_flash_image(bsg_job, &state);
3352 if (rval)
3353 rval = -EINVAL;
3354 mutex_unlock(&ha->optrom_mutex);
3355
3356 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
3357
3358 if (rval)
3359 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
3360 (state == 39) ? EXT_STATUS_IMG_SET_VALID_ERR :
3361 EXT_STATUS_IMG_SET_CONFIG_ERR;
3362 else
3363 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
3364
3365 bsg_reply->result = DID_OK << 16;
3366 bsg_reply->reply_payload_rcv_len = 0;
3367 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
3368 if (!rval)
3369 bsg_job_done(bsg_job, bsg_reply->result,
3370 bsg_reply->reply_payload_rcv_len);
3371
3372 return QLA_SUCCESS;
3373 }
3374