// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
	    __func__, sp->handle, res, bsg_job);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
}

void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {

		if (sp->remap.remapped) {
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

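/*
 * Handle the FCP priority configuration vendor command: enable or
 * disable FCP priority, or get/set the priority table cached in
 * ha->fcp_prio_cfg (QLFC_FCP_PRIO_* sub-commands).
 */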
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the cached
			 * fcp_prio_cfg is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

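/*
 * ELS pass-through: issue an ELS frame on behalf of user space, either
 * through an existing rport (FC_BSG_RPT_ELS) or host-based with no
 * prior login (FC_BSG_HST_ELS_NOLOGIN).
 */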
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;
	uint32_t els_cmd = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For
		 * host-based ELS commands there is no fcport structure
		 * allocated.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done_free_fcport:
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

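/*
 * Number of IOCBs needed to carry 'dsds' data segment descriptors:
 * the command IOCB holds the first 2 DSDs and each continuation IOCB
 * holds up to 5 more. E.g. dsds = 10 -> 1 + (8 / 5) + 1 = 3 IOCBs.
 */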
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

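/*
 * CT pass-through: send a CT request to the SNS (loop id 0xFC) or the
 * management server (0xFA) through a temporary fcport.
 */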
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host-based CT commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
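	/*
	 * Firmware may ask for more time via idc_extend_tmo; keep
	 * re-arming the wait until the completion fires or the
	 * (possibly extended) timeout finally expires.
	 */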
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

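/*
 * Loopback/echo diagnostic: map the user buffers, put the adapter into
 * the requested loopback mode when needed, run the test, and return the
 * received data plus the mailbox status to user space.
 */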
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK) {
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			} else {
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);
			}

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

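/*
 * Download a firmware image to the ISP84xx: copy the user payload into
 * a DMA buffer and hand it to the firmware via a VERIFY_CHIP IOCB.
 */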
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

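/*
 * ISP84xx management pass-through: read/write chip memory, get info,
 * or change config parameters through an ACCESS_CHIP IOCB. The DMA
 * direction depends on the sub-command.
 */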
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

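/*
 * iiDMA vendor command: look up the target port by WWPN and get or set
 * its firmware-negotiated port speed (port_param->mode selects set).
 */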
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

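/*
 * Common setup for option-ROM reads and updates: validate the requested
 * flash region, size the transfer, allocate the staging buffer, and move
 * optrom_state to QLA_SREADING/QLA_SWRITING. Callers hold optrom_mutex.
 */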
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

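/*
 * Read the requested option-ROM region into the staging buffer and
 * copy it out to the BSG reply payload.
 */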
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

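/*
 * The SFP/FRU helpers below stage small transfers through a buffer
 * from ha->s_dma_pool and report vendor status in vendor_rsp[0]
 * (0 on success, EXT_STATUS_* on failure).
 */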
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

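/*
 * I2C pass-through: write (or, below, read) i2c->length bytes at the
 * given device/offset through the SFP mailbox interface.
 */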
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

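/*
 * Bidirectional pass-through (diagnostic): requires an online host that
 * is F-port connected and in P2P operating mode. Performs a self-login
 * once, then sends one bidirectional IOCB whose read and write payload
 * lengths must match.
 */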
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate srb->ctx with bidir ctx */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}

"req_data_len != rsp_data_len\n"); 1928 goto done_unmap_sg; 1929 } 1930 1931 /* Alloc SRB structure */ 1932 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); 1933 if (!sp) { 1934 ql_dbg(ql_dbg_user, vha, 0x70ac, 1935 "Alloc SRB structure failed\n"); 1936 rval = EXT_STATUS_NO_MEMORY; 1937 goto done_unmap_sg; 1938 } 1939 1940 /*Populate srb->ctx with bidir ctx*/ 1941 sp->u.bsg_job = bsg_job; 1942 sp->free = qla2x00_bsg_sp_free; 1943 sp->type = SRB_BIDI_CMD; 1944 sp->done = qla2x00_bsg_job_done; 1945 1946 /* Add the read and write sg count */ 1947 tot_dsds = rsp_sg_cnt + req_sg_cnt; 1948 1949 rval = qla2x00_start_bidir(sp, vha, tot_dsds); 1950 if (rval != EXT_STATUS_OK) 1951 goto done_free_srb; 1952 /* the bsg request will be completed in the interrupt handler */ 1953 return rval; 1954 1955 done_free_srb: 1956 mempool_free(sp, ha->srb_mempool); 1957 done_unmap_sg: 1958 dma_unmap_sg(&ha->pdev->dev, 1959 bsg_job->reply_payload.sg_list, 1960 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1961 done_unmap_req_sg: 1962 dma_unmap_sg(&ha->pdev->dev, 1963 bsg_job->request_payload.sg_list, 1964 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1965 done: 1966 1967 /* Return an error vendor specific response 1968 * and complete the bsg request 1969 */ 1970 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1971 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1972 bsg_reply->reply_payload_rcv_len = 0; 1973 bsg_reply->result = (DID_OK) << 16; 1974 bsg_job_done(bsg_job, bsg_reply->result, 1975 bsg_reply->reply_payload_rcv_len); 1976 /* Always return success, vendor rsp carries correct status */ 1977 return 0; 1978 } 1979 1980 static int 1981 qlafx00_mgmt_cmd(struct bsg_job *bsg_job) 1982 { 1983 struct fc_bsg_request *bsg_request = bsg_job->request; 1984 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1985 scsi_qla_host_t *vha = shost_priv(host); 1986 struct qla_hw_data *ha = vha->hw; 1987 int rval = (DID_ERROR << 16); 1988 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 1989 srb_t *sp; 1990 int req_sg_cnt = 0, rsp_sg_cnt = 0; 1991 struct fc_port *fcport; 1992 char *type = "FC_BSG_HST_FX_MGMT"; 1993 1994 /* Copy the IOCB specific information */ 1995 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 1996 &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1997 1998 /* Dump the vendor information */ 1999 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, 2000 piocb_rqst, sizeof(*piocb_rqst)); 2001 2002 if (!vha->flags.online) { 2003 ql_log(ql_log_warn, vha, 0x70d0, 2004 "Host is not online.\n"); 2005 rval = -EIO; 2006 goto done; 2007 } 2008 2009 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { 2010 req_sg_cnt = dma_map_sg(&ha->pdev->dev, 2011 bsg_job->request_payload.sg_list, 2012 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 2013 if (!req_sg_cnt) { 2014 ql_log(ql_log_warn, vha, 0x70c7, 2015 "dma_map_sg return %d for request\n", req_sg_cnt); 2016 rval = -ENOMEM; 2017 goto done; 2018 } 2019 } 2020 2021 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { 2022 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 2023 bsg_job->reply_payload.sg_list, 2024 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2025 if (!rsp_sg_cnt) { 2026 ql_log(ql_log_warn, vha, 0x70c8, 2027 "dma_map_sg return %d for reply\n", rsp_sg_cnt); 2028 rval = -ENOMEM; 2029 goto done_unmap_req_sg; 2030 } 2031 } 2032 2033 ql_dbg(ql_dbg_user, vha, 0x70c9, 2034 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " 2035 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, 2036 req_sg_cnt, bsg_job->reply_payload.sg_cnt, 
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        int rval = (DID_ERROR << 16);
        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
        srb_t *sp;
        int req_sg_cnt = 0, rsp_sg_cnt = 0;
        struct fc_port *fcport;
        char *type = "FC_BSG_HST_FX_MGMT";

        /* Copy the IOCB specific information */
        piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
            &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

        /* Dump the vendor information */
        ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
            piocb_rqst, sizeof(*piocb_rqst));

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x70d0,
                    "Host is not online.\n");
                rval = -EIO;
                goto done;
        }

        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
                req_sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
                if (!req_sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x70c7,
                            "dma_map_sg return %d for request\n", req_sg_cnt);
                        rval = -ENOMEM;
                        goto done;
                }
        }

        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
                rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
                if (!rsp_sg_cnt) {
                        ql_log(ql_log_warn, vha, 0x70c8,
                            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
                        rval = -ENOMEM;
                        goto done_unmap_req_sg;
                }
        }

        ql_dbg(ql_dbg_user, vha, 0x70c9,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

        /* Allocate a dummy fcport structure, since the functions preparing
         * the IOCB and mailbox command retrieve port-specific information
         * from the fcport structure. For host-based ELS commands there is
         * no fcport structure allocated.
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
                ql_log(ql_log_warn, vha, 0x70ca,
                    "Failed to allocate fcport.\n");
                rval = -ENOMEM;
                goto done_unmap_rsp_sg;
        }

        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp) {
                ql_log(ql_log_warn, vha, 0x70cb,
                    "qla2x00_get_sp failed.\n");
                rval = -ENOMEM;
                goto done_free_fcport;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

        sp->type = SRB_FXIOCB_BCMD;
        sp->name = "bsg_fx_mgmt";
        sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
        sp->u.bsg_job = bsg_job;
        sp->free = qla2x00_bsg_sp_free;
        sp->done = qla2x00_bsg_job_done;

        ql_dbg(ql_dbg_user, vha, 0x70cc,
            "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
            type, piocb_rqst->func_type, fcport->loop_id);

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x70cd,
                    "qla2x00_start_sp failed=%d.\n", rval);
                mempool_free(sp, ha->srb_mempool);
                rval = -EIO;
                goto done_free_fcport;
        }
        return rval;

done_free_fcport:
        qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
                dma_unmap_sg(&ha->pdev->dev,
                    bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
                dma_unmap_sg(&ha->pdev->dev,
                    bsg_job->request_payload.sg_list,
                    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
        return rval;
}
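/*
 * SERDES register access. Both handlers below follow the same pattern:
 * copy a register-access descriptor from the request payload, perform a
 * read or write of the addressed SERDES word through the appropriate
 * mailbox helper, and return the (possibly updated) descriptor in the
 * reply payload. An unknown sub-command is rejected with -EINVAL, and a
 * mailbox failure is reported as EXT_STATUS_MAILBOX in the vendor
 * response word.
 */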
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_serdes_reg sr;

        memset(&sr, 0, sizeof(sr));

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

        switch (sr.cmd) {
        case INT_SC_SERDES_WRITE_REG:
                rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
                bsg_reply->reply_payload_rcv_len = 0;
                break;
        case INT_SC_SERDES_READ_REG:
                rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
                bsg_reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x708c,
                    "Unknown serdes cmd %x.\n", sr.cmd);
                rval = -EINVAL;
                break;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : 0;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval = 0;
        struct qla_serdes_reg_ex sr;

        memset(&sr, 0, sizeof(sr));

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

        switch (sr.cmd) {
        case INT_SC_SERDES_WRITE_REG:
                rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
                bsg_reply->reply_payload_rcv_len = 0;
                break;
        case INT_SC_SERDES_READ_REG:
                rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
                bsg_reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x7020,
                    "Unknown serdes cmd %x.\n", sr.cmd);
                rval = -EINVAL;
                break;
        }

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : 0;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct qla_flash_update_caps cap;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&cap, 0, sizeof(cap));

        /* The 64-bit capability word is assembled from the four 16-bit
         * firmware attribute registers, ext[1]:ext[0]:attr_h:attr from
         * high to low.
         */
        cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
            (uint64_t)ha->fw_attributes_ext[0] << 32 |
            (uint64_t)ha->fw_attributes_h << 16 |
            (uint64_t)ha->fw_attributes;

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
        bsg_reply->reply_payload_rcv_len = sizeof(cap);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
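/*
 * qla27xx_set_flash_upd_cap() - Validate a proposed flash-update
 * capability setting. The capability word supplied by the caller must
 * match what the running firmware reports, and the requested outage
 * duration must be at least MAX_LOOP_TIMEOUT; otherwise the request is
 * rejected with EXT_STATUS_INVALID_PARAM.
 */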
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        uint64_t online_fw_attr = 0;
        struct qla_flash_update_caps cap;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&cap, 0, sizeof(cap));
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

        online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
            (uint64_t)ha->fw_attributes_ext[0] << 32 |
            (uint64_t)ha->fw_attributes_h << 16 |
            (uint64_t)ha->fw_attributes;

        if (online_fw_attr != cap.capabilities) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_INVALID_PARAM;
                return -EINVAL;
        }

        if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_INVALID_PARAM;
                return -EINVAL;
        }

        bsg_reply->reply_payload_rcv_len = 0;

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}

static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct qla_bbcr_data bbcr;
        uint16_t loop_id, topo, sw_cap;
        uint8_t domain, area, al_pa, state;
        int rval;

        if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
                return -EPERM;

        memset(&bbcr, 0, sizeof(bbcr));

        if (vha->flags.bbcr_enable)
                bbcr.status = QLA_BBCR_STATUS_ENABLED;
        else
                bbcr.status = QLA_BBCR_STATUS_DISABLED;

        if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
                rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
                    &area, &domain, &topo, &sw_cap);
                if (rval != QLA_SUCCESS) {
                        bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
                        bbcr.state = QLA_BBCR_STATE_OFFLINE;
                        bbcr.mbx1 = loop_id;
                        goto done;
                }

                state = (vha->bbcr >> 12) & 0x1;

                if (state) {
                        bbcr.state = QLA_BBCR_STATE_OFFLINE;
                        bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
                } else {
                        bbcr.state = QLA_BBCR_STATE_ONLINE;
                        bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
                }

                bbcr.configured_bbscn = vha->bbcr & 0xf;
        }

done:
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
        bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);
        return 0;
}
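/*
 * qla2x00_get_priv_stats() - Return the firmware link statistics block
 * to the application. For QL_VND_GET_PRIV_STATS_EX the second vendor
 * command word carries the mailbox options; the plain variant always
 * uses options == 0. The statistics are requested through the base
 * physical port (base_vha) into a DMA-coherent buffer and copied back
 * whole.
 */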
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        struct link_statistics *stats = NULL;
        dma_addr_t stats_dma;
        int rval;
        uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
        uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

        if (test_bit(UNLOADING, &vha->dpc_flags))
                return -ENODEV;

        if (unlikely(pci_channel_offline(ha->pdev)))
                return -ENODEV;

        if (qla2x00_reset_active(vha))
                return -EBUSY;

        if (!IS_FWI2_CAPABLE(ha))
                return -EPERM;

        stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
            GFP_KERNEL);
        if (!stats) {
                ql_log(ql_log_warn, vha, 0x70e2,
                    "Failed to allocate memory for stats.\n");
                return -ENOMEM;
        }

        rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

        if (rval == QLA_SUCCESS) {
                ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
                    stats, sizeof(*stats));
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
        }

        bsg_reply->reply_payload_rcv_len = sizeof(*stats);
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
            stats, stats_dma);

        return 0;
}
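/*
 * D-Port diagnostics. The v1 handler below runs the test synchronously
 * and copies the raw result buffer back. The v2 handler is stateful:
 * start/restart requests kick off the test and mark it in progress in
 * vha->dport_status, while a get-result request is refused while the
 * test is still running or after a chip reset has invalidated it.
 */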
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval;
        struct qla_dport_diag *dd;

        if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
            !IS_QLA28XX(vha->hw))
                return -EPERM;

        dd = kmalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd) {
                ql_log(ql_log_warn, vha, 0x70db,
                    "Failed to allocate memory for dport.\n");
                return -ENOMEM;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

        rval = qla26xx_dport_diagnostics(
            vha, dd->buf, sizeof(dd->buf), dd->options);
        if (rval == QLA_SUCCESS) {
                sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
        }

        bsg_reply->reply_payload_rcv_len = sizeof(*dd);
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
            rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        kfree(dd);

        return 0;
}

static int
qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        int rval;
        struct qla_dport_diag_v2 *dd;
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
        uint16_t options;

        if (!IS_DPORT_CAPABLE(vha->hw))
                return -EPERM;

        dd = kzalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd)
                return -ENOMEM;

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

        options = dd->options;

        /* Check dport test in progress */
        if (options == QLA_GET_DPORT_RESULT_V2 &&
            vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_DPORT_DIAG_IN_PROCESS;
                goto dportcomplete;
        }

        /* Check chip reset in progress and start/restart requests arrive */
        if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
            (options == QLA_START_DPORT_TEST_V2 ||
             options == QLA_RESTART_DPORT_TEST_V2)) {
                vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
        }

        /* Check chip reset in progress and get result request arrive */
        if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
            options == QLA_GET_DPORT_RESULT_V2) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
                goto dportcomplete;
        }

        rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);

        if (rval == QLA_SUCCESS) {
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_OK;
                if (options == QLA_START_DPORT_TEST_V2 ||
                    options == QLA_RESTART_DPORT_TEST_V2) {
                        dd->mbx1 = mcp->mb[0];
                        dd->mbx2 = mcp->mb[1];
                        vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
                } else if (options == QLA_GET_DPORT_RESULT_V2) {
                        dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
                        dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
                }
        } else {
                dd->mbx1 = mcp->mb[0];
                dd->mbx2 = mcp->mb[1];
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
                    EXT_STATUS_DPORT_DIAG_ERR;
        }

dportcomplete:
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));

        bsg_reply->reply_payload_rcv_len = sizeof(*dd);
        bsg_job->reply_len = sizeof(*bsg_reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        kfree(dd);

        return 0;
}
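/*
 * qla2x00_get_flash_image_status() - Report which flash image regions
 * (primary/secondary) are currently active. ISP27xx exposes only the
 * global firmware image and fixed NVMe parameters; ISP28xx additionally
 * reports the auxiliary regions (board config, VPD/NVRAM, NPIV config,
 * NVMe params) via qla28xx_get_aux_images().
 */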
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct qla_hw_data *ha = vha->hw;
        struct qla_active_regions regions = { };
        struct active_regions active_regions = { };

        qla27xx_get_active_image(vha, &active_regions);
        regions.global_image = active_regions.global;

        if (IS_QLA27XX(ha))
                regions.nvme_params = QLA27XX_PRIMARY_IMAGE;

        if (IS_QLA28XX(ha)) {
                qla28xx_get_aux_images(vha, &active_regions);
                regions.board_config = active_regions.aux.board_config;
                regions.vpd_nvram = active_regions.aux.vpd_nvram;
                regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
                regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
                regions.nvme_params = active_regions.aux.nvme_params;
        }

        ql_dbg(ql_dbg_user, vha, 0x70e1,
            "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
            __func__, vha->host_no, regions.global_image,
            regions.board_config, regions.vpd_nvram,
            regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);

        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        bsg_reply->reply_payload_rcv_len = sizeof(regions);
        bsg_reply->result = DID_OK << 16;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return 0;
}

static int
qla2x00_manage_host_stats(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct ql_vnd_mng_host_stats_param *req_data;
        struct ql_vnd_mng_host_stats_resp rsp_data;
        u32 req_data_len;
        int ret = 0;

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
                return -EIO;
        }

        req_data_len = bsg_job->request_payload.payload_len;

        if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
                return -EIO;
        }

        req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer into req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data,
            req_data_len);

        switch (req_data->action) {
        case QLA_STOP:
                ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
                break;
        case QLA_START:
                ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
                break;
        case QLA_CLEAR:
                ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
                break;
        default:
                ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
                ret = -EIO;
                break;
        }

        kfree(req_data);

        /* Prepare response */
        rsp_data.status = ret;
        bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt,
                &rsp_data,
                sizeof(struct ql_vnd_mng_host_stats_resp));

        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return ret;
}
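/*
 * qla2x00_get_host_stats() - Return initiator-side statistics selected
 * by a stat_type bitmask. QLA2XX_TGT_SHT_LNK_DOWN (bit 17) additionally
 * requests one entry per connected target, so the reply is sized as the
 * response header plus one ql_vnd_stat_entry per remaining set bit plus
 * one per target. If the caller's reply buffer is too small, only a
 * status header carrying EXT_STATUS_BUFFER_TOO_SMALL is returned.
 */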
static int
qla2x00_get_host_stats(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct ql_vnd_stats_param *req_data;
        struct ql_vnd_host_stats_resp rsp_data;
        u32 req_data_len;
        int ret = 0;
        u64 ini_entry_count = 0;
        u64 entry_count = 0;
        u64 tgt_num = 0;
        u64 tmp_stat_type = 0;
        u64 response_len = 0;
        void *data;

        req_data_len = bsg_job->request_payload.payload_len;

        if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
                return -EIO;
        }

        req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer into req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data, req_data_len);

        /* Copy stat type to work on it */
        tmp_stat_type = req_data->stat_type;

        if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
                /* Num of tgts connected to this host */
                tgt_num = qla2x00_get_num_tgts(vha);
                /* unset BIT_17 */
                tmp_stat_type &= ~(1 << 17);
        }

        /* Total ini stats */
        ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

        /* Total number of entries */
        entry_count = ini_entry_count + tgt_num;

        response_len = sizeof(struct ql_vnd_host_stats_resp) +
            (sizeof(struct ql_vnd_stat_entry) * entry_count);

        if (response_len > bsg_job->reply_payload.payload_len) {
                rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
                bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

                bsg_reply->reply_payload_rcv_len =
                    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                        bsg_job->reply_payload.sg_cnt, &rsp_data,
                        sizeof(struct ql_vnd_mng_host_stats_resp));

                bsg_reply->result = DID_OK;
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
                goto host_stat_out;
        }

        data = kzalloc(response_len, GFP_KERNEL);
        if (!data) {
                ret = -ENOMEM;
                goto host_stat_out;
        }

        ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
            data, response_len);

        rsp_data.status = EXT_STATUS_OK;
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt,
                data, response_len);
        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        kfree(data);
host_stat_out:
        kfree(req_data);
        return ret;
}

static struct fc_rport *
qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
{
        fc_port_t *fcport = NULL;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                if (fcport->rport->number == tgt_num)
                        return fcport->rport;
        }
        return NULL;
}
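/*
 * qla2x00_get_tgt_stats() - Return statistics for a single target,
 * identified by its fc_rport number. The rport is looked up on the
 * host's fcport list; an unknown target id is reported back as
 * EXT_STATUS_INVALID_PARAM in the response status rather than as a
 * negative errno, so the BSG transaction itself still completes.
 */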
static int
qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct ql_vnd_tgt_stats_param *req_data;
        u32 req_data_len;
        int ret = 0;
        u64 response_len = 0;
        struct ql_vnd_tgt_stats_resp *data = NULL;
        struct fc_rport *rport = NULL;

        if (!vha->flags.online) {
                ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
                return -EIO;
        }

        req_data_len = bsg_job->request_payload.payload_len;

        if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
                return -EIO;
        }

        req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer into req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt,
            req_data, req_data_len);

        response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
            sizeof(struct ql_vnd_stat_entry);

        /* structure + size for one entry */
        data = kzalloc(response_len, GFP_KERNEL);
        if (!data) {
                kfree(req_data);
                return -ENOMEM;
        }

        if (response_len > bsg_job->reply_payload.payload_len) {
                data->status = EXT_STATUS_BUFFER_TOO_SMALL;
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
                bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

                bsg_reply->reply_payload_rcv_len =
                    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                        bsg_job->reply_payload.sg_cnt, data,
                        sizeof(struct ql_vnd_tgt_stats_resp));

                bsg_reply->result = DID_OK;
                bsg_job_done(bsg_job, bsg_reply->result,
                    bsg_reply->reply_payload_rcv_len);
                goto tgt_stat_out;
        }

        rport = qla2xxx_find_rport(vha, req_data->tgt_id);
        if (!rport) {
                ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
                ret = EXT_STATUS_INVALID_PARAM;
                data->status = EXT_STATUS_INVALID_PARAM;
                goto reply;
        }

        ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
            rport, (void *)data, response_len);

        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
reply:
        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, data,
                response_len);
        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

tgt_stat_out:
        kfree(data);
        kfree(req_data);

        return ret;
}
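/*
 * qla2x00_manage_host_port() - Enable or disable the physical host
 * port on behalf of the application. The helper's result is carried in
 * the response status field; note that the BSG transaction completes
 * with DID_OK even when the enable/disable helper fails.
 */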
static int
qla2x00_manage_host_port(struct bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        struct ql_vnd_mng_host_port_param *req_data;
        struct ql_vnd_mng_host_port_resp rsp_data;
        u32 req_data_len;
        int ret = 0;

        req_data_len = bsg_job->request_payload.payload_len;

        if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
                return -EIO;
        }

        req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer into req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data, req_data_len);

        switch (req_data->action) {
        case QLA_ENABLE:
                ret = qla2xxx_enable_port(vha->host);
                break;
        case QLA_DISABLE:
                ret = qla2xxx_disable_port(vha->host);
                break;
        default:
                ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
                ret = -EIO;
                break;
        }

        kfree(req_data);

        /* Prepare response */
        rsp_data.status = ret;
        bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);

        bsg_reply->reply_payload_rcv_len =
            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, &rsp_data,
                sizeof(struct ql_vnd_mng_host_port_resp));
        bsg_reply->result = DID_OK;
        bsg_job_done(bsg_job, bsg_reply->result,
            bsg_reply->reply_payload_rcv_len);

        return ret;
}

static int
qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;

        ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
            __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);

        switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
        case QL_VND_LOOPBACK:
                return qla2x00_process_loopback(bsg_job);

        case QL_VND_A84_RESET:
                return qla84xx_reset(bsg_job);

        case QL_VND_A84_UPDATE_FW:
                return qla84xx_updatefw(bsg_job);

        case QL_VND_A84_MGMT_CMD:
                return qla84xx_mgmt_cmd(bsg_job);

        case QL_VND_IIDMA:
                return qla24xx_iidma(bsg_job);

        case QL_VND_FCP_PRIO_CFG_CMD:
                return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

        case QL_VND_READ_FLASH:
                return qla2x00_read_optrom(bsg_job);

        case QL_VND_UPDATE_FLASH:
                return qla2x00_update_optrom(bsg_job);

        case QL_VND_SET_FRU_VERSION:
                return qla2x00_update_fru_versions(bsg_job);

        case QL_VND_READ_FRU_STATUS:
                return qla2x00_read_fru_status(bsg_job);

        case QL_VND_WRITE_FRU_STATUS:
                return qla2x00_write_fru_status(bsg_job);

        case QL_VND_WRITE_I2C:
                return qla2x00_write_i2c(bsg_job);

        case QL_VND_READ_I2C:
                return qla2x00_read_i2c(bsg_job);

        case QL_VND_DIAG_IO_CMD:
                return qla24xx_process_bidir_cmd(bsg_job);

        case QL_VND_FX00_MGMT_CMD:
                return qlafx00_mgmt_cmd(bsg_job);

        case QL_VND_SERDES_OP:
                return qla26xx_serdes_op(bsg_job);

        case QL_VND_SERDES_OP_EX:
                return qla8044_serdes_op(bsg_job);

        case QL_VND_GET_FLASH_UPDATE_CAPS:
                return qla27xx_get_flash_upd_cap(bsg_job);

        case QL_VND_SET_FLASH_UPDATE_CAPS:
                return qla27xx_set_flash_upd_cap(bsg_job);

        case QL_VND_GET_BBCR_DATA:
                return qla27xx_get_bbcr_data(bsg_job);

        case QL_VND_GET_PRIV_STATS:
        case QL_VND_GET_PRIV_STATS_EX:
                return qla2x00_get_priv_stats(bsg_job);

        case QL_VND_DPORT_DIAGNOSTICS:
                return qla2x00_do_dport_diagnostics(bsg_job);

        case QL_VND_DPORT_DIAGNOSTICS_V2:
                return qla2x00_do_dport_diagnostics_v2(bsg_job);

        case QL_VND_EDIF_MGMT:
                return qla_edif_app_mgmt(bsg_job);

        case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
                return qla2x00_get_flash_image_status(bsg_job);

        case QL_VND_MANAGE_HOST_STATS:
                return qla2x00_manage_host_stats(bsg_job);

        case QL_VND_GET_HOST_STATS:
                return qla2x00_get_host_stats(bsg_job);

        case QL_VND_GET_TGT_STATS:
                return qla2x00_get_tgt_stats(bsg_job);

        case QL_VND_MANAGE_HOST_PORT:
                return qla2x00_manage_host_port(bsg_job);

        case QL_VND_MBX_PASSTHRU:
                return qla2x00_mailbox_passthru(bsg_job);

        default:
                return -ENOSYS;
        }
}
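/*
 * Illustrative sketch only, not driver code: userspace reaches
 * qla24xx_bsg_request() below through the FC BSG node, e.g.
 * /dev/bsg/fc_host0 (the node name depends on the host number and is an
 * assumption here). Field names follow include/uapi/scsi/scsi_bsg_fc.h
 * and include/uapi/linux/bsg.h. The request buffer must be oversized
 * because vendor_cmd[] is a flexible array; reply payload buffers
 * (din_xferp) and all error handling are omitted for brevity:
 *
 *	uint32_t rqst[64] = { 0 };
 *	struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
 *	struct fc_bsg_reply reply = { 0 };
 *	struct sg_io_v4 io = {
 *		.guard = 'Q',
 *		.protocol = BSG_PROTOCOL_SCSI,
 *		.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
 *		.request = (uintptr_t)req,
 *		.request_len = sizeof(rqst),
 *		.response = (uintptr_t)&reply,
 *		.max_response_len = sizeof(reply),
 *	};
 *
 *	req->msgcode = FC_BSG_HST_VENDOR;
 *	req->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_GET_BBCR_DATA;
 *	ioctl(open("/dev/bsg/fc_host0", O_RDWR), SG_IO, &io);
 */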
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
        struct fc_bsg_request *bsg_request = bsg_job->request;
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        int ret = -EINVAL;
        struct fc_rport *rport;
        struct Scsi_Host *host;
        scsi_qla_host_t *vha;

        /* In case no data transferred. */
        bsg_reply->reply_payload_rcv_len = 0;

        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
                rport = fc_bsg_to_rport(bsg_job);
                host = rport_to_shost(rport);
                vha = shost_priv(host);
        } else {
                host = fc_bsg_to_shost(bsg_job);
                vha = shost_priv(host);
        }

        /* Disable port will bring down the chip, allow enable command */
        if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
            bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
                goto skip_chip_chk;

        if (vha->hw->flags.port_isolated) {
                bsg_reply->result = DID_ERROR;
                /* operation not permitted */
                return -EPERM;
        }

        if (qla2x00_chip_is_down(vha)) {
                ql_dbg(ql_dbg_user, vha, 0x709f,
                    "BSG: ISP abort active/needed -- cmd=%d.\n",
                    bsg_request->msgcode);
                SET_DID_STATUS(bsg_reply->result, DID_ERROR);
                return -EBUSY;
        }

        if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
                SET_DID_STATUS(bsg_reply->result, DID_ERROR);
                return -EIO;
        }

skip_chip_chk:
        ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
            "Entered %s msgcode=0x%x. bsg ptr %px\n",
            __func__, bsg_request->msgcode, bsg_job);

        switch (bsg_request->msgcode) {
        case FC_BSG_RPT_ELS:
        case FC_BSG_HST_ELS_NOLOGIN:
                ret = qla2x00_process_els(bsg_job);
                break;
        case FC_BSG_HST_CT:
                ret = qla2x00_process_ct(bsg_job);
                break;
        case FC_BSG_HST_VENDOR:
                ret = qla2x00_process_vendor_specific(vha, bsg_job);
                break;
        case FC_BSG_HST_ADD_RPORT:
        case FC_BSG_HST_DEL_RPORT:
        case FC_BSG_RPT_CT:
        default:
                ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
                break;
        }

        ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
            "%s done with return %x\n", __func__, ret);

        return ret;
}
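/*
 * qla24xx_bsg_timeout() - Abort a timed-out BSG command. The job is
 * located by scanning the outstanding-command arrays of all request
 * queues under the hardware lock; if found, the slot is cleared, a
 * firmware abort is attempted (skipped while EEH recovery is active),
 * and the INIT reference on the srb is dropped.
 */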
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        struct qla_hw_data *ha = vha->hw;
        srb_t *sp;
        int cnt, que;
        unsigned long flags;
        struct req_que *req;

        ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
            __func__, bsg_job);

        if (qla2x00_isp_reg_stat(ha)) {
                ql_log(ql_log_info, vha, 0x9007,
                    "PCI/Register disconnect.\n");
                qla_pci_set_eeh_busy(vha);
        }

        /* find the bsg job from the active list of commands */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (que = 0; que < ha->max_req_queues; que++) {
                req = ha->req_q_map[que];
                if (!req)
                        continue;

                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                        sp = req->outstanding_cmds[cnt];
                        if (sp &&
                            (sp->type == SRB_CT_CMD ||
                             sp->type == SRB_ELS_CMD_HST ||
                             sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
                             sp->type == SRB_FXIOCB_BCMD) &&
                            sp->u.bsg_job == bsg_job) {
                                req->outstanding_cmds[cnt] = NULL;
                                spin_unlock_irqrestore(&ha->hardware_lock, flags);

                                if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
                                        ql_log(ql_log_warn, vha, 0x7089,
                                            "mbx abort_command failed.\n");
                                        bsg_reply->result = -EIO;
                                } else {
                                        ql_dbg(ql_dbg_user, vha, 0x708a,
                                            "mbx abort_command success.\n");
                                        bsg_reply->result = 0;
                                }
                                spin_lock_irqsave(&ha->hardware_lock, flags);
                                goto done;

                        }
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
        bsg_reply->result = -ENXIO;
        return 0;

done:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        /* ref: INIT */
        kref_put(&sp->cmd_kref, qla2x00_sp_release);
        return 0;
}

int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
{
        struct fc_bsg_reply *bsg_reply = bsg_job->reply;
        scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
        int ret = -EINVAL;
        int ptsize = sizeof(struct qla_mbx_passthru);
        struct qla_mbx_passthru *req_data = NULL;
        uint32_t req_data_len;

        req_data_len = bsg_job->request_payload.payload_len;
        if (req_data_len != ptsize) {
                ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
                return -EIO;
        }
        req_data = kzalloc(ptsize, GFP_KERNEL);
        if (!req_data) {
                ql_log(ql_log_warn, vha, 0xf0a4,
                    "req_data memory allocation failure.\n");
                return -ENOMEM;
        }

        /* Copy the request buffer into req_data */
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, req_data, ptsize);
        ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);

        /* Copy the filled-in mailbox registers back to the reply buffer */
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, req_data, ptsize);

        bsg_reply->reply_payload_rcv_len = ptsize;
        if (ret == QLA_SUCCESS)
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
        else
                bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;

        bsg_job->reply_len = sizeof(*bsg_job->reply);
        bsg_reply->result = DID_OK << 16;
        bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);

        kfree(req_data);

        return ret;
}