1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2012 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 9 #include <linux/kthread.h> 10 #include <linux/vmalloc.h> 11 #include <linux/delay.h> 12 13 /* BSG support for ELS/CT pass through */ 14 void 15 qla2x00_bsg_job_done(void *data, void *ptr, int res) 16 { 17 srb_t *sp = (srb_t *)ptr; 18 struct scsi_qla_host *vha = (scsi_qla_host_t *)data; 19 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 20 21 bsg_job->reply->result = res; 22 bsg_job->job_done(bsg_job); 23 sp->free(vha, sp); 24 } 25 26 void 27 qla2x00_bsg_sp_free(void *data, void *ptr) 28 { 29 srb_t *sp = (srb_t *)ptr; 30 struct scsi_qla_host *vha = (scsi_qla_host_t *)data; 31 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 32 struct qla_hw_data *ha = vha->hw; 33 34 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 35 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 36 37 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 38 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 39 40 if (sp->type == SRB_CT_CMD || 41 sp->type == SRB_ELS_CMD_HST) 42 kfree(sp->fcport); 43 mempool_free(sp, vha->hw->srb_mempool); 44 } 45 46 int 47 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, 48 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) 49 { 50 int i, ret, num_valid; 51 uint8_t *bcode; 52 struct qla_fcp_prio_entry *pri_entry; 53 uint32_t *bcode_val_ptr, bcode_val; 54 55 ret = 1; 56 num_valid = 0; 57 bcode = (uint8_t *)pri_cfg; 58 bcode_val_ptr = (uint32_t *)pri_cfg; 59 bcode_val = (uint32_t)(*bcode_val_ptr); 60 61 if (bcode_val == 0xFFFFFFFF) { 62 /* No FCP Priority config data in flash */ 63 ql_dbg(ql_dbg_user, vha, 0x7051, 64 "No FCP Priority config data.\n"); 65 return 0; 66 } 67 68 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' || 69 bcode[3] != 'S') { 70 /* Invalid FCP priority data header*/ 71 ql_dbg(ql_dbg_user, vha, 0x7052, 72 "Invalid FCP Priority data header. 
bcode=0x%x.\n", 73 bcode_val); 74 return 0; 75 } 76 if (flag != 1) 77 return ret; 78 79 pri_entry = &pri_cfg->entry[0]; 80 for (i = 0; i < pri_cfg->num_entries; i++) { 81 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) 82 num_valid++; 83 pri_entry++; 84 } 85 86 if (num_valid == 0) { 87 /* No valid FCP priority data entries */ 88 ql_dbg(ql_dbg_user, vha, 0x7053, 89 "No valid FCP Priority data entries.\n"); 90 ret = 0; 91 } else { 92 /* FCP priority data is valid */ 93 ql_dbg(ql_dbg_user, vha, 0x7054, 94 "Valid FCP priority data. num entries = %d.\n", 95 num_valid); 96 } 97 98 return ret; 99 } 100 101 static int 102 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) 103 { 104 struct Scsi_Host *host = bsg_job->shost; 105 scsi_qla_host_t *vha = shost_priv(host); 106 struct qla_hw_data *ha = vha->hw; 107 int ret = 0; 108 uint32_t len; 109 uint32_t oper; 110 111 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) { 112 ret = -EINVAL; 113 goto exit_fcp_prio_cfg; 114 } 115 116 /* Get the sub command */ 117 oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 118 119 /* Only set config is allowed if config memory is not allocated */ 120 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) { 121 ret = -EINVAL; 122 goto exit_fcp_prio_cfg; 123 } 124 switch (oper) { 125 case QLFC_FCP_PRIO_DISABLE: 126 if (ha->flags.fcp_prio_enabled) { 127 ha->flags.fcp_prio_enabled = 0; 128 ha->fcp_prio_cfg->attributes &= 129 ~FCP_PRIO_ATTR_ENABLE; 130 qla24xx_update_all_fcp_prio(vha); 131 bsg_job->reply->result = DID_OK; 132 } else { 133 ret = -EINVAL; 134 bsg_job->reply->result = (DID_ERROR << 16); 135 goto exit_fcp_prio_cfg; 136 } 137 break; 138 139 case QLFC_FCP_PRIO_ENABLE: 140 if (!ha->flags.fcp_prio_enabled) { 141 if (ha->fcp_prio_cfg) { 142 ha->flags.fcp_prio_enabled = 1; 143 ha->fcp_prio_cfg->attributes |= 144 FCP_PRIO_ATTR_ENABLE; 145 qla24xx_update_all_fcp_prio(vha); 146 bsg_job->reply->result = DID_OK; 147 } else { 148 ret = -EINVAL; 149 
bsg_job->reply->result = (DID_ERROR << 16); 150 goto exit_fcp_prio_cfg; 151 } 152 } 153 break; 154 155 case QLFC_FCP_PRIO_GET_CONFIG: 156 len = bsg_job->reply_payload.payload_len; 157 if (!len || len > FCP_PRIO_CFG_SIZE) { 158 ret = -EINVAL; 159 bsg_job->reply->result = (DID_ERROR << 16); 160 goto exit_fcp_prio_cfg; 161 } 162 163 bsg_job->reply->result = DID_OK; 164 bsg_job->reply->reply_payload_rcv_len = 165 sg_copy_from_buffer( 166 bsg_job->reply_payload.sg_list, 167 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg, 168 len); 169 170 break; 171 172 case QLFC_FCP_PRIO_SET_CONFIG: 173 len = bsg_job->request_payload.payload_len; 174 if (!len || len > FCP_PRIO_CFG_SIZE) { 175 bsg_job->reply->result = (DID_ERROR << 16); 176 ret = -EINVAL; 177 goto exit_fcp_prio_cfg; 178 } 179 180 if (!ha->fcp_prio_cfg) { 181 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 182 if (!ha->fcp_prio_cfg) { 183 ql_log(ql_log_warn, vha, 0x7050, 184 "Unable to allocate memory for fcp prio " 185 "config data (%x).\n", FCP_PRIO_CFG_SIZE); 186 bsg_job->reply->result = (DID_ERROR << 16); 187 ret = -ENOMEM; 188 goto exit_fcp_prio_cfg; 189 } 190 } 191 192 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE); 193 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 194 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg, 195 FCP_PRIO_CFG_SIZE); 196 197 /* validate fcp priority data */ 198 199 if (!qla24xx_fcp_prio_cfg_valid(vha, 200 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) { 201 bsg_job->reply->result = (DID_ERROR << 16); 202 ret = -EINVAL; 203 /* If buffer was invalidatic int 204 * fcp_prio_cfg is of no use 205 */ 206 vfree(ha->fcp_prio_cfg); 207 ha->fcp_prio_cfg = NULL; 208 goto exit_fcp_prio_cfg; 209 } 210 211 ha->flags.fcp_prio_enabled = 0; 212 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE) 213 ha->flags.fcp_prio_enabled = 1; 214 qla24xx_update_all_fcp_prio(vha); 215 bsg_job->reply->result = DID_OK; 216 break; 217 default: 218 ret = -EINVAL; 219 break; 220 } 221 exit_fcp_prio_cfg: 222 if 
(!ret) 223 bsg_job->job_done(bsg_job); 224 return ret; 225 } 226 227 static int 228 qla2x00_process_els(struct fc_bsg_job *bsg_job) 229 { 230 struct fc_rport *rport; 231 fc_port_t *fcport = NULL; 232 struct Scsi_Host *host; 233 scsi_qla_host_t *vha; 234 struct qla_hw_data *ha; 235 srb_t *sp; 236 const char *type; 237 int req_sg_cnt, rsp_sg_cnt; 238 int rval = (DRIVER_ERROR << 16); 239 uint16_t nextlid = 0; 240 241 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 242 rport = bsg_job->rport; 243 fcport = *(fc_port_t **) rport->dd_data; 244 host = rport_to_shost(rport); 245 vha = shost_priv(host); 246 ha = vha->hw; 247 type = "FC_BSG_RPT_ELS"; 248 } else { 249 host = bsg_job->shost; 250 vha = shost_priv(host); 251 ha = vha->hw; 252 type = "FC_BSG_HST_ELS_NOLOGIN"; 253 } 254 255 /* pass through is supported only for ISP 4Gb or higher */ 256 if (!IS_FWI2_CAPABLE(ha)) { 257 ql_dbg(ql_dbg_user, vha, 0x7001, 258 "ELS passthru not supported for ISP23xx based adapters.\n"); 259 rval = -EPERM; 260 goto done; 261 } 262 263 /* Multiple SG's are not supported for ELS requests */ 264 if (bsg_job->request_payload.sg_cnt > 1 || 265 bsg_job->reply_payload.sg_cnt > 1) { 266 ql_dbg(ql_dbg_user, vha, 0x7002, 267 "Multiple SG's are not suppored for ELS requests, " 268 "request_sg_cnt=%x reply_sg_cnt=%x.\n", 269 bsg_job->request_payload.sg_cnt, 270 bsg_job->reply_payload.sg_cnt); 271 rval = -EPERM; 272 goto done; 273 } 274 275 /* ELS request for rport */ 276 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 277 /* make sure the rport is logged in, 278 * if not perform fabric login 279 */ 280 if (qla2x00_fabric_login(vha, fcport, &nextlid)) { 281 ql_dbg(ql_dbg_user, vha, 0x7003, 282 "Failed to login port %06X for ELS passthru.\n", 283 fcport->d_id.b24); 284 rval = -EIO; 285 goto done; 286 } 287 } else { 288 /* Allocate a dummy fcport structure, since functions 289 * preparing the IOCB and mailbox command retrieves port 290 * specific information from fcport structure. 
For Host based 291 * ELS commands there will be no fcport structure allocated 292 */ 293 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 294 if (!fcport) { 295 rval = -ENOMEM; 296 goto done; 297 } 298 299 /* Initialize all required fields of fcport */ 300 fcport->vha = vha; 301 fcport->d_id.b.al_pa = 302 bsg_job->request->rqst_data.h_els.port_id[0]; 303 fcport->d_id.b.area = 304 bsg_job->request->rqst_data.h_els.port_id[1]; 305 fcport->d_id.b.domain = 306 bsg_job->request->rqst_data.h_els.port_id[2]; 307 fcport->loop_id = 308 (fcport->d_id.b.al_pa == 0xFD) ? 309 NPH_FABRIC_CONTROLLER : NPH_F_PORT; 310 } 311 312 if (!vha->flags.online) { 313 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); 314 rval = -EIO; 315 goto done; 316 } 317 318 req_sg_cnt = 319 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 320 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 321 if (!req_sg_cnt) { 322 rval = -ENOMEM; 323 goto done_free_fcport; 324 } 325 326 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 327 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 328 if (!rsp_sg_cnt) { 329 rval = -ENOMEM; 330 goto done_free_fcport; 331 } 332 333 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 334 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 335 ql_log(ql_log_warn, vha, 0x7008, 336 "dma mapping resulted in different sg counts, " 337 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x " 338 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt, 339 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 340 rval = -EAGAIN; 341 goto done_unmap_sg; 342 } 343 344 /* Alloc SRB structure */ 345 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 346 if (!sp) { 347 rval = -ENOMEM; 348 goto done_unmap_sg; 349 } 350 351 sp->type = 352 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? 353 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); 354 sp->name = 355 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? 
356 "bsg_els_rpt" : "bsg_els_hst"); 357 sp->u.bsg_job = bsg_job; 358 sp->free = qla2x00_bsg_sp_free; 359 sp->done = qla2x00_bsg_job_done; 360 361 ql_dbg(ql_dbg_user, vha, 0x700a, 362 "bsg rqst type: %s els type: %x - loop-id=%x " 363 "portid=%-2x%02x%02x.\n", type, 364 bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id, 365 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 366 367 rval = qla2x00_start_sp(sp); 368 if (rval != QLA_SUCCESS) { 369 ql_log(ql_log_warn, vha, 0x700e, 370 "qla2x00_start_sp failed = %d\n", rval); 371 mempool_free(sp, ha->srb_mempool); 372 rval = -EIO; 373 goto done_unmap_sg; 374 } 375 return rval; 376 377 done_unmap_sg: 378 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 379 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 380 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 381 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 382 goto done_free_fcport; 383 384 done_free_fcport: 385 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN) 386 kfree(fcport); 387 done: 388 return rval; 389 } 390 391 inline uint16_t 392 qla24xx_calc_ct_iocbs(uint16_t dsds) 393 { 394 uint16_t iocbs; 395 396 iocbs = 1; 397 if (dsds > 2) { 398 iocbs += (dsds - 2) / 5; 399 if ((dsds - 2) % 5) 400 iocbs++; 401 } 402 return iocbs; 403 } 404 405 static int 406 qla2x00_process_ct(struct fc_bsg_job *bsg_job) 407 { 408 srb_t *sp; 409 struct Scsi_Host *host = bsg_job->shost; 410 scsi_qla_host_t *vha = shost_priv(host); 411 struct qla_hw_data *ha = vha->hw; 412 int rval = (DRIVER_ERROR << 16); 413 int req_sg_cnt, rsp_sg_cnt; 414 uint16_t loop_id; 415 struct fc_port *fcport; 416 char *type = "FC_BSG_HST_CT"; 417 418 req_sg_cnt = 419 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 420 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 421 if (!req_sg_cnt) { 422 ql_log(ql_log_warn, vha, 0x700f, 423 "dma_map_sg return %d for request\n", req_sg_cnt); 424 rval = -ENOMEM; 425 goto done; 426 } 427 428 rsp_sg_cnt = 
dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 429 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 430 if (!rsp_sg_cnt) { 431 ql_log(ql_log_warn, vha, 0x7010, 432 "dma_map_sg return %d for reply\n", rsp_sg_cnt); 433 rval = -ENOMEM; 434 goto done; 435 } 436 437 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 438 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 439 ql_log(ql_log_warn, vha, 0x7011, 440 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " 441 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, 442 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 443 rval = -EAGAIN; 444 goto done_unmap_sg; 445 } 446 447 if (!vha->flags.online) { 448 ql_log(ql_log_warn, vha, 0x7012, 449 "Host is not online.\n"); 450 rval = -EIO; 451 goto done_unmap_sg; 452 } 453 454 loop_id = 455 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000) 456 >> 24; 457 switch (loop_id) { 458 case 0xFC: 459 loop_id = cpu_to_le16(NPH_SNS); 460 break; 461 case 0xFA: 462 loop_id = vha->mgmt_svr_loop_id; 463 break; 464 default: 465 ql_dbg(ql_dbg_user, vha, 0x7013, 466 "Unknown loop id: %x.\n", loop_id); 467 rval = -EINVAL; 468 goto done_unmap_sg; 469 } 470 471 /* Allocate a dummy fcport structure, since functions preparing the 472 * IOCB and mailbox command retrieves port specific information 473 * from fcport structure. 
For Host based ELS commands there will be 474 * no fcport structure allocated 475 */ 476 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 477 if (!fcport) { 478 ql_log(ql_log_warn, vha, 0x7014, 479 "Failed to allocate fcport.\n"); 480 rval = -ENOMEM; 481 goto done_unmap_sg; 482 } 483 484 /* Initialize all required fields of fcport */ 485 fcport->vha = vha; 486 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; 487 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; 488 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; 489 fcport->loop_id = loop_id; 490 491 /* Alloc SRB structure */ 492 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 493 if (!sp) { 494 ql_log(ql_log_warn, vha, 0x7015, 495 "qla2x00_get_sp failed.\n"); 496 rval = -ENOMEM; 497 goto done_free_fcport; 498 } 499 500 sp->type = SRB_CT_CMD; 501 sp->name = "bsg_ct"; 502 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); 503 sp->u.bsg_job = bsg_job; 504 sp->free = qla2x00_bsg_sp_free; 505 sp->done = qla2x00_bsg_job_done; 506 507 ql_dbg(ql_dbg_user, vha, 0x7016, 508 "bsg rqst type: %s else type: %x - " 509 "loop-id=%x portid=%02x%02x%02x.\n", type, 510 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16), 511 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 512 fcport->d_id.b.al_pa); 513 514 rval = qla2x00_start_sp(sp); 515 if (rval != QLA_SUCCESS) { 516 ql_log(ql_log_warn, vha, 0x7017, 517 "qla2x00_start_sp failed=%d.\n", rval); 518 mempool_free(sp, ha->srb_mempool); 519 rval = -EIO; 520 goto done_free_fcport; 521 } 522 return rval; 523 524 done_free_fcport: 525 kfree(fcport); 526 done_unmap_sg: 527 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 528 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 529 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 530 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 531 done: 532 return rval; 533 } 534 /* 535 * Set the port configuration to enable the internal or external 
loopback 536 * depending on the loopback mode. 537 */ 538 static inline int 539 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, 540 uint16_t *new_config, uint16_t mode) 541 { 542 int ret = 0; 543 int rval = 0; 544 struct qla_hw_data *ha = vha->hw; 545 546 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 547 goto done_set_internal; 548 549 if (mode == INTERNAL_LOOPBACK) 550 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 551 else if (mode == EXTERNAL_LOOPBACK) 552 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1); 553 ql_dbg(ql_dbg_user, vha, 0x70be, 554 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK)); 555 556 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3); 557 558 ha->notify_dcbx_comp = 1; 559 ret = qla81xx_set_port_config(vha, new_config); 560 if (ret != QLA_SUCCESS) { 561 ql_log(ql_log_warn, vha, 0x7021, 562 "set port config failed.\n"); 563 ha->notify_dcbx_comp = 0; 564 rval = -EINVAL; 565 goto done_set_internal; 566 } 567 568 /* Wait for DCBX complete event */ 569 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) { 570 ql_dbg(ql_dbg_user, vha, 0x7022, 571 "State change notification not received.\n"); 572 rval = -EINVAL; 573 } else { 574 if (ha->flags.idc_compl_status) { 575 ql_dbg(ql_dbg_user, vha, 0x70c3, 576 "Bad status in IDC Completion AEN\n"); 577 rval = -EINVAL; 578 ha->flags.idc_compl_status = 0; 579 } else 580 ql_dbg(ql_dbg_user, vha, 0x7023, 581 "State change received.\n"); 582 } 583 584 ha->notify_dcbx_comp = 0; 585 586 done_set_internal: 587 return rval; 588 } 589 590 /* Disable loopback mode */ 591 static inline int 592 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, 593 int wait) 594 { 595 int ret = 0; 596 int rval = 0; 597 uint16_t new_config[4]; 598 struct qla_hw_data *ha = vha->hw; 599 600 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 601 goto done_reset_internal; 602 603 memset(new_config, 0 , sizeof(new_config)); 604 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 
1 == 605 ENABLE_INTERNAL_LOOPBACK || 606 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == 607 ENABLE_EXTERNAL_LOOPBACK) { 608 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; 609 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n", 610 (new_config[0] & INTERNAL_LOOPBACK_MASK)); 611 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; 612 613 ha->notify_dcbx_comp = wait; 614 ret = qla81xx_set_port_config(vha, new_config); 615 if (ret != QLA_SUCCESS) { 616 ql_log(ql_log_warn, vha, 0x7025, 617 "Set port config failed.\n"); 618 ha->notify_dcbx_comp = 0; 619 rval = -EINVAL; 620 goto done_reset_internal; 621 } 622 623 /* Wait for DCBX complete event */ 624 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, 625 (20 * HZ))) { 626 ql_dbg(ql_dbg_user, vha, 0x7026, 627 "State change notification not received.\n"); 628 ha->notify_dcbx_comp = 0; 629 rval = -EINVAL; 630 goto done_reset_internal; 631 } else 632 ql_dbg(ql_dbg_user, vha, 0x7027, 633 "State change received.\n"); 634 635 ha->notify_dcbx_comp = 0; 636 } 637 done_reset_internal: 638 return rval; 639 } 640 641 static int 642 qla2x00_process_loopback(struct fc_bsg_job *bsg_job) 643 { 644 struct Scsi_Host *host = bsg_job->shost; 645 scsi_qla_host_t *vha = shost_priv(host); 646 struct qla_hw_data *ha = vha->hw; 647 int rval; 648 uint8_t command_sent; 649 char *type; 650 struct msg_echo_lb elreq; 651 uint16_t response[MAILBOX_REGISTER_COUNT]; 652 uint16_t config[4], new_config[4]; 653 uint8_t *fw_sts_ptr; 654 uint8_t *req_data = NULL; 655 dma_addr_t req_data_dma; 656 uint32_t req_data_len; 657 uint8_t *rsp_data = NULL; 658 dma_addr_t rsp_data_dma; 659 uint32_t rsp_data_len; 660 661 if (!vha->flags.online) { 662 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n"); 663 return -EIO; 664 } 665 666 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, 667 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 668 DMA_TO_DEVICE); 669 670 if (!elreq.req_sg_cnt) { 671 ql_log(ql_log_warn, vha, 0x701a, 672 
"dma_map_sg returned %d for request.\n", elreq.req_sg_cnt); 673 return -ENOMEM; 674 } 675 676 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 677 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 678 DMA_FROM_DEVICE); 679 680 if (!elreq.rsp_sg_cnt) { 681 ql_log(ql_log_warn, vha, 0x701b, 682 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt); 683 rval = -ENOMEM; 684 goto done_unmap_req_sg; 685 } 686 687 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || 688 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 689 ql_log(ql_log_warn, vha, 0x701c, 690 "dma mapping resulted in different sg counts, " 691 "request_sg_cnt: %x dma_request_sg_cnt: %x " 692 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", 693 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, 694 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt); 695 rval = -EAGAIN; 696 goto done_unmap_sg; 697 } 698 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 699 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, 700 &req_data_dma, GFP_KERNEL); 701 if (!req_data) { 702 ql_log(ql_log_warn, vha, 0x701d, 703 "dma alloc failed for req_data.\n"); 704 rval = -ENOMEM; 705 goto done_unmap_sg; 706 } 707 708 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, 709 &rsp_data_dma, GFP_KERNEL); 710 if (!rsp_data) { 711 ql_log(ql_log_warn, vha, 0x7004, 712 "dma alloc failed for rsp_data.\n"); 713 rval = -ENOMEM; 714 goto done_free_dma_req; 715 } 716 717 /* Copy the request buffer in req_data now */ 718 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 719 bsg_job->request_payload.sg_cnt, req_data, req_data_len); 720 721 elreq.send_dma = req_data_dma; 722 elreq.rcv_dma = rsp_data_dma; 723 elreq.transfer_size = req_data_len; 724 725 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 726 727 if (atomic_read(&vha->loop_state) == LOOP_READY && 728 (ha->current_topology == ISP_CFG_F || 729 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) && 730 le32_to_cpu(*(uint32_t 
*)req_data) == ELS_OPCODE_BYTE 731 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 732 elreq.options == EXTERNAL_LOOPBACK) { 733 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 734 ql_dbg(ql_dbg_user, vha, 0x701e, 735 "BSG request type: %s.\n", type); 736 command_sent = INT_DEF_LB_ECHO_CMD; 737 rval = qla2x00_echo_test(vha, &elreq, response); 738 } else { 739 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) { 740 memset(config, 0, sizeof(config)); 741 memset(new_config, 0, sizeof(new_config)); 742 if (qla81xx_get_port_config(vha, config)) { 743 ql_log(ql_log_warn, vha, 0x701f, 744 "Get port config failed.\n"); 745 rval = -EPERM; 746 goto done_free_dma_rsp; 747 } 748 749 ql_dbg(ql_dbg_user, vha, 0x70c0, 750 "elreq.options=%04x\n", elreq.options); 751 752 if (elreq.options == EXTERNAL_LOOPBACK) 753 if (IS_QLA8031(ha)) 754 rval = qla81xx_set_loopback_mode(vha, 755 config, new_config, elreq.options); 756 else 757 rval = qla81xx_reset_loopback_mode(vha, 758 config, 1); 759 else 760 rval = qla81xx_set_loopback_mode(vha, config, 761 new_config, elreq.options); 762 763 if (rval) { 764 rval = -EPERM; 765 goto done_free_dma_rsp; 766 } 767 768 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 769 ql_dbg(ql_dbg_user, vha, 0x7028, 770 "BSG request type: %s.\n", type); 771 772 command_sent = INT_DEF_LB_LOOPBACK_CMD; 773 rval = qla2x00_loopback_test(vha, &elreq, response); 774 775 if (new_config[0]) { 776 /* Revert back to original port config 777 * Also clear internal loopback 778 */ 779 qla81xx_reset_loopback_mode(vha, 780 new_config, 0); 781 } 782 783 if (response[0] == MBS_COMMAND_ERROR && 784 response[1] == MBS_LB_RESET) { 785 ql_log(ql_log_warn, vha, 0x7029, 786 "MBX command error, Aborting ISP.\n"); 787 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 788 qla2xxx_wake_dpc(vha); 789 qla2x00_wait_for_chip_reset(vha); 790 /* Also reset the MPI */ 791 if (qla81xx_restart_mpi_firmware(vha) != 792 QLA_SUCCESS) { 793 ql_log(ql_log_warn, vha, 0x702a, 794 "MPI reset failed.\n"); 795 } 796 797 rval = -EIO; 798 goto 
done_free_dma_rsp; 799 } 800 } else { 801 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 802 ql_dbg(ql_dbg_user, vha, 0x702b, 803 "BSG request type: %s.\n", type); 804 command_sent = INT_DEF_LB_LOOPBACK_CMD; 805 rval = qla2x00_loopback_test(vha, &elreq, response); 806 } 807 } 808 809 if (rval) { 810 ql_log(ql_log_warn, vha, 0x702c, 811 "Vendor request %s failed.\n", type); 812 813 rval = 0; 814 bsg_job->reply->result = (DID_ERROR << 16); 815 bsg_job->reply->reply_payload_rcv_len = 0; 816 } else { 817 ql_dbg(ql_dbg_user, vha, 0x702d, 818 "Vendor request %s completed.\n", type); 819 bsg_job->reply->result = (DID_OK << 16); 820 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 821 bsg_job->reply_payload.sg_cnt, rsp_data, 822 rsp_data_len); 823 } 824 825 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 826 sizeof(response) + sizeof(uint8_t); 827 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + 828 sizeof(struct fc_bsg_reply); 829 memcpy(fw_sts_ptr, response, sizeof(response)); 830 fw_sts_ptr += sizeof(response); 831 *fw_sts_ptr = command_sent; 832 833 done_free_dma_rsp: 834 dma_free_coherent(&ha->pdev->dev, rsp_data_len, 835 rsp_data, rsp_data_dma); 836 done_free_dma_req: 837 dma_free_coherent(&ha->pdev->dev, req_data_len, 838 req_data, req_data_dma); 839 done_unmap_sg: 840 dma_unmap_sg(&ha->pdev->dev, 841 bsg_job->reply_payload.sg_list, 842 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 843 done_unmap_req_sg: 844 dma_unmap_sg(&ha->pdev->dev, 845 bsg_job->request_payload.sg_list, 846 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 847 if (!rval) 848 bsg_job->job_done(bsg_job); 849 return rval; 850 } 851 852 static int 853 qla84xx_reset(struct fc_bsg_job *bsg_job) 854 { 855 struct Scsi_Host *host = bsg_job->shost; 856 scsi_qla_host_t *vha = shost_priv(host); 857 struct qla_hw_data *ha = vha->hw; 858 int rval = 0; 859 uint32_t flag; 860 861 if (!IS_QLA84XX(ha)) { 862 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n"); 863 return -EINVAL; 864 } 865 866 flag = 
bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 867 868 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); 869 870 if (rval) { 871 ql_log(ql_log_warn, vha, 0x7030, 872 "Vendor request 84xx reset failed.\n"); 873 rval = (DID_ERROR << 16); 874 875 } else { 876 ql_dbg(ql_dbg_user, vha, 0x7031, 877 "Vendor request 84xx reset completed.\n"); 878 bsg_job->reply->result = DID_OK; 879 bsg_job->job_done(bsg_job); 880 } 881 882 return rval; 883 } 884 885 static int 886 qla84xx_updatefw(struct fc_bsg_job *bsg_job) 887 { 888 struct Scsi_Host *host = bsg_job->shost; 889 scsi_qla_host_t *vha = shost_priv(host); 890 struct qla_hw_data *ha = vha->hw; 891 struct verify_chip_entry_84xx *mn = NULL; 892 dma_addr_t mn_dma, fw_dma; 893 void *fw_buf = NULL; 894 int rval = 0; 895 uint32_t sg_cnt; 896 uint32_t data_len; 897 uint16_t options; 898 uint32_t flag; 899 uint32_t fw_ver; 900 901 if (!IS_QLA84XX(ha)) { 902 ql_dbg(ql_dbg_user, vha, 0x7032, 903 "Not 84xx, exiting.\n"); 904 return -EINVAL; 905 } 906 907 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 908 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 909 if (!sg_cnt) { 910 ql_log(ql_log_warn, vha, 0x7033, 911 "dma_map_sg returned %d for request.\n", sg_cnt); 912 return -ENOMEM; 913 } 914 915 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 916 ql_log(ql_log_warn, vha, 0x7034, 917 "DMA mapping resulted in different sg counts, " 918 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", 919 bsg_job->request_payload.sg_cnt, sg_cnt); 920 rval = -EAGAIN; 921 goto done_unmap_sg; 922 } 923 924 data_len = bsg_job->request_payload.payload_len; 925 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, 926 &fw_dma, GFP_KERNEL); 927 if (!fw_buf) { 928 ql_log(ql_log_warn, vha, 0x7035, 929 "DMA alloc failed for fw_buf.\n"); 930 rval = -ENOMEM; 931 goto done_unmap_sg; 932 } 933 934 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 935 bsg_job->request_payload.sg_cnt, fw_buf, data_len); 936 937 mn = 
dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 938 if (!mn) { 939 ql_log(ql_log_warn, vha, 0x7036, 940 "DMA alloc failed for fw buffer.\n"); 941 rval = -ENOMEM; 942 goto done_free_fw_buf; 943 } 944 945 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 946 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2))); 947 948 memset(mn, 0, sizeof(struct access_chip_84xx)); 949 mn->entry_type = VERIFY_CHIP_IOCB_TYPE; 950 mn->entry_count = 1; 951 952 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA; 953 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD) 954 options |= VCO_DIAG_FW; 955 956 mn->options = cpu_to_le16(options); 957 mn->fw_ver = cpu_to_le32(fw_ver); 958 mn->fw_size = cpu_to_le32(data_len); 959 mn->fw_seq_size = cpu_to_le32(data_len); 960 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma)); 961 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma)); 962 mn->dseg_length = cpu_to_le32(data_len); 963 mn->data_seg_cnt = cpu_to_le16(1); 964 965 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 966 967 if (rval) { 968 ql_log(ql_log_warn, vha, 0x7037, 969 "Vendor request 84xx updatefw failed.\n"); 970 971 rval = (DID_ERROR << 16); 972 } else { 973 ql_dbg(ql_dbg_user, vha, 0x7038, 974 "Vendor request 84xx updatefw completed.\n"); 975 976 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 977 bsg_job->reply->result = DID_OK; 978 } 979 980 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 981 982 done_free_fw_buf: 983 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma); 984 985 done_unmap_sg: 986 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 987 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 988 989 if (!rval) 990 bsg_job->job_done(bsg_job); 991 return rval; 992 } 993 994 static int 995 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job) 996 { 997 struct Scsi_Host *host = bsg_job->shost; 998 scsi_qla_host_t *vha = shost_priv(host); 999 struct qla_hw_data *ha = vha->hw; 1000 struct access_chip_84xx *mn = NULL; 1001 dma_addr_t mn_dma, mgmt_dma; 1002 void 
*mgmt_b = NULL; 1003 int rval = 0; 1004 struct qla_bsg_a84_mgmt *ql84_mgmt; 1005 uint32_t sg_cnt; 1006 uint32_t data_len = 0; 1007 uint32_t dma_direction = DMA_NONE; 1008 1009 if (!IS_QLA84XX(ha)) { 1010 ql_log(ql_log_warn, vha, 0x703a, 1011 "Not 84xx, exiting.\n"); 1012 return -EINVAL; 1013 } 1014 1015 ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request + 1016 sizeof(struct fc_bsg_request)); 1017 if (!ql84_mgmt) { 1018 ql_log(ql_log_warn, vha, 0x703b, 1019 "MGMT header not provided, exiting.\n"); 1020 return -EINVAL; 1021 } 1022 1023 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1024 if (!mn) { 1025 ql_log(ql_log_warn, vha, 0x703c, 1026 "DMA alloc failed for fw buffer.\n"); 1027 return -ENOMEM; 1028 } 1029 1030 memset(mn, 0, sizeof(struct access_chip_84xx)); 1031 mn->entry_type = ACCESS_CHIP_IOCB_TYPE; 1032 mn->entry_count = 1; 1033 1034 switch (ql84_mgmt->mgmt.cmd) { 1035 case QLA84_MGMT_READ_MEM: 1036 case QLA84_MGMT_GET_INFO: 1037 sg_cnt = dma_map_sg(&ha->pdev->dev, 1038 bsg_job->reply_payload.sg_list, 1039 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1040 if (!sg_cnt) { 1041 ql_log(ql_log_warn, vha, 0x703d, 1042 "dma_map_sg returned %d for reply.\n", sg_cnt); 1043 rval = -ENOMEM; 1044 goto exit_mgmt; 1045 } 1046 1047 dma_direction = DMA_FROM_DEVICE; 1048 1049 if (sg_cnt != bsg_job->reply_payload.sg_cnt) { 1050 ql_log(ql_log_warn, vha, 0x703e, 1051 "DMA mapping resulted in different sg counts, " 1052 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", 1053 bsg_job->reply_payload.sg_cnt, sg_cnt); 1054 rval = -EAGAIN; 1055 goto done_unmap_sg; 1056 } 1057 1058 data_len = bsg_job->reply_payload.payload_len; 1059 1060 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1061 &mgmt_dma, GFP_KERNEL); 1062 if (!mgmt_b) { 1063 ql_log(ql_log_warn, vha, 0x703f, 1064 "DMA alloc failed for mgmt_b.\n"); 1065 rval = -ENOMEM; 1066 goto done_unmap_sg; 1067 } 1068 1069 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) { 1070 mn->options = 
cpu_to_le16(ACO_DUMP_MEMORY); 1071 mn->parameter1 = 1072 cpu_to_le32( 1073 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); 1074 1075 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) { 1076 mn->options = cpu_to_le16(ACO_REQUEST_INFO); 1077 mn->parameter1 = 1078 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type); 1079 1080 mn->parameter2 = 1081 cpu_to_le32( 1082 ql84_mgmt->mgmt.mgmtp.u.info.context); 1083 } 1084 break; 1085 1086 case QLA84_MGMT_WRITE_MEM: 1087 sg_cnt = dma_map_sg(&ha->pdev->dev, 1088 bsg_job->request_payload.sg_list, 1089 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1090 1091 if (!sg_cnt) { 1092 ql_log(ql_log_warn, vha, 0x7040, 1093 "dma_map_sg returned %d.\n", sg_cnt); 1094 rval = -ENOMEM; 1095 goto exit_mgmt; 1096 } 1097 1098 dma_direction = DMA_TO_DEVICE; 1099 1100 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1101 ql_log(ql_log_warn, vha, 0x7041, 1102 "DMA mapping resulted in different sg counts, " 1103 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", 1104 bsg_job->request_payload.sg_cnt, sg_cnt); 1105 rval = -EAGAIN; 1106 goto done_unmap_sg; 1107 } 1108 1109 data_len = bsg_job->request_payload.payload_len; 1110 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1111 &mgmt_dma, GFP_KERNEL); 1112 if (!mgmt_b) { 1113 ql_log(ql_log_warn, vha, 0x7042, 1114 "DMA alloc failed for mgmt_b.\n"); 1115 rval = -ENOMEM; 1116 goto done_unmap_sg; 1117 } 1118 1119 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1120 bsg_job->request_payload.sg_cnt, mgmt_b, data_len); 1121 1122 mn->options = cpu_to_le16(ACO_LOAD_MEMORY); 1123 mn->parameter1 = 1124 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); 1125 break; 1126 1127 case QLA84_MGMT_CHNG_CONFIG: 1128 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM); 1129 mn->parameter1 = 1130 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id); 1131 1132 mn->parameter2 = 1133 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0); 1134 1135 mn->parameter3 = 1136 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1); 1137 break; 
1138 1139 default: 1140 rval = -EIO; 1141 goto exit_mgmt; 1142 } 1143 1144 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) { 1145 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len); 1146 mn->dseg_count = cpu_to_le16(1); 1147 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma)); 1148 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma)); 1149 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len); 1150 } 1151 1152 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); 1153 1154 if (rval) { 1155 ql_log(ql_log_warn, vha, 0x7043, 1156 "Vendor request 84xx mgmt failed.\n"); 1157 1158 rval = (DID_ERROR << 16); 1159 1160 } else { 1161 ql_dbg(ql_dbg_user, vha, 0x7044, 1162 "Vendor request 84xx mgmt completed.\n"); 1163 1164 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1165 bsg_job->reply->result = DID_OK; 1166 1167 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) || 1168 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) { 1169 bsg_job->reply->reply_payload_rcv_len = 1170 bsg_job->reply_payload.payload_len; 1171 1172 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1173 bsg_job->reply_payload.sg_cnt, mgmt_b, 1174 data_len); 1175 } 1176 } 1177 1178 done_unmap_sg: 1179 if (mgmt_b) 1180 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma); 1181 1182 if (dma_direction == DMA_TO_DEVICE) 1183 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1184 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1185 else if (dma_direction == DMA_FROM_DEVICE) 1186 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 1187 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1188 1189 exit_mgmt: 1190 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 1191 1192 if (!rval) 1193 bsg_job->job_done(bsg_job); 1194 return rval; 1195 } 1196 1197 static int 1198 qla24xx_iidma(struct fc_bsg_job *bsg_job) 1199 { 1200 struct Scsi_Host *host = bsg_job->shost; 1201 scsi_qla_host_t *vha = shost_priv(host); 1202 int rval = 0; 1203 struct qla_port_param *port_param = NULL; 1204 fc_port_t *fcport = NULL; 1205 
uint16_t mb[MAILBOX_REGISTER_COUNT]; 1206 uint8_t *rsp_ptr = NULL; 1207 1208 if (!IS_IIDMA_CAPABLE(vha->hw)) { 1209 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); 1210 return -EINVAL; 1211 } 1212 1213 port_param = (struct qla_port_param *)((char *)bsg_job->request + 1214 sizeof(struct fc_bsg_request)); 1215 if (!port_param) { 1216 ql_log(ql_log_warn, vha, 0x7047, 1217 "port_param header not provided.\n"); 1218 return -EINVAL; 1219 } 1220 1221 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { 1222 ql_log(ql_log_warn, vha, 0x7048, 1223 "Invalid destination type.\n"); 1224 return -EINVAL; 1225 } 1226 1227 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1228 if (fcport->port_type != FCT_TARGET) 1229 continue; 1230 1231 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn, 1232 fcport->port_name, sizeof(fcport->port_name))) 1233 continue; 1234 break; 1235 } 1236 1237 if (!fcport) { 1238 ql_log(ql_log_warn, vha, 0x7049, 1239 "Failed to find port.\n"); 1240 return -EINVAL; 1241 } 1242 1243 if (atomic_read(&fcport->state) != FCS_ONLINE) { 1244 ql_log(ql_log_warn, vha, 0x704a, 1245 "Port is not online.\n"); 1246 return -EINVAL; 1247 } 1248 1249 if (fcport->flags & FCF_LOGIN_NEEDED) { 1250 ql_log(ql_log_warn, vha, 0x704b, 1251 "Remote port not logged in flags = 0x%x.\n", fcport->flags); 1252 return -EINVAL; 1253 } 1254 1255 if (port_param->mode) 1256 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, 1257 port_param->speed, mb); 1258 else 1259 rval = qla2x00_get_idma_speed(vha, fcport->loop_id, 1260 &port_param->speed, mb); 1261 1262 if (rval) { 1263 ql_log(ql_log_warn, vha, 0x704c, 1264 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- " 1265 "%04x %x %04x %04x.\n", fcport->port_name[0], 1266 fcport->port_name[1], fcport->port_name[2], 1267 fcport->port_name[3], fcport->port_name[4], 1268 fcport->port_name[5], fcport->port_name[6], 1269 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]); 1270 rval = (DID_ERROR << 16); 1271 } 
else { 1272 if (!port_param->mode) { 1273 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 1274 sizeof(struct qla_port_param); 1275 1276 rsp_ptr = ((uint8_t *)bsg_job->reply) + 1277 sizeof(struct fc_bsg_reply); 1278 1279 memcpy(rsp_ptr, port_param, 1280 sizeof(struct qla_port_param)); 1281 } 1282 1283 bsg_job->reply->result = DID_OK; 1284 bsg_job->job_done(bsg_job); 1285 } 1286 1287 return rval; 1288 } 1289 1290 static int 1291 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha, 1292 uint8_t is_update) 1293 { 1294 uint32_t start = 0; 1295 int valid = 0; 1296 struct qla_hw_data *ha = vha->hw; 1297 1298 if (unlikely(pci_channel_offline(ha->pdev))) 1299 return -EINVAL; 1300 1301 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1302 if (start > ha->optrom_size) { 1303 ql_log(ql_log_warn, vha, 0x7055, 1304 "start %d > optrom_size %d.\n", start, ha->optrom_size); 1305 return -EINVAL; 1306 } 1307 1308 if (ha->optrom_state != QLA_SWAITING) { 1309 ql_log(ql_log_info, vha, 0x7056, 1310 "optrom_state %d.\n", ha->optrom_state); 1311 return -EBUSY; 1312 } 1313 1314 ha->optrom_region_start = start; 1315 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update); 1316 if (is_update) { 1317 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 1318 valid = 1; 1319 else if (start == (ha->flt_region_boot * 4) || 1320 start == (ha->flt_region_fw * 4)) 1321 valid = 1; 1322 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 1323 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 1324 valid = 1; 1325 if (!valid) { 1326 ql_log(ql_log_warn, vha, 0x7058, 1327 "Invalid start region 0x%x/0x%x.\n", start, 1328 bsg_job->request_payload.payload_len); 1329 return -EINVAL; 1330 } 1331 1332 ha->optrom_region_size = start + 1333 bsg_job->request_payload.payload_len > ha->optrom_size ? 
1334 ha->optrom_size - start : 1335 bsg_job->request_payload.payload_len; 1336 ha->optrom_state = QLA_SWRITING; 1337 } else { 1338 ha->optrom_region_size = start + 1339 bsg_job->reply_payload.payload_len > ha->optrom_size ? 1340 ha->optrom_size - start : 1341 bsg_job->reply_payload.payload_len; 1342 ha->optrom_state = QLA_SREADING; 1343 } 1344 1345 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 1346 if (!ha->optrom_buffer) { 1347 ql_log(ql_log_warn, vha, 0x7059, 1348 "Read: Unable to allocate memory for optrom retrieval " 1349 "(%x)\n", ha->optrom_region_size); 1350 1351 ha->optrom_state = QLA_SWAITING; 1352 return -ENOMEM; 1353 } 1354 1355 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 1356 return 0; 1357 } 1358 1359 static int 1360 qla2x00_read_optrom(struct fc_bsg_job *bsg_job) 1361 { 1362 struct Scsi_Host *host = bsg_job->shost; 1363 scsi_qla_host_t *vha = shost_priv(host); 1364 struct qla_hw_data *ha = vha->hw; 1365 int rval = 0; 1366 1367 if (ha->flags.nic_core_reset_hdlr_active) 1368 return -EBUSY; 1369 1370 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1371 if (rval) 1372 return rval; 1373 1374 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 1375 ha->optrom_region_start, ha->optrom_region_size); 1376 1377 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1378 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, 1379 ha->optrom_region_size); 1380 1381 bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size; 1382 bsg_job->reply->result = DID_OK; 1383 vfree(ha->optrom_buffer); 1384 ha->optrom_buffer = NULL; 1385 ha->optrom_state = QLA_SWAITING; 1386 bsg_job->job_done(bsg_job); 1387 return rval; 1388 } 1389 1390 static int 1391 qla2x00_update_optrom(struct fc_bsg_job *bsg_job) 1392 { 1393 struct Scsi_Host *host = bsg_job->shost; 1394 scsi_qla_host_t *vha = shost_priv(host); 1395 struct qla_hw_data *ha = vha->hw; 1396 int rval = 0; 1397 1398 rval = qla2x00_optrom_setup(bsg_job, vha, 1); 1399 if (rval) 1400 return rval; 1401 1402 /* Set the 
isp82xx_no_md_cap not to capture minidump */ 1403 ha->flags.isp82xx_no_md_cap = 1; 1404 1405 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1406 bsg_job->request_payload.sg_cnt, ha->optrom_buffer, 1407 ha->optrom_region_size); 1408 1409 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 1410 ha->optrom_region_start, ha->optrom_region_size); 1411 1412 bsg_job->reply->result = DID_OK; 1413 vfree(ha->optrom_buffer); 1414 ha->optrom_buffer = NULL; 1415 ha->optrom_state = QLA_SWAITING; 1416 bsg_job->job_done(bsg_job); 1417 return rval; 1418 } 1419 1420 static int 1421 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job) 1422 { 1423 struct Scsi_Host *host = bsg_job->shost; 1424 scsi_qla_host_t *vha = shost_priv(host); 1425 struct qla_hw_data *ha = vha->hw; 1426 int rval = 0; 1427 uint8_t bsg[DMA_POOL_SIZE]; 1428 struct qla_image_version_list *list = (void *)bsg; 1429 struct qla_image_version *image; 1430 uint32_t count; 1431 dma_addr_t sfp_dma; 1432 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1433 if (!sfp) { 1434 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1435 EXT_STATUS_NO_MEMORY; 1436 goto done; 1437 } 1438 1439 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1440 bsg_job->request_payload.sg_cnt, list, sizeof(bsg)); 1441 1442 image = list->version; 1443 count = list->count; 1444 while (count--) { 1445 memcpy(sfp, &image->field_info, sizeof(image->field_info)); 1446 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1447 image->field_address.device, image->field_address.offset, 1448 sizeof(image->field_info), image->field_address.option); 1449 if (rval) { 1450 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1451 EXT_STATUS_MAILBOX; 1452 goto dealloc; 1453 } 1454 image++; 1455 } 1456 1457 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1458 1459 dealloc: 1460 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1461 1462 done: 1463 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1464 bsg_job->reply->result = DID_OK << 
16; 1465 bsg_job->job_done(bsg_job); 1466 1467 return 0; 1468 } 1469 1470 static int 1471 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) 1472 { 1473 struct Scsi_Host *host = bsg_job->shost; 1474 scsi_qla_host_t *vha = shost_priv(host); 1475 struct qla_hw_data *ha = vha->hw; 1476 int rval = 0; 1477 uint8_t bsg[DMA_POOL_SIZE]; 1478 struct qla_status_reg *sr = (void *)bsg; 1479 dma_addr_t sfp_dma; 1480 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1481 if (!sfp) { 1482 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1483 EXT_STATUS_NO_MEMORY; 1484 goto done; 1485 } 1486 1487 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1488 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); 1489 1490 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 1491 sr->field_address.device, sr->field_address.offset, 1492 sizeof(sr->status_reg), sr->field_address.option); 1493 sr->status_reg = *sfp; 1494 1495 if (rval) { 1496 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1497 EXT_STATUS_MAILBOX; 1498 goto dealloc; 1499 } 1500 1501 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1502 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); 1503 1504 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1505 1506 dealloc: 1507 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1508 1509 done: 1510 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1511 bsg_job->reply->reply_payload_rcv_len = sizeof(*sr); 1512 bsg_job->reply->result = DID_OK << 16; 1513 bsg_job->job_done(bsg_job); 1514 1515 return 0; 1516 } 1517 1518 static int 1519 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job) 1520 { 1521 struct Scsi_Host *host = bsg_job->shost; 1522 scsi_qla_host_t *vha = shost_priv(host); 1523 struct qla_hw_data *ha = vha->hw; 1524 int rval = 0; 1525 uint8_t bsg[DMA_POOL_SIZE]; 1526 struct qla_status_reg *sr = (void *)bsg; 1527 dma_addr_t sfp_dma; 1528 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1529 if (!sfp) { 1530 
bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1531 EXT_STATUS_NO_MEMORY; 1532 goto done; 1533 } 1534 1535 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1536 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); 1537 1538 *sfp = sr->status_reg; 1539 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1540 sr->field_address.device, sr->field_address.offset, 1541 sizeof(sr->status_reg), sr->field_address.option); 1542 1543 if (rval) { 1544 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1545 EXT_STATUS_MAILBOX; 1546 goto dealloc; 1547 } 1548 1549 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1550 1551 dealloc: 1552 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1553 1554 done: 1555 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1556 bsg_job->reply->result = DID_OK << 16; 1557 bsg_job->job_done(bsg_job); 1558 1559 return 0; 1560 } 1561 1562 static int 1563 qla2x00_write_i2c(struct fc_bsg_job *bsg_job) 1564 { 1565 struct Scsi_Host *host = bsg_job->shost; 1566 scsi_qla_host_t *vha = shost_priv(host); 1567 struct qla_hw_data *ha = vha->hw; 1568 int rval = 0; 1569 uint8_t bsg[DMA_POOL_SIZE]; 1570 struct qla_i2c_access *i2c = (void *)bsg; 1571 dma_addr_t sfp_dma; 1572 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1573 if (!sfp) { 1574 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1575 EXT_STATUS_NO_MEMORY; 1576 goto done; 1577 } 1578 1579 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1580 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); 1581 1582 memcpy(sfp, i2c->buffer, i2c->length); 1583 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1584 i2c->device, i2c->offset, i2c->length, i2c->option); 1585 1586 if (rval) { 1587 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1588 EXT_STATUS_MAILBOX; 1589 goto dealloc; 1590 } 1591 1592 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1593 1594 dealloc: 1595 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1596 1597 done: 1598 bsg_job->reply_len = 
sizeof(struct fc_bsg_reply); 1599 bsg_job->reply->result = DID_OK << 16; 1600 bsg_job->job_done(bsg_job); 1601 1602 return 0; 1603 } 1604 1605 static int 1606 qla2x00_read_i2c(struct fc_bsg_job *bsg_job) 1607 { 1608 struct Scsi_Host *host = bsg_job->shost; 1609 scsi_qla_host_t *vha = shost_priv(host); 1610 struct qla_hw_data *ha = vha->hw; 1611 int rval = 0; 1612 uint8_t bsg[DMA_POOL_SIZE]; 1613 struct qla_i2c_access *i2c = (void *)bsg; 1614 dma_addr_t sfp_dma; 1615 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1616 if (!sfp) { 1617 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1618 EXT_STATUS_NO_MEMORY; 1619 goto done; 1620 } 1621 1622 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1623 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); 1624 1625 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 1626 i2c->device, i2c->offset, i2c->length, i2c->option); 1627 1628 if (rval) { 1629 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 1630 EXT_STATUS_MAILBOX; 1631 goto dealloc; 1632 } 1633 1634 memcpy(i2c->buffer, sfp, i2c->length); 1635 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1636 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c)); 1637 1638 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1639 1640 dealloc: 1641 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1642 1643 done: 1644 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1645 bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c); 1646 bsg_job->reply->result = DID_OK << 16; 1647 bsg_job->job_done(bsg_job); 1648 1649 return 0; 1650 } 1651 1652 static int 1653 qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job) 1654 { 1655 struct Scsi_Host *host = bsg_job->shost; 1656 scsi_qla_host_t *vha = shost_priv(host); 1657 struct qla_hw_data *ha = vha->hw; 1658 uint16_t thread_id; 1659 uint32_t rval = EXT_STATUS_OK; 1660 uint16_t req_sg_cnt = 0; 1661 uint16_t rsp_sg_cnt = 0; 1662 uint16_t nextlid = 0; 1663 uint32_t tot_dsds; 1664 srb_t *sp = NULL; 1665 uint32_t 
req_data_len = 0; 1666 uint32_t rsp_data_len = 0; 1667 1668 /* Check the type of the adapter */ 1669 if (!IS_BIDI_CAPABLE(ha)) { 1670 ql_log(ql_log_warn, vha, 0x70a0, 1671 "This adapter is not supported\n"); 1672 rval = EXT_STATUS_NOT_SUPPORTED; 1673 goto done; 1674 } 1675 1676 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1677 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1678 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 1679 rval = EXT_STATUS_BUSY; 1680 goto done; 1681 } 1682 1683 /* Check if host is online */ 1684 if (!vha->flags.online) { 1685 ql_log(ql_log_warn, vha, 0x70a1, 1686 "Host is not online\n"); 1687 rval = EXT_STATUS_DEVICE_OFFLINE; 1688 goto done; 1689 } 1690 1691 /* Check if cable is plugged in or not */ 1692 if (vha->device_flags & DFLG_NO_CABLE) { 1693 ql_log(ql_log_warn, vha, 0x70a2, 1694 "Cable is unplugged...\n"); 1695 rval = EXT_STATUS_INVALID_CFG; 1696 goto done; 1697 } 1698 1699 /* Check if the switch is connected or not */ 1700 if (ha->current_topology != ISP_CFG_F) { 1701 ql_log(ql_log_warn, vha, 0x70a3, 1702 "Host is not connected to the switch\n"); 1703 rval = EXT_STATUS_INVALID_CFG; 1704 goto done; 1705 } 1706 1707 /* Check if operating mode is P2P */ 1708 if (ha->operating_mode != P2P) { 1709 ql_log(ql_log_warn, vha, 0x70a4, 1710 "Host is operating mode is not P2p\n"); 1711 rval = EXT_STATUS_INVALID_CFG; 1712 goto done; 1713 } 1714 1715 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1716 1717 mutex_lock(&ha->selflogin_lock); 1718 if (vha->self_login_loop_id == 0) { 1719 /* Initialize all required fields of fcport */ 1720 vha->bidir_fcport.vha = vha; 1721 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa; 1722 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area; 1723 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain; 1724 vha->bidir_fcport.loop_id = vha->loop_id; 1725 1726 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) { 1727 ql_log(ql_log_warn, vha, 0x70a7, 1728 "Failed to login port %06X for 
bidirectional IOCB\n", 1729 vha->bidir_fcport.d_id.b24); 1730 mutex_unlock(&ha->selflogin_lock); 1731 rval = EXT_STATUS_MAILBOX; 1732 goto done; 1733 } 1734 vha->self_login_loop_id = nextlid - 1; 1735 1736 } 1737 /* Assign the self login loop id to fcport */ 1738 mutex_unlock(&ha->selflogin_lock); 1739 1740 vha->bidir_fcport.loop_id = vha->self_login_loop_id; 1741 1742 req_sg_cnt = dma_map_sg(&ha->pdev->dev, 1743 bsg_job->request_payload.sg_list, 1744 bsg_job->request_payload.sg_cnt, 1745 DMA_TO_DEVICE); 1746 1747 if (!req_sg_cnt) { 1748 rval = EXT_STATUS_NO_MEMORY; 1749 goto done; 1750 } 1751 1752 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 1753 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 1754 DMA_FROM_DEVICE); 1755 1756 if (!rsp_sg_cnt) { 1757 rval = EXT_STATUS_NO_MEMORY; 1758 goto done_unmap_req_sg; 1759 } 1760 1761 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 1762 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 1763 ql_dbg(ql_dbg_user, vha, 0x70a9, 1764 "Dma mapping resulted in different sg counts " 1765 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: " 1766 "%x dma_reply_sg_cnt: %x]\n", 1767 bsg_job->request_payload.sg_cnt, req_sg_cnt, 1768 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 1769 rval = EXT_STATUS_NO_MEMORY; 1770 goto done_unmap_sg; 1771 } 1772 1773 if (req_data_len != rsp_data_len) { 1774 rval = EXT_STATUS_BUSY; 1775 ql_log(ql_log_warn, vha, 0x70aa, 1776 "req_data_len != rsp_data_len\n"); 1777 goto done_unmap_sg; 1778 } 1779 1780 req_data_len = bsg_job->request_payload.payload_len; 1781 rsp_data_len = bsg_job->reply_payload.payload_len; 1782 1783 1784 /* Alloc SRB structure */ 1785 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); 1786 if (!sp) { 1787 ql_dbg(ql_dbg_user, vha, 0x70ac, 1788 "Alloc SRB structure failed\n"); 1789 rval = EXT_STATUS_NO_MEMORY; 1790 goto done_unmap_sg; 1791 } 1792 1793 /*Populate srb->ctx with bidir ctx*/ 1794 sp->u.bsg_job = bsg_job; 1795 sp->free = qla2x00_bsg_sp_free; 1796 
sp->type = SRB_BIDI_CMD; 1797 sp->done = qla2x00_bsg_job_done; 1798 1799 /* Add the read and write sg count */ 1800 tot_dsds = rsp_sg_cnt + req_sg_cnt; 1801 1802 rval = qla2x00_start_bidir(sp, vha, tot_dsds); 1803 if (rval != EXT_STATUS_OK) 1804 goto done_free_srb; 1805 /* the bsg request will be completed in the interrupt handler */ 1806 return rval; 1807 1808 done_free_srb: 1809 mempool_free(sp, ha->srb_mempool); 1810 done_unmap_sg: 1811 dma_unmap_sg(&ha->pdev->dev, 1812 bsg_job->reply_payload.sg_list, 1813 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1814 done_unmap_req_sg: 1815 dma_unmap_sg(&ha->pdev->dev, 1816 bsg_job->request_payload.sg_list, 1817 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1818 done: 1819 1820 /* Return an error vendor specific response 1821 * and complete the bsg request 1822 */ 1823 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1824 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1825 bsg_job->reply->reply_payload_rcv_len = 0; 1826 bsg_job->reply->result = (DID_OK) << 16; 1827 bsg_job->job_done(bsg_job); 1828 /* Always retrun success, vendor rsp carries correct status */ 1829 return 0; 1830 } 1831 1832 static int 1833 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 1834 { 1835 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { 1836 case QL_VND_LOOPBACK: 1837 return qla2x00_process_loopback(bsg_job); 1838 1839 case QL_VND_A84_RESET: 1840 return qla84xx_reset(bsg_job); 1841 1842 case QL_VND_A84_UPDATE_FW: 1843 return qla84xx_updatefw(bsg_job); 1844 1845 case QL_VND_A84_MGMT_CMD: 1846 return qla84xx_mgmt_cmd(bsg_job); 1847 1848 case QL_VND_IIDMA: 1849 return qla24xx_iidma(bsg_job); 1850 1851 case QL_VND_FCP_PRIO_CFG_CMD: 1852 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); 1853 1854 case QL_VND_READ_FLASH: 1855 return qla2x00_read_optrom(bsg_job); 1856 1857 case QL_VND_UPDATE_FLASH: 1858 return qla2x00_update_optrom(bsg_job); 1859 1860 case QL_VND_SET_FRU_VERSION: 1861 return 
qla2x00_update_fru_versions(bsg_job); 1862 1863 case QL_VND_READ_FRU_STATUS: 1864 return qla2x00_read_fru_status(bsg_job); 1865 1866 case QL_VND_WRITE_FRU_STATUS: 1867 return qla2x00_write_fru_status(bsg_job); 1868 1869 case QL_VND_WRITE_I2C: 1870 return qla2x00_write_i2c(bsg_job); 1871 1872 case QL_VND_READ_I2C: 1873 return qla2x00_read_i2c(bsg_job); 1874 1875 case QL_VND_DIAG_IO_CMD: 1876 return qla24xx_process_bidir_cmd(bsg_job); 1877 1878 default: 1879 return -ENOSYS; 1880 } 1881 } 1882 1883 int 1884 qla24xx_bsg_request(struct fc_bsg_job *bsg_job) 1885 { 1886 int ret = -EINVAL; 1887 struct fc_rport *rport; 1888 fc_port_t *fcport = NULL; 1889 struct Scsi_Host *host; 1890 scsi_qla_host_t *vha; 1891 1892 /* In case no data transferred. */ 1893 bsg_job->reply->reply_payload_rcv_len = 0; 1894 1895 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 1896 rport = bsg_job->rport; 1897 fcport = *(fc_port_t **) rport->dd_data; 1898 host = rport_to_shost(rport); 1899 vha = shost_priv(host); 1900 } else { 1901 host = bsg_job->shost; 1902 vha = shost_priv(host); 1903 } 1904 1905 if (qla2x00_reset_active(vha)) { 1906 ql_dbg(ql_dbg_user, vha, 0x709f, 1907 "BSG: ISP abort active/needed -- cmd=%d.\n", 1908 bsg_job->request->msgcode); 1909 return -EBUSY; 1910 } 1911 1912 ql_dbg(ql_dbg_user, vha, 0x7000, 1913 "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode); 1914 1915 switch (bsg_job->request->msgcode) { 1916 case FC_BSG_RPT_ELS: 1917 case FC_BSG_HST_ELS_NOLOGIN: 1918 ret = qla2x00_process_els(bsg_job); 1919 break; 1920 case FC_BSG_HST_CT: 1921 ret = qla2x00_process_ct(bsg_job); 1922 break; 1923 case FC_BSG_HST_VENDOR: 1924 ret = qla2x00_process_vendor_specific(bsg_job); 1925 break; 1926 case FC_BSG_HST_ADD_RPORT: 1927 case FC_BSG_HST_DEL_RPORT: 1928 case FC_BSG_RPT_CT: 1929 default: 1930 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); 1931 break; 1932 } 1933 return ret; 1934 } 1935 1936 int 1937 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) 
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Slot 0 is skipped: the scan starts at index 1. */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				/* Only CT and host-ELS SRBs carry a bsg_job
				 * in their union; match on this job. */
				if (((sp->type == SRB_CT_CMD) ||
					(sp->type == SRB_ELS_CMD_HST))
					&& (sp->u.bsg_job == bsg_job)) {
					/* abort_command() issues a mailbox
					 * command, so the hardware lock is
					 * dropped around it and reacquired
					 * before leaving via 'done'. */
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* CT pass-through SRBs own a kmalloc'd fcport (see
	 * qla2x00_bsg_sp_free); release it before returning the SRB. */
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}