/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

/* BSG support for ELS/CT pass through */
void
qla2x00_bsg_job_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	sp->free(sp);
}

void
qla2x00_bsg_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(sp);
}

int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
	    bcode[3] != 'S') {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

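/*
 * Handle the FCP priority vendor command.  The sub-command in
 * vendor_cmd[1] selects one of: QLFC_FCP_PRIO_DISABLE/ENABLE (toggle
 * the feature and push the change to all ports),
 * QLFC_FCP_PRIO_GET_CONFIG (copy the cached config to the reply
 * payload), or QLFC_FCP_PRIO_SET_CONFIG (stage, validate and apply a
 * new config).
 */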
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, the
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

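/*
 * ELS pass through.  Jobs arrive either against an rport
 * (FC_BSG_RPT_ELS), in which case the existing fcport is used and a
 * fabric login is performed if needed, or against the host
 * (FC_BSG_HST_ELS_NOLOGIN), in which case a temporary fcport is
 * allocated and addressed by the D_ID supplied in the request.
 */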
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	/* Only the dummy fcport allocated for host-based ELS is ours to
	 * free; the rport case borrows the existing fcport.
	 */
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		kfree(fcport);
done:
	return rval;
}

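/*
 * Estimate the number of IOCBs a CT pass-through consumes: the command
 * IOCB carries the first two data-segment descriptors and each
 * continuation IOCB carries up to five more, i.e.
 * iocbs = 1 + DIV_ROUND_UP(dsds - 2, 5) for dsds > 2 (so, for example,
 * 8 DSDs need one command IOCB plus two continuations).
 */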
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

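/*
 * CT pass through.  The destination handle is derived from the CT
 * preamble: well-known address 0xFC maps to the SNS/directory server
 * handle (NPH_SNS) and 0xFA to the management server login
 * (mgmt_svr_loop_id); anything else is rejected.
 */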
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

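/*
 * The loopback bits are cleared via qla81xx_set_port_config().  'wait'
 * asks for a DCBX completion to be signalled before returning and
 * 'wait2' for a loopback-port-up completion, each bounded by its own
 * timeout (DCBX_COMP_TIMEOUT / LB_PORTUP_COMP_TIMEOUT).
 */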
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event; re-arm the wait whenever another
	 * context extends the deadline through ha->idc_extend_tmo before
	 * the current timeout expires.
	 */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

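/*
 * Loopback/echo diagnostic.  With the link up and either an F-port
 * topology or an 81xx/8031/8044 CNA carrying a full-sized ELS frame,
 * an external-loopback request is serviced as an ECHO test; otherwise
 * the port is put into the requested loopback mode (saving the old
 * port config so it can be restored afterwards) and a loopback test
 * is run.
 */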
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
	    sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

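/*
 * ISP84xx chip reset.  vendor_cmd[1] selects the firmware the chip
 * comes back up on: A84_ISSUE_RESET_DIAG_FW requests the diagnostic
 * image (second argument of qla84xx_reset_chip()), anything else a
 * normal reset.
 */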
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

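/*
 * ISP84xx firmware update.  The new image is staged in a coherent DMA
 * buffer and handed to the chip with a Verify Chip IOCB.  The firmware
 * version is read from the third dword of the image, and
 * A84_ISSUE_UPDATE_DIAGFW_CMD flags the image as diagnostic firmware
 * (VCO_DIAG_FW).
 */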
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

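/*
 * ISP84xx management pass through (Access Chip IOCB).  The DMA
 * direction follows the opcode: READ_MEM/GET_INFO map the reply
 * payload for the chip to write into, WRITE_MEM maps the request
 * payload, and CHNG_CONFIG carries its parameters entirely in the
 * IOCB with no data buffer.
 */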
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

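/*
 * Get or set the iIDMA port speed of a logged-in target port, selected
 * by WWPN.  port_param->mode non-zero means "set"; on a "get" the
 * updated qla_port_param is appended after the fc_bsg_reply.
 */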
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iIDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

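/*
 * Validate an option-ROM read/update request and stage a buffer for it.
 * The accessible window is clamped to the flash:
 * region_size = min(start + payload_len, optrom_size) - start.
 * Callers hold ha->optrom_mutex; the state machine moves from
 * QLA_SWAITING to QLA_SREADING or QLA_SWRITING here.
 */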
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
	return 0;
}

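/*
 * Read a region of the option ROM through the staging buffer set up by
 * qla2x00_optrom_setup() and copy it into the reply payload.  The whole
 * sequence runs under ha->optrom_mutex.
 */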
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

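/*
 * Write a list of FRU image-version records to the addressed device.
 * Each qla_image_version entry is bounced through a buffer from
 * ha->s_dma_pool and written with qla2x00_write_sfp(); the vendor
 * response reports EXT_STATUS_OK, _NO_MEMORY or _MAILBOX.
 */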
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

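/*
 * Counterpart of qla2x00_read_fru_status(): copy in the caller's
 * qla_status_reg, write the single status byte to the addressed device
 * with qla2x00_write_sfp() and report the result in the vendor
 * response.
 */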
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

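/*
 * Bidirectional pass-through diagnostic.  The command is accepted only
 * when the adapter is BIDI capable, online, cabled, attached to a
 * switch (F-port) and in P2P mode, and the request and reply payloads
 * are the same length.  The host logs into itself once (self login)
 * and reuses that loop id for subsequent commands; completion status
 * is reported through the vendor response while bsg_reply->result
 * stays DID_OK.
 */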
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Fetch the payload lengths before comparing them, so the check
	 * really does reject mismatched request/reply sizes.
	 */
	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate srb->ctx with bidir ctx */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}

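/*
 * ISPFx00 management pass through.  The qla_mt_iocb_rqst_fx00 embedded
 * in the vendor command drives the transfer: request and/or reply
 * payloads are DMA-mapped only when the corresponding
 * SRB_FXDISC_{REQ,RESP}_DMA_VALID flag is set, and the same flags tell
 * qla2x00_bsg_sp_free() what to unmap on completion.
 */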
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}

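/*
 * Single-word SerDes register access.  The qla_serdes_reg in the
 * request selects read or write; on a read the same structure is
 * copied back with the fetched value.
 */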
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70cf,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
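
/*
 * qla27xx_get_flash_upd_cap()/qla27xx_set_flash_upd_cap() - Report and
 * validate the firmware's flash-update capabilities. The four 16-bit
 * firmware attribute words are packed into one 64-bit value: bits
 * 63:48 = fw_attributes_ext[1], 47:32 = fw_attributes_ext[0],
 * 31:16 = fw_attributes_h, 15:0 = fw_attributes. The set path only
 * sanity-checks the caller's view: the capabilities must match the
 * online firmware and the requested outage duration must be at least
 * MAX_LOOP_TIMEOUT.
 */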
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
			   (uint64_t)ha->fw_attributes_ext[0] << 32 |
			   (uint64_t)ha->fw_attributes_h << 16 |
			   (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
			 (uint64_t)ha->fw_attributes_ext[0] << 32 |
			 (uint64_t)ha->fw_attributes_h << 16 |
			 (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
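
/*
 * qla2x00_get_priv_stats() - Return the adapter's link statistics to
 * user space. For QL_VND_GET_PRIV_STATS_EX the second vendor command
 * word selects the options passed to qla24xx_get_isp_stats(); the
 * plain QL_VND_GET_PRIV_STATS opcode implies options 0.
 */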
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev,
	    sizeof(*stats), &stats_dma, GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	memset(stats, 0, sizeof(*stats));

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
		    (uint8_t *)stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
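
/*
 * qla2x00_do_dport_diagnostics() - Run D-Port diagnostics on
 * ISP83xx/ISP27xx adapters. The option word and the result buffer
 * travel in a struct qla_dport_diag; the mailbox status is reported
 * through vendor_rsp[0].
 */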
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}

static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	default:
		return -ENOSYS;
	}
}
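
/*
 * qla24xx_bsg_request() - Entry point for FC BSG requests: ELS and CT
 * pass-through plus the QL_VND_* vendor commands dispatched above.
 *
 * For illustration only (a sketch, not part of the driver): user space
 * typically reaches this entry point by opening the fc_host bsg node
 * (e.g. /dev/bsg/fc_host0 -- naming may vary by kernel version) and
 * issuing an SG_IO ioctl with a struct sg_io_v4 from
 * include/uapi/linux/bsg.h, roughly:
 *
 *	struct sg_io_v4 io = {
 *		.guard		= 'Q',
 *		.protocol	= BSG_PROTOCOL_SCSI,
 *		.subprotocol	= BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
 *		.request	= (uintptr_t)req,    [struct fc_bsg_request
 *		.request_len	= req_len,            with msgcode
 *						      FC_BSG_HST_VENDOR and the
 *						      QL_VND_* opcode in
 *						      vendor_cmd[0]]
 *		.din_xferp	= (uintptr_t)buf,    [reply payload buffer]
 *		.din_xfer_len	= buf_len,
 *	};
 *	ioctl(fd, SG_IO, &io);
 *
 * Note that vendor_cmd[] in struct fc_bsg_request is a flexible array,
 * so the request buffer must be sized to include the command words.
 */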
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data is transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (!sp)
				continue;
			if ((sp->type == SRB_CT_CMD ||
			     sp->type == SRB_ELS_CMD_HST ||
			     sp->type == SRB_FXIOCB_BCMD) &&
			    sp->u.bsg_job == bsg_job) {
				req->outstanding_cmds[cnt] = NULL;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				if (ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					scsi_req(bsg_job->req)->result =
					bsg_reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					scsi_req(bsg_job->req)->result =
					bsg_reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	scsi_req(bsg_job->req)->result = bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(sp);
	return 0;
}